Bug 1492121 - Copy most of Gecko Profiler code to mozglue/baseprofiler - r=njn

Almost-straight copy of a subset of files from tools/profiler to
mozglue/baseprofiler.
Some minor changes first:
- Reduced moz.build to only mention actually-copied files.
- Headers in 'public' prefixed with "Base" (to distinguish them from their
  originals, in case they later get #included from the same units).
- Also copied profiling categories from js/src/vm/GeckoProfiler.cpp to
  ProfilingCategory.cpp, and copied js/src/vm/ProfilingStack.cpp, and their
  respective headers -- as they are needed for a significant part of
  the profiler API, and are not strictly js-specific.

baseprofiler is not yet added to the parent mozglue/moz.build, so it won't be built yet.

Differential Revision: https://phabricator.services.mozilla.com/D31923

--HG--
rename : tools/profiler/core/EHABIStackWalk.cpp => mozglue/baseprofiler/core/EHABIStackWalk.cpp
rename : tools/profiler/core/EHABIStackWalk.h => mozglue/baseprofiler/core/EHABIStackWalk.h
rename : tools/profiler/core/PageInformation.cpp => mozglue/baseprofiler/core/PageInformation.cpp
rename : tools/profiler/core/PageInformation.h => mozglue/baseprofiler/core/PageInformation.h
rename : tools/profiler/core/PlatformMacros.h => mozglue/baseprofiler/core/PlatformMacros.h
rename : tools/profiler/core/ProfileBuffer.cpp => mozglue/baseprofiler/core/ProfileBuffer.cpp
rename : tools/profiler/core/ProfileBuffer.h => mozglue/baseprofiler/core/ProfileBuffer.h
rename : tools/profiler/core/ProfileBufferEntry.cpp => mozglue/baseprofiler/core/ProfileBufferEntry.cpp
rename : tools/profiler/core/ProfileBufferEntry.h => mozglue/baseprofiler/core/ProfileBufferEntry.h
rename : tools/profiler/core/ProfileJSONWriter.cpp => mozglue/baseprofiler/core/ProfileJSONWriter.cpp
rename : tools/profiler/core/ProfiledThreadData.cpp => mozglue/baseprofiler/core/ProfiledThreadData.cpp
rename : tools/profiler/core/ProfiledThreadData.h => mozglue/baseprofiler/core/ProfiledThreadData.h
rename : tools/profiler/core/ProfilerBacktrace.cpp => mozglue/baseprofiler/core/ProfilerBacktrace.cpp
rename : tools/profiler/core/ProfilerBacktrace.h => mozglue/baseprofiler/core/ProfilerBacktrace.h
rename : tools/profiler/core/ProfilerMarker.h => mozglue/baseprofiler/core/ProfilerMarker.h
rename : tools/profiler/core/ProfilerMarkerPayload.cpp => mozglue/baseprofiler/core/ProfilerMarkerPayload.cpp
rename : js/src/vm/GeckoProfiler.cpp => mozglue/baseprofiler/core/ProfilingCategory.cpp
rename : js/src/vm/ProfilingStack.cpp => mozglue/baseprofiler/core/ProfilingStack.cpp
rename : tools/profiler/core/RegisteredThread.cpp => mozglue/baseprofiler/core/RegisteredThread.cpp
rename : tools/profiler/core/RegisteredThread.h => mozglue/baseprofiler/core/RegisteredThread.h
rename : tools/profiler/core/ThreadInfo.h => mozglue/baseprofiler/core/ThreadInfo.h
rename : tools/profiler/core/VTuneProfiler.cpp => mozglue/baseprofiler/core/VTuneProfiler.cpp
rename : tools/profiler/core/VTuneProfiler.h => mozglue/baseprofiler/core/VTuneProfiler.h
rename : tools/profiler/core/platform-linux-android.cpp => mozglue/baseprofiler/core/platform-linux-android.cpp
rename : tools/profiler/core/platform-macos.cpp => mozglue/baseprofiler/core/platform-macos.cpp
rename : tools/profiler/core/platform-win32.cpp => mozglue/baseprofiler/core/platform-win32.cpp
rename : tools/profiler/core/platform.cpp => mozglue/baseprofiler/core/platform.cpp
rename : tools/profiler/core/platform.h => mozglue/baseprofiler/core/platform.h
rename : tools/profiler/core/shared-libraries-linux.cc => mozglue/baseprofiler/core/shared-libraries-linux.cc
rename : tools/profiler/core/shared-libraries-macos.cc => mozglue/baseprofiler/core/shared-libraries-macos.cc
rename : tools/profiler/core/shared-libraries-win32.cc => mozglue/baseprofiler/core/shared-libraries-win32.cc
rename : tools/profiler/core/vtune/ittnotify.h => mozglue/baseprofiler/core/vtune/ittnotify.h
rename : tools/profiler/lul/AutoObjectMapper.cpp => mozglue/baseprofiler/lul/AutoObjectMapper.cpp
rename : tools/profiler/lul/AutoObjectMapper.h => mozglue/baseprofiler/lul/AutoObjectMapper.h
rename : tools/profiler/lul/LulCommon.cpp => mozglue/baseprofiler/lul/LulCommon.cpp
rename : tools/profiler/lul/LulCommonExt.h => mozglue/baseprofiler/lul/LulCommonExt.h
rename : tools/profiler/lul/LulDwarf.cpp => mozglue/baseprofiler/lul/LulDwarf.cpp
rename : tools/profiler/lul/LulDwarfExt.h => mozglue/baseprofiler/lul/LulDwarfExt.h
rename : tools/profiler/lul/LulDwarfInt.h => mozglue/baseprofiler/lul/LulDwarfInt.h
rename : tools/profiler/lul/LulDwarfSummariser.cpp => mozglue/baseprofiler/lul/LulDwarfSummariser.cpp
rename : tools/profiler/lul/LulDwarfSummariser.h => mozglue/baseprofiler/lul/LulDwarfSummariser.h
rename : tools/profiler/lul/LulElf.cpp => mozglue/baseprofiler/lul/LulElf.cpp
rename : tools/profiler/lul/LulElfExt.h => mozglue/baseprofiler/lul/LulElfExt.h
rename : tools/profiler/lul/LulElfInt.h => mozglue/baseprofiler/lul/LulElfInt.h
rename : tools/profiler/lul/LulMain.cpp => mozglue/baseprofiler/lul/LulMain.cpp
rename : tools/profiler/lul/LulMain.h => mozglue/baseprofiler/lul/LulMain.h
rename : tools/profiler/lul/LulMainInt.h => mozglue/baseprofiler/lul/LulMainInt.h
rename : tools/profiler/lul/platform-linux-lul.cpp => mozglue/baseprofiler/lul/platform-linux-lul.cpp
rename : tools/profiler/lul/platform-linux-lul.h => mozglue/baseprofiler/lul/platform-linux-lul.h
rename : tools/profiler/moz.build => mozglue/baseprofiler/moz.build
rename : tools/profiler/public/ProfileJSONWriter.h => mozglue/baseprofiler/public/BaseProfileJSONWriter.h
rename : tools/profiler/public/GeckoProfiler.h => mozglue/baseprofiler/public/BaseProfiler.h
rename : tools/profiler/public/ProfilerCounts.h => mozglue/baseprofiler/public/BaseProfilerCounts.h
rename : tools/profiler/public/ProfilerMarkerPayload.h => mozglue/baseprofiler/public/BaseProfilerMarkerPayload.h
rename : tools/profiler/public/shared-libraries.h => mozglue/baseprofiler/public/BaseProfilerSharedLibraries.h
rename : js/public/ProfilingCategory.h => mozglue/baseprofiler/public/BaseProfilingCategory.h
rename : js/public/ProfilingStack.h => mozglue/baseprofiler/public/BaseProfilingStack.h
extra : moz-landing-system : lando
This commit is contained in:
Gerald Squelart 2019-06-04 06:52:15 +00:00
Parent abbd1fe8d2
Commit 38eb2d609e
57 changed files: 28026 additions and 0 deletions


@@ -0,0 +1,641 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* This is an implementation of stack unwinding according to a subset
* of the ARM Exception Handling ABI, as described in:
* http://infocenter.arm.com/help/topic/com.arm.doc.ihi0038a/IHI0038A_ehabi.pdf
*
* This handles only the ARM-defined "personality routines" (chapter
* 9), and doesn't track the values of FP registers, because profiling
* needs only the chain of PC/SP values.
*
* Because the exception handling info may not be accurate for all
* possible places where an async signal could occur (e.g., in a
* prologue or epilogue), this bounds-checks all stack accesses.
*
* This file uses "struct" for structures in the exception tables and
* "class" otherwise. We should avoid violating the C++11
* standard-layout rules in the former.
*/
#include "EHABIStackWalk.h"
#include "BaseProfilerSharedLibraries.h"
#include "platform.h"
#include "mozilla/Atomics.h"
#include "mozilla/Attributes.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/EndianUtils.h"
#include <algorithm>
#include <elf.h>
#include <stdint.h>
#include <vector>
#include <string>
#ifndef PT_ARM_EXIDX
# define PT_ARM_EXIDX 0x70000001
#endif
// Bug 1082817: ICS B2G has a buggy linker that doesn't always ensure
// that the EXIDX is sorted by address, as the spec requires. So in
// that case we build and sort an array of pointers into the index,
// and binary-search that; otherwise, we search the index in place
// (avoiding the time and space overhead of the indirection).
#if defined(ANDROID_VERSION) && ANDROID_VERSION < 16
# define HAVE_UNSORTED_EXIDX
#endif
namespace mozilla {
struct PRel31 {
uint32_t mBits;
bool topBit() const { return mBits & 0x80000000; }
uint32_t value() const { return mBits & 0x7fffffff; }
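// Sign-extend the 31-bit field: the left shift drops the top bit, and the
// arithmetic right shift propagates bit 30 as the sign.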
int32_t offset() const { return (static_cast<int32_t>(mBits) << 1) >> 1; }
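// PREL31 values are self-relative: the target address is the address of
// this word plus the decoded offset.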
const void* compute() const {
return reinterpret_cast<const char*>(this) + offset();
}
private:
PRel31(const PRel31& copied) = delete;
PRel31() = delete;
};
struct EHEntry {
PRel31 startPC;
PRel31 exidx;
private:
EHEntry(const EHEntry& copied) = delete;
EHEntry() = delete;
};
class EHState {
// Note that any core register can be used as a "frame pointer" to
// influence the unwinding process, so this must track all of them.
uint32_t mRegs[16];
public:
bool unwind(const EHEntry* aEntry, const void* stackBase);
uint32_t& operator[](int i) { return mRegs[i]; }
const uint32_t& operator[](int i) const { return mRegs[i]; }
explicit EHState(const mcontext_t&);
};
enum { R_SP = 13, R_LR = 14, R_PC = 15 };
#ifdef HAVE_UNSORTED_EXIDX
class EHEntryHandle {
const EHEntry* mValue;
public:
EHEntryHandle(const EHEntry* aEntry) : mValue(aEntry) {}
const EHEntry* value() const { return mValue; }
};
bool operator<(const EHEntryHandle& lhs, const EHEntryHandle& rhs) {
return lhs.value()->startPC.compute() < rhs.value()->startPC.compute();
}
#endif
class EHTable {
uint32_t mStartPC;
uint32_t mEndPC;
uint32_t mBaseAddress;
#ifdef HAVE_UNSORTED_EXIDX
// In principle we should be able to binary-search the index section in
// place, but the ICS toolchain's linker is noncompliant and produces
// indices that aren't entirely sorted (e.g., libc). So we have this:
std::vector<EHEntryHandle> mEntries;
typedef std::vector<EHEntryHandle>::const_iterator EntryIterator;
EntryIterator entriesBegin() const { return mEntries.begin(); }
EntryIterator entriesEnd() const { return mEntries.end(); }
static const EHEntry* entryGet(EntryIterator aEntry) {
return aEntry->value();
}
#else
typedef const EHEntry* EntryIterator;
EntryIterator mEntriesBegin, mEntriesEnd;
EntryIterator entriesBegin() const { return mEntriesBegin; }
EntryIterator entriesEnd() const { return mEntriesEnd; }
static const EHEntry* entryGet(EntryIterator aEntry) { return aEntry; }
#endif
std::string mName;
public:
EHTable(const void* aELF, size_t aSize, const std::string& aName);
const EHEntry* lookup(uint32_t aPC) const;
bool isValid() const { return entriesEnd() != entriesBegin(); }
const std::string& name() const { return mName; }
uint32_t startPC() const { return mStartPC; }
uint32_t endPC() const { return mEndPC; }
uint32_t baseAddress() const { return mBaseAddress; }
};
class EHAddrSpace {
std::vector<uint32_t> mStarts;
std::vector<EHTable> mTables;
static mozilla::Atomic<const EHAddrSpace*> sCurrent;
public:
explicit EHAddrSpace(const std::vector<EHTable>& aTables);
const EHTable* lookup(uint32_t aPC) const;
static void Update();
static const EHAddrSpace* Get();
};
void EHABIStackWalkInit() { EHAddrSpace::Update(); }
size_t EHABIStackWalk(const mcontext_t& aContext, void* stackBase, void** aSPs,
void** aPCs, const size_t aNumFrames) {
const EHAddrSpace* space = EHAddrSpace::Get();
EHState state(aContext);
size_t count = 0;
while (count < aNumFrames) {
uint32_t pc = state[R_PC], sp = state[R_SP];
aPCs[count] = reinterpret_cast<void*>(pc);
aSPs[count] = reinterpret_cast<void*>(sp);
count++;
if (!space) break;
// TODO: cache these lookups. Binary-searching libxul is
// expensive (possibly more expensive than doing the actual
// unwind), and even a small cache should help.
const EHTable* table = space->lookup(pc);
if (!table) break;
const EHEntry* entry = table->lookup(pc);
if (!entry) break;
if (!state.unwind(entry, stackBase)) break;
}
return count;
}
class EHInterp {
public:
// Note that stackLimit is exclusive and stackBase is inclusive
// (i.e., stackLimit < SP <= stackBase), following the convention
// set by the AAPCS spec.
EHInterp(EHState& aState, const EHEntry* aEntry, uint32_t aStackLimit,
uint32_t aStackBase)
: mState(aState),
mStackLimit(aStackLimit),
mStackBase(aStackBase),
mNextWord(0),
mWordsLeft(0),
mFailed(false) {
const PRel31& exidx = aEntry->exidx;
uint32_t firstWord;
if (exidx.mBits == 1) { // EXIDX_CANTUNWIND
mFailed = true;
return;
}
if (exidx.topBit()) {
firstWord = exidx.mBits;
} else {
mNextWord = reinterpret_cast<const uint32_t*>(exidx.compute());
firstWord = *mNextWord++;
}
switch (firstWord >> 24) {
case 0x80: // short
mWord = firstWord << 8;
mBytesLeft = 3;
break;
case 0x81:
case 0x82: // long; catch descriptor size ignored
mWord = firstWord << 16;
mBytesLeft = 2;
mWordsLeft = (firstWord >> 16) & 0xff;
break;
default:
// unknown personality
mFailed = true;
}
}
bool unwind();
private:
// TODO: GCC has been observed not CSEing repeated reads of
// mState[R_SP] with writes to mFailed between them, suggesting that
// it hasn't determined that they can't alias and is thus missing
// optimization opportunities. So, we may want to flatten EHState
// into this class; this may also make the code simpler.
EHState& mState;
uint32_t mStackLimit;
uint32_t mStackBase;
const uint32_t* mNextWord;
uint32_t mWord;
uint8_t mWordsLeft;
uint8_t mBytesLeft;
bool mFailed;
enum {
I_ADDSP = 0x00, // 0sxxxxxx (subtract if s)
M_ADDSP = 0x80,
I_POPMASK = 0x80, // 1000iiii iiiiiiii (if any i set)
M_POPMASK = 0xf0,
I_MOVSP = 0x90, // 1001nnnn
M_MOVSP = 0xf0,
I_POPN = 0xa0, // 1010lnnn
M_POPN = 0xf0,
I_FINISH = 0xb0, // 10110000
I_POPLO = 0xb1, // 10110001 0000iiii (if any i set)
I_ADDSPBIG = 0xb2, // 10110010 uleb128
I_POPFDX = 0xb3, // 10110011 sssscccc
I_POPFDX8 = 0xb8, // 10111nnn
M_POPFDX8 = 0xf8,
// "Intel Wireless MMX" extensions omitted.
I_POPFDD = 0xc8, // 1100100h sssscccc
M_POPFDD = 0xfe,
I_POPFDD8 = 0xd0, // 11010nnn
M_POPFDD8 = 0xf8
};
uint8_t next() {
if (mBytesLeft == 0) {
if (mWordsLeft == 0) {
return I_FINISH;
}
mWordsLeft--;
mWord = *mNextWord++;
mBytesLeft = 4;
}
mBytesLeft--;
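// Rotate left by 8 so the high byte (the next instruction byte) lands in
// the low byte; the uint8_t return type truncates to just that byte.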
mWord = (mWord << 8) | (mWord >> 24); // rotate
return mWord;
}
uint32_t& vSP() { return mState[R_SP]; }
uint32_t* ptrSP() { return reinterpret_cast<uint32_t*>(vSP()); }
void checkStackBase() {
if (vSP() > mStackBase) mFailed = true;
}
void checkStackLimit() {
if (vSP() <= mStackLimit) mFailed = true;
}
void checkStackAlign() {
if ((vSP() & 3) != 0) mFailed = true;
}
void checkStack() {
checkStackBase();
checkStackLimit();
checkStackAlign();
}
void popRange(uint8_t first, uint8_t last, uint16_t mask) {
bool hasSP = false;
uint32_t tmpSP;
if (mask == 0) mFailed = true;
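// If SP itself is in the popped range, stash the popped value and install
// it only after the other registers have been read from the old stack.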
for (uint8_t r = first; r <= last; ++r) {
if (mask & 1) {
if (r == R_SP) {
hasSP = true;
tmpSP = *ptrSP();
} else
mState[r] = *ptrSP();
vSP() += 4;
checkStackBase();
if (mFailed) return;
}
mask >>= 1;
}
if (hasSP) {
vSP() = tmpSP;
checkStack();
}
}
};
bool EHState::unwind(const EHEntry* aEntry, const void* stackBasePtr) {
// The unwinding program cannot set SP to less than the initial value.
uint32_t stackLimit = mRegs[R_SP] - 4;
uint32_t stackBase = reinterpret_cast<uint32_t>(stackBasePtr);
EHInterp interp(*this, aEntry, stackLimit, stackBase);
return interp.unwind();
}
bool EHInterp::unwind() {
mState[R_PC] = 0;
checkStack();
while (!mFailed) {
uint8_t insn = next();
#ifdef DEBUG_EHABI_UNWIND
LOG("unwind insn = %02x", (unsigned)insn);
#endif
// Try to put the common cases first.
// 00xxxxxx: vsp = vsp + (xxxxxx << 2) + 4
// 01xxxxxx: vsp = vsp - (xxxxxx << 2) - 4
if ((insn & M_ADDSP) == I_ADDSP) {
uint32_t offset = ((insn & 0x3f) << 2) + 4;
if (insn & 0x40) {
vSP() -= offset;
checkStackLimit();
} else {
vSP() += offset;
checkStackBase();
}
continue;
}
// 10100nnn: Pop r4-r[4+nnn]
// 10101nnn: Pop r4-r[4+nnn], r14
if ((insn & M_POPN) == I_POPN) {
uint8_t n = (insn & 0x07) + 1;
bool lr = insn & 0x08;
uint32_t* ptr = ptrSP();
vSP() += (n + (lr ? 1 : 0)) * 4;
checkStackBase();
for (uint8_t r = 4; r < 4 + n; ++r) mState[r] = *ptr++;
if (lr) mState[R_LR] = *ptr++;
continue;
}
// 10110000: Finish
if (insn == I_FINISH) {
if (mState[R_PC] == 0) {
mState[R_PC] = mState[R_LR];
// Non-standard change (bug 916106): Prevent the caller from
// re-using LR. Since the caller is by definition not a leaf
// routine, it will have to restore LR from somewhere to
// return to its own caller, so we can safely zero it here.
// This makes a difference only if an error in unwinding
// (e.g., caused by starting from within a prologue/epilogue)
// causes us to load a pointer to a leaf routine as LR; if we
// don't do something, we'll go into an infinite loop of
// "returning" to that same function.
mState[R_LR] = 0;
}
return true;
}
// 1001nnnn: Set vsp = r[nnnn]
if ((insn & M_MOVSP) == I_MOVSP) {
vSP() = mState[insn & 0x0f];
checkStack();
continue;
}
// 11001000 sssscccc: Pop VFP regs D[16+ssss]-D[16+ssss+cccc] (as FLDMFDD)
// 11001001 sssscccc: Pop VFP regs D[ssss]-D[ssss+cccc] (as FLDMFDD)
if ((insn & M_POPFDD) == I_POPFDD) {
uint8_t n = (next() & 0x0f) + 1;
// Note: if the 16+ssss+cccc > 31, the encoding is reserved.
// As the space is currently unused, we don't try to check.
vSP() += 8 * n;
checkStackBase();
continue;
}
// 11010nnn: Pop VFP regs D[8]-D[8+nnn] (as FLDMFDD)
if ((insn & M_POPFDD8) == I_POPFDD8) {
uint8_t n = (insn & 0x07) + 1;
vSP() += 8 * n;
checkStackBase();
continue;
}
// 10110010 uleb128: vsp = vsp + 0x204 + (uleb128 << 2)
if (insn == I_ADDSPBIG) {
uint32_t acc = 0;
uint8_t shift = 0;
uint8_t byte;
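// Decode the ULEB128 operand: 7 payload bits per byte, with the high bit
// set on every byte except the last.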
do {
if (shift >= 32) return false;
byte = next();
acc |= (byte & 0x7f) << shift;
shift += 7;
} while (byte & 0x80);
uint32_t offset = 0x204 + (acc << 2);
// The calculations above could have overflowed.
// But the one we care about is this:
if (vSP() + offset < vSP()) mFailed = true;
vSP() += offset;
// ...so that this is the only other check needed:
checkStackBase();
continue;
}
// 1000iiii iiiiiiii (i not all 0): Pop under masks {r15-r12}, {r11-r4}
if ((insn & M_POPMASK) == I_POPMASK) {
popRange(4, 15, ((insn & 0x0f) << 8) | next());
continue;
}
// 10110001 0000iiii (i not all 0): Pop under mask {r3-r0}
if (insn == I_POPLO) {
popRange(0, 3, next() & 0x0f);
continue;
}
// 10110011 sssscccc: Pop VFP regs D[ssss]-D[ssss+cccc] (as FLDMFDX)
if (insn == I_POPFDX) {
uint8_t n = (next() & 0x0f) + 1;
vSP() += 8 * n + 4;
checkStackBase();
continue;
}
// 10111nnn: Pop VFP regs D[8]-D[8+nnn] (as FLDMFDX)
if ((insn & M_POPFDX8) == I_POPFDX8) {
uint8_t n = (insn & 0x07) + 1;
vSP() += 8 * n + 4;
checkStackBase();
continue;
}
// unhandled instruction
#ifdef DEBUG_EHABI_UNWIND
LOG("Unhandled EHABI instruction 0x%02x", insn);
#endif
mFailed = true;
}
return false;
}
bool operator<(const EHTable& lhs, const EHTable& rhs) {
return lhs.startPC() < rhs.startPC();
}
// Async signal unsafe.
EHAddrSpace::EHAddrSpace(const std::vector<EHTable>& aTables)
: mTables(aTables) {
std::sort(mTables.begin(), mTables.end());
DebugOnly<uint32_t> lastEnd = 0;
for (std::vector<EHTable>::iterator i = mTables.begin(); i != mTables.end();
++i) {
MOZ_ASSERT(i->startPC() >= lastEnd);
mStarts.push_back(i->startPC());
lastEnd = i->endPC();
}
}
const EHTable* EHAddrSpace::lookup(uint32_t aPC) const {
ptrdiff_t i = (std::upper_bound(mStarts.begin(), mStarts.end(), aPC) -
mStarts.begin()) -
1;
if (i < 0 || aPC >= mTables[i].endPC()) return 0;
return &mTables[i];
}
const EHEntry* EHTable::lookup(uint32_t aPC) const {
MOZ_ASSERT(aPC >= mStartPC);
if (aPC >= mEndPC) return nullptr;
EntryIterator begin = entriesBegin();
EntryIterator end = entriesEnd();
MOZ_ASSERT(begin < end);
if (aPC < reinterpret_cast<uint32_t>(entryGet(begin)->startPC.compute()))
return nullptr;
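// Binary search for the last entry whose startPC is <= aPC; |begin| always
// satisfies that condition, and |end| never does.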
while (end - begin > 1) {
#ifdef EHABI_UNWIND_MORE_ASSERTS
if (entryGet(end - 1)->startPC.compute() <
entryGet(begin)->startPC.compute()) {
MOZ_CRASH("unsorted exidx");
}
#endif
EntryIterator mid = begin + (end - begin) / 2;
if (aPC < reinterpret_cast<uint32_t>(entryGet(mid)->startPC.compute()))
end = mid;
else
begin = mid;
}
return entryGet(begin);
}
#if MOZ_LITTLE_ENDIAN
static const unsigned char hostEndian = ELFDATA2LSB;
#elif MOZ_BIG_ENDIAN
static const unsigned char hostEndian = ELFDATA2MSB;
#else
# error "No endian?"
#endif
// Async signal unsafe: std::vector::reserve, std::string copy ctor.
EHTable::EHTable(const void* aELF, size_t aSize, const std::string& aName)
: mStartPC(~0), // largest uint32_t
mEndPC(0),
#ifndef HAVE_UNSORTED_EXIDX
mEntriesBegin(nullptr),
mEntriesEnd(nullptr),
#endif
mName(aName) {
const uint32_t fileHeaderAddr = reinterpret_cast<uint32_t>(aELF);
if (aSize < sizeof(Elf32_Ehdr)) return;
const Elf32_Ehdr& file = *(reinterpret_cast<Elf32_Ehdr*>(fileHeaderAddr));
if (memcmp(&file.e_ident[EI_MAG0], ELFMAG, SELFMAG) != 0 ||
file.e_ident[EI_CLASS] != ELFCLASS32 ||
file.e_ident[EI_DATA] != hostEndian ||
file.e_ident[EI_VERSION] != EV_CURRENT || file.e_machine != EM_ARM ||
file.e_version != EV_CURRENT)
// e_flags?
return;
MOZ_ASSERT(file.e_phoff + file.e_phnum * file.e_phentsize <= aSize);
const Elf32_Phdr *exidxHdr = 0, *zeroHdr = 0;
for (unsigned i = 0; i < file.e_phnum; ++i) {
const Elf32_Phdr& phdr = *(reinterpret_cast<Elf32_Phdr*>(
fileHeaderAddr + file.e_phoff + i * file.e_phentsize));
if (phdr.p_type == PT_ARM_EXIDX) {
exidxHdr = &phdr;
} else if (phdr.p_type == PT_LOAD) {
if (phdr.p_offset == 0) {
zeroHdr = &phdr;
}
if (phdr.p_flags & PF_X) {
mStartPC = std::min(mStartPC, phdr.p_vaddr);
mEndPC = std::max(mEndPC, phdr.p_vaddr + phdr.p_memsz);
}
}
}
if (!exidxHdr) return;
if (!zeroHdr) return;
mBaseAddress = fileHeaderAddr - zeroHdr->p_vaddr;
mStartPC += mBaseAddress;
mEndPC += mBaseAddress;
// Create a sorted index of the index to work around linker bugs.
const EHEntry* startTable =
reinterpret_cast<const EHEntry*>(mBaseAddress + exidxHdr->p_vaddr);
const EHEntry* endTable = reinterpret_cast<const EHEntry*>(
mBaseAddress + exidxHdr->p_vaddr + exidxHdr->p_memsz);
#ifdef HAVE_UNSORTED_EXIDX
mEntries.reserve(endTable - startTable);
for (const EHEntry* i = startTable; i < endTable; ++i) mEntries.push_back(i);
std::sort(mEntries.begin(), mEntries.end());
#else
mEntriesBegin = startTable;
mEntriesEnd = endTable;
#endif
}
mozilla::Atomic<const EHAddrSpace*> EHAddrSpace::sCurrent(nullptr);
// Async signal safe; can fail if Update() hasn't returned yet.
const EHAddrSpace* EHAddrSpace::Get() { return sCurrent; }
// Collect unwinding information from loaded objects. Calls after the
// first have no effect. Async signal unsafe.
void EHAddrSpace::Update() {
const EHAddrSpace* space = sCurrent;
if (space) return;
SharedLibraryInfo info = SharedLibraryInfo::GetInfoForSelf();
std::vector<EHTable> tables;
for (size_t i = 0; i < info.GetSize(); ++i) {
const SharedLibrary& lib = info.GetEntry(i);
// FIXME: This isn't correct if the start address isn't p_offset 0, because
// the start address will not point at the file header. But this is worked
// around by magic number checks in the EHTable constructor.
EHTable tab(reinterpret_cast<const void*>(lib.GetStart()),
lib.GetEnd() - lib.GetStart(), lib.GetNativeDebugPath());
if (tab.isValid()) tables.push_back(tab);
}
space = new EHAddrSpace(tables);
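// Publish atomically: if another thread installed its table first, discard
// ours and adopt the winner's.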
if (!sCurrent.compareExchange(nullptr, space)) {
delete space;
space = sCurrent;
}
}
EHState::EHState(const mcontext_t& context) {
#ifdef linux
mRegs[0] = context.arm_r0;
mRegs[1] = context.arm_r1;
mRegs[2] = context.arm_r2;
mRegs[3] = context.arm_r3;
mRegs[4] = context.arm_r4;
mRegs[5] = context.arm_r5;
mRegs[6] = context.arm_r6;
mRegs[7] = context.arm_r7;
mRegs[8] = context.arm_r8;
mRegs[9] = context.arm_r9;
mRegs[10] = context.arm_r10;
mRegs[11] = context.arm_fp;
mRegs[12] = context.arm_ip;
mRegs[13] = context.arm_sp;
mRegs[14] = context.arm_lr;
mRegs[15] = context.arm_pc;
#else
# error "Unhandled OS for ARM EHABI unwinding"
#endif
}
} // namespace mozilla


@@ -0,0 +1,28 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* This is an implementation of stack unwinding according to a subset
* of the ARM Exception Handling ABI; see the comment at the top of
* the .cpp file for details.
*/
#ifndef mozilla_EHABIStackWalk_h__
#define mozilla_EHABIStackWalk_h__
#include <stddef.h>
#include <ucontext.h>
namespace mozilla {
void EHABIStackWalkInit();
size_t EHABIStackWalk(const mcontext_t& aContext, void* stackBase, void** aSPs,
void** aPCs, size_t aNumFrames);
} // namespace mozilla
#endif


@@ -0,0 +1,37 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "PageInformation.h"
#include "BaseProfileJSONWriter.h"
PageInformation::PageInformation(const nsID& aDocShellId,
uint32_t aDocShellHistoryId,
const nsCString& aUrl, bool aIsSubFrame)
: mDocShellId(aDocShellId),
mDocShellHistoryId(aDocShellHistoryId),
mUrl(aUrl),
mIsSubFrame(aIsSubFrame) {}
bool PageInformation::Equals(PageInformation* aOtherPageInfo) {
return DocShellHistoryId() == aOtherPageInfo->DocShellHistoryId() &&
DocShellId().Equals(aOtherPageInfo->DocShellId()) &&
IsSubFrame() == aOtherPageInfo->IsSubFrame();
}
void PageInformation::StreamJSON(SpliceableJSONWriter& aWriter) {
aWriter.StartObjectElement();
aWriter.StringProperty("docshellId", nsIDToCString(DocShellId()).get());
aWriter.DoubleProperty("historyId", DocShellHistoryId());
aWriter.StringProperty("url", Url().get());
aWriter.BoolProperty("isSubFrame", IsSubFrame());
aWriter.EndObject();
}
size_t PageInformation::SizeOfIncludingThis(
mozilla::MallocSizeOf aMallocSizeOf) const {
return aMallocSizeOf(this);
}


@@ -0,0 +1,62 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef PageInformation_h
#define PageInformation_h
#include "mozilla/Maybe.h"
#include "mozilla/MemoryReporting.h"
#include "nsID.h"
#include "nsISupportsImpl.h"
#include "nsString.h"
class SpliceableJSONWriter;
// This class contains information that's relevant to a single page, kept
// for as long as the page is registered with the profiler, regardless of
// whether the profiler is running. All accesses to it are
// protected by the profiler state lock.
// When the page gets unregistered, we keep the profiler buffer position
// to determine whether we are still using this page. If not, we discard
// it at the next page registration.
class PageInformation final {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PageInformation)
PageInformation(const nsID& aDocShellId, uint32_t aDocShellHistoryId,
const nsCString& aUrl, bool aIsSubFrame);
size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
bool Equals(PageInformation* aOtherDocShellInfo);
void StreamJSON(SpliceableJSONWriter& aWriter);
uint32_t DocShellHistoryId() { return mDocShellHistoryId; }
const nsID& DocShellId() { return mDocShellId; }
const nsCString& Url() { return mUrl; }
bool IsSubFrame() { return mIsSubFrame; }
mozilla::Maybe<uint64_t> BufferPositionWhenUnregistered() {
return mBufferPositionWhenUnregistered;
}
void NotifyUnregistered(uint64_t aBufferPosition) {
mBufferPositionWhenUnregistered = mozilla::Some(aBufferPosition);
}
private:
const nsID mDocShellId;
const uint32_t mDocShellHistoryId;
const nsCString mUrl;
const bool mIsSubFrame;
// Holds the buffer position when the DocShell is unregistered.
// It's used to determine whether the profiler is still using this DocShell.
mozilla::Maybe<uint64_t> mBufferPositionWhenUnregistered;
virtual ~PageInformation() = default;
};
#endif // PageInformation_h


@@ -0,0 +1,114 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef PLATFORM_MACROS_H
#define PLATFORM_MACROS_H
// Define platform selection macros in a consistent way. Don't add anything
// else to this file, so it can remain freestanding. The primary factorisation
// is on (ARCH,OS) pairs ("PLATforms") but ARCH_ and OS_ macros are defined
// too, since they are sometimes convenient.
//
// Note: "GP" is short for "Gecko Profiler".
#undef GP_PLAT_x86_android
#undef GP_PLAT_amd64_android
#undef GP_PLAT_arm_android
#undef GP_PLAT_arm64_android
#undef GP_PLAT_x86_linux
#undef GP_PLAT_amd64_linux
#undef GP_PLAT_arm_linux
#undef GP_PLAT_mips64_linux
#undef GP_PLAT_amd64_darwin
#undef GP_PLAT_x86_windows
#undef GP_PLAT_amd64_windows
#undef GP_PLAT_arm64_windows
#undef GP_ARCH_x86
#undef GP_ARCH_amd64
#undef GP_ARCH_arm
#undef GP_ARCH_arm64
#undef GP_ARCH_mips64
#undef GP_OS_android
#undef GP_OS_linux
#undef GP_OS_darwin
#undef GP_OS_windows
// We test __ANDROID__ before __linux__ because __linux__ is defined on both
// Android and Linux, whereas GP_OS_android is not defined on vanilla Linux.
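// For example, on 64-bit desktop Linux the block below defines
// GP_PLAT_amd64_linux, GP_ARCH_amd64, and GP_OS_linux, each as 1.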
#if defined(__ANDROID__) && defined(__i386__)
# define GP_PLAT_x86_android 1
# define GP_ARCH_x86 1
# define GP_OS_android 1
#elif defined(__ANDROID__) && defined(__x86_64__)
# define GP_PLAT_amd64_android 1
# define GP_ARCH_amd64 1
# define GP_OS_android 1
#elif defined(__ANDROID__) && defined(__arm__)
# define GP_PLAT_arm_android 1
# define GP_ARCH_arm 1
# define GP_OS_android 1
#elif defined(__ANDROID__) && defined(__aarch64__)
# define GP_PLAT_arm64_android 1
# define GP_ARCH_arm64 1
# define GP_OS_android 1
#elif defined(__linux__) && defined(__i386__)
# define GP_PLAT_x86_linux 1
# define GP_ARCH_x86 1
# define GP_OS_linux 1
#elif defined(__linux__) && defined(__x86_64__)
# define GP_PLAT_amd64_linux 1
# define GP_ARCH_amd64 1
# define GP_OS_linux 1
#elif defined(__linux__) && defined(__arm__)
# define GP_PLAT_arm_linux 1
# define GP_ARCH_arm 1
# define GP_OS_linux 1
#elif defined(__linux__) && defined(__aarch64__)
# define GP_PLAT_arm64_linux 1
# define GP_ARCH_arm64 1
# define GP_OS_linux 1
#elif defined(__linux__) && defined(__mips64)
# define GP_PLAT_mips64_linux 1
# define GP_ARCH_mips64 1
# define GP_OS_linux 1
#elif defined(__APPLE__) && defined(__x86_64__)
# define GP_PLAT_amd64_darwin 1
# define GP_ARCH_amd64 1
# define GP_OS_darwin 1
#elif (defined(_MSC_VER) || defined(__MINGW32__)) && \
(defined(_M_IX86) || defined(__i386__))
# define GP_PLAT_x86_windows 1
# define GP_ARCH_x86 1
# define GP_OS_windows 1
#elif (defined(_MSC_VER) || defined(__MINGW32__)) && \
(defined(_M_X64) || defined(__x86_64__))
# define GP_PLAT_amd64_windows 1
# define GP_ARCH_amd64 1
# define GP_OS_windows 1
#elif defined(_MSC_VER) && defined(_M_ARM64)
# define GP_PLAT_arm64_windows 1
# define GP_ARCH_arm64 1
# define GP_OS_windows 1
#else
# error "Unsupported platform"
#endif
#endif /* ndef PLATFORM_MACROS_H */


@@ -0,0 +1,190 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ProfileBuffer.h"
#include "ProfilerMarker.h"
#include "jsfriendapi.h"
#include "mozilla/MathAlgorithms.h"
#include "nsJSPrincipals.h"
#include "nsScriptSecurityManager.h"
using namespace mozilla;
ProfileBuffer::ProfileBuffer(uint32_t aCapacity)
: mEntryIndexMask(0), mRangeStart(0), mRangeEnd(0), mCapacity(0) {
// Round aCapacity up to the nearest power of two, so that we can index
// mEntries with a simple mask and don't need to do a slow modulo operation.
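// For example, aCapacity == 1000 yields mCapacity == 1024 and
// mEntryIndexMask == 1023 (0x3ff).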
const uint32_t UINT32_MAX_POWER_OF_TWO = 1u << 31;
MOZ_RELEASE_ASSERT(aCapacity <= UINT32_MAX_POWER_OF_TWO,
"aCapacity is larger than what we support");
mCapacity = RoundUpPow2(aCapacity);
mEntryIndexMask = mCapacity - 1;
mEntries = MakeUnique<ProfileBufferEntry[]>(mCapacity);
}
ProfileBuffer::~ProfileBuffer() {
while (mStoredMarkers.peek()) {
delete mStoredMarkers.popHead();
}
}
// Called from a signal handler, so only reentrant functions may be called.
void ProfileBuffer::AddEntry(const ProfileBufferEntry& aEntry) {
GetEntry(mRangeEnd++) = aEntry;
// The distance between mRangeStart and mRangeEnd must never exceed
// mCapacity, so advance mRangeStart if necessary.
if (mRangeEnd - mRangeStart > mCapacity) {
mRangeStart++;
}
}
uint64_t ProfileBuffer::AddThreadIdEntry(int aThreadId) {
uint64_t pos = mRangeEnd;
AddEntry(ProfileBufferEntry::ThreadId(aThreadId));
return pos;
}
void ProfileBuffer::AddStoredMarker(ProfilerMarker* aStoredMarker) {
aStoredMarker->SetPositionInBuffer(mRangeEnd);
mStoredMarkers.insert(aStoredMarker);
}
void ProfileBuffer::CollectCodeLocation(
const char* aLabel, const char* aStr, uint32_t aFrameFlags,
const Maybe<uint32_t>& aLineNumber, const Maybe<uint32_t>& aColumnNumber,
const Maybe<JS::ProfilingCategoryPair>& aCategoryPair) {
AddEntry(ProfileBufferEntry::Label(aLabel));
AddEntry(ProfileBufferEntry::FrameFlags(uint64_t(aFrameFlags)));
if (aStr) {
// Store the string using one or more DynamicStringFragment entries.
size_t strLen = strlen(aStr) + 1; // +1 for the null terminator
for (size_t j = 0; j < strLen;) {
// Store up to kNumChars characters in the entry.
char chars[ProfileBufferEntry::kNumChars];
size_t len = ProfileBufferEntry::kNumChars;
if (j + len >= strLen) {
len = strLen - j;
}
memcpy(chars, &aStr[j], len);
j += ProfileBufferEntry::kNumChars;
AddEntry(ProfileBufferEntry::DynamicStringFragment(chars));
}
}
if (aLineNumber) {
AddEntry(ProfileBufferEntry::LineNumber(*aLineNumber));
}
if (aColumnNumber) {
AddEntry(ProfileBufferEntry::ColumnNumber(*aColumnNumber));
}
if (aCategoryPair.isSome()) {
AddEntry(ProfileBufferEntry::CategoryPair(int(*aCategoryPair)));
}
}
void ProfileBuffer::DeleteExpiredStoredMarkers() {
// Delete markers of samples that have been overwritten due to circular
// buffer wraparound.
while (mStoredMarkers.peek() &&
mStoredMarkers.peek()->HasExpired(mRangeStart)) {
delete mStoredMarkers.popHead();
}
}
size_t ProfileBuffer::SizeOfIncludingThis(
mozilla::MallocSizeOf aMallocSizeOf) const {
size_t n = aMallocSizeOf(this);
n += aMallocSizeOf(mEntries.get());
// Measurement of the following members may be added later if DMD finds it
// is worthwhile:
// - memory pointed to by the elements within mEntries
// - mStoredMarkers
return n;
}
/* ProfileBufferCollector */
static bool IsChromeJSScript(JSScript* aScript) {
// WARNING: this function runs within the profiler's "critical section".
auto realm = js::GetScriptRealm(aScript);
return js::IsSystemRealm(realm);
}
void ProfileBufferCollector::CollectNativeLeafAddr(void* aAddr) {
mBuf.AddEntry(ProfileBufferEntry::NativeLeafAddr(aAddr));
}
void ProfileBufferCollector::CollectJitReturnAddr(void* aAddr) {
mBuf.AddEntry(ProfileBufferEntry::JitReturnAddr(aAddr));
}
void ProfileBufferCollector::CollectWasmFrame(const char* aLabel) {
mBuf.CollectCodeLocation("", aLabel, 0, Nothing(), Nothing(), Nothing());
}
void ProfileBufferCollector::CollectProfilingStackFrame(
const js::ProfilingStackFrame& aFrame) {
// WARNING: this function runs within the profiler's "critical section".
MOZ_ASSERT(aFrame.isLabelFrame() ||
(aFrame.isJsFrame() && !aFrame.isOSRFrame()));
const char* label = aFrame.label();
const char* dynamicString = aFrame.dynamicString();
bool isChromeJSEntry = false;
Maybe<uint32_t> line;
Maybe<uint32_t> column;
if (aFrame.isJsFrame()) {
// There are two kinds of JS frames that get pushed onto the ProfilingStack.
//
// - label = "", dynamic string = <something>
// - label = "js::RunScript", dynamic string = nullptr
//
// The line number is only interesting in the first case.
if (label[0] == '\0') {
MOZ_ASSERT(dynamicString);
// We call aFrame.script() repeatedly -- rather than storing the result in
// a local variable -- in order to avoid rooting hazards.
if (aFrame.script()) {
isChromeJSEntry = IsChromeJSScript(aFrame.script());
if (aFrame.pc()) {
unsigned col = 0;
line = Some(JS_PCToLineNumber(aFrame.script(), aFrame.pc(), &col));
column = Some(col);
}
}
} else {
MOZ_ASSERT(strcmp(label, "js::RunScript") == 0 && !dynamicString);
}
} else {
MOZ_ASSERT(aFrame.isLabelFrame());
}
if (dynamicString) {
// Adjust the dynamic string as necessary.
if (ProfilerFeature::HasPrivacy(mFeatures) && !isChromeJSEntry) {
dynamicString = "(private)";
} else if (strlen(dynamicString) >= ProfileBuffer::kMaxFrameKeyLength) {
dynamicString = "(too long)";
}
}
mBuf.CollectCodeLocation(label, dynamicString, aFrame.flags(), line, column,
Some(aFrame.categoryPair()));
}


@@ -0,0 +1,177 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZ_PROFILE_BUFFER_H
#define MOZ_PROFILE_BUFFER_H
#include "ProfileBufferEntry.h"
#include "ProfilerMarker.h"
#include "mozilla/Maybe.h"
// A fixed-capacity circular buffer.
// This class is used as a queue of entries which, after construction, never
// allocates. This makes it safe to use in the profiler's "critical section".
// Entries are appended at the end. Once the queue capacity has been reached,
// adding a new entry will evict an old entry from the start of the queue.
// Positions in the queue are represented as 64-bit unsigned integers which
// only increase and never wrap around.
// mRangeStart and mRangeEnd describe the range in that uint64_t space which is
// covered by the queue contents.
// Internally, the buffer uses a fixed-size storage and applies a modulo
// operation when accessing entries in that storage buffer. "Evicting" an entry
// really just means that an existing entry in the storage buffer gets
// overwritten and that mRangeStart gets incremented.
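// For example, with a capacity of 8: after 11 calls to AddEntry(),
// mRangeEnd == 11, mRangeStart == 3, and buffer position 10 occupies
// storage slot 10 % 8 == 2.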
class ProfileBuffer final {
public:
// ProfileBuffer constructor
// @param aCapacity The minimum capacity of the buffer. The actual buffer
// capacity will be rounded up to the next power of two.
explicit ProfileBuffer(uint32_t aCapacity);
~ProfileBuffer();
// Add |aEntry| to the buffer, ignoring what kind of entry it is.
void AddEntry(const ProfileBufferEntry& aEntry);
// Add to the buffer a sample start (ThreadId) entry for aThreadId.
// Returns the position of the entry.
uint64_t AddThreadIdEntry(int aThreadId);
void CollectCodeLocation(
const char* aLabel, const char* aStr, uint32_t aFrameFlags,
const mozilla::Maybe<uint32_t>& aLineNumber,
const mozilla::Maybe<uint32_t>& aColumnNumber,
const mozilla::Maybe<JS::ProfilingCategoryPair>& aCategoryPair);
// Maximum size of a frameKey string that we'll handle.
static const size_t kMaxFrameKeyLength = 512;
// Add JIT frame information to aJITFrameInfo for any JitReturnAddr entries
// that are currently in the buffer at or after aRangeStart, in samples
// for the given thread.
void AddJITInfoForRange(uint64_t aRangeStart, int aThreadId,
JSContext* aContext,
JITFrameInfo& aJITFrameInfo) const;
// Stream JSON for samples in the buffer to aWriter, using the supplied
// UniqueStacks object.
// Only streams samples for the given thread ID and which were taken at or
// after aSinceTime.
// aUniqueStacks needs to contain information about any JIT frames that we
// might encounter in the buffer, before this method is called. In other
// words, you need to have called AddJITInfoForRange for every range that
// might contain JIT frame information before calling this method.
void StreamSamplesToJSON(SpliceableJSONWriter& aWriter, int aThreadId,
double aSinceTime,
UniqueStacks& aUniqueStacks) const;
void StreamMarkersToJSON(SpliceableJSONWriter& aWriter, int aThreadId,
const mozilla::TimeStamp& aProcessStartTime,
double aSinceTime,
UniqueStacks& aUniqueStacks) const;
void StreamPausedRangesToJSON(SpliceableJSONWriter& aWriter,
double aSinceTime) const;
void StreamProfilerOverheadToJSON(SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime,
double aSinceTime) const;
void StreamCountersToJSON(SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime,
double aSinceTime) const;
void StreamMemoryToJSON(SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime,
double aSinceTime) const;
// Find (via |aLastSample|) the most recent sample for the thread denoted by
// |aThreadId| and clone it, patching in the current time as appropriate.
// Mutate |aLastSample| to point to the newly inserted sample.
// Returns whether duplication was successful.
bool DuplicateLastSample(int aThreadId,
const mozilla::TimeStamp& aProcessStartTime,
mozilla::Maybe<uint64_t>& aLastSample);
void DiscardSamplesBeforeTime(double aTime);
void AddStoredMarker(ProfilerMarker* aStoredMarker);
// The following method is not signal safe!
void DeleteExpiredStoredMarkers();
// Access an entry in the buffer.
ProfileBufferEntry& GetEntry(uint64_t aPosition) const {
return mEntries[aPosition & mEntryIndexMask];
}
size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
private:
// The storage that backs our buffer. Holds mCapacity entries.
// All accesses to entries in mEntries need to go through GetEntry(), which
// translates the given buffer position from the near-infinite uint64_t space
// into the entry storage space.
mozilla::UniquePtr<ProfileBufferEntry[]> mEntries;
// A mask such that pos & mEntryIndexMask == pos % mCapacity.
uint32_t mEntryIndexMask;
public:
// mRangeStart and mRangeEnd are uint64_t values that strictly advance and
// never wrap around. mRangeEnd is always greater than or equal to
// mRangeStart, but never gets more than mCapacity steps ahead of
// mRangeStart, because we can only store a fixed number of entries in the
// buffer. Once the entire buffer is in use, adding a new entry will evict an
// entry from the front of the buffer (and increase mRangeStart).
// In other words, the following conditions hold true at all times:
// (1) mRangeStart <= mRangeEnd
// (2) mRangeEnd - mRangeStart <= mCapacity
//
// If there are no live entries, then mRangeStart == mRangeEnd.
// Otherwise, mRangeStart is the first live entry and mRangeEnd is one past
// the last live entry, and also the position at which the next entry will be
// added.
// (mRangeEnd - mRangeStart) always gives the number of live entries.
uint64_t mRangeStart;
uint64_t mRangeEnd;
// The number of entries in our buffer. Always a power of two.
uint32_t mCapacity;
// Markers that marker entries in the buffer might refer to.
ProfilerMarkerLinkedList mStoredMarkers;
};
/**
* Helper type used to implement ProfilerStackCollector. This type is used as
* the collector for MergeStacks by ProfileBuffer. It holds a reference to the
* buffer, as well as additional feature flags which are needed to control the
* data collection strategy
*/
class ProfileBufferCollector final : public ProfilerStackCollector {
public:
ProfileBufferCollector(ProfileBuffer& aBuf, uint32_t aFeatures,
uint64_t aSamplePos)
: mBuf(aBuf), mSamplePositionInBuffer(aSamplePos), mFeatures(aFeatures) {}
mozilla::Maybe<uint64_t> SamplePositionInBuffer() override {
return mozilla::Some(mSamplePositionInBuffer);
}
mozilla::Maybe<uint64_t> BufferRangeStart() override {
return mozilla::Some(mBuf.mRangeStart);
}
virtual void CollectNativeLeafAddr(void* aAddr) override;
virtual void CollectJitReturnAddr(void* aAddr) override;
virtual void CollectWasmFrame(const char* aLabel) override;
virtual void CollectProfilingStackFrame(
const js::ProfilingStackFrame& aFrame) override;
private:
ProfileBuffer& mBuf;
uint64_t mSamplePositionInBuffer;
uint32_t mFeatures;
};
#endif

Diff not shown because of its large size.


@@ -0,0 +1,553 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef ProfileBufferEntry_h
#define ProfileBufferEntry_h
#include "BaseProfileJSONWriter.h"
#include "gtest/MozGtestFriend.h"
#include "js/ProfilingCategory.h"
#include "js/ProfilingFrameIterator.h"
#include "js/TrackedOptimizationInfo.h"
#include "mozilla/HashFunctions.h"
#include "mozilla/HashTable.h"
#include "mozilla/Maybe.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Variant.h"
#include "mozilla/Vector.h"
#include "nsString.h"
class ProfilerMarker;
// NOTE! If you add entries, you need to verify if they need to be added to the
// switch statement in DuplicateLastSample!
#define FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(MACRO) \
MACRO(CategoryPair, int) \
MACRO(CollectionStart, double) \
MACRO(CollectionEnd, double) \
MACRO(Label, const char*) \
MACRO(FrameFlags, uint64_t) \
MACRO(DynamicStringFragment, char*) /* char[kNumChars], really */ \
MACRO(JitReturnAddr, void*) \
MACRO(LineNumber, int) \
MACRO(ColumnNumber, int) \
MACRO(NativeLeafAddr, void*) \
MACRO(Marker, ProfilerMarker*) \
MACRO(Pause, double) \
MACRO(Responsiveness, double) \
MACRO(Resume, double) \
MACRO(ThreadId, int) \
MACRO(Time, double) \
MACRO(ResidentMemory, uint64_t) \
MACRO(UnsharedMemory, uint64_t) \
MACRO(CounterId, void*) \
MACRO(CounterKey, uint64_t) \
MACRO(Number, uint64_t) \
MACRO(Count, int64_t) \
MACRO(ProfilerOverheadTime, double) \
MACRO(ProfilerOverheadDuration, double)
class ProfileBufferEntry {
public:
enum class Kind : uint8_t {
INVALID = 0,
#define KIND(k, t) k,
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(KIND)
#undef KIND
LIMIT
};
ProfileBufferEntry();
// This is equal to sizeof(double), which is the largest non-char variant in
// |u|.
static const size_t kNumChars = 8;
private:
// aString must be a static string.
ProfileBufferEntry(Kind aKind, const char* aString);
ProfileBufferEntry(Kind aKind, char aChars[kNumChars]);
ProfileBufferEntry(Kind aKind, void* aPtr);
ProfileBufferEntry(Kind aKind, ProfilerMarker* aMarker);
ProfileBufferEntry(Kind aKind, double aDouble);
ProfileBufferEntry(Kind aKind, int64_t aInt64);
ProfileBufferEntry(Kind aKind, uint64_t aUint64);
ProfileBufferEntry(Kind aKind, int aInt);
public:
#define CTOR(k, t) \
static ProfileBufferEntry k(t aVal) { \
return ProfileBufferEntry(Kind::k, aVal); \
}
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(CTOR)
#undef CTOR
Kind GetKind() const { return mKind; }
#define IS_KIND(k, t) \
bool Is##k() const { return mKind == Kind::k; }
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(IS_KIND)
#undef IS_KIND
private:
FRIEND_TEST(ThreadProfile, InsertOneEntry);
FRIEND_TEST(ThreadProfile, InsertOneEntryWithTinyBuffer);
FRIEND_TEST(ThreadProfile, InsertEntriesNoWrap);
FRIEND_TEST(ThreadProfile, InsertEntriesWrap);
FRIEND_TEST(ThreadProfile, MemoryMeasure);
friend class ProfileBuffer;
Kind mKind;
uint8_t mStorage[kNumChars];
const char* GetString() const;
void* GetPtr() const;
ProfilerMarker* GetMarker() const;
double GetDouble() const;
int GetInt() const;
int64_t GetInt64() const;
uint64_t GetUint64() const;
void CopyCharsInto(char (&aOutArray)[kNumChars]) const;
};
// Packed layout: 1 byte for the tag + 8 bytes for the value.
static_assert(sizeof(ProfileBufferEntry) == 9, "bad ProfileBufferEntry size");
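// Deduplicates strings for the profile JSON: each distinct string is written
// once into a string table, and WriteProperty()/WriteElement() emit only its
// index into that table.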
class UniqueJSONStrings {
public:
UniqueJSONStrings();
explicit UniqueJSONStrings(const UniqueJSONStrings& aOther);
void SpliceStringTableElements(SpliceableJSONWriter& aWriter) {
aWriter.TakeAndSplice(mStringTableWriter.WriteFunc());
}
void WriteProperty(mozilla::JSONWriter& aWriter, const char* aName,
const char* aStr) {
aWriter.IntProperty(aName, GetOrAddIndex(aStr));
}
void WriteElement(mozilla::JSONWriter& aWriter, const char* aStr) {
aWriter.IntElement(GetOrAddIndex(aStr));
}
uint32_t GetOrAddIndex(const char* aStr);
private:
SpliceableChunkedJSONWriter mStringTableWriter;
mozilla::HashMap<mozilla::HashNumber, uint32_t> mStringHashToIndexMap;
};
// Contains all the information about JIT frames that is needed to stream stack
// frames for JitReturnAddr entries in the profiler buffer.
// Every return address (void*) is mapped to one or more JITFrameKeys, and
// every JITFrameKey is mapped to a JSON string for that frame.
// mRangeStart and mRangeEnd describe the range in the buffer for which this
// mapping is valid. Only JitReturnAddr entries within that buffer range can be
// processed using this JITFrameInfoForBufferRange object.
struct JITFrameInfoForBufferRange final {
JITFrameInfoForBufferRange Clone() const;
uint64_t mRangeStart;
uint64_t mRangeEnd; // mRangeEnd marks the first invalid index.
struct JITFrameKey {
bool operator==(const JITFrameKey& aOther) const {
return mCanonicalAddress == aOther.mCanonicalAddress &&
mDepth == aOther.mDepth;
}
bool operator!=(const JITFrameKey& aOther) const {
return !(*this == aOther);
}
void* mCanonicalAddress;
uint32_t mDepth;
};
struct JITFrameKeyHasher {
using Lookup = JITFrameKey;
static mozilla::HashNumber hash(const JITFrameKey& aLookup) {
mozilla::HashNumber hash = 0;
hash = mozilla::AddToHash(hash, aLookup.mCanonicalAddress);
hash = mozilla::AddToHash(hash, aLookup.mDepth);
return hash;
}
static bool match(const JITFrameKey& aKey, const JITFrameKey& aLookup) {
return aKey == aLookup;
}
static void rekey(JITFrameKey& aKey, const JITFrameKey& aNewKey) {
aKey = aNewKey;
}
};
using JITAddressToJITFramesMap =
mozilla::HashMap<void*, mozilla::Vector<JITFrameKey>>;
JITAddressToJITFramesMap mJITAddressToJITFramesMap;
using JITFrameToFrameJSONMap =
mozilla::HashMap<JITFrameKey, nsCString, JITFrameKeyHasher>;
JITFrameToFrameJSONMap mJITFrameToFrameJSONMap;
};
// Contains JITFrameInfoForBufferRange objects for multiple profiler buffer
// ranges.
struct JITFrameInfo final {
JITFrameInfo() : mUniqueStrings(mozilla::MakeUnique<UniqueJSONStrings>()) {}
MOZ_IMPLICIT JITFrameInfo(const JITFrameInfo& aOther);
// Creates a new JITFrameInfoForBufferRange object in mRanges by looking up
// information about the provided JIT return addresses using aCx.
// Addresses are provided like this:
// The caller of AddInfoForRange supplies a function in aJITAddressProvider.
// This function will be called once, synchronously, with an
// aJITAddressConsumer argument, which is a function that needs to be called
// for every address. That function can be called multiple times for the same
// address.
void AddInfoForRange(
uint64_t aRangeStart, uint64_t aRangeEnd, JSContext* aCx,
const std::function<void(const std::function<void(void*)>&)>&
aJITAddressProvider);
// Returns whether the information stored in this object is still relevant
// for any entries in the buffer.
bool HasExpired(uint64_t aCurrentBufferRangeStart) const {
if (mRanges.empty()) {
// No information means no relevant information. Allow this object to be
// discarded.
return true;
}
return mRanges.back().mRangeEnd <= aCurrentBufferRangeStart;
}
// The array of ranges of JIT frame information, sorted by buffer position.
// Ranges are non-overlapping.
// The JSON of the cached frames can contain string indexes, which refer
// to strings in mUniqueStrings.
mozilla::Vector<JITFrameInfoForBufferRange> mRanges;
// The string table which contains strings used in the frame JSON that's
// cached in mRanges.
mozilla::UniquePtr<UniqueJSONStrings> mUniqueStrings;
};
class UniqueStacks {
public:
struct FrameKey {
explicit FrameKey(const char* aLocation)
: mData(NormalFrameData{nsCString(aLocation), false, mozilla::Nothing(),
mozilla::Nothing()}) {}
FrameKey(nsCString&& aLocation, bool aRelevantForJS,
const mozilla::Maybe<unsigned>& aLine,
const mozilla::Maybe<unsigned>& aColumn,
const mozilla::Maybe<JS::ProfilingCategoryPair>& aCategoryPair)
: mData(NormalFrameData{aLocation, aRelevantForJS, aLine, aColumn,
aCategoryPair}) {}
FrameKey(void* aJITAddress, uint32_t aJITDepth, uint32_t aRangeIndex)
: mData(JITFrameData{aJITAddress, aJITDepth, aRangeIndex}) {}
FrameKey(const FrameKey& aToCopy) = default;
uint32_t Hash() const;
bool operator==(const FrameKey& aOther) const {
return mData == aOther.mData;
}
struct NormalFrameData {
bool operator==(const NormalFrameData& aOther) const;
nsCString mLocation;
bool mRelevantForJS;
mozilla::Maybe<unsigned> mLine;
mozilla::Maybe<unsigned> mColumn;
mozilla::Maybe<JS::ProfilingCategoryPair> mCategoryPair;
};
struct JITFrameData {
bool operator==(const JITFrameData& aOther) const;
void* mCanonicalAddress;
uint32_t mDepth;
uint32_t mRangeIndex;
};
mozilla::Variant<NormalFrameData, JITFrameData> mData;
};
struct FrameKeyHasher {
using Lookup = FrameKey;
static mozilla::HashNumber hash(const FrameKey& aLookup) {
mozilla::HashNumber hash = 0;
if (aLookup.mData.is<FrameKey::NormalFrameData>()) {
const FrameKey::NormalFrameData& data =
aLookup.mData.as<FrameKey::NormalFrameData>();
if (!data.mLocation.IsEmpty()) {
hash = mozilla::AddToHash(hash,
mozilla::HashString(data.mLocation.get()));
}
hash = mozilla::AddToHash(hash, data.mRelevantForJS);
if (data.mLine.isSome()) {
hash = mozilla::AddToHash(hash, *data.mLine);
}
if (data.mColumn.isSome()) {
hash = mozilla::AddToHash(hash, *data.mColumn);
}
if (data.mCategoryPair.isSome()) {
hash = mozilla::AddToHash(hash,
static_cast<uint32_t>(*data.mCategoryPair));
}
} else {
const FrameKey::JITFrameData& data =
aLookup.mData.as<FrameKey::JITFrameData>();
hash = mozilla::AddToHash(hash, data.mCanonicalAddress);
hash = mozilla::AddToHash(hash, data.mDepth);
hash = mozilla::AddToHash(hash, data.mRangeIndex);
}
return hash;
}
static bool match(const FrameKey& aKey, const FrameKey& aLookup) {
return aKey == aLookup;
}
static void rekey(FrameKey& aKey, const FrameKey& aNewKey) {
aKey = aNewKey;
}
};
struct StackKey {
mozilla::Maybe<uint32_t> mPrefixStackIndex;
uint32_t mFrameIndex;
explicit StackKey(uint32_t aFrame)
: mFrameIndex(aFrame), mHash(mozilla::HashGeneric(aFrame)) {}
StackKey(const StackKey& aPrefix, uint32_t aPrefixStackIndex,
uint32_t aFrame)
: mPrefixStackIndex(mozilla::Some(aPrefixStackIndex)),
mFrameIndex(aFrame),
mHash(mozilla::AddToHash(aPrefix.mHash, aFrame)) {}
mozilla::HashNumber Hash() const { return mHash; }
bool operator==(const StackKey& aOther) const {
return mPrefixStackIndex == aOther.mPrefixStackIndex &&
mFrameIndex == aOther.mFrameIndex;
}
private:
mozilla::HashNumber mHash;
};
struct StackKeyHasher {
using Lookup = StackKey;
static mozilla::HashNumber hash(const StackKey& aLookup) {
return aLookup.Hash();
}
static bool match(const StackKey& aKey, const StackKey& aLookup) {
return aKey == aLookup;
}
static void rekey(StackKey& aKey, const StackKey& aNewKey) {
aKey = aNewKey;
}
};
explicit UniqueStacks(JITFrameInfo&& aJITFrameInfo);
// Return a StackKey for aFrame as the stack's root frame (no prefix).
MOZ_MUST_USE StackKey BeginStack(const FrameKey& aFrame);
// Return a new StackKey that is obtained by appending aFrame to aStack.
MOZ_MUST_USE StackKey AppendFrame(const StackKey& aStack,
const FrameKey& aFrame);
// Look up frame keys for the given JIT address, and ensure that our frame
// table has entries for the returned frame keys. The JSON for these frames
// is taken from mJITInfoRanges.
// aBufferPosition is needed in order to look up the correct JIT frame info
// object in mJITInfoRanges.
MOZ_MUST_USE mozilla::Maybe<mozilla::Vector<UniqueStacks::FrameKey>>
LookupFramesForJITAddressFromBufferPos(void* aJITAddress,
uint64_t aBufferPosition);
MOZ_MUST_USE uint32_t GetOrAddFrameIndex(const FrameKey& aFrame);
MOZ_MUST_USE uint32_t GetOrAddStackIndex(const StackKey& aStack);
void SpliceFrameTableElements(SpliceableJSONWriter& aWriter);
void SpliceStackTableElements(SpliceableJSONWriter& aWriter);
private:
void StreamNonJITFrame(const FrameKey& aFrame);
void StreamStack(const StackKey& aStack);
public:
mozilla::UniquePtr<UniqueJSONStrings> mUniqueStrings;
private:
SpliceableChunkedJSONWriter mFrameTableWriter;
mozilla::HashMap<FrameKey, uint32_t, FrameKeyHasher> mFrameToIndexMap;
SpliceableChunkedJSONWriter mStackTableWriter;
mozilla::HashMap<StackKey, uint32_t, StackKeyHasher> mStackToIndexMap;
mozilla::Vector<JITFrameInfoForBufferRange> mJITInfoRanges;
};
//
// Thread profile JSON Format
// --------------------------
//
// The profile contains a lot of duplicated information. The output JSON of
// the profile attempts to deduplicate strings, frames, and stack prefixes, to
// cut down on size and to increase JSON streaming speed. Deduplicated values
// are streamed as indices into their respective tables.
//
// Further, arrays of objects with the same set of properties (e.g., samples,
// frames) are output as arrays according to a schema instead of an object
// with property names. A property that is not present is represented in the
// array as null or undefined.
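//
// For example, a sample row [ 5, 12.3 ] under schema { "stack": 0, "time": 1 }
// decodes to { stack: 5, time: 12.3 }.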
//
// The format of the thread profile JSON is shown by the following example
// with 1 sample and 1 marker:
//
// {
// "name": "Foo",
// "tid": 42,
// "samples":
// {
// "schema":
// {
// "stack": 0, /* index into stackTable */
// "time": 1, /* number */
// "responsiveness": 2, /* number */
// },
// "data":
// [
// [ 1, 0.0, 0.0 ] /* { stack: 1, time: 0.0, responsiveness: 0.0 } */
// ]
// },
//
// "markers":
// {
// "schema":
// {
// "name": 0, /* index into stringTable */
// "time": 1, /* number */
// "data": 2 /* arbitrary JSON */
// },
// "data":
// [
// [ 3, 0.1 ] /* { name: 'example marker', time: 0.1 } */
// ]
// },
//
// "stackTable":
// {
// "schema":
// {
// "prefix": 0, /* index into stackTable */
// "frame": 1 /* index into frameTable */
// },
// "data":
// [
// [ null, 0 ], /* (root) */
// [ 0, 1 ] /* (root) > foo.js */
// ]
// },
//
// "frameTable":
// {
// "schema":
// {
// "location": 0, /* index into stringTable */
// "implementation": 1, /* index into stringTable */
// "optimizations": 2, /* arbitrary JSON */
// "line": 3, /* number */
// "column": 4, /* number */
// "category": 5 /* number */
// },
// "data":
// [
// [ 0 ], /* { location: '(root)' } */
// [ 1, 2 ] /* { location: 'foo.js',
// implementation: 'baseline' } */
// ]
// },
//
// "stringTable":
// [
// "(root)",
// "foo.js",
// "baseline",
// "example marker"
// ]
// }
//
// Process:
// {
// "name": "Bar",
// "pid": 24,
// "threads":
// [
// <0-N threads from above>
// ],
// "counters": /* includes the memory counter */
// [
// {
// "name": "qwerty",
// "category": "uiop",
// "description": "this is qwerty uiop",
// "sample_groups:
// [
// {
// "id": 42, /* number (thread id, or object identifier (tab), etc) */
// "samples:
// {
// "schema":
// {
// "time": 1, /* number */
// "number": 2, /* number (of times the counter was touched) */
// "count": 3 /* number (total for the counter) */
// },
// "data":
// [
// [ 0.1, 1824,
// 454622 ] /* { time: 0.1, number: 1824, count: 454622 } */
// ]
// },
// },
// /* more sample-group objects with different id's */
// ]
// },
// /* more counters */
// ],
// "memory":
// {
// "initial_heap": 12345678,
// "samples:
// {
// "schema":
// {
// "time": 1, /* number */
// "rss": 2, /* number */
// "uss": 3 /* number */
// },
// "data":
// [
// /* { time: 0.1, rss: 12345678, uss: 87654321} */
// [ 0.1, 12345678, 87654321 ]
// ]
// },
// },
// }
//
#endif /* ndef ProfileBufferEntry_h */

@@ -0,0 +1,114 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "BaseProfileJSONWriter.h"
#include "mozilla/HashFunctions.h"
void ChunkedJSONWriteFunc::Write(const char* aStr) {
MOZ_ASSERT(mChunkPtr >= mChunkList.back().get() && mChunkPtr <= mChunkEnd);
MOZ_ASSERT(mChunkEnd >= mChunkList.back().get() + mChunkLengths.back());
MOZ_ASSERT(*mChunkPtr == '\0');
size_t len = strlen(aStr);
// Most strings to be written are small, but subprocess profiles (e.g.,
// from the content process in e10s) may be huge. If the string is larger
// than a chunk, allocate its own chunk.
char* newPtr;
if (len >= kChunkSize) {
AllocChunk(len + 1);
newPtr = mChunkPtr + len;
} else {
newPtr = mChunkPtr + len;
if (newPtr >= mChunkEnd) {
AllocChunk(kChunkSize);
newPtr = mChunkPtr + len;
}
}
memcpy(mChunkPtr, aStr, len);
*newPtr = '\0';
mChunkPtr = newPtr;
mChunkLengths.back() += len;
}
size_t ChunkedJSONWriteFunc::GetTotalLength() const {
MOZ_ASSERT(mChunkLengths.length() == mChunkList.length());
// Start at 1 to account for the terminating '\0'.
size_t totalLen = 1;
for (size_t i = 0; i < mChunkLengths.length(); i++) {
MOZ_ASSERT(strlen(mChunkList[i].get()) == mChunkLengths[i]);
totalLen += mChunkLengths[i];
}
return totalLen;
}
void ChunkedJSONWriteFunc::CopyDataIntoLazilyAllocatedBuffer(
const std::function<char*(size_t)>& aAllocator) const {
size_t totalLen = GetTotalLength();
char* ptr = aAllocator(totalLen);
for (size_t i = 0; i < mChunkList.length(); i++) {
size_t len = mChunkLengths[i];
memcpy(ptr, mChunkList[i].get(), len);
ptr += len;
}
*ptr = '\0';
}
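// Usage sketch (illustrative; assumes <vector> is available): the allocator
// callback receives the exact byte count needed, including the terminating
// '\0', so a caller can copy straight into storage it manages itself. The
// vector merely stands in for any caller-owned buffer (e.g. shared memory);
// CopyData() below uses the same callback to fill a UniquePtr.
static void ExampleCopyIntoVector(const ChunkedJSONWriteFunc& aFunc,
                                  std::vector<char>& aOutput) {
  aFunc.CopyDataIntoLazilyAllocatedBuffer([&](size_t aSize) {
    aOutput.resize(aSize);  // allocate exactly what is needed
    return aOutput.data();
  });
}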
mozilla::UniquePtr<char[]> ChunkedJSONWriteFunc::CopyData() const {
mozilla::UniquePtr<char[]> c;
CopyDataIntoLazilyAllocatedBuffer([&](size_t allocationSize) {
c = mozilla::MakeUnique<char[]>(allocationSize);
return c.get();
});
return c;
}
void ChunkedJSONWriteFunc::Take(ChunkedJSONWriteFunc&& aOther) {
for (size_t i = 0; i < aOther.mChunkList.length(); i++) {
MOZ_ALWAYS_TRUE(mChunkLengths.append(aOther.mChunkLengths[i]));
MOZ_ALWAYS_TRUE(mChunkList.append(std::move(aOther.mChunkList[i])));
}
mChunkPtr = mChunkList.back().get() + mChunkLengths.back();
mChunkEnd = mChunkPtr;
aOther.mChunkPtr = nullptr;
aOther.mChunkEnd = nullptr;
aOther.mChunkList.clear();
aOther.mChunkLengths.clear();
}
void ChunkedJSONWriteFunc::AllocChunk(size_t aChunkSize) {
MOZ_ASSERT(mChunkLengths.length() == mChunkList.length());
mozilla::UniquePtr<char[]> newChunk = mozilla::MakeUnique<char[]>(aChunkSize);
mChunkPtr = newChunk.get();
mChunkEnd = mChunkPtr + aChunkSize;
*mChunkPtr = '\0';
MOZ_ALWAYS_TRUE(mChunkLengths.append(0));
MOZ_ALWAYS_TRUE(mChunkList.append(std::move(newChunk)));
}
void SpliceableJSONWriter::TakeAndSplice(ChunkedJSONWriteFunc* aFunc) {
// This generic version copies the chunks' contents into this writer's own
// WriteFunc and then empties aFunc; SpliceableChunkedJSONWriter overrides it
// below to take ownership of the chunks instead.
Separator();
for (size_t i = 0; i < aFunc->mChunkList.length(); i++) {
WriteFunc()->Write(aFunc->mChunkList[i].get());
}
aFunc->mChunkPtr = nullptr;
aFunc->mChunkEnd = nullptr;
aFunc->mChunkList.clear();
aFunc->mChunkLengths.clear();
mNeedComma[mDepth] = true;
}
void SpliceableJSONWriter::Splice(const char* aStr) {
Separator();
WriteFunc()->Write(aStr);
mNeedComma[mDepth] = true;
}
void SpliceableChunkedJSONWriter::TakeAndSplice(ChunkedJSONWriteFunc* aFunc) {
Separator();
WriteFunc()->Take(std::move(*aFunc));
mNeedComma[mDepth] = true;
}
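// Splicing sketch (illustrative): JSON built in a nested
// SpliceableChunkedJSONWriter can be moved into an outer writer without
// copying chunk contents. This assumes SpliceableChunkedJSONWriter exposes
// its underlying ChunkedJSONWriteFunc via a ChunkedWriteFunc() accessor, as
// declared in BaseProfileJSONWriter.h.
static void ExampleSplice(SpliceableJSONWriter& aOuterWriter,
                          SpliceableChunkedJSONWriter& aSubWriter) {
  // aSubWriter's chunks are taken over wholesale; aSubWriter is left empty.
  aOuterWriter.TakeAndSplice(aSubWriter.ChunkedWriteFunc());
}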

@@ -0,0 +1,303 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ProfiledThreadData.h"
#include "ProfileBuffer.h"
#include "BaseProfileJSONWriter.h"
#include "js/TraceLoggerAPI.h"
#include "mozilla/dom/ContentChild.h"
#if defined(GP_OS_darwin)
# include <pthread.h>
#endif
ProfiledThreadData::ProfiledThreadData(ThreadInfo* aThreadInfo,
nsIEventTarget* aEventTarget,
bool aIncludeResponsiveness)
: mThreadInfo(aThreadInfo) {
MOZ_COUNT_CTOR(ProfiledThreadData);
if (aIncludeResponsiveness) {
mResponsiveness.emplace(aEventTarget, aThreadInfo->IsMainThread());
}
}
ProfiledThreadData::~ProfiledThreadData() {
MOZ_COUNT_DTOR(ProfiledThreadData);
}
void ProfiledThreadData::StreamJSON(const ProfileBuffer& aBuffer,
JSContext* aCx,
SpliceableJSONWriter& aWriter,
const nsACString& aProcessName,
const mozilla::TimeStamp& aProcessStartTime,
double aSinceTime, bool aJSTracerEnabled) {
if (mJITFrameInfoForPreviousJSContexts &&
mJITFrameInfoForPreviousJSContexts->HasExpired(aBuffer.mRangeStart)) {
mJITFrameInfoForPreviousJSContexts = nullptr;
}
// If we have an existing JITFrameInfo in mJITFrameInfoForPreviousJSContexts,
// copy the data from it.
JITFrameInfo jitFrameInfo =
mJITFrameInfoForPreviousJSContexts
? JITFrameInfo(*mJITFrameInfoForPreviousJSContexts)
: JITFrameInfo();
if (aCx && mBufferPositionWhenReceivedJSContext) {
aBuffer.AddJITInfoForRange(*mBufferPositionWhenReceivedJSContext,
mThreadInfo->ThreadId(), aCx, jitFrameInfo);
}
UniqueStacks uniqueStacks(std::move(jitFrameInfo));
aWriter.Start();
{
StreamSamplesAndMarkers(mThreadInfo->Name(), mThreadInfo->ThreadId(),
aBuffer, aWriter, aProcessName, aProcessStartTime,
mThreadInfo->RegisterTime(), mUnregisterTime,
aSinceTime, uniqueStacks);
aWriter.StartObjectProperty("stackTable");
{
{
JSONSchemaWriter schema(aWriter);
schema.WriteField("prefix");
schema.WriteField("frame");
}
aWriter.StartArrayProperty("data");
{ uniqueStacks.SpliceStackTableElements(aWriter); }
aWriter.EndArray();
}
aWriter.EndObject();
aWriter.StartObjectProperty("frameTable");
{
{
JSONSchemaWriter schema(aWriter);
schema.WriteField("location");
schema.WriteField("relevantForJS");
schema.WriteField("implementation");
schema.WriteField("optimizations");
schema.WriteField("line");
schema.WriteField("column");
schema.WriteField("category");
}
aWriter.StartArrayProperty("data");
{ uniqueStacks.SpliceFrameTableElements(aWriter); }
aWriter.EndArray();
}
aWriter.EndObject();
aWriter.StartArrayProperty("stringTable");
{ uniqueStacks.mUniqueStrings->SpliceStringTableElements(aWriter); }
aWriter.EndArray();
}
if (aCx && aJSTracerEnabled) {
StreamTraceLoggerJSON(aCx, aWriter, aProcessStartTime);
}
aWriter.End();
}
void ProfiledThreadData::StreamTraceLoggerJSON(
JSContext* aCx, SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime) {
aWriter.StartObjectProperty("jsTracerEvents");
{
JS::AutoTraceLoggerLockGuard lockGuard;
JS::SpewTraceLoggerThread(aCx);
uint32_t length = 0;
// Collect Event Ids
aWriter.StartArrayProperty("events", mozilla::JSONWriter::SingleLineStyle);
{
JS::TraceLoggerIdBuffer collectionBuffer(lockGuard, aCx);
while (collectionBuffer.NextChunk()) {
for (uint32_t val : collectionBuffer) {
aWriter.IntElement(val);
length++;
}
}
}
aWriter.EndArray();
// Collect Event Timestamps
aWriter.StartArrayProperty("timestamps",
mozilla::JSONWriter::SingleLineStyle);
{
JS::TraceLoggerTimeStampBuffer collectionBuffer(lockGuard, aCx);
while (collectionBuffer.NextChunk()) {
for (mozilla::TimeStamp val : collectionBuffer) {
aWriter.DoubleElement((val - aProcessStartTime).ToMicroseconds());
}
}
}
aWriter.EndArray();
// Collect Event Durations
aWriter.StartArrayProperty("durations",
mozilla::JSONWriter::SingleLineStyle);
{
JS::TraceLoggerDurationBuffer collectionBuffer(lockGuard, aCx);
while (collectionBuffer.NextChunk()) {
for (double val : collectionBuffer) {
if (val == -1) {
aWriter.NullElement();
} else {
aWriter.DoubleElement(val);
}
}
}
}
aWriter.EndArray();
// Collect Event LineNo
aWriter.StartArrayProperty("line", mozilla::JSONWriter::SingleLineStyle);
{
JS::TraceLoggerLineNoBuffer collectionBuffer(lockGuard, aCx);
while (collectionBuffer.NextChunk()) {
for (int32_t val : collectionBuffer) {
if (val == -1) {
aWriter.NullElement();
} else {
aWriter.IntElement(val);
}
}
}
}
aWriter.EndArray();
// Collect Event ColNo
aWriter.StartArrayProperty("column", mozilla::JSONWriter::SingleLineStyle);
{
JS::TraceLoggerColNoBuffer collectionBuffer(lockGuard, aCx);
while (collectionBuffer.NextChunk()) {
for (int32_t val : collectionBuffer) {
if (val == -1) {
aWriter.NullElement();
} else {
aWriter.IntElement(val);
}
}
}
}
aWriter.EndArray();
aWriter.IntProperty("length", length);
}
aWriter.EndObject();
}
void StreamSamplesAndMarkers(const char* aName, int aThreadId,
const ProfileBuffer& aBuffer,
SpliceableJSONWriter& aWriter,
const nsACString& aProcessName,
const mozilla::TimeStamp& aProcessStartTime,
const mozilla::TimeStamp& aRegisterTime,
const mozilla::TimeStamp& aUnregisterTime,
double aSinceTime, UniqueStacks& aUniqueStacks) {
aWriter.StringProperty("processType",
XRE_ChildProcessTypeToString(XRE_GetProcessType()));
aWriter.StringProperty("name", aName);
// Use given process name (if any), unless we're the parent process.
if (XRE_IsParentProcess()) {
aWriter.StringProperty("processName", "Parent Process");
} else if (!aProcessName.IsEmpty()) {
aWriter.StringProperty("processName", aProcessName.Data());
}
aWriter.IntProperty("tid", static_cast<int64_t>(aThreadId));
aWriter.IntProperty("pid",
static_cast<int64_t>(profiler_current_process_id()));
if (aRegisterTime) {
aWriter.DoubleProperty(
"registerTime", (aRegisterTime - aProcessStartTime).ToMilliseconds());
} else {
aWriter.NullProperty("registerTime");
}
if (aUnregisterTime) {
aWriter.DoubleProperty(
"unregisterTime",
(aUnregisterTime - aProcessStartTime).ToMilliseconds());
} else {
aWriter.NullProperty("unregisterTime");
}
aWriter.StartObjectProperty("samples");
{
{
JSONSchemaWriter schema(aWriter);
schema.WriteField("stack");
schema.WriteField("time");
schema.WriteField("responsiveness");
schema.WriteField("rss");
schema.WriteField("uss");
}
aWriter.StartArrayProperty("data");
{
aBuffer.StreamSamplesToJSON(aWriter, aThreadId, aSinceTime,
aUniqueStacks);
}
aWriter.EndArray();
}
aWriter.EndObject();
aWriter.StartObjectProperty("markers");
{
{
JSONSchemaWriter schema(aWriter);
schema.WriteField("name");
schema.WriteField("time");
schema.WriteField("category");
schema.WriteField("data");
}
aWriter.StartArrayProperty("data");
{
aBuffer.StreamMarkersToJSON(aWriter, aThreadId, aProcessStartTime,
aSinceTime, aUniqueStacks);
}
aWriter.EndArray();
}
aWriter.EndObject();
}
void ProfiledThreadData::NotifyAboutToLoseJSContext(
JSContext* aContext, const mozilla::TimeStamp& aProcessStartTime,
ProfileBuffer& aBuffer) {
if (!mBufferPositionWhenReceivedJSContext) {
return;
}
MOZ_RELEASE_ASSERT(aContext);
if (mJITFrameInfoForPreviousJSContexts &&
mJITFrameInfoForPreviousJSContexts->HasExpired(aBuffer.mRangeStart)) {
mJITFrameInfoForPreviousJSContexts = nullptr;
}
mozilla::UniquePtr<JITFrameInfo> jitFrameInfo =
mJITFrameInfoForPreviousJSContexts
? std::move(mJITFrameInfoForPreviousJSContexts)
: mozilla::MakeUnique<JITFrameInfo>();
aBuffer.AddJITInfoForRange(*mBufferPositionWhenReceivedJSContext,
mThreadInfo->ThreadId(), aContext, *jitFrameInfo);
mJITFrameInfoForPreviousJSContexts = std::move(jitFrameInfo);
mBufferPositionWhenReceivedJSContext = mozilla::Nothing();
}

@@ -0,0 +1,139 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef ProfiledThreadData_h
#define ProfiledThreadData_h
#include "platform.h"
#include "ProfileBufferEntry.h"
#include "ThreadInfo.h"
#include "ThreadResponsiveness.h"
#include "js/ProfilingStack.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/UniquePtr.h"
class ProfileBuffer;
// This class contains information about a thread that is only relevant while
// the profiler is running, for any threads (both alive and dead) whose thread
// name matches the "thread filter" in the current profiler run.
// ProfiledThreadData objects may be kept alive even after the thread is
// unregistered, as long as there is still data for that thread in the profiler
// buffer.
//
// Accesses to this class are protected by the profiler state lock.
//
// Created as soon as the following are true for the thread:
// - The profiler is running, and
// - the thread matches the profiler's thread filter, and
// - the thread is registered with the profiler.
// So it gets created in response to either (1) the profiler being started (for
// an existing registered thread) or (2) the thread being registered (if the
// profiler is already running).
//
// The thread may be unregistered during the lifetime of ProfiledThreadData.
// If that happens, NotifyUnregistered() is called.
//
// This class is the right place to store buffer positions. Profiler buffer
// positions become invalid if the profiler buffer is destroyed, which happens
// when the profiler is stopped.
class ProfiledThreadData final {
public:
ProfiledThreadData(ThreadInfo* aThreadInfo, nsIEventTarget* aEventTarget,
bool aIncludeResponsiveness);
~ProfiledThreadData();
void NotifyUnregistered(uint64_t aBufferPosition) {
mResponsiveness.reset();
mLastSample = mozilla::Nothing();
MOZ_ASSERT(!mBufferPositionWhenReceivedJSContext,
"JSContext should have been cleared before the thread was "
"unregistered");
mUnregisterTime = mozilla::TimeStamp::Now();
mBufferPositionWhenUnregistered = mozilla::Some(aBufferPosition);
}
mozilla::Maybe<uint64_t> BufferPositionWhenUnregistered() {
return mBufferPositionWhenUnregistered;
}
mozilla::Maybe<uint64_t>& LastSample() { return mLastSample; }
void StreamJSON(const ProfileBuffer& aBuffer, JSContext* aCx,
SpliceableJSONWriter& aWriter, const nsACString& aProcessName,
const mozilla::TimeStamp& aProcessStartTime,
double aSinceTime, bool aJSTracerEnabled);
void StreamTraceLoggerJSON(JSContext* aCx, SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime);
// Returns nullptr if this is not the main thread, if the responsiveness
// feature is not turned on, or if this thread is not being profiled.
ThreadResponsiveness* GetThreadResponsiveness() {
return mResponsiveness.ptrOr(nullptr);
}
const RefPtr<ThreadInfo> Info() const { return mThreadInfo; }
void NotifyReceivedJSContext(uint64_t aCurrentBufferPosition) {
mBufferPositionWhenReceivedJSContext =
mozilla::Some(aCurrentBufferPosition);
}
// Call this method when the JS entries inside the buffer are about to
// become invalid, i.e., just before JS shutdown.
void NotifyAboutToLoseJSContext(JSContext* aCx,
const mozilla::TimeStamp& aProcessStartTime,
ProfileBuffer& aBuffer);
private:
// Group A:
// The following fields are interesting for the entire lifetime of a
// ProfiledThreadData object.
// This thread's thread info.
const RefPtr<ThreadInfo> mThreadInfo;
// Contains JSON for JIT frames from any JSContexts that were used for this
// thread in the past.
// Null if this thread has never lost a JSContext or if all samples from
// previous JSContexts have been evicted from the profiler buffer.
mozilla::UniquePtr<JITFrameInfo> mJITFrameInfoForPreviousJSContexts;
// Group B:
// The following fields are only used while this thread is alive and
// registered. They become Nothing() once the thread is unregistered.
// A helper object that instruments nsIThreads to obtain responsiveness
// information about their event loop.
mozilla::Maybe<ThreadResponsiveness> mResponsiveness;
// When sampling, this holds the position in ActivePS::mBuffer of the most
// recent sample for this thread, or Nothing() if there is no sample for this
// thread in the buffer.
mozilla::Maybe<uint64_t> mLastSample;
// Only non-Nothing() if the thread currently has a JSContext.
mozilla::Maybe<uint64_t> mBufferPositionWhenReceivedJSContext;
// Group C:
// The following fields are only used once this thread has been unregistered.
mozilla::Maybe<uint64_t> mBufferPositionWhenUnregistered;
mozilla::TimeStamp mUnregisterTime;
};
void StreamSamplesAndMarkers(const char* aName, int aThreadId,
const ProfileBuffer& aBuffer,
SpliceableJSONWriter& aWriter,
const nsACString& aProcessName,
const mozilla::TimeStamp& aProcessStartTime,
const mozilla::TimeStamp& aRegisterTime,
const mozilla::TimeStamp& aUnregisterTime,
double aSinceTime, UniqueStacks& aUniqueStacks);
#endif // ProfiledThreadData_h

@@ -0,0 +1,34 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ProfilerBacktrace.h"
#include "ProfileBuffer.h"
#include "ProfiledThreadData.h"
#include "BaseProfileJSONWriter.h"
#include "ThreadInfo.h"
ProfilerBacktrace::ProfilerBacktrace(const char* aName, int aThreadId,
mozilla::UniquePtr<ProfileBuffer> aBuffer)
: mName(strdup(aName)), mThreadId(aThreadId), mBuffer(std::move(aBuffer)) {
MOZ_COUNT_CTOR(ProfilerBacktrace);
}
ProfilerBacktrace::~ProfilerBacktrace() { MOZ_COUNT_DTOR(ProfilerBacktrace); }
void ProfilerBacktrace::StreamJSON(SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
// Unlike ProfiledThreadData::StreamJSON, we don't need to call
// ProfileBuffer::AddJITInfoForRange because mBuffer does not contain any
// JitReturnAddr entries. For synchronous samples, JIT frames get expanded
// at sample time.
StreamSamplesAndMarkers(mName.get(), mThreadId, *mBuffer.get(), aWriter,
NS_LITERAL_CSTRING(""), aProcessStartTime,
/* aRegisterTime */ mozilla::TimeStamp(),
/* aUnregisterTime */ mozilla::TimeStamp(),
/* aSinceTime */ 0, aUniqueStacks);
}

@@ -0,0 +1,47 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef __PROFILER_BACKTRACE_H
#define __PROFILER_BACKTRACE_H
#include "mozilla/UniquePtrExtensions.h"
class ProfileBuffer;
class SpliceableJSONWriter;
class ThreadInfo;
class UniqueStacks;
namespace mozilla {
class TimeStamp;
}
// ProfilerBacktrace encapsulates a synchronous sample.
class ProfilerBacktrace {
public:
ProfilerBacktrace(const char* aName, int aThreadId,
mozilla::UniquePtr<ProfileBuffer> aBuffer);
~ProfilerBacktrace();
// ProfilerBacktraces' stacks are deduplicated in the context of the
// profile that contains the backtrace as a marker payload.
//
// That is, markers that contain backtraces should not need their own stack,
// frame, and string tables. They should instead reuse their parent
// profile's tables.
void StreamJSON(SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks);
private:
ProfilerBacktrace(const ProfilerBacktrace&);
ProfilerBacktrace& operator=(const ProfilerBacktrace&);
mozilla::UniqueFreePtr<char> mName;
int mThreadId;
mozilla::UniquePtr<ProfileBuffer> mBuffer;
};
#endif // __PROFILER_BACKTRACE_H

@@ -0,0 +1,170 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef ProfilerMarker_h
#define ProfilerMarker_h
#include "ProfileBufferEntry.h"
#include "BaseProfileJSONWriter.h"
#include "BaseProfilerMarkerPayload.h"
#include "mozilla/UniquePtrExtensions.h"
template <typename T>
class ProfilerLinkedList;
class ProfilerMarker {
friend class ProfilerLinkedList<ProfilerMarker>;
public:
explicit ProfilerMarker(
const char* aMarkerName, JS::ProfilingCategoryPair aCategoryPair,
int aThreadId,
mozilla::UniquePtr<ProfilerMarkerPayload> aPayload = nullptr,
double aTime = 0)
: mMarkerName(strdup(aMarkerName)),
mPayload(std::move(aPayload)),
mNext{nullptr},
mTime(aTime),
mPositionInBuffer{0},
mThreadId{aThreadId},
mCategoryPair{aCategoryPair} {}
void SetPositionInBuffer(uint64_t aPosition) {
mPositionInBuffer = aPosition;
}
bool HasExpired(uint64_t aBufferRangeStart) const {
return mPositionInBuffer < aBufferRangeStart;
}
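// For example, a marker recorded at buffer position 100 has expired once the
// buffer's range start has advanced past 100, i.e. once the circular buffer
// has overwritten the marker's entries.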
double GetTime() const { return mTime; }
int GetThreadId() const { return mThreadId; }
void StreamJSON(SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
// Schema:
// [name, time, category, data]
aWriter.StartArrayElement();
{
aUniqueStacks.mUniqueStrings->WriteElement(aWriter, mMarkerName.get());
aWriter.DoubleElement(mTime);
const JS::ProfilingCategoryPairInfo& info =
JS::GetProfilingCategoryPairInfo(mCategoryPair);
aWriter.IntElement(unsigned(info.mCategory));
// TODO: Store the callsite for this marker if available:
// if have location data
// b.NameValue(marker, "location", ...);
if (mPayload) {
aWriter.StartObjectElement(SpliceableJSONWriter::SingleLineStyle);
{ mPayload->StreamPayload(aWriter, aProcessStartTime, aUniqueStacks); }
aWriter.EndObject();
}
}
aWriter.EndArray();
}
private:
mozilla::UniqueFreePtr<char> mMarkerName;
mozilla::UniquePtr<ProfilerMarkerPayload> mPayload;
ProfilerMarker* mNext;
double mTime;
uint64_t mPositionInBuffer;
int mThreadId;
JS::ProfilingCategoryPair mCategoryPair;
};
template <typename T>
class ProfilerLinkedList {
public:
ProfilerLinkedList() : mHead(nullptr), mTail(nullptr) {}
void insert(T* aElem) {
if (!mTail) {
mHead = aElem;
mTail = aElem;
} else {
mTail->mNext = aElem;
mTail = aElem;
}
aElem->mNext = nullptr;
}
T* popHead() {
if (!mHead) {
MOZ_ASSERT(false);
return nullptr;
}
T* head = mHead;
mHead = head->mNext;
if (!mHead) {
mTail = nullptr;
}
return head;
}
const T* peek() { return mHead; }
private:
T* mHead;
T* mTail;
};
typedef ProfilerLinkedList<ProfilerMarker> ProfilerMarkerLinkedList;
template <typename T>
class ProfilerSignalSafeLinkedList {
public:
ProfilerSignalSafeLinkedList() : mSignalLock(false) {}
~ProfilerSignalSafeLinkedList() {
if (mSignalLock) {
// Some thread is in the middle of modifying the list, so destroying it now
// would be a race; this destructor must only run on that same thread.
abort();
}
while (mList.peek()) {
delete mList.popHead();
}
}
// Insert an item into the list. Must only be called from the owning thread.
// Must not be called while the list from accessList() is being accessed.
// In the profiler, we ensure that by interrupting the profiled thread
// (which is the one that owns this list and calls insert() on it) until
// we're done reading the list from the signal handler.
void insert(T* aElement) {
MOZ_ASSERT(aElement);
mSignalLock = true;
mList.insert(aElement);
mSignalLock = false;
}
// Called within signal, from any thread, possibly while insert() is in the
// middle of modifying the list (on the owning thread). Will return null if
// that is the case.
// Function must be reentrant.
ProfilerLinkedList<T>* accessList() { return mSignalLock ? nullptr : &mList; }
private:
ProfilerLinkedList<T> mList;
// If this is set, then it's not safe to read the list because its contents
// are being changed.
mozilla::Atomic<bool> mSignalLock;
};
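// Reader-side sketch (illustrative): in the signal handler, take the list
// only when no insert() is in flight; otherwise skip these markers and pick
// them up at the next sample.
inline void ExampleDrainPendingMarkers(
    ProfilerSignalSafeLinkedList<ProfilerMarker>& aPendingMarkers) {
  if (ProfilerLinkedList<ProfilerMarker>* list = aPendingMarkers.accessList()) {
    while (list->peek()) {
      ProfilerMarker* marker = list->popHead();
      // ... store `marker` in the profile buffer, which takes ownership ...
      (void)marker;
    }
  }
}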
#endif // ProfilerMarker_h

@@ -0,0 +1,298 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "BaseProfilerMarkerPayload.h"
#include "BaseProfiler.h"
#include "ProfileBufferEntry.h"
#include "BaseProfileJSONWriter.h"
#include "ProfilerBacktrace.h"
#include "gfxASurface.h"
#include "Layers.h"
#include "mozilla/Maybe.h"
#include "mozilla/net/HttpBaseChannel.h"
#include "mozilla/Sprintf.h"
#include <inttypes.h>
using namespace mozilla;
static MOZ_ALWAYS_INLINE void WriteTime(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
const TimeStamp& aTime,
const char* aName) {
if (!aTime.IsNull()) {
aWriter.DoubleProperty(aName, (aTime - aProcessStartTime).ToMilliseconds());
}
}
void ProfilerMarkerPayload::StreamType(const char* aMarkerType,
SpliceableJSONWriter& aWriter) {
MOZ_ASSERT(aMarkerType);
aWriter.StringProperty("type", aMarkerType);
}
void ProfilerMarkerPayload::StreamCommonProps(
const char* aMarkerType, SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime, UniqueStacks& aUniqueStacks) {
StreamType(aMarkerType, aWriter);
WriteTime(aWriter, aProcessStartTime, mStartTime, "startTime");
WriteTime(aWriter, aProcessStartTime, mEndTime, "endTime");
if (mDocShellId) {
aWriter.StringProperty("docShellId", nsIDToCString(*mDocShellId).get());
}
if (mDocShellHistoryId) {
aWriter.DoubleProperty("docshellHistoryId", mDocShellHistoryId.ref());
}
if (mStack) {
aWriter.StartObjectProperty("stack");
{ mStack->StreamJSON(aWriter, aProcessStartTime, aUniqueStacks); }
aWriter.EndObject();
}
}
void TracingMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamCommonProps("tracing", aWriter, aProcessStartTime, aUniqueStacks);
if (mCategory) {
aWriter.StringProperty("category", mCategory);
}
if (mKind == TRACING_INTERVAL_START) {
aWriter.StringProperty("interval", "start");
} else if (mKind == TRACING_INTERVAL_END) {
aWriter.StringProperty("interval", "end");
}
}
void FileIOMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamCommonProps("FileIO", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("operation", mOperation.get());
aWriter.StringProperty("source", mSource);
if (mFilename) {
aWriter.StringProperty("filename", mFilename.get());
}
}
void UserTimingMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamCommonProps("UserTiming", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("name", NS_ConvertUTF16toUTF8(mName).get());
aWriter.StringProperty("entryType", mEntryType);
if (mStartMark.isSome()) {
aWriter.StringProperty("startMark",
NS_ConvertUTF16toUTF8(mStartMark.value()).get());
} else {
aWriter.NullProperty("startMark");
}
if (mEndMark.isSome()) {
aWriter.StringProperty("endMark",
NS_ConvertUTF16toUTF8(mEndMark.value()).get());
} else {
aWriter.NullProperty("endMark");
}
}
void TextMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamCommonProps("Text", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("name", mText.get());
}
void LogMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamCommonProps("Log", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("name", mText.get());
aWriter.StringProperty("module", mModule.get());
}
void DOMEventMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
TracingMarkerPayload::StreamPayload(aWriter, aProcessStartTime,
aUniqueStacks);
WriteTime(aWriter, aProcessStartTime, mTimeStamp, "timeStamp");
aWriter.StringProperty("eventType", NS_ConvertUTF16toUTF8(mEventType).get());
}
void LayerTranslationMarkerPayload::StreamPayload(
SpliceableJSONWriter& aWriter, const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamType("LayerTranslation", aWriter);
const size_t bufferSize = 32;
char buffer[bufferSize];
SprintfLiteral(buffer, "%p", mLayer);
aWriter.StringProperty("layer", buffer);
aWriter.IntProperty("x", mPoint.x);
aWriter.IntProperty("y", mPoint.y);
}
void VsyncMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamType("VsyncTimestamp", aWriter);
}
static const char* GetNetworkState(NetworkLoadType aType) {
switch (aType) {
case NetworkLoadType::LOAD_START:
return "STATUS_START";
case NetworkLoadType::LOAD_STOP:
return "STATUS_STOP";
case NetworkLoadType::LOAD_REDIRECT:
return "STATUS_REDIRECT";
}
return "";
}
static const char* GetCacheState(
mozilla::net::CacheDisposition aCacheDisposition) {
switch (aCacheDisposition) {
case mozilla::net::kCacheUnresolved:
return "Unresolved";
case mozilla::net::kCacheHit:
return "Hit";
case mozilla::net::kCacheHitViaReval:
return "HitViaReval";
case mozilla::net::kCacheMissedViaReval:
return "MissedViaReval";
case mozilla::net::kCacheMissed:
return "Missed";
case mozilla::net::kCacheUnknown:
default:
return nullptr;
}
}
void NetworkMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamCommonProps("Network", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.IntProperty("id", mID);
const char* typeString = GetNetworkState(mType);
const char* cacheString = GetCacheState(mCacheDisposition);
// TODO: Use aUniqueStacks.mUniqueStrings->WriteElement(aWriter, typeString)
// here, to deduplicate the status string.
aWriter.StringProperty("status", typeString);
if (cacheString) {
aWriter.StringProperty("cache", cacheString);
}
aWriter.IntProperty("pri", mPri);
if (mCount > 0) {
aWriter.IntProperty("count", mCount);
}
if (mURI) {
aWriter.StringProperty("URI", mURI.get());
}
if (mRedirectURI) {
aWriter.StringProperty("RedirectURI", mRedirectURI.get());
}
if (mType != NetworkLoadType::LOAD_START) {
WriteTime(aWriter, aProcessStartTime, mTimings.domainLookupStart,
"domainLookupStart");
WriteTime(aWriter, aProcessStartTime, mTimings.domainLookupEnd,
"domainLookupEnd");
WriteTime(aWriter, aProcessStartTime, mTimings.connectStart,
"connectStart");
WriteTime(aWriter, aProcessStartTime, mTimings.tcpConnectEnd,
"tcpConnectEnd");
WriteTime(aWriter, aProcessStartTime, mTimings.secureConnectionStart,
"secureConnectionStart");
WriteTime(aWriter, aProcessStartTime, mTimings.connectEnd, "connectEnd");
WriteTime(aWriter, aProcessStartTime, mTimings.requestStart,
"requestStart");
WriteTime(aWriter, aProcessStartTime, mTimings.responseStart,
"responseStart");
WriteTime(aWriter, aProcessStartTime, mTimings.responseEnd, "responseEnd");
}
}
void ScreenshotPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamType("CompositorScreenshot", aWriter);
aUniqueStacks.mUniqueStrings->WriteProperty(aWriter, "url",
mScreenshotDataURL.get());
char hexWindowID[32];
SprintfLiteral(hexWindowID, "0x%" PRIXPTR, mWindowIdentifier);
aWriter.StringProperty("windowID", hexWindowID);
aWriter.DoubleProperty("windowWidth", mWindowSize.width);
aWriter.DoubleProperty("windowHeight", mWindowSize.height);
}
void GCSliceMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
MOZ_ASSERT(mTimingJSON);
StreamCommonProps("GCSlice", aWriter, aProcessStartTime, aUniqueStacks);
if (mTimingJSON) {
aWriter.SplicedJSONProperty("timings", mTimingJSON.get());
} else {
aWriter.NullProperty("timings");
}
}
void GCMajorMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
MOZ_ASSERT(mTimingJSON);
StreamCommonProps("GCMajor", aWriter, aProcessStartTime, aUniqueStacks);
if (mTimingJSON) {
aWriter.SplicedJSONProperty("timings", mTimingJSON.get());
} else {
aWriter.NullProperty("timings");
}
}
void GCMinorMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
MOZ_ASSERT(mTimingData);
StreamCommonProps("GCMinor", aWriter, aProcessStartTime, aUniqueStacks);
if (mTimingData) {
aWriter.SplicedJSONProperty("nursery", mTimingData.get());
} else {
aWriter.NullProperty("nursery");
}
}
void HangMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamCommonProps("BHR-detected hang", aWriter, aProcessStartTime,
aUniqueStacks);
}
void StyleMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamCommonProps("Styles", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("category", "Paint");
aWriter.IntProperty("elementsTraversed", mStats.mElementsTraversed);
aWriter.IntProperty("elementsStyled", mStats.mElementsStyled);
aWriter.IntProperty("elementsMatched", mStats.mElementsMatched);
aWriter.IntProperty("stylesShared", mStats.mStylesShared);
aWriter.IntProperty("stylesReused", mStats.mStylesReused);
}
void LongTaskMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamCommonProps("MainThreadLongTask", aWriter, aProcessStartTime,
aUniqueStacks);
aWriter.StringProperty("category", "LongTask");
}

@@ -0,0 +1,573 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "vm/GeckoProfiler-inl.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Sprintf.h"
#include "jsnum.h"
#include "gc/GC.h"
#include "gc/PublicIterators.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineJIT.h"
#include "jit/JitcodeMap.h"
#include "jit/JitFrames.h"
#include "jit/JitRealm.h"
#include "jit/JSJitFrameIter.h"
#include "js/TraceLoggerAPI.h"
#include "util/StringBuffer.h"
#include "vm/JSScript.h"
#include "gc/Marking-inl.h"
#include "vm/JSScript-inl.h"
using namespace js;
using mozilla::DebugOnly;
GeckoProfilerThread::GeckoProfilerThread()
: profilingStack_(nullptr), profilingStackIfEnabled_(nullptr) {}
GeckoProfilerRuntime::GeckoProfilerRuntime(JSRuntime* rt)
: rt(rt),
strings_(),
slowAssertions(false),
enabled_(false),
eventMarker_(nullptr) {
MOZ_ASSERT(rt != nullptr);
}
void GeckoProfilerThread::setProfilingStack(ProfilingStack* profilingStack,
bool enabled) {
profilingStack_ = profilingStack;
profilingStackIfEnabled_ = enabled ? profilingStack : nullptr;
}
void GeckoProfilerRuntime::setEventMarker(void (*fn)(const char*)) {
eventMarker_ = fn;
}
// Get a pointer to the top-most profiling frame, given the exit frame pointer.
static void* GetTopProfilingJitFrame(Activation* act) {
if (!act || !act->isJit()) {
return nullptr;
}
jit::JitActivation* jitActivation = act->asJit();
// If there is no exit frame set, just return.
if (!jitActivation->hasExitFP()) {
return nullptr;
}
// Skip wasm frames that might be in the way.
OnlyJSJitFrameIter iter(jitActivation);
if (iter.done()) {
return nullptr;
}
jit::JSJitProfilingFrameIterator jitIter(
(jit::CommonFrameLayout*)iter.frame().fp());
MOZ_ASSERT(!jitIter.done());
return jitIter.fp();
}
void GeckoProfilerRuntime::enable(bool enabled) {
JSContext* cx = rt->mainContextFromAnyThread();
MOZ_ASSERT(cx->geckoProfiler().infraInstalled());
if (enabled_ == enabled) {
return;
}
/*
* Ensure all future generated code will be instrumented, or that all
* currently instrumented code is discarded
*/
ReleaseAllJITCode(rt->defaultFreeOp());
// This function is called when the Gecko profiler makes a new Sampler
// (and thus, a new circular buffer). Set all current entries in the
// JitcodeGlobalTable as expired and reset the buffer range start.
if (rt->hasJitRuntime() && rt->jitRuntime()->hasJitcodeGlobalTable()) {
rt->jitRuntime()->getJitcodeGlobalTable()->setAllEntriesAsExpired();
}
rt->setProfilerSampleBufferRangeStart(0);
// Ensure that lastProfilingFrame is null for the main thread.
if (cx->jitActivation) {
cx->jitActivation->setLastProfilingFrame(nullptr);
cx->jitActivation->setLastProfilingCallSite(nullptr);
}
// Reset the tracelogger, if toggled on
JS::ResetTraceLogger();
enabled_ = enabled;
/* Toggle Gecko Profiler-related jumps on baseline jitcode.
* The call to |ReleaseAllJITCode| above will release most baseline jitcode,
* but not jitcode for scripts with active frames on the stack. These scripts
* need to have their profiler state toggled so they behave properly.
*/
jit::ToggleBaselineProfiling(rt, enabled);
// Update lastProfilingFrame to point to the top-most JS jit-frame currently
// on stack.
if (cx->jitActivation) {
// Walk through all activations, and set their lastProfilingFrame
// appropriately.
if (enabled) {
Activation* act = cx->activation();
void* lastProfilingFrame = GetTopProfilingJitFrame(act);
jit::JitActivation* jitActivation = cx->jitActivation;
while (jitActivation) {
jitActivation->setLastProfilingFrame(lastProfilingFrame);
jitActivation->setLastProfilingCallSite(nullptr);
jitActivation = jitActivation->prevJitActivation();
lastProfilingFrame = GetTopProfilingJitFrame(jitActivation);
}
} else {
jit::JitActivation* jitActivation = cx->jitActivation;
while (jitActivation) {
jitActivation->setLastProfilingFrame(nullptr);
jitActivation->setLastProfilingCallSite(nullptr);
jitActivation = jitActivation->prevJitActivation();
}
}
}
// WebAssembly code does not need to be released, but profiling string
// labels have to be generated so that they are available during async
// profiling stack iteration.
for (RealmsIter r(rt); !r.done(); r.next()) {
r->wasm.ensureProfilingLabels(enabled);
}
#ifdef JS_STRUCTURED_SPEW
// Enable the structured spewer if the environment variable is set.
if (enabled) {
cx->spewer().enableSpewing();
} else {
cx->spewer().disableSpewing();
}
#endif
}
/* Look up the string for the function/script, creating one if necessary */
const char* GeckoProfilerRuntime::profileString(JSContext* cx,
JSScript* script) {
ProfileStringMap::AddPtr s = strings().lookupForAdd(script);
if (!s) {
UniqueChars str = allocProfileString(cx, script);
if (!str) {
return nullptr;
}
if (!strings().add(s, script, std::move(str))) {
ReportOutOfMemory(cx);
return nullptr;
}
}
return s->value().get();
}
void GeckoProfilerRuntime::onScriptFinalized(JSScript* script) {
/*
* This function is called whenever a script is destroyed, regardless of
* whether profiling has been turned on, so don't invoke a function on an
* invalid hash set. Also, even if profiling was enabled but then turned
* off, we still want to remove the string, so no check of enabled() is
* done.
*/
if (ProfileStringMap::Ptr entry = strings().lookup(script)) {
strings().remove(entry);
}
}
void GeckoProfilerRuntime::markEvent(const char* event) {
MOZ_ASSERT(enabled());
if (eventMarker_) {
JS::AutoSuppressGCAnalysis nogc;
eventMarker_(event);
}
}
bool GeckoProfilerThread::enter(JSContext* cx, JSScript* script) {
const char* dynamicString =
cx->runtime()->geckoProfiler().profileString(cx, script);
if (dynamicString == nullptr) {
return false;
}
#ifdef DEBUG
// In debug builds, assert the JS profiling stack frames already on the
// stack have a non-null pc. Only look at the top frames to avoid quadratic
// behavior.
uint32_t sp = profilingStack_->stackPointer;
if (sp > 0 && sp - 1 < profilingStack_->stackCapacity()) {
size_t start = (sp > 4) ? sp - 4 : 0;
for (size_t i = start; i < sp - 1; i++) {
MOZ_ASSERT_IF(profilingStack_->frames[i].isJsFrame(),
profilingStack_->frames[i].pc());
}
}
#endif
profilingStack_->pushJsFrame("", dynamicString, script, script->code());
return true;
}
void GeckoProfilerThread::exit(JSContext* cx, JSScript* script) {
profilingStack_->pop();
#ifdef DEBUG
/* Sanity check to make sure push/pop balanced */
uint32_t sp = profilingStack_->stackPointer;
if (sp < profilingStack_->stackCapacity()) {
JSRuntime* rt = script->runtimeFromMainThread();
const char* dynamicString = rt->geckoProfiler().profileString(cx, script);
/* Can't fail lookup because we should already be in the set */
MOZ_ASSERT(dynamicString);
// Bug 822041
if (!profilingStack_->frames[sp].isJsFrame()) {
fprintf(stderr, "--- ABOUT TO FAIL ASSERTION ---\n");
fprintf(stderr, " frames=%p size=%u/%u\n", (void*)profilingStack_->frames,
uint32_t(profilingStack_->stackPointer),
profilingStack_->stackCapacity());
for (int32_t i = sp; i >= 0; i--) {
ProfilingStackFrame& frame = profilingStack_->frames[i];
if (frame.isJsFrame()) {
fprintf(stderr, " [%d] JS %s\n", i, frame.dynamicString());
} else {
fprintf(stderr, " [%d] Label %s\n", i, frame.dynamicString());
}
}
}
ProfilingStackFrame& frame = profilingStack_->frames[sp];
MOZ_ASSERT(frame.isJsFrame());
MOZ_ASSERT(frame.script() == script);
MOZ_ASSERT(strcmp((const char*)frame.dynamicString(), dynamicString) == 0);
}
#endif
}
/*
 * Serializes the script/function pair into a "descriptive string", which is
 * allowed to fail. This function must not trigger a GC, because a GC could
 * finalize some scripts, resize the hash table of profile strings, and
 * invalidate the AddPtr held while allocProfileString is being invoked.
 */
/* static */
UniqueChars GeckoProfilerRuntime::allocProfileString(JSContext* cx,
JSScript* script) {
// Note: this profiler string is regexp-matched by
// devtools/client/profiler/cleopatra/js/parserWorker.js.
// If the script has a function, try calculating its name.
bool hasName = false;
size_t nameLength = 0;
UniqueChars nameStr;
JSFunction* func = script->functionDelazifying();
if (func && func->displayAtom()) {
nameStr = StringToNewUTF8CharsZ(cx, *func->displayAtom());
if (!nameStr) {
return nullptr;
}
nameLength = strlen(nameStr.get());
hasName = true;
}
// Calculate filename length.
const char* filenameStr = script->filename() ? script->filename() : "(null)";
size_t filenameLength = strlen(filenameStr);
// Calculate line + column length.
bool hasLineAndColumn = false;
size_t lineAndColumnLength = 0;
char lineAndColumnStr[30];
if (hasName || script->functionNonDelazifying() || script->isForEval()) {
lineAndColumnLength = SprintfLiteral(lineAndColumnStr, "%u:%u",
script->lineno(), script->column());
hasLineAndColumn = true;
}
// Full profile string for scripts with functions is:
// FuncName (FileName:Lineno:Column)
// Full profile string for scripts without functions is:
// FileName:Lineno:Column
// Full profile string for scripts without functions and without lines is:
// FileName
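//
// For example (illustrative): a function `greet` defined at line 10, column 2
// of "app.js" yields "greet (app.js:10:2)"; a plain top-level script yields
// just "app.js".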
// Calculate full string length.
size_t fullLength = 0;
if (hasName) {
MOZ_ASSERT(hasLineAndColumn);
fullLength = nameLength + 2 + filenameLength + 1 + lineAndColumnLength + 1;
} else if (hasLineAndColumn) {
fullLength = filenameLength + 1 + lineAndColumnLength;
} else {
fullLength = filenameLength;
}
// Allocate string.
UniqueChars str(cx->pod_malloc<char>(fullLength + 1));
if (!str) {
return nullptr;
}
size_t cur = 0;
// Fill string with function name if needed.
if (hasName) {
memcpy(str.get() + cur, nameStr.get(), nameLength);
cur += nameLength;
str[cur++] = ' ';
str[cur++] = '(';
}
// Fill string with filename chars.
memcpy(str.get() + cur, filenameStr, filenameLength);
cur += filenameLength;
// Fill line + column chars.
if (hasLineAndColumn) {
str[cur++] = ':';
memcpy(str.get() + cur, lineAndColumnStr, lineAndColumnLength);
cur += lineAndColumnLength;
}
// Terminal ')' if necessary.
if (hasName) {
str[cur++] = ')';
}
MOZ_ASSERT(cur == fullLength);
str[cur] = 0;
return str;
}
void GeckoProfilerThread::trace(JSTracer* trc) {
if (profilingStack_) {
size_t size = profilingStack_->stackSize();
for (size_t i = 0; i < size; i++) {
profilingStack_->frames[i].trace(trc);
}
}
}
void GeckoProfilerRuntime::fixupStringsMapAfterMovingGC() {
for (ProfileStringMap::Enum e(strings()); !e.empty(); e.popFront()) {
JSScript* script = e.front().key();
if (IsForwarded(script)) {
script = Forwarded(script);
e.rekeyFront(script);
}
}
}
#ifdef JSGC_HASH_TABLE_CHECKS
void GeckoProfilerRuntime::checkStringsMapAfterMovingGC() {
for (auto r = strings().all(); !r.empty(); r.popFront()) {
JSScript* script = r.front().key();
CheckGCThingAfterMovingGC(script);
auto ptr = strings().lookup(script);
MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
}
}
#endif
void ProfilingStackFrame::trace(JSTracer* trc) {
if (isJsFrame()) {
JSScript* s = rawScript();
TraceNullableRoot(trc, &s, "ProfilingStackFrame script");
spOrScript = s;
}
}
GeckoProfilerBaselineOSRMarker::GeckoProfilerBaselineOSRMarker(
JSContext* cx,
bool hasProfilerFrame MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
: profiler(&cx->geckoProfiler()) {
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
if (!hasProfilerFrame || !cx->runtime()->geckoProfiler().enabled()) {
profiler = nullptr;
return;
}
uint32_t sp = profiler->profilingStack_->stackPointer;
if (sp >= profiler->profilingStack_->stackCapacity()) {
profiler = nullptr;
return;
}
spBefore_ = sp;
if (sp == 0) {
return;
}
ProfilingStackFrame& frame = profiler->profilingStack_->frames[sp - 1];
MOZ_ASSERT(!frame.isOSRFrame());
frame.setIsOSRFrame(true);
}
GeckoProfilerBaselineOSRMarker::~GeckoProfilerBaselineOSRMarker() {
if (profiler == nullptr) {
return;
}
uint32_t sp = profiler->stackPointer();
MOZ_ASSERT(spBefore_ == sp);
if (sp == 0) {
return;
}
ProfilingStackFrame& frame = profiler->stack()[sp - 1];
MOZ_ASSERT(frame.isOSRFrame());
frame.setIsOSRFrame(false);
}
JS_PUBLIC_API JSScript* ProfilingStackFrame::script() const {
MOZ_ASSERT(isJsFrame());
auto script = reinterpret_cast<JSScript*>(spOrScript.operator void*());
if (!script) {
return nullptr;
}
// If profiling is suppressed then we can't trust the script pointers to be
// valid as they could be in the process of being moved by a compacting GC
// (although it's still OK to get the runtime from them).
JSContext* cx = script->runtimeFromAnyThread()->mainContextFromAnyThread();
if (!cx->isProfilerSamplingEnabled()) {
return nullptr;
}
MOZ_ASSERT(!IsForwarded(script));
return script;
}
JS_FRIEND_API jsbytecode* ProfilingStackFrame::pc() const {
MOZ_ASSERT(isJsFrame());
if (pcOffsetIfJS_ == NullPCOffset) {
return nullptr;
}
JSScript* script = this->script();
return script ? script->offsetToPC(pcOffsetIfJS_) : nullptr;
}
/* static */
int32_t ProfilingStackFrame::pcToOffset(JSScript* aScript, jsbytecode* aPc) {
return aPc ? aScript->pcToOffset(aPc) : NullPCOffset;
}
void ProfilingStackFrame::setPC(jsbytecode* pc) {
MOZ_ASSERT(isJsFrame());
JSScript* script = this->script();
MOZ_ASSERT(
script); // This should not be called while profiling is suppressed.
pcOffsetIfJS_ = pcToOffset(script, pc);
}
JS_FRIEND_API void js::SetContextProfilingStack(
JSContext* cx, ProfilingStack* profilingStack) {
cx->geckoProfiler().setProfilingStack(
profilingStack, cx->runtime()->geckoProfiler().enabled());
}
JS_FRIEND_API void js::EnableContextProfilingStack(JSContext* cx,
bool enabled) {
cx->geckoProfiler().enable(enabled);
cx->runtime()->geckoProfiler().enable(enabled);
}
JS_FRIEND_API void js::RegisterContextProfilingEventMarker(
JSContext* cx, void (*fn)(const char*)) {
MOZ_ASSERT(cx->runtime()->geckoProfiler().enabled());
cx->runtime()->geckoProfiler().setEventMarker(fn);
}
AutoSuppressProfilerSampling::AutoSuppressProfilerSampling(
JSContext* cx MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
: cx_(cx), previouslyEnabled_(cx->isProfilerSamplingEnabled()) {
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
if (previouslyEnabled_) {
cx_->disableProfilerSampling();
}
}
AutoSuppressProfilerSampling::~AutoSuppressProfilerSampling() {
if (previouslyEnabled_) {
cx_->enableProfilerSampling();
}
}
namespace JS {
// clang-format off
// ProfilingSubcategory_X:
// One enum for each category X, listing that category's subcategories. This
// allows the sProfilingCategoryInfo macro construction below to look up a
// per-category index for a subcategory.
#define SUBCATEGORY_ENUMS_BEGIN_CATEGORY(name, labelAsString, color) \
enum class ProfilingSubcategory_##name : uint32_t {
#define SUBCATEGORY_ENUMS_SUBCATEGORY(category, name, labelAsString) \
name,
#define SUBCATEGORY_ENUMS_END_CATEGORY \
};
PROFILING_CATEGORY_LIST(SUBCATEGORY_ENUMS_BEGIN_CATEGORY,
SUBCATEGORY_ENUMS_SUBCATEGORY,
SUBCATEGORY_ENUMS_END_CATEGORY)
#undef SUBCATEGORY_ENUMS_BEGIN_CATEGORY
#undef SUBCATEGORY_ENUMS_SUBCATEGORY
#undef SUBCATEGORY_ENUMS_END_CATEGORY
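// For example, a (hypothetical) category list entry
//   BEGIN_CATEGORY(LAYOUT, "Layout", "purple")
//     SUBCATEGORY(LAYOUT, Reflow, "Reflow")
//   END_CATEGORY
// expands through the macros above to:
//   enum class ProfilingSubcategory_LAYOUT : uint32_t { Reflow, };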
// sProfilingCategoryPairInfo:
// A list of ProfilingCategoryPairInfos with the same order as
// ProfilingCategoryPair, which can be used to map a ProfilingCategoryPair to
// its information.
#define CATEGORY_INFO_BEGIN_CATEGORY(name, labelAsString, color)
#define CATEGORY_INFO_SUBCATEGORY(category, name, labelAsString) \
{ProfilingCategory::category, \
uint32_t(ProfilingSubcategory_##category::name), labelAsString},
#define CATEGORY_INFO_END_CATEGORY
const ProfilingCategoryPairInfo sProfilingCategoryPairInfo[] = {
PROFILING_CATEGORY_LIST(CATEGORY_INFO_BEGIN_CATEGORY,
CATEGORY_INFO_SUBCATEGORY,
CATEGORY_INFO_END_CATEGORY)
};
#undef CATEGORY_INFO_BEGIN_CATEGORY
#undef CATEGORY_INFO_SUBCATEGORY
#undef CATEGORY_INFO_END_CATEGORY
// clang-format on
JS_FRIEND_API const ProfilingCategoryPairInfo& GetProfilingCategoryPairInfo(
ProfilingCategoryPair aCategoryPair) {
static_assert(
MOZ_ARRAY_LENGTH(sProfilingCategoryPairInfo) ==
uint32_t(ProfilingCategoryPair::COUNT),
"sProfilingCategoryPairInfo and ProfilingCategory need to have the "
"same order and the same length");
uint32_t categoryPairIndex = uint32_t(aCategoryPair);
MOZ_RELEASE_ASSERT(categoryPairIndex <=
uint32_t(ProfilingCategoryPair::LAST));
return sProfilingCategoryPairInfo[categoryPairIndex];
}
} // namespace JS

@@ -0,0 +1,46 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "js/ProfilingStack.h"
#include "mozilla/IntegerRange.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/UniquePtrExtensions.h"
#include <algorithm>
using namespace js;
ProfilingStack::~ProfilingStack() {
// The label macros keep a reference to the ProfilingStack to avoid a TLS
// access. If those references are somehow not all cleared, we would get a
// use-after-free, so it is better to crash now.
MOZ_RELEASE_ASSERT(stackPointer == 0);
delete[] frames;
}
void ProfilingStack::ensureCapacitySlow() {
MOZ_ASSERT(stackPointer >= capacity);
const uint32_t kInitialCapacity = 128;
uint32_t sp = stackPointer;
auto newCapacity =
std::max(sp + 1, capacity ? capacity * 2 : kInitialCapacity);
auto* newFrames = new js::ProfilingStackFrame[newCapacity];
// It's important that `frames` / `capacity` / `stackPointer` remain
// consistent here at all times: the sampler may interrupt this thread and
// read them at any point during the resize.
for (auto i : mozilla::IntegerRange(capacity)) {
newFrames[i] = frames[i];
}
js::ProfilingStackFrame* oldFrames = frames;
frames = newFrames;
capacity = newCapacity;
delete[] oldFrames;
}

@@ -0,0 +1,43 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "RegisteredThread.h"
RegisteredThread::RegisteredThread(ThreadInfo* aInfo, nsIEventTarget* aThread,
void* aStackTop)
: mRacyRegisteredThread(aInfo->ThreadId()),
mPlatformData(AllocPlatformData(aInfo->ThreadId())),
mStackTop(aStackTop),
mThreadInfo(aInfo),
mThread(aThread),
mContext(nullptr),
mJSSampling(INACTIVE),
mJSFlags(0) {
MOZ_COUNT_CTOR(RegisteredThread);
// On Mac we don't have to guess the stack top; the system can tell us.
#if defined(GP_OS_darwin)
pthread_t self = pthread_self();
mStackTop = pthread_get_stackaddr_np(self);
#endif
}
RegisteredThread::~RegisteredThread() { MOZ_COUNT_DTOR(RegisteredThread); }
size_t RegisteredThread::SizeOfIncludingThis(
mozilla::MallocSizeOf aMallocSizeOf) const {
size_t n = aMallocSizeOf(this);
// Measurement of the following members may be added later if DMD finds it
// is worthwhile:
// - mPlatformData
// - mRacyRegisteredThread.mPendingMarkers
//
// The following members are not measured:
// - mThreadInfo: because it is non-owning
return n;
}

@@ -0,0 +1,337 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef RegisteredThread_h
#define RegisteredThread_h
#include "platform.h"
#include "ProfilerMarker.h"
#include "BaseProfilerMarkerPayload.h"
#include "ThreadInfo.h"
#include "js/TraceLoggerAPI.h"
#include "jsapi.h"
#include "mozilla/UniquePtr.h"
#include "nsIEventTarget.h"
// This class contains the state for a single thread that is accessible without
// protection from gPSMutex in platform.cpp. Because there is no external
// protection against data races, it must provide internal protection. Hence
// the "Racy" prefix.
//
class RacyRegisteredThread final {
public:
explicit RacyRegisteredThread(int aThreadId)
: mThreadId(aThreadId), mSleep(AWAKE), mIsBeingProfiled(false) {
MOZ_COUNT_CTOR(RacyRegisteredThread);
}
~RacyRegisteredThread() { MOZ_COUNT_DTOR(RacyRegisteredThread); }
void SetIsBeingProfiled(bool aIsBeingProfiled) {
mIsBeingProfiled = aIsBeingProfiled;
}
bool IsBeingProfiled() const { return mIsBeingProfiled; }
void AddPendingMarker(const char* aMarkerName,
JS::ProfilingCategoryPair aCategoryPair,
mozilla::UniquePtr<ProfilerMarkerPayload> aPayload,
double aTime) {
// Note: We don't assert on mIsBeingProfiled, because it could have changed
// between the check in the caller and now.
ProfilerMarker* marker = new ProfilerMarker(
aMarkerName, aCategoryPair, mThreadId, std::move(aPayload), aTime);
mPendingMarkers.insert(marker);
}
// Called within signal. Function must be reentrant.
ProfilerMarkerLinkedList* GetPendingMarkers() {
// The profiled thread is interrupted, so we can access the list safely.
// Unless the profiled thread was in the middle of changing the list when
// we interrupted it - in that case, accessList() will return null.
return mPendingMarkers.accessList();
}
// This is called on every profiler restart. Put things that should happen at
// that time here.
void ReinitializeOnResume() {
// This is needed to cause an initial sample to be taken from sleeping
// threads that had been observed prior to the profiler stopping and
// restarting. Otherwise sleeping threads would not have any samples to
// copy forward while sleeping.
(void)mSleep.compareExchange(SLEEPING_OBSERVED, SLEEPING_NOT_OBSERVED);
}
// This returns true for the second and subsequent calls in each sleep cycle.
bool CanDuplicateLastSampleDueToSleep() {
if (mSleep == AWAKE) {
return false;
}
if (mSleep.compareExchange(SLEEPING_NOT_OBSERVED, SLEEPING_OBSERVED)) {
return false;
}
return true;
}
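// Illustrative only (a hypothetical sequence of events, not code from this
// file): one sleep cycle, visited twice by the sampler, plays out as:
//
//   SetSleeping();                       // mSleep = SLEEPING_NOT_OBSERVED
//   CanDuplicateLastSampleDueToSleep();  // false; mSleep -> SLEEPING_OBSERVED
//   CanDuplicateLastSampleDueToSleep();  // true; the last sample can be reused
//   SetAwake();                          // mSleep = AWAKE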
// Call this whenever the current thread sleeps. Calling it twice in a row
// without an intervening SetAwake() call is an error.
void SetSleeping() {
MOZ_ASSERT(mSleep == AWAKE);
mSleep = SLEEPING_NOT_OBSERVED;
}
// Call this whenever the current thread wakes. Calling it twice in a row
// without an intervening SetSleeping() call is an error.
void SetAwake() {
MOZ_ASSERT(mSleep != AWAKE);
mSleep = AWAKE;
}
bool IsSleeping() { return mSleep != AWAKE; }
int ThreadId() const { return mThreadId; }
class ProfilingStack& ProfilingStack() {
return mProfilingStack;
}
const class ProfilingStack& ProfilingStack() const { return mProfilingStack; }
private:
class ProfilingStack mProfilingStack;
// A list of pending markers that must be moved to the circular buffer.
ProfilerSignalSafeLinkedList<ProfilerMarker> mPendingMarkers;
// mThreadId contains the thread ID of the current thread. It is safe to read
// this from multiple threads concurrently, as it will never be mutated.
const int mThreadId;
// mSleep tracks whether the thread is sleeping, and if so, whether it has
// been previously observed. This is used for an optimization: in some cases,
// when a thread is asleep, we duplicate the previous sample, which is
// cheaper than taking a new sample.
//
// mSleep is atomic because it is accessed from multiple threads.
//
// - It is written only by this thread, via SetSleeping() and SetAwake().
//
// - It is read by SamplerThread::Run().
//
// There are two cases where racing between threads can cause an issue.
//
// - If CanDuplicateLastSampleDueToSleep() returns false but that result is
// invalidated before being acted upon, we will take a full sample
// unnecessarily. This is additional work but won't cause any correctness
// issues. (In fact, this case is impossible: going from
// CanDuplicateLastSampleDueToSleep() returning false to it returning true
// requires an intermediate call to it, which is what moves mSleep from
// SLEEPING_NOT_OBSERVED to SLEEPING_OBSERVED.)
//
// - If CanDuplicateLastSampleDueToSleep() returns true but that result is
// invalidated before being acted upon -- i.e. the thread wakes up before
// DuplicateLastSample() is called -- we will duplicate the previous
// sample. This is inaccurate, but only slightly... we will effectively
// treat the thread as having slept a tiny bit longer than it really did.
//
// This latter inaccuracy could be avoided by moving the
// CanDuplicateLastSampleDueToSleep() check within the thread-freezing code,
// e.g. the section where Tick() is called. But that would reduce the
// effectiveness of the optimization because more code would have to be run
// before we can tell that duplication is allowed.
//
static const int AWAKE = 0;
static const int SLEEPING_NOT_OBSERVED = 1;
static const int SLEEPING_OBSERVED = 2;
mozilla::Atomic<int> mSleep;
// Is this thread being profiled? (e.g., should markers be recorded?)
// Accesses to this atomic are not recorded by web replay as they may occur
// at non-deterministic points.
mozilla::Atomic<bool, mozilla::MemoryOrdering::Relaxed,
mozilla::recordreplay::Behavior::DontPreserve>
mIsBeingProfiled;
};
// This class contains information that's relevant to a single thread only
// while that thread is running and registered with the profiler, but
// regardless of whether the profiler is running. All accesses to it are
// protected by the profiler state lock.
class RegisteredThread final {
public:
RegisteredThread(ThreadInfo* aInfo, nsIEventTarget* aThread, void* aStackTop);
~RegisteredThread();
class RacyRegisteredThread& RacyRegisteredThread() {
return mRacyRegisteredThread;
}
const class RacyRegisteredThread& RacyRegisteredThread() const {
return mRacyRegisteredThread;
}
PlatformData* GetPlatformData() const { return mPlatformData.get(); }
const void* StackTop() const { return mStackTop; }
size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
// Set the JSContext of the thread to be sampled. Sampling cannot begin until
// this has been set.
void SetJSContext(JSContext* aContext) {
// This function runs on-thread.
MOZ_ASSERT(aContext && !mContext);
mContext = aContext;
// We give the JS engine a non-owning reference to the ProfilingStack. It's
// important that the JS engine doesn't touch this once the thread dies.
js::SetContextProfilingStack(aContext,
&RacyRegisteredThread().ProfilingStack());
}
void ClearJSContext() {
// This function runs on-thread.
mContext = nullptr;
}
JSContext* GetJSContext() const { return mContext; }
const RefPtr<ThreadInfo> Info() const { return mThreadInfo; }
const nsCOMPtr<nsIEventTarget> GetEventTarget() const { return mThread; }
// Request that this thread start JS sampling. JS sampling won't actually
// start until a subsequent PollJSSampling() call occurs *and* mContext has
// been set.
void StartJSSampling(uint32_t aJSFlags) {
// This function runs on-thread or off-thread.
MOZ_RELEASE_ASSERT(mJSSampling == INACTIVE ||
mJSSampling == INACTIVE_REQUESTED);
mJSSampling = ACTIVE_REQUESTED;
mJSFlags = aJSFlags;
}
// Request that this thread stop JS sampling. JS sampling won't actually stop
// until a subsequent PollJSSampling() call occurs.
void StopJSSampling() {
// This function runs on-thread or off-thread.
MOZ_RELEASE_ASSERT(mJSSampling == ACTIVE ||
mJSSampling == ACTIVE_REQUESTED);
mJSSampling = INACTIVE_REQUESTED;
}
// Poll to see if JS sampling should be started/stopped.
void PollJSSampling() {
// This function runs on-thread.
// We can't start/stop profiling until we have the thread's JSContext.
if (mContext) {
// It is possible for mJSSampling to go through the following sequences.
//
// - INACTIVE, ACTIVE_REQUESTED, INACTIVE_REQUESTED, INACTIVE
//
// - ACTIVE, INACTIVE_REQUESTED, ACTIVE_REQUESTED, ACTIVE
//
// Therefore, the if and else branches here aren't always interleaved.
// This is ok because the JS engine can handle that.
//
if (mJSSampling == ACTIVE_REQUESTED) {
mJSSampling = ACTIVE;
js::EnableContextProfilingStack(mContext, true);
JS_SetGlobalJitCompilerOption(mContext,
JSJITCOMPILER_TRACK_OPTIMIZATIONS,
TrackOptimizationsEnabled());
if (JSTracerEnabled()) {
JS::StartTraceLogger(mContext);
}
js::RegisterContextProfilingEventMarker(mContext,
profiler_add_js_marker);
} else if (mJSSampling == INACTIVE_REQUESTED) {
mJSSampling = INACTIVE;
js::EnableContextProfilingStack(mContext, false);
if (JSTracerEnabled()) {
JS::StopTraceLogger(mContext);
}
}
}
}
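// As a sketch (hypothetical caller; requires mContext to be set, see
// PollJSSampling above): an on-thread request can take effect immediately by
// pairing the two steps:
//
//   registeredThread.StartJSSampling(aJSFlags);  // INACTIVE -> ACTIVE_REQUESTED
//   registeredThread.PollJSSampling();           // ACTIVE_REQUESTED -> ACTIVE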
private:
class RacyRegisteredThread mRacyRegisteredThread;
const UniquePlatformData mPlatformData;
const void* mStackTop;
const RefPtr<ThreadInfo> mThreadInfo;
const nsCOMPtr<nsIEventTarget> mThread;
// If this is a JS thread, this is its JSContext, which is required for any
// JS sampling.
JSContext* mContext;
// The profiler needs to start and stop JS sampling of JS threads at various
// times. However, the JS engine can only do the required actions on the
// JS thread itself ("on-thread"), not from another thread ("off-thread").
// Therefore, we have the following two-step process.
//
// - The profiler requests (on-thread or off-thread) that the JS sampling be
// started/stopped, by changing mJSSampling to the appropriate REQUESTED
// state.
//
// - The relevant JS thread polls (on-thread) for changes to mJSSampling.
// When it sees a REQUESTED state, it performs the appropriate actions to
// actually start/stop JS sampling, and changes mJSSampling out of the
// REQUESTED state.
//
// The state machine is as follows.
//
// INACTIVE --> ACTIVE_REQUESTED
// ^ ^ |
// | _/ |
// | _/ |
// | / |
// | v v
// INACTIVE_REQUESTED <-- ACTIVE
//
// The polling is done in the following two ways.
//
// - Via the interrupt callback mechanism; the JS thread must call
// profiler_js_interrupt_callback() from its own interrupt callback.
// This is how sampling must be started/stopped for threads where the
// request was made off-thread.
//
// - When {Start,Stop}JSSampling() is called on-thread, we can immediately
// follow it with a PollJSSampling() call to avoid the delay between the
// two steps. Likewise, SetJSContext() calls PollJSSampling().
//
// One non-obvious thing about all this: these JS sampling requests are made
// on all threads, even non-JS threads. mContext needs to also be set (via
// SetJSContext(), which can only happen for JS threads) for any JS sampling
// to actually happen.
//
enum {
INACTIVE = 0,
ACTIVE_REQUESTED = 1,
ACTIVE = 2,
INACTIVE_REQUESTED = 3,
} mJSSampling;
uint32_t mJSFlags;
bool TrackOptimizationsEnabled() {
return mJSFlags & uint32_t(JSSamplingFlags::TrackOptimizations);
}
bool JSTracerEnabled() {
return mJSFlags & uint32_t(JSSamplingFlags::TraceLogging);
}
};
#endif // RegisteredThread_h

@@ -0,0 +1,48 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef ThreadInfo_h
#define ThreadInfo_h
#include "mozilla/TimeStamp.h"
#include "nsISupportsImpl.h"
#include "nsString.h"
// This class contains information about a thread which needs to be stored
// across restarts of the profiler and which can be useful even after the
// thread has stopped running.
// It uses threadsafe refcounting and only contains immutable data.
class ThreadInfo final {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ThreadInfo)
ThreadInfo(
const char* aName, int aThreadId, bool aIsMainThread,
const mozilla::TimeStamp& aRegisterTime = mozilla::TimeStamp::Now())
: mName(aName),
mRegisterTime(aRegisterTime),
mThreadId(aThreadId),
mIsMainThread(aIsMainThread) {
// It's not clear that we can assert this, but we should at least warn.
MOZ_ASSERT(aThreadId >= 0, "native thread ID is < 0");
MOZ_ASSERT(aThreadId <= INT32_MAX, "native thread ID is > INT32_MAX");
}
const char* Name() const { return mName.get(); }
mozilla::TimeStamp RegisterTime() const { return mRegisterTime; }
int ThreadId() const { return mThreadId; }
bool IsMainThread() const { return mIsMainThread; }
private:
~ThreadInfo() {}
const nsCString mName;
const mozilla::TimeStamp mRegisterTime;
const int mThreadId;
const bool mIsMainThread;
};
#endif // ThreadInfo_h

@@ -0,0 +1,82 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifdef XP_WIN
# undef UNICODE
# undef _UNICODE
#endif
#include "VTuneProfiler.h"
#include "mozilla/Bootstrap.h"
#include <memory>
using namespace std;
VTuneProfiler* VTuneProfiler::mInstance = nullptr;
void VTuneProfiler::Initialize() {
// This is just a 'dirty trick' to find out if the ittnotify DLL was found.
// If it wasn't, this function always returns 0; otherwise it returns
// incrementing numbers. If the library was found, this wastes 2 events, but
// that should be okay.
__itt_event testEvent =
__itt_event_create("Test event", strlen("Test event"));
testEvent = __itt_event_create("Test event 2", strlen("Test event 2"));
if (testEvent) {
mInstance = new VTuneProfiler();
}
}
void VTuneProfiler::Shutdown() {}
void VTuneProfiler::TraceInternal(const char* aName, TracingKind aKind) {
string str(aName);
auto iter = mStrings.find(str);
__itt_event event;
if (iter != mStrings.end()) {
event = iter->second;
} else {
event = __itt_event_create(aName, str.length());
mStrings.insert({str, event});
}
if (aKind == TRACING_INTERVAL_START || aKind == TRACING_EVENT) {
// VTune will consider starts not matched with an end to be single point in
// time events.
__itt_event_start(event);
} else {
__itt_event_end(event);
}
}
void VTuneProfiler::RegisterThreadInternal(const char* aName) {
string str(aName);
if (!str.compare("GeckoMain")) {
// Process main thread.
switch (XRE_GetProcessType()) {
case GeckoProcessType::GeckoProcessType_Default:
__itt_thread_set_name("Main Process");
break;
case GeckoProcessType::GeckoProcessType_Content:
__itt_thread_set_name("Content Process");
break;
case GeckoProcessType::GeckoProcessType_GMPlugin:
__itt_thread_set_name("Plugin Process");
break;
case GeckoProcessType::GeckoProcessType_GPU:
__itt_thread_set_name("GPU Process");
break;
default:
__itt_thread_set_name("Unknown Process");
}
return;
}
__itt_thread_set_name(aName);
}

@@ -0,0 +1,72 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef VTuneProfiler_h
#define VTuneProfiler_h
// The intent here is to add 0 overhead for regular users. In order to build
// the VTune profiler code at all, --enable-vtune-instrumentation needs to be
// set as a build option. Even then, when none of the environment variables
// that allow us to find the ittnotify DLL is specified, these functions
// should have minimal overhead. When starting Firefox under VTune, these env
// vars will be automatically defined; otherwise INTEL_LIBITTNOTIFY32/64
// should be set to point at the ittnotify DLL.
#ifndef MOZ_VTUNE_INSTRUMENTATION
# define VTUNE_INIT()
# define VTUNE_SHUTDOWN()
# define VTUNE_TRACING(name, kind)
# define VTUNE_REGISTER_THREAD(name)
#else
# include "BaseProfiler.h"
// This is the regular Intel header; these functions are defined for us
// inside js/src/vtune by an Intel C file which dynamically resolves them to
// the correct DLL. Through libxul these will 'magically' resolve.
# include "vtune/ittnotify.h"
# include <stddef.h>
# include <unordered_map>
# include <string>
class VTuneProfiler {
public:
static void Initialize();
static void Shutdown();
static void Trace(const char* aName, TracingKind aKind) {
if (mInstance) {
mInstance->TraceInternal(aName, aKind);
}
}
static void RegisterThread(const char* aName) {
if (mInstance) {
mInstance->RegisterThreadInternal(aName);
}
}
private:
void TraceInternal(const char* aName, TracingKind aKind);
void RegisterThreadInternal(const char* aName);
// This is null when the ittnotify DLL could not be found.
static VTuneProfiler* mInstance;
std::unordered_map<std::string, __itt_event> mStrings;
};
# define VTUNE_INIT() VTuneProfiler::Initialize()
# define VTUNE_SHUTDOWN() VTuneProfiler::Shutdown()
# define VTUNE_TRACING(name, kind) VTuneProfiler::Trace(name, kind)
# define VTUNE_REGISTER_THREAD(name) VTuneProfiler::RegisterThread(name)
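// Example use (a sketch; TRACING_INTERVAL_START/END/EVENT are the TracingKind
// values handled in VTuneProfiler.cpp):
//   VTUNE_INIT();
//   VTUNE_REGISTER_THREAD("Worker");
//   VTUNE_TRACING("MyPhase", TRACING_INTERVAL_START);
//   /* ...timed work... */
//   VTUNE_TRACING("MyPhase", TRACING_INTERVAL_END);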
#endif
#endif /* VTuneProfiler_h */

@@ -0,0 +1,513 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
// Copyright (c) 2006-2011 The Chromium Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google, Inc. nor the names of its contributors
// may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
// OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE.
// This file is used for both Linux and Android.
#include <stdio.h>
#include <math.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <stdlib.h>
#include <sched.h>
#include <ucontext.h>
// Ubuntu Dapper requires memory pages to be marked as
// executable. Otherwise, OS raises an exception when executing code
// in that page.
#include <sys/types.h> // mmap & munmap
#include <sys/mman.h> // mmap & munmap
#include <sys/stat.h> // open
#include <fcntl.h> // open
#include <unistd.h> // sysconf
#include <semaphore.h>
#ifdef __GLIBC__
# include <execinfo.h> // backtrace, backtrace_symbols
#endif // def __GLIBC__
#include <strings.h> // index
#include <errno.h>
#include <stdarg.h>
#include "prenv.h"
#include "mozilla/LinuxSignal.h"
#include "mozilla/PodOperations.h"
#include "mozilla/DebugOnly.h"
#include <string.h>
#include <list>
using namespace mozilla;
int profiler_current_process_id() { return getpid(); }
int profiler_current_thread_id() {
// glibc doesn't provide a wrapper for gettid().
#if defined(__GLIBC__)
return static_cast<int>(static_cast<pid_t>(syscall(SYS_gettid)));
#else
return static_cast<int>(gettid());
#endif
}
void* GetStackTop(void* aGuess) { return aGuess; }
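// Linux has no pthread_get_stackaddr_np, so we trust the caller's guess
// as-is; typically (an assumption about our callers) it is the address of
// an on-stack variable in the registering frame. pthread_getattr_np would
// be the heavier-weight alternative.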
static void PopulateRegsFromContext(Registers& aRegs, ucontext_t* aContext) {
aRegs.mContext = aContext;
mcontext_t& mcontext = aContext->uc_mcontext;
// Extracting the sample from the context is extremely machine dependent.
#if defined(GP_ARCH_x86)
aRegs.mPC = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
aRegs.mSP = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
aRegs.mFP = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
aRegs.mLR = 0;
#elif defined(GP_ARCH_amd64)
aRegs.mPC = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
aRegs.mSP = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
aRegs.mFP = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
aRegs.mLR = 0;
#elif defined(GP_ARCH_arm)
aRegs.mPC = reinterpret_cast<Address>(mcontext.arm_pc);
aRegs.mSP = reinterpret_cast<Address>(mcontext.arm_sp);
aRegs.mFP = reinterpret_cast<Address>(mcontext.arm_fp);
aRegs.mLR = reinterpret_cast<Address>(mcontext.arm_lr);
#elif defined(GP_ARCH_arm64)
aRegs.mPC = reinterpret_cast<Address>(mcontext.pc);
aRegs.mSP = reinterpret_cast<Address>(mcontext.sp);
aRegs.mFP = reinterpret_cast<Address>(mcontext.regs[29]);
aRegs.mLR = reinterpret_cast<Address>(mcontext.regs[30]);
#elif defined(GP_ARCH_mips64)
aRegs.mPC = reinterpret_cast<Address>(mcontext.pc);
aRegs.mSP = reinterpret_cast<Address>(mcontext.gregs[29]);
aRegs.mFP = reinterpret_cast<Address>(mcontext.gregs[30]);
#else
# error "bad platform"
#endif
}
#if defined(GP_OS_android)
# define SYS_tgkill __NR_tgkill
#endif
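// Note: unlike kill(), which addresses the process as a whole (any thread
// may end up handling the signal), tgkill() directs the signal at one
// specific thread (tid) within a thread group (tgid), which is exactly what
// the sampler needs.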
int tgkill(pid_t tgid, pid_t tid, int signalno) {
return syscall(SYS_tgkill, tgid, tid, signalno);
}
class PlatformData {
public:
explicit PlatformData(int aThreadId) { MOZ_COUNT_CTOR(PlatformData); }
~PlatformData() { MOZ_COUNT_DTOR(PlatformData); }
};
////////////////////////////////////////////////////////////////////////
// BEGIN Sampler target specifics
// The only way to reliably interrupt a Linux thread and inspect its register
// and stack state is by sending a signal to it, and doing the work inside the
// signal handler. But we don't want to run much code inside the signal
// handler, since POSIX severely restricts what we can do in signal handlers.
// So we use a system of semaphores to suspend the thread and allow the
// sampler thread to do all the work of unwinding and copying out whatever
// data it wants.
//
// A four-message protocol is used to reliably suspend and later resume the
// thread to be sampled (the samplee):
//
// Sampler (signal sender) thread Samplee (thread to be sampled)
//
// Prepare the SigHandlerCoordinator
// and point sSigHandlerCoordinator at it
//
// send SIGPROF to samplee ------- MSG 1 ----> (enter signal handler)
// wait(mMessage2) Copy register state
// into sSigHandlerCoordinator
// <------ MSG 2 ----- post(mMessage2)
// Samplee is now suspended. wait(mMessage3)
// Examine its stack/register
// state at leisure
//
// Release samplee:
// post(mMessage3) ------- MSG 3 ----->
// wait(mMessage4) Samplee now resumes. Tell
// the sampler that we are done.
// <------ MSG 4 ------ post(mMessage4)
// Now we know the samplee's signal (leave signal handler)
// handler has finished using
// sSigHandlerCoordinator. We can
// safely reuse it for some other thread.
//
// A type used to coordinate between the sampler (signal sending) thread and
// the thread currently being sampled (the samplee, which receives the
// signals).
//
// The first message is sent using a SIGPROF signal delivery. The subsequent
// three are sent using sem_wait/sem_post pairs. They are named accordingly
// in the following struct.
struct SigHandlerCoordinator {
SigHandlerCoordinator() {
PodZero(&mUContext);
int r = sem_init(&mMessage2, /* pshared */ 0, 0);
r |= sem_init(&mMessage3, /* pshared */ 0, 0);
r |= sem_init(&mMessage4, /* pshared */ 0, 0);
MOZ_ASSERT(r == 0);
}
~SigHandlerCoordinator() {
int r = sem_destroy(&mMessage2);
r |= sem_destroy(&mMessage3);
r |= sem_destroy(&mMessage4);
MOZ_ASSERT(r == 0);
}
sem_t mMessage2; // To sampler: "context is in sSigHandlerCoordinator"
sem_t mMessage3; // To samplee: "resume"
sem_t mMessage4; // To sampler: "finished with sSigHandlerCoordinator"
ucontext_t mUContext; // Context at signal
};
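// A note on the choice of primitives: POSIX specifies sem_post() as
// async-signal-safe, which is why plain semaphores carry messages 2 and 4
// out of the SIGPROF handler; pthread mutexes and condition variables make
// no such guarantee.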
struct SigHandlerCoordinator* Sampler::sSigHandlerCoordinator = nullptr;
static void SigprofHandler(int aSignal, siginfo_t* aInfo, void* aContext) {
// Avoid TSan warning about clobbering errno.
int savedErrno = errno;
MOZ_ASSERT(aSignal == SIGPROF);
MOZ_ASSERT(Sampler::sSigHandlerCoordinator);
// By sending us this signal, the sampler thread has sent us message 1 in
// the comment above, with the meaning "|sSigHandlerCoordinator| is ready
// for use, please copy your register context into it."
Sampler::sSigHandlerCoordinator->mUContext =
*static_cast<ucontext_t*>(aContext);
// Send message 2: tell the sampler thread that the context has been copied
// into |sSigHandlerCoordinator->mUContext|. sem_post can never fail by
// being interrupted by a signal, so there's no loop around this call.
int r = sem_post(&Sampler::sSigHandlerCoordinator->mMessage2);
MOZ_ASSERT(r == 0);
// At this point, the sampler thread assumes we are suspended, so we must
// not touch any global state here.
// Wait for message 3: the sampler thread tells us to resume.
while (true) {
r = sem_wait(&Sampler::sSigHandlerCoordinator->mMessage3);
if (r == -1 && errno == EINTR) {
// Interrupted by a signal. Try again.
continue;
}
// We don't expect any other kind of failure
MOZ_ASSERT(r == 0);
break;
}
// Send message 4: tell the sampler thread that we are finished accessing
// |sSigHandlerCoordinator|. After this point it is not safe to touch
// |sSigHandlerCoordinator|.
r = sem_post(&Sampler::sSigHandlerCoordinator->mMessage4);
MOZ_ASSERT(r == 0);
errno = savedErrno;
}
Sampler::Sampler(PSLockRef aLock)
: mMyPid(profiler_current_process_id())
// We don't know what the sampler thread's ID will be until it runs, so
// set mSamplerTid to a dummy value and fill it in for real in
// SuspendAndSampleAndResumeThread().
,
mSamplerTid(-1) {
#if defined(USE_EHABI_STACKWALK)
mozilla::EHABIStackWalkInit();
#endif
// NOTE: We don't initialize LUL here, instead initializing it in
// SamplerThread's constructor. This is because with the
// profiler_suspend_and_sample_thread entry point, we want to be able to
// sample without waiting for LUL to be initialized.
// Request profiling signals.
struct sigaction sa;
sa.sa_sigaction = MOZ_SIGNAL_TRAMPOLINE(SigprofHandler);
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART | SA_SIGINFO;
if (sigaction(SIGPROF, &sa, &mOldSigprofHandler) != 0) {
MOZ_CRASH("Error installing SIGPROF handler in the profiler");
}
}
void Sampler::Disable(PSLockRef aLock) {
// Restore old signal handler. This is global state so it's important that
// we do it now, while gPSMutex is locked.
sigaction(SIGPROF, &mOldSigprofHandler, 0);
}
template <typename Func>
void Sampler::SuspendAndSampleAndResumeThread(
PSLockRef aLock, const RegisteredThread& aRegisteredThread,
const Func& aProcessRegs) {
// Only one sampler thread can be sampling at once. So we expect to have
// complete control over |sSigHandlerCoordinator|.
MOZ_ASSERT(!sSigHandlerCoordinator);
if (mSamplerTid == -1) {
mSamplerTid = profiler_current_thread_id();
}
int sampleeTid = aRegisteredThread.Info()->ThreadId();
MOZ_RELEASE_ASSERT(sampleeTid != mSamplerTid);
//----------------------------------------------------------------//
// Suspend the samplee thread and get its context.
SigHandlerCoordinator coord; // on sampler thread's stack
sSigHandlerCoordinator = &coord;
// Send message 1 to the samplee (the thread to be sampled), by
// signalling at it.
int r = tgkill(mMyPid, sampleeTid, SIGPROF);
MOZ_ASSERT(r == 0);
// Wait for message 2 from the samplee, indicating that the context
// is available and that the thread is suspended.
while (true) {
r = sem_wait(&sSigHandlerCoordinator->mMessage2);
if (r == -1 && errno == EINTR) {
// Interrupted by a signal. Try again.
continue;
}
// We don't expect any other kind of failure.
MOZ_ASSERT(r == 0);
break;
}
//----------------------------------------------------------------//
// Sample the target thread.
// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
//
// The profiler's "critical section" begins here. In the critical section,
// we must not do any dynamic memory allocation, nor try to acquire any lock
// or any other unshareable resource. This is because the thread to be
// sampled has been suspended at some entirely arbitrary point, and we have
// no idea which unsharable resources (locks, essentially) it holds. So any
// attempt to acquire any lock, including the implied locks used by the
// malloc implementation, risks deadlock. This includes TimeStamp::Now(),
// which gets a lock on Windows.
// The samplee thread is now frozen and sSigHandlerCoordinator->mUContext is
// valid. We can poke around in it and unwind its stack as we like.
// Extract the current register values.
Registers regs;
PopulateRegsFromContext(regs, &sSigHandlerCoordinator->mUContext);
aProcessRegs(regs);
//----------------------------------------------------------------//
// Resume the target thread.
// Send message 3 to the samplee, which tells it to resume.
r = sem_post(&sSigHandlerCoordinator->mMessage3);
MOZ_ASSERT(r == 0);
// Wait for message 4 from the samplee, which tells us that it has
// finished with |sSigHandlerCoordinator|.
while (true) {
r = sem_wait(&sSigHandlerCoordinator->mMessage4);
if (r == -1 && errno == EINTR) {
continue;
}
MOZ_ASSERT(r == 0);
break;
}
// The profiler's critical section ends here. After this point, none of the
// critical section limitations documented above apply.
//
// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
// This isn't strictly necessary, but doing so does help pick up anomalies
// in which the signal handler is running when it shouldn't be.
sSigHandlerCoordinator = nullptr;
}
// END Sampler target specifics
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// BEGIN SamplerThread target specifics
static void* ThreadEntry(void* aArg) {
auto thread = static_cast<SamplerThread*>(aArg);
thread->Run();
return nullptr;
}
SamplerThread::SamplerThread(PSLockRef aLock, uint32_t aActivityGeneration,
double aIntervalMilliseconds)
: Sampler(aLock),
mActivityGeneration(aActivityGeneration),
mIntervalMicroseconds(
std::max(1, int(floor(aIntervalMilliseconds * 1000 + 0.5)))) {
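// (The expression above rounds the requested interval to the nearest whole
// microsecond and clamps it to at least 1us.)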
#if defined(USE_LUL_STACKWALK)
lul::LUL* lul = CorePS::Lul(aLock);
if (!lul) {
CorePS::SetLul(aLock, MakeUnique<lul::LUL>(logging_sink_for_LUL));
// Read all the unwind info currently available.
lul = CorePS::Lul(aLock);
read_procmaps(lul);
// Switch into unwind mode. After this point, we can't add or remove any
// unwind info to/from this LUL instance. The only thing we can do with
// it is Unwind() calls.
lul->EnableUnwinding();
// Has a test been requested?
if (PR_GetEnv("MOZ_PROFILER_LUL_TEST")) {
int nTests = 0, nTestsPassed = 0;
RunLulUnitTests(&nTests, &nTestsPassed, lul);
}
}
#endif
// Start the sampling thread. It repeatedly sends a SIGPROF signal. Sending
// the signal ourselves instead of relying on itimer provides much better
// accuracy.
if (pthread_create(&mThread, nullptr, ThreadEntry, this) != 0) {
MOZ_CRASH("pthread_create failed");
}
}
SamplerThread::~SamplerThread() { pthread_join(mThread, nullptr); }
void SamplerThread::SleepMicro(uint32_t aMicroseconds) {
if (aMicroseconds >= 1000000) {
// Use usleep for larger intervals, because the nanosleep
// code below only supports intervals < 1 second.
MOZ_ALWAYS_TRUE(!::usleep(aMicroseconds));
return;
}
struct timespec ts;
ts.tv_sec = 0;
ts.tv_nsec = aMicroseconds * 1000UL;
int rv = ::nanosleep(&ts, &ts);
while (rv != 0 && errno == EINTR) {
// Keep waiting in case of interrupt.
// nanosleep puts the remaining time back into ts.
rv = ::nanosleep(&ts, &ts);
}
MOZ_ASSERT(!rv, "nanosleep call failed");
}
void SamplerThread::Stop(PSLockRef aLock) {
// Restore old signal handler. This is global state so it's important that
// we do it now, while gPSMutex is locked. It's safe to do this now even
// though this SamplerThread is still alive, because the next time the main
// loop of Run() iterates it won't get past the mActivityGeneration check,
// and so won't send any signals.
Sampler::Disable(aLock);
}
// END SamplerThread target specifics
////////////////////////////////////////////////////////////////////////
#if defined(GP_OS_linux)
// We use pthread_atfork() to temporarily disable signal delivery during any
// fork() call. Without that, fork() can be repeatedly interrupted by signal
// delivery, requiring it to be repeatedly restarted, which can lead to *long*
// delays. See bug 837390.
//
// We provide no paf_child() function to run in the child after forking. This
// is fine because we always immediately exec() after fork(), and exec()
// clobbers all process state. (At one point we did have a paf_child()
// function, but it caused problems related to locking gPSMutex. See bug
// 1348374.)
//
// Unfortunately all this is only doable on non-Android because Bionic doesn't
// have pthread_atfork.
// In the parent, before the fork, record IsPaused, and then pause.
static void paf_prepare() {
MOZ_RELEASE_ASSERT(CorePS::Exists());
PSAutoLock lock(gPSMutex);
if (ActivePS::Exists(lock)) {
ActivePS::SetWasPaused(lock, ActivePS::IsPaused(lock));
ActivePS::SetIsPaused(lock, true);
}
}
// In the parent, after the fork, return IsPaused to the pre-fork state.
static void paf_parent() {
MOZ_RELEASE_ASSERT(CorePS::Exists());
PSAutoLock lock(gPSMutex);
if (ActivePS::Exists(lock)) {
ActivePS::SetIsPaused(lock, ActivePS::WasPaused(lock));
ActivePS::SetWasPaused(lock, false);
}
}
static void PlatformInit(PSLockRef aLock) {
// Set up the fork handlers.
pthread_atfork(paf_prepare, paf_parent, nullptr);
}
#else
static void PlatformInit(PSLockRef aLock) {}
#endif
#if defined(HAVE_NATIVE_UNWIND)
// Context used by synchronous samples. It's safe to have a single one because
// only one synchronous sample can be taken at a time (due to
// profiler_get_backtrace()'s PSAutoLock).
ucontext_t sSyncUContext;
void Registers::SyncPopulate() {
// getcontext() returns 0 on success.
if (!getcontext(&sSyncUContext)) {
PopulateRegsFromContext(*this, &sSyncUContext);
}
}
#endif

@@ -0,0 +1,192 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <unistd.h>
#include <sys/mman.h>
#include <mach/mach_init.h>
#include <mach-o/getsect.h>
#include <AvailabilityMacros.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_statistics.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <math.h>
// This port is based on V8 SVN revision 9837.
int profiler_current_process_id() { return getpid(); }
int profiler_current_thread_id() {
return static_cast<int>(static_cast<pid_t>(syscall(SYS_thread_selfid)));
}
void* GetStackTop(void* aGuess) {
pthread_t thread = pthread_self();
return pthread_get_stackaddr_np(thread);
}
class PlatformData {
public:
explicit PlatformData(int aThreadId) : mProfiledThread(mach_thread_self()) {
MOZ_COUNT_CTOR(PlatformData);
}
~PlatformData() {
// Deallocate Mach port for thread.
mach_port_deallocate(mach_task_self(), mProfiledThread);
MOZ_COUNT_DTOR(PlatformData);
}
thread_act_t ProfiledThread() { return mProfiledThread; }
private:
// Note: for mProfiledThread, Mach primitives are used instead of pthreads
// because the latter doesn't provide the required thread-manipulation
// primitives. For details, consult the "Mac OS X Internals" book, Section 7.3.
thread_act_t mProfiledThread;
};
////////////////////////////////////////////////////////////////////////
// BEGIN Sampler target specifics
Sampler::Sampler(PSLockRef aLock) {}
void Sampler::Disable(PSLockRef aLock) {}
template <typename Func>
void Sampler::SuspendAndSampleAndResumeThread(
PSLockRef aLock, const RegisteredThread& aRegisteredThread,
const Func& aProcessRegs) {
thread_act_t samplee_thread =
aRegisteredThread.GetPlatformData()->ProfiledThread();
//----------------------------------------------------------------//
// Suspend the samplee thread and get its context.
// We're using thread_suspend on OS X because pthread_kill (which is what we
// at one time used on Linux) has less consistent performance and causes
// strange crashes; see bug 1166778 and bug 1166808. thread_suspend is also
// just a lot simpler to use.
if (KERN_SUCCESS != thread_suspend(samplee_thread)) {
return;
}
//----------------------------------------------------------------//
// Sample the target thread.
// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
//
// The profiler's "critical section" begins here. We must be very careful
// what we do here, or risk deadlock. See the corresponding comment in
// platform-linux-android.cpp for details.
thread_state_flavor_t flavor = x86_THREAD_STATE64;
x86_thread_state64_t state;
mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
#if __DARWIN_UNIX03
# define REGISTER_FIELD(name) __r##name
#else
# define REGISTER_FIELD(name) r##name
#endif // __DARWIN_UNIX03
if (thread_get_state(samplee_thread, flavor,
reinterpret_cast<natural_t*>(&state),
&count) == KERN_SUCCESS) {
Registers regs;
regs.mPC = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
regs.mSP = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
regs.mFP = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
regs.mLR = 0;
aProcessRegs(regs);
}
#undef REGISTER_FIELD
//----------------------------------------------------------------//
// Resume the target thread.
thread_resume(samplee_thread);
// The profiler's critical section ends here.
//
// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
}
// END Sampler target specifics
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// BEGIN SamplerThread target specifics
static void* ThreadEntry(void* aArg) {
auto thread = static_cast<SamplerThread*>(aArg);
thread->Run();
return nullptr;
}
SamplerThread::SamplerThread(PSLockRef aLock, uint32_t aActivityGeneration,
double aIntervalMilliseconds)
: Sampler(aLock),
mActivityGeneration(aActivityGeneration),
mIntervalMicroseconds(
std::max(1, int(floor(aIntervalMilliseconds * 1000 + 0.5)))),
mThread{nullptr} {
pthread_attr_t* attr_ptr = nullptr;
if (pthread_create(&mThread, attr_ptr, ThreadEntry, this) != 0) {
MOZ_CRASH("pthread_create failed");
}
}
SamplerThread::~SamplerThread() { pthread_join(mThread, nullptr); }
void SamplerThread::SleepMicro(uint32_t aMicroseconds) {
usleep(aMicroseconds);
// FIXME: the macOS 10.12 man page for usleep says "The usleep() function is
// obsolescent. Use nanosleep(2) instead." This implementation could be
// merged with the linux-android version. Also, this doesn't handle the
// case where the usleep call is interrupted by a signal.
}
void SamplerThread::Stop(PSLockRef aLock) { Sampler::Disable(aLock); }
// END SamplerThread target specifics
////////////////////////////////////////////////////////////////////////
static void PlatformInit(PSLockRef aLock) {}
#if defined(HAVE_NATIVE_UNWIND)
void Registers::SyncPopulate() {
asm(
// Compute caller's %rsp by adding to %rbp:
// 8 bytes for previous %rbp, 8 bytes for return address
"leaq 0x10(%%rbp), %0\n\t"
// Dereference %rbp to get previous %rbp
"movq (%%rbp), %1\n\t"
: "=r"(mSP), "=r"(mFP));
mPC = reinterpret_cast<Address>(
__builtin_extract_return_addr(__builtin_return_address(0)));
mLR = 0;
}
#endif

@@ -0,0 +1,311 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
// Copyright (c) 2006-2011 The Chromium Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google, Inc. nor the names of its contributors
// may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
// OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE.
#include <windows.h>
#include <mmsystem.h>
#include <process.h>
#include "nsWindowsDllInterceptor.h"
#include "mozilla/StackWalk_windows.h"
#include "mozilla/WindowsVersion.h"
int profiler_current_process_id() { return _getpid(); }
int profiler_current_thread_id() {
DWORD threadId = GetCurrentThreadId();
MOZ_ASSERT(threadId <= INT32_MAX, "native thread ID is > INT32_MAX");
return int(threadId);
}
void* GetStackTop(void* aGuess) {
PNT_TIB pTib = reinterpret_cast<PNT_TIB>(NtCurrentTeb());
return reinterpret_cast<void*>(pTib->StackBase);
}
static void PopulateRegsFromContext(Registers& aRegs, CONTEXT* aContext) {
#if defined(GP_ARCH_amd64)
aRegs.mPC = reinterpret_cast<Address>(aContext->Rip);
aRegs.mSP = reinterpret_cast<Address>(aContext->Rsp);
aRegs.mFP = reinterpret_cast<Address>(aContext->Rbp);
#elif defined(GP_ARCH_x86)
aRegs.mPC = reinterpret_cast<Address>(aContext->Eip);
aRegs.mSP = reinterpret_cast<Address>(aContext->Esp);
aRegs.mFP = reinterpret_cast<Address>(aContext->Ebp);
#elif defined(GP_ARCH_arm64)
aRegs.mPC = reinterpret_cast<Address>(aContext->Pc);
aRegs.mSP = reinterpret_cast<Address>(aContext->Sp);
aRegs.mFP = reinterpret_cast<Address>(aContext->Fp);
#else
# error "bad arch"
#endif
aRegs.mLR = 0;
}
class PlatformData {
public:
// Get a handle to the calling thread. This is the thread that we are
// going to profile. We need to make a copy of the handle because we are
// going to use it in the sampler thread. Using GetThreadHandle() will
// not work in this case. We're using OpenThread because DuplicateHandle
// for some reason doesn't work in Chrome's sandbox.
explicit PlatformData(int aThreadId)
: mProfiledThread(OpenThread(THREAD_GET_CONTEXT | THREAD_SUSPEND_RESUME |
THREAD_QUERY_INFORMATION,
false, aThreadId)) {
MOZ_COUNT_CTOR(PlatformData);
}
~PlatformData() {
if (mProfiledThread != nullptr) {
CloseHandle(mProfiledThread);
mProfiledThread = nullptr;
}
MOZ_COUNT_DTOR(PlatformData);
}
HANDLE ProfiledThread() { return mProfiledThread; }
private:
HANDLE mProfiledThread;
};
#if defined(USE_MOZ_STACK_WALK)
HANDLE
GetThreadHandle(PlatformData* aData) { return aData->ProfiledThread(); }
#endif
static const HANDLE kNoThread = INVALID_HANDLE_VALUE;
////////////////////////////////////////////////////////////////////////
// BEGIN Sampler target specifics
Sampler::Sampler(PSLockRef aLock) {}
void Sampler::Disable(PSLockRef aLock) {}
template <typename Func>
void Sampler::SuspendAndSampleAndResumeThread(
PSLockRef aLock, const RegisteredThread& aRegisteredThread,
const Func& aProcessRegs) {
HANDLE profiled_thread =
aRegisteredThread.GetPlatformData()->ProfiledThread();
if (profiled_thread == nullptr) {
return;
}
// Context used for sampling the register state of the profiled thread.
CONTEXT context;
memset(&context, 0, sizeof(context));
//----------------------------------------------------------------//
// Suspend the samplee thread and get its context.
static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
if (SuspendThread(profiled_thread) == kSuspendFailed) {
return;
}
// SuspendThread is asynchronous, so the thread may still be running.
// Call GetThreadContext first to ensure the thread is really suspended.
// See https://blogs.msdn.microsoft.com/oldnewthing/20150205-00/?p=44743.
// Using only CONTEXT_CONTROL is faster but on 64-bit it causes crashes in
// RtlVirtualUnwind (see bug 1120126) so we set all the flags.
#if defined(GP_ARCH_amd64)
context.ContextFlags = CONTEXT_FULL;
#else
context.ContextFlags = CONTEXT_CONTROL;
#endif
if (!GetThreadContext(profiled_thread, &context)) {
ResumeThread(profiled_thread);
return;
}
//----------------------------------------------------------------//
// Sample the target thread.
// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
//
// The profiler's "critical section" begins here. We must be very careful
// what we do here, or risk deadlock. See the corresponding comment in
// platform-linux-android.cpp for details.
Registers regs;
PopulateRegsFromContext(regs, &context);
aProcessRegs(regs);
//----------------------------------------------------------------//
// Resume the target thread.
ResumeThread(profiled_thread);
// The profiler's critical section ends here.
//
// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
}
// END Sampler target specifics
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// BEGIN SamplerThread target specifics
static unsigned int __stdcall ThreadEntry(void* aArg) {
auto thread = static_cast<SamplerThread*>(aArg);
thread->Run();
return 0;
}
SamplerThread::SamplerThread(PSLockRef aLock, uint32_t aActivityGeneration,
double aIntervalMilliseconds)
: Sampler(aLock),
mActivityGeneration(aActivityGeneration),
mIntervalMicroseconds(
std::max(1, int(floor(aIntervalMilliseconds * 1000 + 0.5)))) {
// By default we won't adjust the timer resolution, which tends to be
// around 16ms. However, if the requested interval is sufficiently low
// we'll try to adjust the resolution to match.
if (mIntervalMicroseconds < 10 * 1000) {
::timeBeginPeriod(mIntervalMicroseconds / 1000);
}
// Create a new thread. It is important to use _beginthreadex() instead of
// the Win32 function CreateThread(), because CreateThread() does not
// initialize thread-specific structures in the C runtime library.
mThread = reinterpret_cast<HANDLE>(_beginthreadex(nullptr,
/* stack_size */ 0,
ThreadEntry, this,
/* initflag */ 0, nullptr));
if (mThread == 0) {
MOZ_CRASH("_beginthreadex failed");
}
}
SamplerThread::~SamplerThread() {
WaitForSingleObject(mThread, INFINITE);
// Close our own handle for the thread.
if (mThread != kNoThread) {
CloseHandle(mThread);
}
}
void SamplerThread::SleepMicro(uint32_t aMicroseconds) {
// For now, keep the old behaviour of minimum Sleep(1), even for
// smaller-than-usual sleeps after an overshoot, unless the user has
// explicitly opted into a sub-millisecond profiler interval.
if (mIntervalMicroseconds >= 1000) {
::Sleep(std::max(1u, aMicroseconds / 1000));
} else {
TimeStamp start = TimeStamp::Now();
TimeStamp end = start + TimeDuration::FromMicroseconds(aMicroseconds);
// First, sleep for as many whole milliseconds as possible.
if (aMicroseconds >= 1000) {
::Sleep(aMicroseconds / 1000);
}
// Then, spin until enough time has passed.
while (TimeStamp::Now() < end) {
YieldProcessor();
}
}
}
void SamplerThread::Stop(PSLockRef aLock) {
// Disable any timer resolution changes we've made. Do it now while
// gPSMutex is locked, i.e. before any other SamplerThread can be created
// and call ::timeBeginPeriod().
//
// It's safe to do this now even though this SamplerThread is still alive,
// because the next time the main loop of Run() iterates it won't get past
// the mActivityGeneration check, and so it won't make any more ::Sleep()
// calls.
if (mIntervalMicroseconds < 10 * 1000) {
::timeEndPeriod(mIntervalMicroseconds / 1000);
}
Sampler::Disable(aLock);
}
// END SamplerThread target specifics
////////////////////////////////////////////////////////////////////////
static void PlatformInit(PSLockRef aLock) {}
#if defined(HAVE_NATIVE_UNWIND)
void Registers::SyncPopulate() {
CONTEXT context;
RtlCaptureContext(&context);
PopulateRegsFromContext(*this, &context);
}
#endif
#if defined(GP_PLAT_amd64_windows)
static WindowsDllInterceptor NtDllIntercept;
typedef NTSTATUS(NTAPI* LdrUnloadDll_func)(HMODULE module);
static WindowsDllInterceptor::FuncHookType<LdrUnloadDll_func> stub_LdrUnloadDll;
static NTSTATUS NTAPI patched_LdrUnloadDll(HMODULE module) {
// Prevent the stack walker from suspending this thread when LdrUnloadDll
// holds the RtlLookupFunctionEntry lock.
AutoSuppressStackWalking suppress;
return stub_LdrUnloadDll(module);
}
// These pointers are disguised as PVOID to avoid pulling in obscure headers
typedef PVOID(WINAPI* LdrResolveDelayLoadedAPI_func)(
PVOID ParentModuleBase, PVOID DelayloadDescriptor, PVOID FailureDllHook,
PVOID FailureSystemHook, PVOID ThunkAddress, ULONG Flags);
static WindowsDllInterceptor::FuncHookType<LdrResolveDelayLoadedAPI_func>
stub_LdrResolveDelayLoadedAPI;
static PVOID WINAPI patched_LdrResolveDelayLoadedAPI(
PVOID ParentModuleBase, PVOID DelayloadDescriptor, PVOID FailureDllHook,
PVOID FailureSystemHook, PVOID ThunkAddress, ULONG Flags) {
// Prevent the stack walker from suspending this thread when
// LdrResolveDelayLoadAPI holds the RtlLookupFunctionEntry lock.
AutoSuppressStackWalking suppress;
return stub_LdrResolveDelayLoadedAPI(ParentModuleBase, DelayloadDescriptor,
FailureDllHook, FailureSystemHook,
ThunkAddress, Flags);
}
void InitializeWin64ProfilerHooks() {
NtDllIntercept.Init("ntdll.dll");
stub_LdrUnloadDll.Set(NtDllIntercept, "LdrUnloadDll", &patched_LdrUnloadDll);
if (IsWin8OrLater()) { // LdrResolveDelayLoadedAPI was introduced in Win8
stub_LdrResolveDelayLoadedAPI.Set(NtDllIntercept,
"LdrResolveDelayLoadedAPI",
&patched_LdrResolveDelayLoadedAPI);
}
}
#endif // defined(GP_PLAT_amd64_windows)

(Diff for one file not shown because of its large size.)

@@ -0,0 +1,105 @@
// Copyright (c) 2006-2011 The Chromium Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google, Inc. nor the names of its contributors
// may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
// OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE.
#ifndef TOOLS_PLATFORM_H_
#define TOOLS_PLATFORM_H_
#include "PlatformMacros.h"
#include "BaseProfiler.h"
#include "mozilla/Logging.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Vector.h"
#include "nsString.h"
#include <functional>
#include <stdint.h>
extern mozilla::LazyLogModule gProfilerLog;
// These are for MOZ_LOG="prof:3" or higher. It's the default logging level for
// the profiler, and should be used sparingly.
#define LOG_TEST MOZ_LOG_TEST(gProfilerLog, mozilla::LogLevel::Info)
#define LOG(arg, ...) \
MOZ_LOG(gProfilerLog, mozilla::LogLevel::Info, \
("[%d] " arg, profiler_current_process_id(), ##__VA_ARGS__))
// These are for MOZ_LOG="prof:4" or higher. It should be used for logging that
// is somewhat more verbose than LOG.
#define DEBUG_LOG_TEST MOZ_LOG_TEST(gProfilerLog, mozilla::LogLevel::Debug)
#define DEBUG_LOG(arg, ...) \
MOZ_LOG(gProfilerLog, mozilla::LogLevel::Debug, \
("[%d] " arg, profiler_current_process_id(), ##__VA_ARGS__))
typedef uint8_t* Address;
// ----------------------------------------------------------------------------
// Miscellaneous
class PlatformData;
// We can't new/delete the type safely without defining it
// (-Wdelete-incomplete). Use these to hide the details from clients.
struct PlatformDataDestructor {
void operator()(PlatformData*);
};
typedef mozilla::UniquePtr<PlatformData, PlatformDataDestructor>
UniquePlatformData;
UniquePlatformData AllocPlatformData(int aThreadId);
namespace mozilla {
class JSONWriter;
}
void AppendSharedLibraries(mozilla::JSONWriter& aWriter);
// Convert the array of strings to a bitfield.
uint32_t ParseFeaturesFromStringArray(const char** aFeatures,
uint32_t aFeatureCount,
bool aIsStartup = false);
void profiler_get_profile_json_into_lazily_allocated_buffer(
const std::function<char*(size_t)>& aAllocator, double aSinceTime,
bool aIsShuttingDown);
// Flags to conveniently track various JS features.
enum class JSSamplingFlags {
StackSampling = 0x1,
TrackOptimizations = 0x2,
TraceLogging = 0x4
};
// Record an exit profile from a child process.
void profiler_received_exit_profile(const nsCString& aExitProfile);
// Extract all received exit profiles that have not yet expired (i.e., they
// still intersect with this process' buffer range).
mozilla::Vector<nsCString> profiler_move_exit_profiles();
#endif /* ndef TOOLS_PLATFORM_H_ */

@@ -0,0 +1,263 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "BaseProfilerSharedLibraries.h"
#define PATH_MAX_TOSTRING(x) #x
#define PATH_MAX_STRING(x) PATH_MAX_TOSTRING(x)
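// The two-level expansion above makes PATH_MAX_STRING(PATH_MAX) stringify
// the *value* of PATH_MAX (e.g. "4096", a typical Linux value) rather than
// the token "PATH_MAX"; it bounds the %s width in the sscanf format below.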
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <unistd.h>
#include <fstream>
#include "platform.h"
#include "mozilla/Sprintf.h"
#include "mozilla/Unused.h"
#include "nsDebug.h"
#include "nsNativeCharsetUtils.h"
#include <nsTArray.h>
#include "common/linux/file_id.h"
#include <algorithm>
#include <dlfcn.h>
#include <features.h>
#include <sys/types.h>
#if defined(GP_OS_linux)
# include <link.h> // dl_phdr_info
#elif defined(GP_OS_android)
# include "AutoObjectMapper.h"
# include "ElfLoader.h" // dl_phdr_info
extern "C" MOZ_EXPORT __attribute__((weak)) int dl_iterate_phdr(
int (*callback)(struct dl_phdr_info* info, size_t size, void* data),
void* data);
#else
# error "Unexpected configuration"
#endif
struct LoadedLibraryInfo {
LoadedLibraryInfo(const char* aName, unsigned long aBaseAddress,
unsigned long aFirstMappingStart,
unsigned long aLastMappingEnd)
: mName(aName),
mBaseAddress(aBaseAddress),
mFirstMappingStart(aFirstMappingStart),
mLastMappingEnd(aLastMappingEnd) {}
nsCString mName;
unsigned long mBaseAddress;
unsigned long mFirstMappingStart;
unsigned long mLastMappingEnd;
};
#if defined(GP_OS_android)
static void outputMapperLog(const char* aBuf) { LOG("%s", aBuf); }
#endif
static nsCString IDtoUUIDString(
const google_breakpad::wasteful_vector<uint8_t>& aIdentifier) {
using namespace google_breakpad;
nsCString uuid;
const std::string str = FileID::ConvertIdentifierToUUIDString(aIdentifier);
uuid.Append(str.c_str(), str.size());
// This is '0', not '\0', since it represents the breakpad id age.
uuid.Append('0');
return uuid;
}
// Get the breakpad ID for the binary file pointed to by bin_name.
static nsCString getId(const char* bin_name) {
using namespace google_breakpad;
PageAllocator allocator;
auto_wasteful_vector<uint8_t, kDefaultBuildIdSize> identifier(&allocator);
#if defined(GP_OS_android)
if (nsDependentCString(bin_name).Find("!/") != kNotFound) {
AutoObjectMapperFaultyLib mapper(outputMapperLog);
void* image = nullptr;
size_t size = 0;
if (mapper.Map(&image, &size, bin_name) && image && size) {
if (FileID::ElfFileIdentifierFromMappedFile(image, identifier)) {
return IDtoUUIDString(identifier);
}
}
}
#endif
FileID file_id(bin_name);
if (file_id.ElfFileIdentifier(identifier)) {
return IDtoUUIDString(identifier);
}
return EmptyCString();
}
static SharedLibrary SharedLibraryAtPath(const char* path,
unsigned long libStart,
unsigned long libEnd,
unsigned long offset = 0) {
nsAutoString pathStr;
mozilla::Unused << NS_WARN_IF(
NS_FAILED(NS_CopyNativeToUnicode(nsDependentCString(path), pathStr)));
nsAutoString nameStr = pathStr;
int32_t pos = nameStr.RFindChar('/');
if (pos != kNotFound) {
nameStr.Cut(0, pos + 1);
}
return SharedLibrary(libStart, libEnd, offset, getId(path), nameStr, pathStr,
nameStr, pathStr, EmptyCString(), "");
}
static int dl_iterate_callback(struct dl_phdr_info* dl_info, size_t size,
void* data) {
auto libInfoList = reinterpret_cast<nsTArray<LoadedLibraryInfo>*>(data);
if (dl_info->dlpi_phnum <= 0) return 0;
unsigned long baseAddress = dl_info->dlpi_addr;
  unsigned long firstMappingStart = ULONG_MAX;  // so any real start is smaller
unsigned long lastMappingEnd = 0;
for (size_t i = 0; i < dl_info->dlpi_phnum; i++) {
if (dl_info->dlpi_phdr[i].p_type != PT_LOAD) {
continue;
}
unsigned long start = dl_info->dlpi_addr + dl_info->dlpi_phdr[i].p_vaddr;
unsigned long end = start + dl_info->dlpi_phdr[i].p_memsz;
if (start < firstMappingStart) {
firstMappingStart = start;
}
if (end > lastMappingEnd) {
lastMappingEnd = end;
}
}
libInfoList->AppendElement(LoadedLibraryInfo(
dl_info->dlpi_name, baseAddress, firstMappingStart, lastMappingEnd));
return 0;
}
SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() {
SharedLibraryInfo info;
#if defined(GP_OS_linux)
// We need to find the name of the executable (exeName, exeNameLen) and the
// address of its executable section (exeExeAddr) in the running image.
char exeName[PATH_MAX];
memset(exeName, 0, sizeof(exeName));
ssize_t exeNameLen = readlink("/proc/self/exe", exeName, sizeof(exeName) - 1);
if (exeNameLen == -1) {
// readlink failed for whatever reason. Note this, but keep going.
exeName[0] = '\0';
exeNameLen = 0;
LOG("SharedLibraryInfo::GetInfoForSelf(): readlink failed");
} else {
// Assert no buffer overflow.
MOZ_RELEASE_ASSERT(exeNameLen >= 0 &&
exeNameLen < static_cast<ssize_t>(sizeof(exeName)));
}
unsigned long exeExeAddr = 0;
#endif
#if defined(GP_OS_android)
// If dl_iterate_phdr doesn't exist, we give up immediately.
if (!dl_iterate_phdr) {
// On ARM Android, dl_iterate_phdr is provided by the custom linker.
// So if libxul was loaded by the system linker (e.g. as part of
// xpcshell when running tests), it won't be available and we should
// not call it.
return info;
}
#endif
// Read info from /proc/self/maps. We ignore most of it.
pid_t pid = profiler_current_process_id();
char path[PATH_MAX];
SprintfLiteral(path, "/proc/%d/maps", pid);
std::ifstream maps(path);
std::string line;
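  // Each line of /proc/<pid>/maps has the following shape (illustrative):
  //   7f1c0a400000-7f1c0a5d2000 r-xp 00000000 08:01 131142  /usr/lib/libfoo.so
  // i.e. start-end addresses, permissions, file offset, device, inode, path;
  // the sscanf below picks out only the fields we need.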
while (std::getline(maps, line)) {
int ret;
unsigned long start;
unsigned long end;
char perm[6 + 1] = "";
unsigned long offset;
char modulePath[PATH_MAX + 1] = "";
ret = sscanf(line.c_str(),
"%lx-%lx %6s %lx %*s %*x %" PATH_MAX_STRING(PATH_MAX) "s\n",
&start, &end, perm, &offset, modulePath);
if (!strchr(perm, 'x')) {
// Ignore non executable entries
continue;
}
if (ret != 5 && ret != 4) {
LOG("SharedLibraryInfo::GetInfoForSelf(): "
"reading /proc/self/maps failed");
continue;
}
#if defined(GP_OS_linux)
// Try to establish the main executable's load address.
if (exeNameLen > 0 && strcmp(modulePath, exeName) == 0) {
exeExeAddr = start;
}
#elif defined(GP_OS_android)
// Use /proc/pid/maps to get the dalvik-jit section since it has no
// associated phdrs.
if (0 == strcmp(modulePath, "/dev/ashmem/dalvik-jit-code-cache")) {
info.AddSharedLibrary(
SharedLibraryAtPath(modulePath, start, end, offset));
if (info.GetSize() > 10000) {
LOG("SharedLibraryInfo::GetInfoForSelf(): "
"implausibly large number of mappings acquired");
break;
}
}
#endif
}
nsTArray<LoadedLibraryInfo> libInfoList;
// We collect the bulk of the library info using dl_iterate_phdr.
dl_iterate_phdr(dl_iterate_callback, &libInfoList);
for (const auto& libInfo : libInfoList) {
info.AddSharedLibrary(
SharedLibraryAtPath(libInfo.mName.get(), libInfo.mFirstMappingStart,
libInfo.mLastMappingEnd,
libInfo.mFirstMappingStart - libInfo.mBaseAddress));
}
#if defined(GP_OS_linux)
// Make another pass over the information we just harvested from
// dl_iterate_phdr. If we see a nameless object mapped at what we earlier
// established to be the main executable's load address, attach the
// executable's name to that entry.
for (size_t i = 0; i < info.GetSize(); i++) {
SharedLibrary& lib = info.GetMutableEntry(i);
if (lib.GetStart() == exeExeAddr && lib.GetNativeDebugPath().empty()) {
lib = SharedLibraryAtPath(exeName, lib.GetStart(), lib.GetEnd(),
lib.GetOffset());
// We only expect to see one such entry.
break;
}
}
#endif
return info;
}
void SharedLibraryInfo::Initialize() { /* do nothing */
}


@@ -0,0 +1,185 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "BaseProfilerSharedLibraries.h"
#include "ClearOnShutdown.h"
#include "mozilla/StaticMutex.h"
#include "mozilla/Unused.h"
#include "nsNativeCharsetUtils.h"
#include <AvailabilityMacros.h>
#include <dlfcn.h>
#include <mach-o/arch.h>
#include <mach-o/dyld_images.h>
#include <mach-o/dyld.h>
#include <mach-o/loader.h>
#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/task_info.h>
#include <mach/task.h>
#include <sstream>
#include <stdlib.h>
#include <string.h>
#include <vector>
// Architecture specific abstraction.
#if defined(GP_ARCH_x86)
typedef mach_header platform_mach_header;
typedef segment_command mach_segment_command_type;
# define MACHO_MAGIC_NUMBER MH_MAGIC
# define CMD_SEGMENT LC_SEGMENT
# define seg_size uint32_t
#else
typedef mach_header_64 platform_mach_header;
typedef segment_command_64 mach_segment_command_type;
# define MACHO_MAGIC_NUMBER MH_MAGIC_64
# define CMD_SEGMENT LC_SEGMENT_64
# define seg_size uint64_t
#endif
struct NativeSharedLibrary {
const platform_mach_header* header;
std::string path;
};
static std::vector<NativeSharedLibrary>* sSharedLibrariesList = nullptr;
static mozilla::StaticMutex sSharedLibrariesMutex;
static void SharedLibraryAddImage(const struct mach_header* mh,
intptr_t vmaddr_slide) {
// NOTE: Presumably for backwards-compatibility reasons, this function accepts
// a mach_header even on 64-bit where it ought to be a mach_header_64. We cast
// it to the right type here.
auto header = reinterpret_cast<const platform_mach_header*>(mh);
Dl_info info;
if (!dladdr(header, &info)) {
return;
}
mozilla::StaticMutexAutoLock lock(sSharedLibrariesMutex);
if (!sSharedLibrariesList) {
return;
}
NativeSharedLibrary lib = {header, info.dli_fname};
sSharedLibrariesList->push_back(lib);
}
static void SharedLibraryRemoveImage(const struct mach_header* mh,
intptr_t vmaddr_slide) {
// NOTE: Presumably for backwards-compatibility reasons, this function accepts
// a mach_header even on 64-bit where it ought to be a mach_header_64. We cast
// it to the right type here.
auto header = reinterpret_cast<const platform_mach_header*>(mh);
mozilla::StaticMutexAutoLock lock(sSharedLibrariesMutex);
if (!sSharedLibrariesList) {
return;
}
uint32_t count = sSharedLibrariesList->size();
for (uint32_t i = 0; i < count; ++i) {
if ((*sSharedLibrariesList)[i].header == header) {
sSharedLibrariesList->erase(sSharedLibrariesList->begin() + i);
return;
}
}
}
void SharedLibraryInfo::Initialize() {
// NOTE: We intentionally leak this memory here. We're allocating dynamically
// in order to avoid static initializers.
sSharedLibrariesList = new std::vector<NativeSharedLibrary>();
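  // Note: dyld invokes the add-image callback synchronously for every image
  // that is already loaded at registration time, so the list is populated
  // immediately, not just for future loads.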
_dyld_register_func_for_add_image(SharedLibraryAddImage);
_dyld_register_func_for_remove_image(SharedLibraryRemoveImage);
}
static void addSharedLibrary(const platform_mach_header* header,
const char* path, SharedLibraryInfo& info) {
const struct load_command* cmd =
reinterpret_cast<const struct load_command*>(header + 1);
seg_size size = 0;
unsigned long long start = reinterpret_cast<unsigned long long>(header);
// Find the cmd segment in the macho image. It will contain the offset we care
// about.
const uint8_t* uuid_bytes = nullptr;
for (unsigned int i = 0;
cmd && (i < header->ncmds) && (uuid_bytes == nullptr || size == 0);
++i) {
if (cmd->cmd == CMD_SEGMENT) {
const mach_segment_command_type* seg =
reinterpret_cast<const mach_segment_command_type*>(cmd);
if (!strcmp(seg->segname, "__TEXT")) {
size = seg->vmsize;
}
} else if (cmd->cmd == LC_UUID) {
const uuid_command* ucmd = reinterpret_cast<const uuid_command*>(cmd);
uuid_bytes = ucmd->uuid;
}
cmd = reinterpret_cast<const struct load_command*>(
reinterpret_cast<const char*>(cmd) + cmd->cmdsize);
}
nsAutoCString uuid;
if (uuid_bytes != nullptr) {
uuid.AppendPrintf(
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"0" /* breakpad id age */,
uuid_bytes[0], uuid_bytes[1], uuid_bytes[2], uuid_bytes[3],
uuid_bytes[4], uuid_bytes[5], uuid_bytes[6], uuid_bytes[7],
uuid_bytes[8], uuid_bytes[9], uuid_bytes[10], uuid_bytes[11],
uuid_bytes[12], uuid_bytes[13], uuid_bytes[14], uuid_bytes[15]);
}
nsAutoString pathStr;
mozilla::Unused << NS_WARN_IF(
NS_FAILED(NS_CopyNativeToUnicode(nsDependentCString(path), pathStr)));
nsAutoString nameStr = pathStr;
int32_t pos = nameStr.RFindChar('/');
if (pos != kNotFound) {
nameStr.Cut(0, pos + 1);
}
const NXArchInfo* archInfo =
NXGetArchInfoFromCpuType(header->cputype, header->cpusubtype);
info.AddSharedLibrary(SharedLibrary(start, start + size, 0, uuid, nameStr,
pathStr, nameStr, pathStr, EmptyCString(),
archInfo ? archInfo->name : ""));
}
// Translate the statically stored sSharedLibrariesList information into a
// SharedLibraryInfo object.
SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() {
mozilla::StaticMutexAutoLock lock(sSharedLibrariesMutex);
SharedLibraryInfo sharedLibraryInfo;
for (auto& info : *sSharedLibrariesList) {
addSharedLibrary(info.header, info.path.c_str(), sharedLibraryInfo);
}
return sharedLibraryInfo;
}


@@ -0,0 +1,209 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <windows.h>
#include <dbghelp.h>
#include <sstream>
#include <psapi.h>
#include "BaseProfilerSharedLibraries.h"
#include "nsWindowsHelpers.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Unused.h"
#include "nsNativeCharsetUtils.h"
#include "nsPrintfCString.h"
#include "nsReadableUtils.h"
#define CV_SIGNATURE 0x53445352  // "RSDS" as stored on disk (little-endian)
struct CodeViewRecord70 {
uint32_t signature;
GUID pdbSignature;
uint32_t pdbAge;
// A UTF-8 string, according to
// https://github.com/Microsoft/microsoft-pdb/blob/082c5290e5aff028ae84e43affa8be717aa7af73/PDB/dbi/locator.cpp#L785
char pdbFileName[1];
};
static bool GetPdbInfo(uintptr_t aStart, nsID& aSignature, uint32_t& aAge,
char** aPdbName) {
if (!aStart) {
return false;
}
PIMAGE_DOS_HEADER dosHeader = reinterpret_cast<PIMAGE_DOS_HEADER>(aStart);
if (dosHeader->e_magic != IMAGE_DOS_SIGNATURE) {
return false;
}
PIMAGE_NT_HEADERS ntHeaders =
reinterpret_cast<PIMAGE_NT_HEADERS>(aStart + dosHeader->e_lfanew);
if (ntHeaders->Signature != IMAGE_NT_SIGNATURE) {
return false;
}
uint32_t relativeVirtualAddress =
ntHeaders->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_DEBUG]
.VirtualAddress;
if (!relativeVirtualAddress) {
return false;
}
PIMAGE_DEBUG_DIRECTORY debugDirectory =
reinterpret_cast<PIMAGE_DEBUG_DIRECTORY>(aStart + relativeVirtualAddress);
if (!debugDirectory || debugDirectory->Type != IMAGE_DEBUG_TYPE_CODEVIEW) {
return false;
}
CodeViewRecord70* debugInfo = reinterpret_cast<CodeViewRecord70*>(
aStart + debugDirectory->AddressOfRawData);
if (!debugInfo || debugInfo->signature != CV_SIGNATURE) {
return false;
}
aAge = debugInfo->pdbAge;
GUID& pdbSignature = debugInfo->pdbSignature;
aSignature.m0 = pdbSignature.Data1;
aSignature.m1 = pdbSignature.Data2;
aSignature.m2 = pdbSignature.Data3;
memcpy(aSignature.m3, pdbSignature.Data4, sizeof(pdbSignature.Data4));
  // The PDB file name can differ from the module's filename, so report both;
  // e.g. the PDB for C:\Windows\SysWOW64\ntdll.dll is wntdll.pdb.
*aPdbName = debugInfo->pdbFileName;
return true;
}
static nsCString GetVersion(WCHAR* dllPath) {
DWORD infoSize = GetFileVersionInfoSizeW(dllPath, nullptr);
if (infoSize == 0) {
return EmptyCString();
}
mozilla::UniquePtr<unsigned char[]> infoData =
mozilla::MakeUnique<unsigned char[]>(infoSize);
if (!GetFileVersionInfoW(dllPath, 0, infoSize, infoData.get())) {
return EmptyCString();
}
VS_FIXEDFILEINFO* vInfo;
UINT vInfoLen;
if (!VerQueryValueW(infoData.get(), L"\\", (LPVOID*)&vInfo, &vInfoLen)) {
return EmptyCString();
}
if (!vInfo) {
return EmptyCString();
}
nsPrintfCString version("%d.%d.%d.%d", vInfo->dwFileVersionMS >> 16,
vInfo->dwFileVersionMS & 0xFFFF,
vInfo->dwFileVersionLS >> 16,
vInfo->dwFileVersionLS & 0xFFFF);
return std::move(version);
}
SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() {
SharedLibraryInfo sharedLibraryInfo;
HANDLE hProcess = GetCurrentProcess();
mozilla::UniquePtr<HMODULE[]> hMods;
size_t modulesNum = 0;
if (hProcess != NULL) {
DWORD modulesSize;
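    // First call EnumProcessModules with no buffer to learn how many bytes
    // are needed, then allocate and fetch the actual module list.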
if (!EnumProcessModules(hProcess, nullptr, 0, &modulesSize)) {
return sharedLibraryInfo;
}
modulesNum = modulesSize / sizeof(HMODULE);
hMods = mozilla::MakeUnique<HMODULE[]>(modulesNum);
if (!EnumProcessModules(hProcess, hMods.get(), modulesNum * sizeof(HMODULE),
&modulesSize)) {
return sharedLibraryInfo;
}
// The list may have shrunk between calls
if (modulesSize / sizeof(HMODULE) < modulesNum) {
modulesNum = modulesSize / sizeof(HMODULE);
}
}
for (unsigned int i = 0; i < modulesNum; i++) {
nsAutoString pdbPathStr;
nsAutoString pdbNameStr;
char* pdbName = NULL;
WCHAR modulePath[MAX_PATH + 1];
if (!GetModuleFileNameEx(hProcess, hMods[i], modulePath,
sizeof(modulePath) / sizeof(WCHAR))) {
continue;
}
MODULEINFO module = {0};
if (!GetModuleInformation(hProcess, hMods[i], &module,
sizeof(MODULEINFO))) {
continue;
}
nsCString breakpadId;
// Load the module again to make sure that its handle will remain
// valid as we attempt to read the PDB information from it. We load the
// DLL as a datafile so that if the module actually gets unloaded between
// the call to EnumProcessModules and the following LoadLibraryEx, we don't
// end up running the now newly loaded module's DllMain function. If the
// module is already loaded, LoadLibraryEx just increments its refcount.
//
// Note that because of the race condition above, merely loading the DLL
// again is not safe enough, therefore we also need to make sure that we
// can read the memory mapped at the base address before we can safely
// proceed to actually access those pages.
HMODULE handleLock =
LoadLibraryEx(modulePath, NULL, LOAD_LIBRARY_AS_DATAFILE);
MEMORY_BASIC_INFORMATION vmemInfo = {0};
nsID pdbSig;
uint32_t pdbAge;
if (handleLock &&
sizeof(vmemInfo) ==
VirtualQuery(module.lpBaseOfDll, &vmemInfo, sizeof(vmemInfo)) &&
vmemInfo.State == MEM_COMMIT &&
GetPdbInfo((uintptr_t)module.lpBaseOfDll, pdbSig, pdbAge, &pdbName)) {
MOZ_ASSERT(breakpadId.IsEmpty());
breakpadId.AppendPrintf(
"%08X" // m0
"%04X%04X" // m1,m2
"%02X%02X%02X%02X%02X%02X%02X%02X" // m3
"%X", // pdbAge
pdbSig.m0, pdbSig.m1, pdbSig.m2, pdbSig.m3[0], pdbSig.m3[1],
pdbSig.m3[2], pdbSig.m3[3], pdbSig.m3[4], pdbSig.m3[5], pdbSig.m3[6],
pdbSig.m3[7], pdbAge);
pdbPathStr = NS_ConvertUTF8toUTF16(pdbName);
pdbNameStr = pdbPathStr;
int32_t pos = pdbNameStr.RFindChar('\\');
if (pos != kNotFound) {
pdbNameStr.Cut(0, pos + 1);
}
}
nsAutoString modulePathStr(modulePath);
nsAutoString moduleNameStr = modulePathStr;
int32_t pos = moduleNameStr.RFindChar('\\');
if (pos != kNotFound) {
moduleNameStr.Cut(0, pos + 1);
}
SharedLibrary shlib((uintptr_t)module.lpBaseOfDll,
(uintptr_t)module.lpBaseOfDll + module.SizeOfImage,
0, // DLLs are always mapped at offset 0 on Windows
breakpadId, moduleNameStr, modulePathStr, pdbNameStr,
pdbPathStr, GetVersion(modulePath), "");
sharedLibraryInfo.AddSharedLibrary(shlib);
FreeLibrary(handleLock); // ok to free null handles
}
return sharedLibraryInfo;
}
void SharedLibraryInfo::Initialize() { /* do nothing */
}

(Diff not shown because of its large size.)


@@ -0,0 +1,188 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <sys/mman.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "mozilla/Assertions.h"
#include "mozilla/Sprintf.h"
#include "PlatformMacros.h"
#include "AutoObjectMapper.h"
#if defined(GP_OS_android)
# include <dlfcn.h>
# include "mozilla/Types.h"
// FIXME move these out of mozglue/linker/ElfLoader.h into their
// own header, so as to avoid conflicts arising from two definitions
// of Array
extern "C" {
MFBT_API size_t __dl_get_mappable_length(void* handle);
MFBT_API void* __dl_mmap(void* handle, void* addr, size_t length, off_t offset);
MFBT_API void __dl_munmap(void* handle, void* addr, size_t length);
}
// The following are for get_installation_lib_dir()
# include "nsString.h"
# include "nsDirectoryServiceUtils.h"
# include "nsDirectoryServiceDefs.h"
#endif
// A helper function for creating failure error messages in
// AutoObjectMapper*::Map.
static void failedToMessage(void (*aLog)(const char*), const char* aHowFailed,
std::string aFileName) {
char buf[300];
SprintfLiteral(buf, "AutoObjectMapper::Map: Failed to %s \'%s\'", aHowFailed,
aFileName.c_str());
buf[sizeof(buf) - 1] = 0;
aLog(buf);
}
AutoObjectMapperPOSIX::AutoObjectMapperPOSIX(void (*aLog)(const char*))
: mImage(nullptr), mSize(0), mLog(aLog), mIsMapped(false) {}
AutoObjectMapperPOSIX::~AutoObjectMapperPOSIX() {
if (!mIsMapped) {
// There's nothing to do.
MOZ_ASSERT(!mImage);
MOZ_ASSERT(mSize == 0);
return;
}
MOZ_ASSERT(mSize > 0);
// The following assertion doesn't necessarily have to be true,
// but we assume (reasonably enough) that no mmap facility would
// be crazy enough to map anything at page zero.
MOZ_ASSERT(mImage);
munmap(mImage, mSize);
}
bool AutoObjectMapperPOSIX::Map(/*OUT*/ void** start, /*OUT*/ size_t* length,
std::string fileName) {
MOZ_ASSERT(!mIsMapped);
int fd = open(fileName.c_str(), O_RDONLY);
if (fd == -1) {
failedToMessage(mLog, "open", fileName);
return false;
}
struct stat st;
int err = fstat(fd, &st);
size_t sz = (err == 0) ? st.st_size : 0;
if (err != 0 || sz == 0) {
failedToMessage(mLog, "fstat", fileName);
close(fd);
return false;
}
void* image = mmap(nullptr, sz, PROT_READ, MAP_SHARED, fd, 0);
if (image == MAP_FAILED) {
failedToMessage(mLog, "mmap", fileName);
close(fd);
return false;
}
close(fd);
mIsMapped = true;
mImage = *start = image;
mSize = *length = sz;
return true;
}
#if defined(GP_OS_android)
// A helper function for AutoObjectMapperFaultyLib::Map. Finds out
// where the installation's lib directory is, since we'll have to look
// in there to get hold of libmozglue.so. Returned C string is heap
// allocated and the caller must deallocate it.
static char* get_installation_lib_dir() {
nsCOMPtr<nsIProperties> directoryService(
do_GetService(NS_DIRECTORY_SERVICE_CONTRACTID));
if (!directoryService) {
return nullptr;
}
nsCOMPtr<nsIFile> greDir;
nsresult rv = directoryService->Get(NS_GRE_DIR, NS_GET_IID(nsIFile),
getter_AddRefs(greDir));
if (NS_FAILED(rv)) return nullptr;
nsCString path;
rv = greDir->GetNativePath(path);
if (NS_FAILED(rv)) {
return nullptr;
}
return strdup(path.get());
}
AutoObjectMapperFaultyLib::AutoObjectMapperFaultyLib(void (*aLog)(const char*))
: AutoObjectMapperPOSIX(aLog), mHdl(nullptr) {}
AutoObjectMapperFaultyLib::~AutoObjectMapperFaultyLib() {
if (mHdl) {
// We've got an object mapped by faulty.lib. Unmap it via faulty.lib.
MOZ_ASSERT(mSize > 0);
// Assert on the basis that no valid mapping would start at page zero.
MOZ_ASSERT(mImage);
__dl_munmap(mHdl, mImage, mSize);
dlclose(mHdl);
// Stop assertions in ~AutoObjectMapperPOSIX from failing.
mImage = nullptr;
mSize = 0;
}
// At this point the parent class destructor, ~AutoObjectMapperPOSIX,
// gets called. If that has something mapped in the normal way, it
// will unmap it in the normal way. Unfortunately there's no
// obvious way to enforce the requirement that the object is mapped
// either by faulty.lib or by the parent class, but not by both.
}
bool AutoObjectMapperFaultyLib::Map(/*OUT*/ void** start,
/*OUT*/ size_t* length,
std::string fileName) {
MOZ_ASSERT(!mHdl);
if (fileName == "libmozglue.so") {
    // Handle case (2) from the class comment in AutoObjectMapper.h.
char* libdir = get_installation_lib_dir();
if (libdir) {
fileName = std::string(libdir) + "/lib/" + fileName;
free(libdir);
}
// Hand the problem off to the standard mapper.
return AutoObjectMapperPOSIX::Map(start, length, fileName);
} else {
    // Handle cases (1) and (3) from the class comment in AutoObjectMapper.h.
    // We have to grapple with faulty.lib directly.
void* hdl = dlopen(fileName.c_str(), RTLD_GLOBAL | RTLD_LAZY);
if (!hdl) {
failedToMessage(mLog, "get handle for ELF file", fileName);
return false;
}
size_t sz = __dl_get_mappable_length(hdl);
if (sz == 0) {
dlclose(hdl);
failedToMessage(mLog, "get size for ELF file", fileName);
return false;
}
void* image = __dl_mmap(hdl, nullptr, sz, 0);
if (image == MAP_FAILED) {
dlclose(hdl);
failedToMessage(mLog, "mmap ELF file", fileName);
return false;
}
mHdl = hdl;
mImage = *start = image;
mSize = *length = sz;
return true;
}
}
#endif // defined(GP_OS_android)


@@ -0,0 +1,114 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef AutoObjectMapper_h
#define AutoObjectMapper_h
#include <string>
#include "mozilla/Attributes.h"
#include "PlatformMacros.h"
// A (nearly-) RAII class that maps an object in and then unmaps it on
// destruction. This base class version uses the "normal" POSIX
// functions: open, fstat, close, mmap, munmap.
class MOZ_STACK_CLASS AutoObjectMapperPOSIX {
public:
// The constructor does not attempt to map the file, because that
// might fail. Instead, once the object has been constructed,
// call Map() to attempt the mapping. There is no corresponding
// Unmap() since the unmapping is done in the destructor. Failure
// messages are sent to |aLog|.
explicit AutoObjectMapperPOSIX(void (*aLog)(const char*));
// Unmap the file on destruction of this object.
~AutoObjectMapperPOSIX();
// Map |fileName| into the address space and return the mapping
// extents. If the file is zero sized this will fail. The file is
// mapped read-only and private. Returns true iff the mapping
// succeeded, in which case *start and *length hold its extent.
// Once a call to Map succeeds, all subsequent calls to it will
// fail.
bool Map(/*OUT*/ void** start, /*OUT*/ size_t* length, std::string fileName);
protected:
// If we are currently holding a mapped object, these record the
// mapped address range.
void* mImage;
size_t mSize;
// A logging sink, for complaining about mapping failures.
void (*mLog)(const char*);
private:
// Are we currently holding a mapped object? This is private to
// the base class. Derived classes need to have their own way to
// track whether they are holding a mapped object.
bool mIsMapped;
// Disable copying and assignment.
AutoObjectMapperPOSIX(const AutoObjectMapperPOSIX&);
AutoObjectMapperPOSIX& operator=(const AutoObjectMapperPOSIX&);
// Disable heap allocation of this class.
void* operator new(size_t);
void* operator new[](size_t);
void operator delete(void*);
void operator delete[](void*);
};
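// Editor's note: a minimal usage sketch (illustrative only; |ExampleLog|,
// |DumpFirstByte| and the file path are assumptions, not part of the
// original patch; assumes <stdio.h> and <stdint.h>):
//
//   static void ExampleLog(const char* aMsg) { fprintf(stderr, "%s\n", aMsg); }
//
//   void DumpFirstByte() {
//     void* image = nullptr;
//     size_t size = 0;
//     AutoObjectMapperPOSIX mapper(ExampleLog);
//     if (mapper.Map(&image, &size, "/usr/lib/libc.so.6")) {
//       // [image, image + size) is a read-only private mapping of the file;
//       // it is unmapped automatically when |mapper| goes out of scope.
//       printf("first byte: 0x%02x\n", static_cast<uint8_t*>(image)[0]);
//     }
//   }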
#if defined(GP_OS_android)
// This is a variant of AutoObjectMapperPOSIX suitable for use in
// conjunction with faulty.lib on Android. How it behaves depends on
// the name of the file to be mapped. There are three possible cases:
//
// (1) /foo/bar/xyzzy/blah.apk!/libwurble.so
// We hand it as-is to faulty.lib and let it fish the relevant
// bits out of the APK.
//
// (2) libmozglue.so
// This is part of the Fennec installation, but is not in the
// APK. Instead we have to figure out the installation path
// and look for it there. Because of faulty.lib limitations,
// we have to use regular open/mmap instead of faulty.lib.
//
// (3) libanythingelse.so
// faulty.lib assumes this is a system library, and prepends
// "/system/lib/" to the path. So as in (1), we can give it
// as-is to faulty.lib.
//
// Hence (1) and (3) require special-casing here. Case (2) simply
// hands the problem to the parent class.
class MOZ_STACK_CLASS AutoObjectMapperFaultyLib : public AutoObjectMapperPOSIX {
public:
explicit AutoObjectMapperFaultyLib(void (*aLog)(const char*));
~AutoObjectMapperFaultyLib();
bool Map(/*OUT*/ void** start, /*OUT*/ size_t* length, std::string fileName);
private:
// faulty.lib requires us to maintain an abstract handle that can be
// used later to unmap the area. If this is non-NULL, it is assumed
// that unmapping is to be done by faulty.lib. Otherwise it goes
// via the normal mechanism.
void* mHdl;
// Disable copying and assignment.
AutoObjectMapperFaultyLib(const AutoObjectMapperFaultyLib&);
AutoObjectMapperFaultyLib& operator=(const AutoObjectMapperFaultyLib&);
// Disable heap allocation of this class.
void* operator new(size_t);
void* operator new[](size_t);
void operator delete(void*);
void operator delete[](void*);
};
#endif // defined(GP_OS_android)
#endif // AutoObjectMapper_h


@@ -0,0 +1,100 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
// Copyright (c) 2011, 2013 Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Original author: Jim Blandy <jimb@mozilla.com> <jimb@red-bean.com>
// This file is derived from the following files in
// toolkit/crashreporter/google-breakpad:
// src/common/module.cc
// src/common/unique_string.cc
// There's no internal-only interface for LulCommon. Hence include
// the external interface directly.
#include "LulCommonExt.h"
#include <stdlib.h>
#include <string.h>
#include <string>
#include <map>
namespace lul {
using std::string;
////////////////////////////////////////////////////////////////
// Module
//
Module::Module(const string& name, const string& os, const string& architecture,
const string& id)
: name_(name), os_(os), architecture_(architecture), id_(id) {}
Module::~Module() {}
////////////////////////////////////////////////////////////////
// UniqueString
//
class UniqueString {
public:
explicit UniqueString(string str) { str_ = strdup(str.c_str()); }
~UniqueString() { free(reinterpret_cast<void*>(const_cast<char*>(str_))); }
const char* str_;
};
const char* FromUniqueString(const UniqueString* ustr) { return ustr->str_; }
bool IsEmptyUniqueString(const UniqueString* ustr) {
return (ustr->str_)[0] == '\0';
}
////////////////////////////////////////////////////////////////
// UniqueStringUniverse
//
UniqueStringUniverse::~UniqueStringUniverse() {
for (std::map<string, UniqueString*>::iterator it = map_.begin();
it != map_.end(); it++) {
delete it->second;
}
}
const UniqueString* UniqueStringUniverse::ToUniqueString(string str) {
std::map<string, UniqueString*>::iterator it = map_.find(str);
if (it == map_.end()) {
UniqueString* ustr = new UniqueString(str);
map_[str] = ustr;
return ustr;
} else {
return it->second;
}
}
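// Editor's note: within a single UniqueStringUniverse, equal strings map to
// the same UniqueString*, so pointer equality can stand in for string
// equality. A minimal sketch (illustrative only):
//
//   UniqueStringUniverse universe;
//   const UniqueString* a = universe.ToUniqueString("r14");
//   const UniqueString* b = universe.ToUniqueString("r14");
//   MOZ_ASSERT(a == b);  // same interned instance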
} // namespace lul


@@ -0,0 +1,509 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
// Copyright (c) 2006, 2010, 2012, 2013 Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Original author: Jim Blandy <jimb@mozilla.com> <jimb@red-bean.com>
// module.h: Define google_breakpad::Module. A Module holds debugging
// information, and can write that information out as a Breakpad
// symbol file.
// (C) Copyright Greg Colvin and Beman Dawes 1998, 1999.
// Copyright (c) 2001, 2002 Peter Dimov
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
// See http://www.boost.org/libs/smart_ptr/scoped_ptr.htm for documentation.
//
// This file is derived from the following files in
// toolkit/crashreporter/google-breakpad:
// src/common/unique_string.h
// src/common/scoped_ptr.h
// src/common/module.h
// External interface for the "Common" component of LUL.
#ifndef LulCommonExt_h
#define LulCommonExt_h
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string>
#include <map>
#include <vector>
#include <cstddef> // for std::ptrdiff_t
#include "mozilla/Assertions.h"
namespace lul {
using std::map;
using std::string;
////////////////////////////////////////////////////////////////
// UniqueString
//
// Abstract type
class UniqueString;
// Get the contained C string (debugging only)
const char* FromUniqueString(const UniqueString*);
// Is the given string empty (that is, "") ?
bool IsEmptyUniqueString(const UniqueString*);
////////////////////////////////////////////////////////////////
// UniqueStringUniverse
//
// All UniqueStrings live in some specific UniqueStringUniverse.
class UniqueStringUniverse {
public:
UniqueStringUniverse() {}
~UniqueStringUniverse();
// Convert a |string| to a UniqueString, that lives in this universe.
const UniqueString* ToUniqueString(string str);
private:
map<string, UniqueString*> map_;
};
////////////////////////////////////////////////////////////////
// GUID
//
typedef struct {
uint32_t data1;
uint16_t data2;
uint16_t data3;
uint8_t data4[8];
} MDGUID; // GUID
typedef MDGUID GUID;
////////////////////////////////////////////////////////////////
// scoped_ptr
//
// scoped_ptr mimics a built-in pointer except that it guarantees deletion
// of the object pointed to, either on destruction of the scoped_ptr or via
// an explicit reset(). scoped_ptr is a simple solution for simple needs;
// use shared_ptr or std::auto_ptr if your needs are more complex.
// *** NOTE ***
// If your scoped_ptr is a class member of class FOO pointing to a
// forward declared type BAR (as shown below), then you MUST use a non-inlined
// version of the destructor. The destructor of a scoped_ptr (called from
// FOO's destructor) must have a complete definition of BAR in order to
// destroy it. Example:
//
// -- foo.h --
// class BAR;
//
// class FOO {
// public:
// FOO();
// ~FOO(); // Required for sources that instantiate class FOO to compile!
//
// private:
// scoped_ptr<BAR> bar_;
// };
//
// -- foo.cc --
// #include "foo.h"
// FOO::~FOO() {} // Empty, but must be non-inlined to FOO's class definition.
// scoped_ptr_malloc added by Google
// When one of these goes out of scope, instead of doing a delete or
// delete[], it calls free(). scoped_ptr_malloc<char> is likely to see
// much more use than any other specializations.
// release() added by Google
// Use this to conditionally transfer ownership of a heap-allocated object
// to the caller, usually on method success.
template <typename T>
class scoped_ptr {
private:
T* ptr;
scoped_ptr(scoped_ptr const&);
scoped_ptr& operator=(scoped_ptr const&);
public:
typedef T element_type;
explicit scoped_ptr(T* p = 0) : ptr(p) {}
~scoped_ptr() { delete ptr; }
void reset(T* p = 0) {
if (ptr != p) {
delete ptr;
ptr = p;
}
}
T& operator*() const {
MOZ_ASSERT(ptr != 0);
return *ptr;
}
T* operator->() const {
MOZ_ASSERT(ptr != 0);
return ptr;
}
bool operator==(T* p) const { return ptr == p; }
bool operator!=(T* p) const { return ptr != p; }
T* get() const { return ptr; }
void swap(scoped_ptr& b) {
T* tmp = b.ptr;
b.ptr = ptr;
ptr = tmp;
}
T* release() {
T* tmp = ptr;
ptr = 0;
return tmp;
}
private:
// no reason to use these: each scoped_ptr should have its own object
template <typename U>
bool operator==(scoped_ptr<U> const& p) const;
template <typename U>
bool operator!=(scoped_ptr<U> const& p) const;
};
template <typename T>
inline void swap(scoped_ptr<T>& a, scoped_ptr<T>& b) {
a.swap(b);
}
template <typename T>
inline bool operator==(T* p, const scoped_ptr<T>& b) {
return p == b.get();
}
template <typename T>
inline bool operator!=(T* p, const scoped_ptr<T>& b) {
return p != b.get();
}
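// Editor's note: a minimal usage sketch for scoped_ptr (illustrative only;
// |Foo| is an assumption, not part of the original patch):
//
//   struct Foo { int x; };
//   {
//     scoped_ptr<Foo> p(new Foo());
//     p->x = 7;                // operator-> asserts the pointer is non-null
//     p.reset(new Foo());      // deletes the first Foo, takes the second
//     Foo* raw = p.release();  // ownership transferred to the caller
//     delete raw;              // caller must now delete it
//   }                          // destructor has nothing left to delete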
// scoped_array extends scoped_ptr to arrays. Deletion of the array pointed to
// is guaranteed, either on destruction of the scoped_array or via an explicit
// reset(). Use shared_array or std::vector if your needs are more complex.
template <typename T>
class scoped_array {
private:
T* ptr;
scoped_array(scoped_array const&);
scoped_array& operator=(scoped_array const&);
public:
typedef T element_type;
explicit scoped_array(T* p = 0) : ptr(p) {}
~scoped_array() { delete[] ptr; }
void reset(T* p = 0) {
if (ptr != p) {
delete[] ptr;
ptr = p;
}
}
T& operator[](std::ptrdiff_t i) const {
MOZ_ASSERT(ptr != 0);
MOZ_ASSERT(i >= 0);
return ptr[i];
}
bool operator==(T* p) const { return ptr == p; }
bool operator!=(T* p) const { return ptr != p; }
T* get() const { return ptr; }
void swap(scoped_array& b) {
T* tmp = b.ptr;
b.ptr = ptr;
ptr = tmp;
}
T* release() {
T* tmp = ptr;
ptr = 0;
return tmp;
}
private:
// no reason to use these: each scoped_array should have its own object
template <typename U>
bool operator==(scoped_array<U> const& p) const;
template <typename U>
bool operator!=(scoped_array<U> const& p) const;
};
template <class T>
inline void swap(scoped_array<T>& a, scoped_array<T>& b) {
a.swap(b);
}
template <typename T>
inline bool operator==(T* p, const scoped_array<T>& b) {
return p == b.get();
}
template <typename T>
inline bool operator!=(T* p, const scoped_array<T>& b) {
return p != b.get();
}
// This class wraps the c library function free() in a class that can be
// passed as a template argument to scoped_ptr_malloc below.
class ScopedPtrMallocFree {
public:
inline void operator()(void* x) const { free(x); }
};
// scoped_ptr_malloc<> is similar to scoped_ptr<>, but it accepts a
// second template argument, the functor used to free the object.
template <typename T, typename FreeProc = ScopedPtrMallocFree>
class scoped_ptr_malloc {
private:
T* ptr;
scoped_ptr_malloc(scoped_ptr_malloc const&);
scoped_ptr_malloc& operator=(scoped_ptr_malloc const&);
public:
typedef T element_type;
explicit scoped_ptr_malloc(T* p = 0) : ptr(p) {}
~scoped_ptr_malloc() { free_((void*)ptr); }
void reset(T* p = 0) {
if (ptr != p) {
free_((void*)ptr);
ptr = p;
}
}
T& operator*() const {
MOZ_ASSERT(ptr != 0);
return *ptr;
}
T* operator->() const {
MOZ_ASSERT(ptr != 0);
return ptr;
}
bool operator==(T* p) const { return ptr == p; }
bool operator!=(T* p) const { return ptr != p; }
T* get() const { return ptr; }
void swap(scoped_ptr_malloc& b) {
T* tmp = b.ptr;
b.ptr = ptr;
ptr = tmp;
}
T* release() {
T* tmp = ptr;
ptr = 0;
return tmp;
}
private:
// no reason to use these: each scoped_ptr_malloc should have its own object
template <typename U, typename GP>
bool operator==(scoped_ptr_malloc<U, GP> const& p) const;
template <typename U, typename GP>
bool operator!=(scoped_ptr_malloc<U, GP> const& p) const;
static FreeProc const free_;
};
template <typename T, typename FP>
FP const scoped_ptr_malloc<T, FP>::free_ = FP();
template <typename T, typename FP>
inline void swap(scoped_ptr_malloc<T, FP>& a, scoped_ptr_malloc<T, FP>& b) {
a.swap(b);
}
template <typename T, typename FP>
inline bool operator==(T* p, const scoped_ptr_malloc<T, FP>& b) {
return p == b.get();
}
template <typename T, typename FP>
inline bool operator!=(T* p, const scoped_ptr_malloc<T, FP>& b) {
return p != b.get();
}
////////////////////////////////////////////////////////////////
// Module
//
// A Module represents the contents of a module, and supports methods
// for adding information produced by parsing STABS or DWARF data
// --- possibly both from the same file --- and then writing out the
// unified contents as a Breakpad-format symbol file.
class Module {
public:
// The type of addresses and sizes in a symbol table.
typedef uint64_t Address;
// Representation of an expression. This can either be a postfix
// expression, in which case it is stored as a string, or a simple
// expression of the form (identifier + imm) or *(identifier + imm).
// It can also be invalid (denoting "no value").
enum ExprHow { kExprInvalid = 1, kExprPostfix, kExprSimple, kExprSimpleMem };
struct Expr {
// Construct a simple-form expression
Expr(const UniqueString* ident, long offset, bool deref) {
      if (IsEmptyUniqueString(ident)) {
        // Note: a bare |Expr();| would only construct and discard a
        // temporary; assign the invalid expression to |this| instead.
        *this = Expr();
} else {
postfix_ = "";
ident_ = ident;
offset_ = offset;
how_ = deref ? kExprSimpleMem : kExprSimple;
}
}
// Construct an invalid expression
Expr() {
postfix_ = "";
ident_ = nullptr;
offset_ = 0;
how_ = kExprInvalid;
}
// Return the postfix expression string, either directly,
// if this is a postfix expression, or by synthesising it
// for a simple expression.
std::string getExprPostfix() const {
switch (how_) {
case kExprPostfix:
return postfix_;
case kExprSimple:
case kExprSimpleMem: {
char buf[40];
sprintf(buf, " %ld %c%s", labs(offset_), offset_ < 0 ? '-' : '+',
how_ == kExprSimple ? "" : " ^");
return std::string(FromUniqueString(ident_)) + std::string(buf);
}
case kExprInvalid:
default:
MOZ_ASSERT(0 && "getExprPostfix: invalid Module::Expr type");
return "Expr::genExprPostfix: kExprInvalid";
}
}
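    // Editor's note (illustrative): a simple expression with ident "r14",
    // offset -8 and deref==true prints as "r14 8 - ^"; with offset 8 and
    // deref==false it prints as "r14 8 +".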
// The identifier that gives the starting value for simple expressions.
const UniqueString* ident_;
// The offset to add for simple expressions.
long offset_;
// The Postfix expression string to evaluate for non-simple expressions.
std::string postfix_;
// The operation expressed by this expression.
ExprHow how_;
};
// A map from register names to expressions that recover
// their values. This can represent a complete set of rules to
// follow at some address, or a set of changes to be applied to an
// extant set of rules.
// NOTE! there are two completely different types called RuleMap. This
// is one of them.
typedef std::map<const UniqueString*, Expr> RuleMap;
// A map from addresses to RuleMaps, representing changes that take
// effect at given addresses.
typedef std::map<Address, RuleMap> RuleChangeMap;
// A range of 'STACK CFI' stack walking information. An instance of
// this structure corresponds to a 'STACK CFI INIT' record and the
// subsequent 'STACK CFI' records that fall within its range.
struct StackFrameEntry {
// The starting address and number of bytes of machine code this
// entry covers.
Address address, size;
// The initial register recovery rules, in force at the starting
// address.
RuleMap initial_rules;
// A map from addresses to rule changes. To find the rules in
// force at a given address, start with initial_rules, and then
// apply the changes given in this map for all addresses up to and
// including the address you're interested in.
RuleChangeMap rule_changes;
};
// Create a new module with the given name, operating system,
// architecture, and ID string.
Module(const std::string& name, const std::string& os,
const std::string& architecture, const std::string& id);
~Module();
private:
// Module header entries.
std::string name_, os_, architecture_, id_;
};
} // namespace lul
#endif // LulCommonExt_h

(Diff not shown because of its large size.)

(Diff not shown because of its large size.)


@@ -0,0 +1,193 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
// Copyright (c) 2008, 2010 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// CFI reader author: Jim Blandy <jimb@mozilla.com> <jimb@red-bean.com>
// This file is derived from the following file in
// toolkit/crashreporter/google-breakpad:
// src/common/dwarf/dwarf2enums.h
#ifndef LulDwarfInt_h
#define LulDwarfInt_h
#include "LulCommonExt.h"
#include "LulDwarfExt.h"
namespace lul {
// These enums do not follow the google3 style only because they are
// known universally (specs, other implementations) by the names in
// exactly this capitalization.
// Tag names and codes.
// Call Frame Info instructions.
enum DwarfCFI {
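  // The first three opcodes are "packed": the high two bits select the
  // opcode and the low six bits carry an operand (a delta or a register).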
DW_CFA_advance_loc = 0x40,
DW_CFA_offset = 0x80,
DW_CFA_restore = 0xc0,
DW_CFA_nop = 0x00,
DW_CFA_set_loc = 0x01,
DW_CFA_advance_loc1 = 0x02,
DW_CFA_advance_loc2 = 0x03,
DW_CFA_advance_loc4 = 0x04,
DW_CFA_offset_extended = 0x05,
DW_CFA_restore_extended = 0x06,
DW_CFA_undefined = 0x07,
DW_CFA_same_value = 0x08,
DW_CFA_register = 0x09,
DW_CFA_remember_state = 0x0a,
DW_CFA_restore_state = 0x0b,
DW_CFA_def_cfa = 0x0c,
DW_CFA_def_cfa_register = 0x0d,
DW_CFA_def_cfa_offset = 0x0e,
DW_CFA_def_cfa_expression = 0x0f,
DW_CFA_expression = 0x10,
DW_CFA_offset_extended_sf = 0x11,
DW_CFA_def_cfa_sf = 0x12,
DW_CFA_def_cfa_offset_sf = 0x13,
DW_CFA_val_offset = 0x14,
DW_CFA_val_offset_sf = 0x15,
DW_CFA_val_expression = 0x16,
// Opcodes in this range are reserved for user extensions.
DW_CFA_lo_user = 0x1c,
DW_CFA_hi_user = 0x3f,
// SGI/MIPS specific.
DW_CFA_MIPS_advance_loc8 = 0x1d,
// GNU extensions.
DW_CFA_GNU_window_save = 0x2d,
DW_CFA_GNU_args_size = 0x2e,
DW_CFA_GNU_negative_offset_extended = 0x2f
};
// Exception handling 'z' augmentation letters.
enum DwarfZAugmentationCodes {
// If the CFI augmentation string begins with 'z', then the CIE and FDE
// have an augmentation data area just before the instructions, whose
// contents are determined by the subsequent augmentation letters.
DW_Z_augmentation_start = 'z',
// If this letter is present in a 'z' augmentation string, the CIE
// augmentation data includes a pointer encoding, and the FDE
// augmentation data includes a language-specific data area pointer,
// represented using that encoding.
DW_Z_has_LSDA = 'L',
// If this letter is present in a 'z' augmentation string, the CIE
// augmentation data includes a pointer encoding, followed by a pointer
// to a personality routine, represented using that encoding.
DW_Z_has_personality_routine = 'P',
// If this letter is present in a 'z' augmentation string, the CIE
// augmentation data includes a pointer encoding describing how the FDE's
// initial location, address range, and DW_CFA_set_loc operands are
// encoded.
DW_Z_has_FDE_address_encoding = 'R',
// If this letter is present in a 'z' augmentation string, then code
// addresses covered by FDEs that cite this CIE are signal delivery
// trampolines. Return addresses of frames in trampolines should not be
// adjusted as described in section 6.4.4 of the DWARF 3 spec.
DW_Z_is_signal_trampoline = 'S'
};
// Expression opcodes
enum DwarfExpressionOpcodes {
DW_OP_addr = 0x03,
DW_OP_deref = 0x06,
DW_OP_const1s = 0x09,
DW_OP_const2u = 0x0a,
DW_OP_const2s = 0x0b,
DW_OP_const4u = 0x0c,
DW_OP_const4s = 0x0d,
DW_OP_const8u = 0x0e,
DW_OP_const8s = 0x0f,
DW_OP_constu = 0x10,
DW_OP_consts = 0x11,
DW_OP_dup = 0x12,
DW_OP_drop = 0x13,
DW_OP_over = 0x14,
DW_OP_pick = 0x15,
DW_OP_swap = 0x16,
DW_OP_rot = 0x17,
DW_OP_xderef = 0x18,
DW_OP_abs = 0x19,
DW_OP_and = 0x1a,
DW_OP_div = 0x1b,
DW_OP_minus = 0x1c,
DW_OP_mod = 0x1d,
DW_OP_mul = 0x1e,
DW_OP_neg = 0x1f,
DW_OP_not = 0x20,
DW_OP_or = 0x21,
DW_OP_plus = 0x22,
DW_OP_plus_uconst = 0x23,
DW_OP_shl = 0x24,
DW_OP_shr = 0x25,
DW_OP_shra = 0x26,
DW_OP_xor = 0x27,
DW_OP_skip = 0x2f,
DW_OP_bra = 0x28,
DW_OP_eq = 0x29,
DW_OP_ge = 0x2a,
DW_OP_gt = 0x2b,
DW_OP_le = 0x2c,
DW_OP_lt = 0x2d,
DW_OP_ne = 0x2e,
DW_OP_lit0 = 0x30,
DW_OP_lit31 = 0x4f,
DW_OP_reg0 = 0x50,
DW_OP_reg31 = 0x6f,
DW_OP_breg0 = 0x70,
DW_OP_breg31 = 0x8f,
DW_OP_regx = 0x90,
DW_OP_fbreg = 0x91,
DW_OP_bregx = 0x92,
DW_OP_piece = 0x93,
DW_OP_deref_size = 0x94,
DW_OP_xderef_size = 0x95,
DW_OP_nop = 0x96,
DW_OP_push_object_address = 0x97,
DW_OP_call2 = 0x98,
DW_OP_call4 = 0x99,
DW_OP_call_ref = 0x9a,
DW_OP_form_tls_address = 0x9b,
DW_OP_call_frame_cfa = 0x9c,
DW_OP_bit_piece = 0x9d,
DW_OP_lo_user = 0xe0,
DW_OP_hi_user = 0xff
};
} // namespace lul
#endif // LulDwarfInt_h


@@ -0,0 +1,553 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "LulDwarfSummariser.h"
#include "LulDwarfExt.h"
#include "mozilla/Assertions.h"
#include "mozilla/Sprintf.h"
// Set this to 1 for verbose logging
#define DEBUG_SUMMARISER 0
namespace lul {
// Do |s64|'s lowest 32 bits sign extend back to |s64| itself?
static inline bool fitsIn32Bits(int64 s64) {
return s64 == ((s64 & 0xffffffff) ^ 0x80000000) - 0x80000000;
}
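// Editor's note (worked examples): 0x000000007fffffff (INT32_MAX) and
// 0xffffffff80000000 (INT32_MIN) both pass, while 0x0000000080000000 fails,
// because its low 32 bits sign-extend to 0xffffffff80000000.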
// Check a LExpr prefix expression, starting at pfxInstrs[start] up to
// the next PX_End instruction, to ensure that:
// * It only mentions registers that are tracked on this target
// * The start point is sane
// If the expression is ok, return nullptr. Else return a const char*
// holding a bit of text describing the problem.
static const char* checkPfxExpr(const vector<PfxInstr>* pfxInstrs,
int64_t start) {
size_t nInstrs = pfxInstrs->size();
if (start < 0 || start >= (ssize_t)nInstrs) {
return "bogus start point";
}
size_t i;
for (i = start; i < nInstrs; i++) {
PfxInstr pxi = (*pfxInstrs)[i];
if (pxi.mOpcode == PX_End) break;
if (pxi.mOpcode == PX_DwReg &&
!registerIsTracked((DW_REG_NUMBER)pxi.mOperand)) {
return "uses untracked reg";
}
}
return nullptr; // success
}
Summariser::Summariser(SecMap* aSecMap, uintptr_t aTextBias,
void (*aLog)(const char*))
: mSecMap(aSecMap), mTextBias(aTextBias), mLog(aLog) {
mCurrAddr = 0;
mMax1Addr = 0; // Gives an empty range.
// Initialise the running RuleSet to "haven't got a clue" status.
new (&mCurrRules) RuleSet();
}
void Summariser::Entry(uintptr_t aAddress, uintptr_t aLength) {
aAddress += mTextBias;
if (DEBUG_SUMMARISER) {
char buf[100];
SprintfLiteral(buf, "LUL Entry(%llx, %llu)\n",
(unsigned long long int)aAddress,
(unsigned long long int)aLength);
mLog(buf);
}
// This throws away any previous summary, that is, assumes
// that the previous summary, if any, has been properly finished
// by a call to End().
mCurrAddr = aAddress;
mMax1Addr = aAddress + aLength;
new (&mCurrRules) RuleSet();
}
void Summariser::Rule(uintptr_t aAddress, int aNewReg, LExprHow how,
int16_t oldReg, int64_t offset) {
aAddress += mTextBias;
if (DEBUG_SUMMARISER) {
char buf[100];
if (how == NODEREF || how == DEREF) {
bool deref = how == DEREF;
SprintfLiteral(buf, "LUL 0x%llx old-r%d = %sr%d + %lld%s\n",
(unsigned long long int)aAddress, aNewReg,
deref ? "*(" : "", (int)oldReg, (long long int)offset,
deref ? ")" : "");
} else if (how == PFXEXPR) {
SprintfLiteral(buf, "LUL 0x%llx old-r%d = pfx-expr-at %lld\n",
(unsigned long long int)aAddress, aNewReg,
(long long int)offset);
} else {
SprintfLiteral(buf, "LUL 0x%llx old-r%d = (invalid LExpr!)\n",
(unsigned long long int)aAddress, aNewReg);
}
mLog(buf);
}
if (mCurrAddr < aAddress) {
// Flush the existing summary first.
mCurrRules.mAddr = mCurrAddr;
mCurrRules.mLen = aAddress - mCurrAddr;
mSecMap->AddRuleSet(&mCurrRules);
if (DEBUG_SUMMARISER) {
mLog("LUL ");
mCurrRules.Print(mLog);
mLog("\n");
}
mCurrAddr = aAddress;
}
// If for some reason summarisation fails, either or both of these
// become non-null and point at constant text describing the
// problem. Using two rather than just one avoids complications of
// having to concatenate two strings to produce a complete error message.
const char* reason1 = nullptr;
const char* reason2 = nullptr;
// |offset| needs to be a 32 bit value that sign extends to 64 bits
// on a 64 bit target. We will need to incorporate |offset| into
// any LExpr made here. So we may as well check it right now.
if (!fitsIn32Bits(offset)) {
reason1 = "offset not in signed 32-bit range";
goto cant_summarise;
}
// FIXME: factor out common parts of the arch-dependent summarisers.
#if defined(GP_ARCH_arm)
// ----------------- arm ----------------- //
// Now, can we add the rule to our summary? This depends on whether
// the registers and the overall expression are representable. This
// is the heart of the summarisation process.
switch (aNewReg) {
case DW_REG_CFA:
// This is a rule that defines the CFA. The only forms we
// choose to represent are: r7/11/12/13 + offset. The offset
// must fit into 32 bits since 'uintptr_t' is 32 bit on ARM,
// hence there is no need to check it for overflow.
if (how != NODEREF) {
reason1 = "rule for DW_REG_CFA: invalid |how|";
goto cant_summarise;
}
switch (oldReg) {
case DW_REG_ARM_R7:
case DW_REG_ARM_R11:
case DW_REG_ARM_R12:
case DW_REG_ARM_R13:
break;
default:
reason1 = "rule for DW_REG_CFA: invalid |oldReg|";
goto cant_summarise;
}
mCurrRules.mCfaExpr = LExpr(how, oldReg, offset);
break;
case DW_REG_ARM_R7:
case DW_REG_ARM_R11:
case DW_REG_ARM_R12:
case DW_REG_ARM_R13:
case DW_REG_ARM_R14:
case DW_REG_ARM_R15: {
// This is a new rule for R7, R11, R12, R13 (SP), R14 (LR) or
// R15 (the return address).
switch (how) {
case NODEREF:
case DEREF:
// Check the old register is one we're tracking.
if (!registerIsTracked((DW_REG_NUMBER)oldReg) &&
oldReg != DW_REG_CFA) {
reason1 = "rule for R7/11/12/13/14/15: uses untracked reg";
goto cant_summarise;
}
break;
case PFXEXPR: {
// Check that the prefix expression only mentions tracked registers.
const vector<PfxInstr>* pfxInstrs = mSecMap->GetPfxInstrs();
reason2 = checkPfxExpr(pfxInstrs, offset);
if (reason2) {
reason1 = "rule for R7/11/12/13/14/15: ";
goto cant_summarise;
}
break;
}
default:
goto cant_summarise;
}
LExpr expr = LExpr(how, oldReg, offset);
switch (aNewReg) {
case DW_REG_ARM_R7:
mCurrRules.mR7expr = expr;
break;
case DW_REG_ARM_R11:
mCurrRules.mR11expr = expr;
break;
case DW_REG_ARM_R12:
mCurrRules.mR12expr = expr;
break;
case DW_REG_ARM_R13:
mCurrRules.mR13expr = expr;
break;
case DW_REG_ARM_R14:
mCurrRules.mR14expr = expr;
break;
case DW_REG_ARM_R15:
mCurrRules.mR15expr = expr;
break;
default:
MOZ_ASSERT(0);
}
break;
}
default:
// Leave |reason1| and |reason2| unset here. This program point
// is reached so often that it causes a flood of "Can't
// summarise" messages. In any case, we don't really care about
// the fact that this summary would produce a new value for a
// register that we're not tracking. We do on the other hand
// care if the summary's expression *uses* a register that we're
// not tracking. But in that case one of the above failures
// should tell us which.
goto cant_summarise;
}
// Mark callee-saved registers (r4 .. r11) as unchanged, if there is
// no other information about them. FIXME: do this just once, at
// the point where the ruleset is committed.
if (mCurrRules.mR7expr.mHow == UNKNOWN) {
mCurrRules.mR7expr = LExpr(NODEREF, DW_REG_ARM_R7, 0);
}
if (mCurrRules.mR11expr.mHow == UNKNOWN) {
mCurrRules.mR11expr = LExpr(NODEREF, DW_REG_ARM_R11, 0);
}
if (mCurrRules.mR12expr.mHow == UNKNOWN) {
mCurrRules.mR12expr = LExpr(NODEREF, DW_REG_ARM_R12, 0);
}
// The old r13 (SP) value before the call is always the same as the
// CFA.
mCurrRules.mR13expr = LExpr(NODEREF, DW_REG_CFA, 0);
// If there's no information about R15 (the return address), say
// it's a copy of R14 (the link register).
if (mCurrRules.mR15expr.mHow == UNKNOWN) {
mCurrRules.mR15expr = LExpr(NODEREF, DW_REG_ARM_R14, 0);
}
#elif defined(GP_ARCH_arm64)
// ----------------- arm64 ----------------- //
switch (aNewReg) {
case DW_REG_CFA:
if (how != NODEREF) {
reason1 = "rule for DW_REG_CFA: invalid |how|";
goto cant_summarise;
}
switch (oldReg) {
case DW_REG_AARCH64_X29:
case DW_REG_AARCH64_SP:
break;
default:
reason1 = "rule for DW_REG_CFA: invalid |oldReg|";
goto cant_summarise;
}
mCurrRules.mCfaExpr = LExpr(how, oldReg, offset);
break;
case DW_REG_AARCH64_X29:
case DW_REG_AARCH64_X30:
case DW_REG_AARCH64_SP: {
switch (how) {
case NODEREF:
case DEREF:
// Check the old register is one we're tracking.
if (!registerIsTracked((DW_REG_NUMBER)oldReg) &&
oldReg != DW_REG_CFA) {
reason1 = "rule for X29/X30/SP: uses untracked reg";
goto cant_summarise;
}
break;
case PFXEXPR: {
// Check that the prefix expression only mentions tracked registers.
const vector<PfxInstr>* pfxInstrs = mSecMap->GetPfxInstrs();
reason2 = checkPfxExpr(pfxInstrs, offset);
if (reason2) {
reason1 = "rule for X29/X30/SP: ";
goto cant_summarise;
}
break;
}
default:
goto cant_summarise;
}
LExpr expr = LExpr(how, oldReg, offset);
switch (aNewReg) {
case DW_REG_AARCH64_X29:
mCurrRules.mX29expr = expr;
break;
case DW_REG_AARCH64_X30:
mCurrRules.mX30expr = expr;
break;
case DW_REG_AARCH64_SP:
mCurrRules.mSPexpr = expr;
break;
default:
MOZ_ASSERT(0);
}
break;
}
default:
// Leave |reason1| and |reason2| unset here, for the reasons explained
// in the analogous point in the ARM case just above.
goto cant_summarise;
}
if (mCurrRules.mX29expr.mHow == UNKNOWN) {
mCurrRules.mX29expr = LExpr(NODEREF, DW_REG_AARCH64_X29, 0);
}
if (mCurrRules.mX30expr.mHow == UNKNOWN) {
mCurrRules.mX30expr = LExpr(NODEREF, DW_REG_AARCH64_X30, 0);
}
// On aarch64, it seems the old SP value before the call is always the
// same as the CFA. Therefore, in the absence of any other way to
// recover the SP, specify that the CFA should be copied.
if (mCurrRules.mSPexpr.mHow == UNKNOWN) {
mCurrRules.mSPexpr = LExpr(NODEREF, DW_REG_CFA, 0);
}
#elif defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
// ---------------- x64/x86 ---------------- //
// Now, can we add the rule to our summary? This depends on whether
// the registers and the overall expression are representable. This
// is the heart of the summarisation process.
switch (aNewReg) {
case DW_REG_CFA: {
// This is a rule that defines the CFA. The only forms we choose to
// represent are: = SP+offset, = FP+offset, or =prefix-expr.
switch (how) {
case NODEREF:
if (oldReg != DW_REG_INTEL_XSP && oldReg != DW_REG_INTEL_XBP) {
reason1 = "rule for DW_REG_CFA: invalid |oldReg|";
goto cant_summarise;
}
break;
case DEREF:
reason1 = "rule for DW_REG_CFA: invalid |how|";
goto cant_summarise;
case PFXEXPR: {
// Check that the prefix expression only mentions tracked registers.
const vector<PfxInstr>* pfxInstrs = mSecMap->GetPfxInstrs();
reason2 = checkPfxExpr(pfxInstrs, offset);
if (reason2) {
reason1 = "rule for CFA: ";
goto cant_summarise;
}
break;
}
default:
goto cant_summarise;
}
mCurrRules.mCfaExpr = LExpr(how, oldReg, offset);
break;
}
case DW_REG_INTEL_XSP:
case DW_REG_INTEL_XBP:
case DW_REG_INTEL_XIP: {
// This is a new rule for XSP, XBP or XIP (the return address).
switch (how) {
case NODEREF:
case DEREF:
// Check the old register is one we're tracking.
if (!registerIsTracked((DW_REG_NUMBER)oldReg) &&
oldReg != DW_REG_CFA) {
reason1 = "rule for XSP/XBP/XIP: uses untracked reg";
goto cant_summarise;
}
break;
case PFXEXPR: {
// Check that the prefix expression only mentions tracked registers.
const vector<PfxInstr>* pfxInstrs = mSecMap->GetPfxInstrs();
reason2 = checkPfxExpr(pfxInstrs, offset);
if (reason2) {
reason1 = "rule for XSP/XBP/XIP: ";
goto cant_summarise;
}
break;
}
default:
goto cant_summarise;
}
LExpr expr = LExpr(how, oldReg, offset);
switch (aNewReg) {
case DW_REG_INTEL_XBP:
mCurrRules.mXbpExpr = expr;
break;
case DW_REG_INTEL_XSP:
mCurrRules.mXspExpr = expr;
break;
case DW_REG_INTEL_XIP:
mCurrRules.mXipExpr = expr;
break;
default:
MOZ_CRASH("impossible value for aNewReg");
}
break;
}
default:
// Leave |reason1| and |reason2| unset here, for the reasons
// explained in the analogous point in the ARM case just above.
goto cant_summarise;
}
// On Intel, it seems the old SP value before the call is always the
// same as the CFA. Therefore, in the absence of any other way to
// recover the SP, specify that the CFA should be copied.
if (mCurrRules.mXspExpr.mHow == UNKNOWN) {
mCurrRules.mXspExpr = LExpr(NODEREF, DW_REG_CFA, 0);
}
// Also, gcc says "Undef" for BP when it is unchanged.
if (mCurrRules.mXbpExpr.mHow == UNKNOWN) {
mCurrRules.mXbpExpr = LExpr(NODEREF, DW_REG_INTEL_XBP, 0);
}
#elif defined(GP_ARCH_mips64)
// ---------------- mips ---------------- //
//
// Now, can we add the rule to our summary? This depends on whether
// the registers and the overall expression are representable. This
// is the heart of the summarisation process.
switch (aNewReg) {
case DW_REG_CFA:
// This is a rule that defines the CFA. The only forms we can
// represent are: = SP+offset or = FP+offset.
if (how != NODEREF) {
reason1 = "rule for DW_REG_CFA: invalid |how|";
goto cant_summarise;
}
if (oldReg != DW_REG_MIPS_SP && oldReg != DW_REG_MIPS_FP) {
reason1 = "rule for DW_REG_CFA: invalid |oldReg|";
goto cant_summarise;
}
mCurrRules.mCfaExpr = LExpr(how, oldReg, offset);
break;
case DW_REG_MIPS_SP:
case DW_REG_MIPS_FP:
case DW_REG_MIPS_PC: {
// This is a new rule for SP, FP or PC (the return address).
switch (how) {
case NODEREF:
case DEREF:
// Check the old register is one we're tracking.
if (!registerIsTracked((DW_REG_NUMBER)oldReg) &&
oldReg != DW_REG_CFA) {
reason1 = "rule for SP/FP/PC: uses untracked reg";
goto cant_summarise;
}
break;
case PFXEXPR: {
// Check that the prefix expression only mentions tracked registers.
const vector<PfxInstr>* pfxInstrs = mSecMap->GetPfxInstrs();
reason2 = checkPfxExpr(pfxInstrs, offset);
if (reason2) {
reason1 = "rule for SP/FP/PC: ";
goto cant_summarise;
}
break;
}
default:
goto cant_summarise;
}
LExpr expr = LExpr(how, oldReg, offset);
switch (aNewReg) {
case DW_REG_MIPS_FP:
mCurrRules.mFPexpr = expr;
break;
case DW_REG_MIPS_SP:
mCurrRules.mSPexpr = expr;
break;
case DW_REG_MIPS_PC:
mCurrRules.mPCexpr = expr;
break;
default:
MOZ_CRASH("impossible value for aNewReg");
}
break;
}
default:
// Leave |reason1| and |reason2| unset here, for the reasons
// explained in the analogous point in the ARM case just above.
goto cant_summarise;
}
// On MIPS, it seems the old SP value before the call is always the
// same as the CFA. Therefore, in the absence of any other way to
// recover the SP, specify that the CFA should be copied.
if (mCurrRules.mSPexpr.mHow == UNKNOWN) {
mCurrRules.mSPexpr = LExpr(NODEREF, DW_REG_CFA, 0);
}
// Also, gcc says "Undef" for FP when it is unchanged.
if (mCurrRules.mFPexpr.mHow == UNKNOWN) {
mCurrRules.mFPexpr = LExpr(NODEREF, DW_REG_MIPS_FP, 0);
}
#else
# error "Unsupported arch"
#endif
return;
cant_summarise:
if (reason1 || reason2) {
char buf[200];
SprintfLiteral(buf,
"LUL can't summarise: "
"SVMA=0x%llx: %s%s, expr=LExpr(%s,%u,%lld)\n",
(unsigned long long int)(aAddress - mTextBias),
reason1 ? reason1 : "", reason2 ? reason2 : "",
NameOf_LExprHow(how), (unsigned int)oldReg,
(long long int)offset);
mLog(buf);
}
}
uint32_t Summariser::AddPfxInstr(PfxInstr pfxi) {
return mSecMap->AddPfxInstr(pfxi);
}
void Summariser::End() {
if (DEBUG_SUMMARISER) {
mLog("LUL End\n");
}
if (mCurrAddr < mMax1Addr) {
mCurrRules.mAddr = mCurrAddr;
mCurrRules.mLen = mMax1Addr - mCurrAddr;
mSecMap->AddRuleSet(&mCurrRules);
if (DEBUG_SUMMARISER) {
mLog("LUL ");
mCurrRules.Print(mLog);
mLog("\n");
}
}
}
} // namespace lul
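As a hypothetical illustration of the Rule()/End() flow above (addresses and offsets invented; the calls and register constants are those declared in LulDwarfSummariser.h and LulMainInt.h), a typical x86_64 prologue's CFI would drive the Summariser like this:

  Summariser summ(secMap, textBias, logSink);
  summ.Entry(0x1000, 0x40);  // function covers SVMAs [0x1000, 0x1040)
  // At entry: CFA = SP+8, return address stored at CFA-8.
  summ.Rule(0x1000, DW_REG_CFA, NODEREF, DW_REG_INTEL_XSP, 8);
  summ.Rule(0x1000, DW_REG_INTEL_XIP, DEREF, DW_REG_CFA, -8);
  // After "push %rbp": CFA = SP+16, caller's RBP saved at CFA-16.
  summ.Rule(0x1001, DW_REG_CFA, NODEREF, DW_REG_INTEL_XSP, 16);
  summ.Rule(0x1001, DW_REG_INTEL_XBP, DEREF, DW_REG_CFA, -16);
  summ.End();  // commits the final RuleSet into the SecMap

Rules given for a new address implicitly close the RuleSet covering the preceding range, and End() (shown above) flushes the final one.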


@@ -0,0 +1,64 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef LulDwarfSummariser_h
#define LulDwarfSummariser_h
#include "LulMainInt.h"
namespace lul {
class Summariser {
public:
Summariser(SecMap* aSecMap, uintptr_t aTextBias, void (*aLog)(const char*));
virtual void Entry(uintptr_t aAddress, uintptr_t aLength);
virtual void End();
// Tell the summariser that the value for |aNewReg| at |aAddress| is
// recovered using the LExpr that can be constructed using the
// components |how|, |oldReg| and |offset|. The summariser will
// inspect the components and may reject them for various reasons,
// but the hope is that it will find them acceptable and record this
// rule permanently.
virtual void Rule(uintptr_t aAddress, int aNewReg, LExprHow how,
int16_t oldReg, int64_t offset);
virtual uint32_t AddPfxInstr(PfxInstr pfxi);
// Send output to the logging sink, for debugging.
virtual void Log(const char* str) { mLog(str); }
private:
// The SecMap in which we park the finished summaries (RuleSets) and
// also any PfxInstrs derived from Dwarf expressions.
SecMap* mSecMap;
// Running state for the current summary (RuleSet) under construction.
RuleSet mCurrRules;
// The start of the address range to which the RuleSet under
// construction applies.
uintptr_t mCurrAddr;
// The highest address, plus one, for which the RuleSet under
// construction could possibly apply. If there are no further
// incoming events then mCurrRules will eventually be emitted
// as-is, for the range mCurrAddr .. mMax1Addr - 1, if that is
// nonempty.
uintptr_t mMax1Addr;
// The bias value (to add to the SVMAs, to get AVMAs) to be used
// when adding entries into mSecMap.
uintptr_t mTextBias;
// A logging sink, for debugging.
void (*mLog)(const char* aFmt);
};
} // namespace lul
#endif // LulDwarfSummariser_h


@@ -0,0 +1,871 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
// Copyright (c) 2006, 2011, 2012 Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Restructured in 2009 by: Jim Blandy <jimb@mozilla.com> <jimb@red-bean.com>
// (derived from)
// dump_symbols.cc: implement google_breakpad::WriteSymbolFile:
// Find all the debugging info in a file and dump it as a Breakpad symbol file.
//
// dump_symbols.h: Read debugging information from an ELF file, and write
// it out as a Breakpad symbol file.
// This file is derived from the following files in
// toolkit/crashreporter/google-breakpad:
// src/common/linux/dump_symbols.cc
// src/common/linux/elfutils.cc
// src/common/linux/file_id.cc
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <set>
#include <string>
#include <vector>
#include "mozilla/Assertions.h"
#include "mozilla/Sprintf.h"
#include "PlatformMacros.h"
#include "LulCommonExt.h"
#include "LulDwarfExt.h"
#include "LulElfInt.h"
#include "LulMainInt.h"
#if defined(GP_PLAT_arm_android) && !defined(SHT_ARM_EXIDX)
// bionic and older glibc don't define it
# define SHT_ARM_EXIDX (SHT_LOPROC + 1)
#endif
// Old Linux headers don't define EM_AARCH64
#ifndef EM_AARCH64
# define EM_AARCH64 183
#endif
// This namespace contains helper functions.
namespace {
using lul::DwarfCFIToModule;
using lul::FindElfSectionByName;
using lul::GetOffset;
using lul::IsValidElf;
using lul::Module;
using lul::scoped_ptr;
using lul::Summariser;
using lul::UniqueStringUniverse;
using std::set;
using std::string;
using std::vector;
//
// FDWrapper
//
// Wrapper class to make sure opened file is closed.
//
class FDWrapper {
public:
explicit FDWrapper(int fd) : fd_(fd) {}
~FDWrapper() {
if (fd_ != -1) close(fd_);
}
int get() { return fd_; }
int release() {
int fd = fd_;
fd_ = -1;
return fd;
}
private:
int fd_;
};
//
// MmapWrapper
//
// Wrapper class to make sure mapped regions are unmapped.
//
class MmapWrapper {
public:
MmapWrapper() : is_set_(false), base_(NULL), size_(0) {}
~MmapWrapper() {
if (is_set_ && base_ != NULL) {
MOZ_ASSERT(size_ > 0);
munmap(base_, size_);
}
}
void set(void* mapped_address, size_t mapped_size) {
is_set_ = true;
base_ = mapped_address;
size_ = mapped_size;
}
void release() {
MOZ_ASSERT(is_set_);
is_set_ = false;
base_ = NULL;
size_ = 0;
}
private:
bool is_set_;
void* base_;
size_t size_;
};
// Set NUM_DW_REGNAMES to be the number of Dwarf register names
// appropriate to the machine architecture given in HEADER. Return
// true on success, or false if HEADER's machine architecture is not
// supported.
template <typename ElfClass>
bool DwarfCFIRegisterNames(const typename ElfClass::Ehdr* elf_header,
unsigned int* num_dw_regnames) {
switch (elf_header->e_machine) {
case EM_386:
*num_dw_regnames = DwarfCFIToModule::RegisterNames::I386();
return true;
case EM_ARM:
*num_dw_regnames = DwarfCFIToModule::RegisterNames::ARM();
return true;
case EM_X86_64:
*num_dw_regnames = DwarfCFIToModule::RegisterNames::X86_64();
return true;
case EM_MIPS:
*num_dw_regnames = DwarfCFIToModule::RegisterNames::MIPS();
return true;
case EM_AARCH64:
*num_dw_regnames = DwarfCFIToModule::RegisterNames::ARM64();
return true;
default:
MOZ_ASSERT(0);
return false;
}
}
template <typename ElfClass>
bool LoadDwarfCFI(const string& dwarf_filename,
const typename ElfClass::Ehdr* elf_header,
const char* section_name,
const typename ElfClass::Shdr* section, const bool eh_frame,
const typename ElfClass::Shdr* got_section,
const typename ElfClass::Shdr* text_section,
const bool big_endian, SecMap* smap, uintptr_t text_bias,
UniqueStringUniverse* usu, void (*log)(const char*)) {
// Find the appropriate set of register names for this file's
// architecture.
unsigned int num_dw_regs = 0;
if (!DwarfCFIRegisterNames<ElfClass>(elf_header, &num_dw_regs)) {
fprintf(stderr,
"%s: unrecognized ELF machine architecture '%d';"
" cannot convert DWARF call frame information\n",
dwarf_filename.c_str(), elf_header->e_machine);
return false;
}
const lul::Endianness endianness =
big_endian ? lul::ENDIANNESS_BIG : lul::ENDIANNESS_LITTLE;
// Find the call frame information and its size.
const char* cfi = GetOffset<ElfClass, char>(elf_header, section->sh_offset);
size_t cfi_size = section->sh_size;
// Plug together the parser, handler, and their entourages.
// Here's a summariser, which will receive the output of the
// parser, create summaries, and add them to |smap|.
Summariser summ(smap, text_bias, log);
lul::ByteReader reader(endianness);
reader.SetAddressSize(ElfClass::kAddrSize);
DwarfCFIToModule::Reporter module_reporter(log, dwarf_filename, section_name);
DwarfCFIToModule handler(num_dw_regs, &module_reporter, &reader, usu, &summ);
// Provide the base addresses for .eh_frame encoded pointers, if
// possible.
reader.SetCFIDataBase(section->sh_addr, cfi);
if (got_section) reader.SetDataBase(got_section->sh_addr);
if (text_section) reader.SetTextBase(text_section->sh_addr);
lul::CallFrameInfo::Reporter dwarf_reporter(log, dwarf_filename,
section_name);
lul::CallFrameInfo parser(cfi, cfi_size, &reader, &handler, &dwarf_reporter,
eh_frame);
parser.Start();
return true;
}
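// Illustrative data-flow sketch (an annotation, not Breakpad code): the
// objects plugged together in LoadDwarfCFI above cooperate as
//
//   .debug_frame / .eh_frame bytes
//     -> lul::CallFrameInfo::Start()   (the parser)
//     -> DwarfCFIToModule callbacks    (the handler)
//     -> Summariser::Rule() / End()    (summarisation)
//     -> SecMap::AddRuleSet()          (storage, consulted at unwind time)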
bool LoadELF(const string& obj_file, MmapWrapper* map_wrapper,
void** elf_header) {
int obj_fd = open(obj_file.c_str(), O_RDONLY);
if (obj_fd < 0) {
fprintf(stderr, "Failed to open ELF file '%s': %s\n", obj_file.c_str(),
strerror(errno));
return false;
}
FDWrapper obj_fd_wrapper(obj_fd);
struct stat st;
if (fstat(obj_fd, &st) != 0 || st.st_size <= 0) {
fprintf(stderr, "Unable to fstat ELF file '%s': %s\n", obj_file.c_str(),
strerror(errno));
return false;
}
// Mapping it read-only is good enough. In any case, mapping it
// read-write confuses Valgrind's debuginfo acquire/discard
// heuristics, making it hard to profile the profiler.
void* obj_base = mmap(nullptr, st.st_size, PROT_READ, MAP_PRIVATE, obj_fd, 0);
if (obj_base == MAP_FAILED) {
fprintf(stderr, "Failed to mmap ELF file '%s': %s\n", obj_file.c_str(),
strerror(errno));
return false;
}
map_wrapper->set(obj_base, st.st_size);
*elf_header = obj_base;
if (!IsValidElf(*elf_header)) {
fprintf(stderr, "Not a valid ELF file: %s\n", obj_file.c_str());
return false;
}
return true;
}
// Get the endianness of ELF_HEADER. If it's invalid, return false.
template <typename ElfClass>
bool ElfEndianness(const typename ElfClass::Ehdr* elf_header,
bool* big_endian) {
if (elf_header->e_ident[EI_DATA] == ELFDATA2LSB) {
*big_endian = false;
return true;
}
if (elf_header->e_ident[EI_DATA] == ELFDATA2MSB) {
*big_endian = true;
return true;
}
fprintf(stderr, "bad data encoding in ELF header: %d\n",
elf_header->e_ident[EI_DATA]);
return false;
}
//
// LoadSymbolsInfo
//
// Holds the state between the two calls to LoadSymbols() in case it's necessary
// to follow the .gnu_debuglink section and load debug information from a
// different file.
//
template <typename ElfClass>
class LoadSymbolsInfo {
public:
typedef typename ElfClass::Addr Addr;
explicit LoadSymbolsInfo(const vector<string>& dbg_dirs)
: debug_dirs_(dbg_dirs), has_loading_addr_(false) {}
// Keeps track of which sections have been loaded so sections don't
// accidentally get loaded twice from two different files.
void LoadedSection(const string& section) {
if (loaded_sections_.count(section) == 0) {
loaded_sections_.insert(section);
} else {
fprintf(stderr, "Section %s has already been loaded.\n", section.c_str());
}
}
string debuglink_file() const { return debuglink_file_; }
private:
const vector<string>& debug_dirs_; // Directories in which to
// search for the debug ELF file.
string debuglink_file_; // Full path to the debug ELF file.
bool has_loading_addr_; // Indicate if LOADING_ADDR_ is valid.
set<string> loaded_sections_; // Tracks the loaded ELF sections
// between calls to LoadSymbols().
};
// Find the preferred loading address of the binary.
template <typename ElfClass>
typename ElfClass::Addr GetLoadingAddress(
const typename ElfClass::Phdr* program_headers, int nheader) {
typedef typename ElfClass::Phdr Phdr;
// For non-PIC executables (e_type == ET_EXEC), the load address is
// the start address of the first PT_LOAD segment. (ELF requires
// the segments to be sorted by load address.) For PIC executables
// and dynamic libraries (e_type == ET_DYN), this address will
// normally be zero.
for (int i = 0; i < nheader; ++i) {
const Phdr& header = program_headers[i];
if (header.p_type == PT_LOAD) return header.p_vaddr;
}
return 0;
}
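// Hypothetical worked example of the bias calculation used below (all
// numbers invented): if the first PT_LOAD segment has p_vaddr == 0x0 (a
// typical ET_DYN library) and the dynamic linker mapped its r-x segment
// at rx_avma == 0x7f55e0000000, then text_bias == 0x7f55e0000000, and
// every SVMA found in the CFI is converted to an AVMA by adding it.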
template <typename ElfClass>
bool LoadSymbols(const string& obj_file, const bool big_endian,
const typename ElfClass::Ehdr* elf_header,
const bool read_gnu_debug_link,
LoadSymbolsInfo<ElfClass>* info, SecMap* smap, void* rx_avma,
size_t rx_size, UniqueStringUniverse* usu,
void (*log)(const char*)) {
typedef typename ElfClass::Phdr Phdr;
typedef typename ElfClass::Shdr Shdr;
char buf[500];
SprintfLiteral(buf, "LoadSymbols: BEGIN %s\n", obj_file.c_str());
buf[sizeof(buf) - 1] = 0;
log(buf);
// This is how the text bias is calculated.
// BEGIN CALCULATE BIAS
uintptr_t loading_addr = GetLoadingAddress<ElfClass>(
GetOffset<ElfClass, Phdr>(elf_header, elf_header->e_phoff),
elf_header->e_phnum);
uintptr_t text_bias = ((uintptr_t)rx_avma) - loading_addr;
SprintfLiteral(buf, "LoadSymbols: rx_avma=%llx, text_bias=%llx",
(unsigned long long int)(uintptr_t)rx_avma,
(unsigned long long int)text_bias);
buf[sizeof(buf) - 1] = 0;
log(buf);
// END CALCULATE BIAS
const Shdr* sections =
GetOffset<ElfClass, Shdr>(elf_header, elf_header->e_shoff);
const Shdr* section_names = sections + elf_header->e_shstrndx;
const char* names =
GetOffset<ElfClass, char>(elf_header, section_names->sh_offset);
const char* names_end = names + section_names->sh_size;
bool found_usable_info = false;
// Dwarf Call Frame Information (CFI) is actually independent from
// the other DWARF debugging information, and can be used alone.
const Shdr* dwarf_cfi_section =
FindElfSectionByName<ElfClass>(".debug_frame", SHT_PROGBITS, sections,
names, names_end, elf_header->e_shnum);
if (dwarf_cfi_section) {
// Ignore the return value of this function; even without call frame
// information, the other debugging information could be perfectly
// useful.
info->LoadedSection(".debug_frame");
bool result = LoadDwarfCFI<ElfClass>(obj_file, elf_header, ".debug_frame",
dwarf_cfi_section, false, 0, 0,
big_endian, smap, text_bias, usu, log);
found_usable_info = found_usable_info || result;
if (result) log("LoadSymbols: read CFI from .debug_frame");
}
// Linux C++ exception handling information can also provide
// unwinding data.
const Shdr* eh_frame_section =
FindElfSectionByName<ElfClass>(".eh_frame", SHT_PROGBITS, sections, names,
names_end, elf_header->e_shnum);
if (eh_frame_section) {
// Pointers in .eh_frame data may be relative to the base addresses of
// certain sections. Provide those sections if present.
const Shdr* got_section = FindElfSectionByName<ElfClass>(
".got", SHT_PROGBITS, sections, names, names_end, elf_header->e_shnum);
const Shdr* text_section = FindElfSectionByName<ElfClass>(
".text", SHT_PROGBITS, sections, names, names_end, elf_header->e_shnum);
info->LoadedSection(".eh_frame");
// As above, ignore the return value of this function.
bool result = LoadDwarfCFI<ElfClass>(
obj_file, elf_header, ".eh_frame", eh_frame_section, true, got_section,
text_section, big_endian, smap, text_bias, usu, log);
found_usable_info = found_usable_info || result;
if (result) log("LoadSymbols: read CFI from .eh_frame");
}
SprintfLiteral(buf, "LoadSymbols: END %s\n", obj_file.c_str());
buf[sizeof(buf) - 1] = 0;
log(buf);
return found_usable_info;
}
// Return the breakpad symbol file identifier for the architecture of
// ELF_HEADER.
template <typename ElfClass>
const char* ElfArchitecture(const typename ElfClass::Ehdr* elf_header) {
typedef typename ElfClass::Half Half;
Half arch = elf_header->e_machine;
switch (arch) {
case EM_386:
return "x86";
case EM_ARM:
return "arm";
case EM_AARCH64:
return "arm64";
case EM_MIPS:
return "mips";
case EM_PPC64:
return "ppc64";
case EM_PPC:
return "ppc";
case EM_S390:
return "s390";
case EM_SPARC:
return "sparc";
case EM_SPARCV9:
return "sparcv9";
case EM_X86_64:
return "x86_64";
default:
return NULL;
}
}
// Format the Elf file identifier in IDENTIFIER as a UUID with the
// dashes removed.
string FormatIdentifier(unsigned char identifier[16]) {
char identifier_str[40];
lul::FileID::ConvertIdentifierToString(identifier, identifier_str,
sizeof(identifier_str));
string id_no_dash;
for (int i = 0; identifier_str[i] != '\0'; ++i)
if (identifier_str[i] != '-') id_no_dash += identifier_str[i];
// Add an extra "0" by the end. PDB files on Windows have an 'age'
// number appended to the end of the file identifier; this isn't
// really used or necessary on other platforms, but be consistent.
id_no_dash += '0';
return id_no_dash;
}
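// Worked example (using the sample UUID from LulElfInt.h's comments): an
// identifier that formats as "22F065BB-FC9C-49F7-80FE-26A7CEBD7BCE"
// becomes "22F065BBFC9C49F780FE26A7CEBD7BCE0" -- dashes stripped and a
// '0' appended.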
// Return the non-directory portion of FILENAME: the portion after the
// last slash, or the whole filename if there are no slashes.
string BaseFileName(const string& filename) {
// Lots of copies! basename's behavior is less than ideal.
char* c_filename = strdup(filename.c_str());
string base = basename(c_filename);
free(c_filename);
return base;
}
template <typename ElfClass>
bool ReadSymbolDataElfClass(const typename ElfClass::Ehdr* elf_header,
const string& obj_filename,
const vector<string>& debug_dirs, SecMap* smap,
void* rx_avma, size_t rx_size,
UniqueStringUniverse* usu,
void (*log)(const char*)) {
typedef typename ElfClass::Ehdr Ehdr;
unsigned char identifier[16];
if (!lul::FileID::ElfFileIdentifierFromMappedFile(elf_header, identifier)) {
fprintf(stderr, "%s: unable to generate file identifier\n",
obj_filename.c_str());
return false;
}
const char* architecture = ElfArchitecture<ElfClass>(elf_header);
if (!architecture) {
fprintf(stderr, "%s: unrecognized ELF machine architecture: %d\n",
obj_filename.c_str(), elf_header->e_machine);
return false;
}
// Figure out what endianness this file is.
bool big_endian;
if (!ElfEndianness<ElfClass>(elf_header, &big_endian)) return false;
string name = BaseFileName(obj_filename);
string os = "Linux";
string id = FormatIdentifier(identifier);
LoadSymbolsInfo<ElfClass> info(debug_dirs);
if (!LoadSymbols<ElfClass>(obj_filename, big_endian, elf_header,
!debug_dirs.empty(), &info, smap, rx_avma, rx_size,
usu, log)) {
const string debuglink_file = info.debuglink_file();
if (debuglink_file.empty()) return false;
// Load debuglink ELF file.
fprintf(stderr, "Found debugging info in %s\n", debuglink_file.c_str());
MmapWrapper debug_map_wrapper;
Ehdr* debug_elf_header = NULL;
if (!LoadELF(debuglink_file, &debug_map_wrapper,
reinterpret_cast<void**>(&debug_elf_header)))
return false;
// Sanity checks to make sure everything matches up.
const char* debug_architecture =
ElfArchitecture<ElfClass>(debug_elf_header);
if (!debug_architecture) {
fprintf(stderr, "%s: unrecognized ELF machine architecture: %d\n",
debuglink_file.c_str(), debug_elf_header->e_machine);
return false;
}
if (strcmp(architecture, debug_architecture)) {
fprintf(stderr,
"%s with ELF machine architecture %s does not match "
"%s with ELF architecture %s\n",
debuglink_file.c_str(), debug_architecture, obj_filename.c_str(),
architecture);
return false;
}
bool debug_big_endian;
if (!ElfEndianness<ElfClass>(debug_elf_header, &debug_big_endian))
return false;
if (debug_big_endian != big_endian) {
fprintf(stderr, "%s and %s does not match in endianness\n",
obj_filename.c_str(), debuglink_file.c_str());
return false;
}
if (!LoadSymbols<ElfClass>(debuglink_file, debug_big_endian,
debug_elf_header, false, &info, smap, rx_avma,
rx_size, usu, log)) {
return false;
}
}
return true;
}
} // namespace
namespace lul {
bool ReadSymbolDataInternal(const uint8_t* obj_file, const string& obj_filename,
const vector<string>& debug_dirs, SecMap* smap,
void* rx_avma, size_t rx_size,
UniqueStringUniverse* usu,
void (*log)(const char*)) {
if (!IsValidElf(obj_file)) {
fprintf(stderr, "Not a valid ELF file: %s\n", obj_filename.c_str());
return false;
}
int elfclass = ElfClass(obj_file);
if (elfclass == ELFCLASS32) {
return ReadSymbolDataElfClass<ElfClass32>(
reinterpret_cast<const Elf32_Ehdr*>(obj_file), obj_filename, debug_dirs,
smap, rx_avma, rx_size, usu, log);
}
if (elfclass == ELFCLASS64) {
return ReadSymbolDataElfClass<ElfClass64>(
reinterpret_cast<const Elf64_Ehdr*>(obj_file), obj_filename, debug_dirs,
smap, rx_avma, rx_size, usu, log);
}
return false;
}
bool ReadSymbolData(const string& obj_file, const vector<string>& debug_dirs,
SecMap* smap, void* rx_avma, size_t rx_size,
UniqueStringUniverse* usu, void (*log)(const char*)) {
MmapWrapper map_wrapper;
void* elf_header = NULL;
if (!LoadELF(obj_file, &map_wrapper, &elf_header)) return false;
return ReadSymbolDataInternal(reinterpret_cast<uint8_t*>(elf_header),
obj_file, debug_dirs, smap, rx_avma, rx_size,
usu, log);
}
namespace {
template <typename ElfClass>
void FindElfClassSection(const char* elf_base, const char* section_name,
typename ElfClass::Word section_type,
const void** section_start, int* section_size) {
typedef typename ElfClass::Ehdr Ehdr;
typedef typename ElfClass::Shdr Shdr;
MOZ_ASSERT(elf_base);
MOZ_ASSERT(section_start);
MOZ_ASSERT(section_size);
MOZ_ASSERT(strncmp(elf_base, ELFMAG, SELFMAG) == 0);
const Ehdr* elf_header = reinterpret_cast<const Ehdr*>(elf_base);
MOZ_ASSERT(elf_header->e_ident[EI_CLASS] == ElfClass::kClass);
const Shdr* sections =
GetOffset<ElfClass, Shdr>(elf_header, elf_header->e_shoff);
const Shdr* section_names = sections + elf_header->e_shstrndx;
const char* names =
GetOffset<ElfClass, char>(elf_header, section_names->sh_offset);
const char* names_end = names + section_names->sh_size;
const Shdr* section =
FindElfSectionByName<ElfClass>(section_name, section_type, sections,
names, names_end, elf_header->e_shnum);
if (section != NULL && section->sh_size > 0) {
*section_start = elf_base + section->sh_offset;
*section_size = section->sh_size;
}
}
template <typename ElfClass>
void FindElfClassSegment(const char* elf_base,
typename ElfClass::Word segment_type,
const void** segment_start, int* segment_size) {
typedef typename ElfClass::Ehdr Ehdr;
typedef typename ElfClass::Phdr Phdr;
MOZ_ASSERT(elf_base);
MOZ_ASSERT(segment_start);
MOZ_ASSERT(segment_size);
MOZ_ASSERT(strncmp(elf_base, ELFMAG, SELFMAG) == 0);
const Ehdr* elf_header = reinterpret_cast<const Ehdr*>(elf_base);
MOZ_ASSERT(elf_header->e_ident[EI_CLASS] == ElfClass::kClass);
const Phdr* phdrs =
GetOffset<ElfClass, Phdr>(elf_header, elf_header->e_phoff);
for (int i = 0; i < elf_header->e_phnum; ++i) {
if (phdrs[i].p_type == segment_type) {
*segment_start = elf_base + phdrs[i].p_offset;
*segment_size = phdrs[i].p_filesz;
return;
}
}
}
} // namespace
bool IsValidElf(const void* elf_base) {
return strncmp(reinterpret_cast<const char*>(elf_base), ELFMAG, SELFMAG) == 0;
}
int ElfClass(const void* elf_base) {
const ElfW(Ehdr)* elf_header = reinterpret_cast<const ElfW(Ehdr)*>(elf_base);
return elf_header->e_ident[EI_CLASS];
}
bool FindElfSection(const void* elf_mapped_base, const char* section_name,
uint32_t section_type, const void** section_start,
int* section_size, int* elfclass) {
MOZ_ASSERT(elf_mapped_base);
MOZ_ASSERT(section_start);
MOZ_ASSERT(section_size);
*section_start = NULL;
*section_size = 0;
if (!IsValidElf(elf_mapped_base)) return false;
int cls = ElfClass(elf_mapped_base);
if (elfclass) {
*elfclass = cls;
}
const char* elf_base = static_cast<const char*>(elf_mapped_base);
if (cls == ELFCLASS32) {
FindElfClassSection<ElfClass32>(elf_base, section_name, section_type,
section_start, section_size);
return *section_start != NULL;
} else if (cls == ELFCLASS64) {
FindElfClassSection<ElfClass64>(elf_base, section_name, section_type,
section_start, section_size);
return *section_start != NULL;
}
return false;
}
bool FindElfSegment(const void* elf_mapped_base, uint32_t segment_type,
const void** segment_start, int* segment_size,
int* elfclass) {
MOZ_ASSERT(elf_mapped_base);
MOZ_ASSERT(segment_start);
MOZ_ASSERT(segment_size);
*segment_start = NULL;
*segment_size = 0;
if (!IsValidElf(elf_mapped_base)) return false;
int cls = ElfClass(elf_mapped_base);
if (elfclass) {
*elfclass = cls;
}
const char* elf_base = static_cast<const char*>(elf_mapped_base);
if (cls == ELFCLASS32) {
FindElfClassSegment<ElfClass32>(elf_base, segment_type, segment_start,
segment_size);
return *segment_start != NULL;
} else if (cls == ELFCLASS64) {
FindElfClassSegment<ElfClass64>(elf_base, segment_type, segment_start,
segment_size);
return *segment_start != NULL;
}
return false;
}
// (derived from)
// file_id.cc: Return a unique identifier for a file
//
// See file_id.h for documentation
//
// ELF note name and desc are 32-bits word padded.
#define NOTE_PADDING(a) ((a + 3) & ~3)
// These functions are also used inside the crashed process, so be safe
// and use the syscall/libc wrappers instead of direct syscalls or libc.
template <typename ElfClass>
static bool ElfClassBuildIDNoteIdentifier(const void* section, int length,
uint8_t identifier[kMDGUIDSize]) {
typedef typename ElfClass::Nhdr Nhdr;
const void* section_end = reinterpret_cast<const char*>(section) + length;
const Nhdr* note_header = reinterpret_cast<const Nhdr*>(section);
while (reinterpret_cast<const void*>(note_header) < section_end) {
if (note_header->n_type == NT_GNU_BUILD_ID) break;
note_header = reinterpret_cast<const Nhdr*>(
reinterpret_cast<const char*>(note_header) + sizeof(Nhdr) +
NOTE_PADDING(note_header->n_namesz) +
NOTE_PADDING(note_header->n_descsz));
}
if (reinterpret_cast<const void*>(note_header) >= section_end ||
note_header->n_descsz == 0) {
return false;
}
const char* build_id = reinterpret_cast<const char*>(note_header) +
sizeof(Nhdr) + NOTE_PADDING(note_header->n_namesz);
// Copy as many bits of the build ID as will fit
// into the GUID space.
memset(identifier, 0, kMDGUIDSize);
memcpy(identifier, build_id,
std::min(kMDGUIDSize, (size_t)note_header->n_descsz));
return true;
}
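// Illustrative note layout (byte counts are examples; the field order is
// fixed by the ELF format): a GNU build-id note has n_namesz == 4
// ("GNU\0"), n_type == NT_GNU_BUILD_ID, and an n_descsz-byte build ID
// following the 4-byte-padded name. A 20-byte SHA-1 build ID is thus
// truncated to kMDGUIDSize (16) bytes by the memcpy above.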
// Attempt to locate a .note.gnu.build-id section in an ELF binary
// and copy as many bytes of it as will fit into |identifier|.
static bool FindElfBuildIDNote(const void* elf_mapped_base,
uint8_t identifier[kMDGUIDSize]) {
void* note_section;
int note_size, elfclass;
if ((!FindElfSegment(elf_mapped_base, PT_NOTE, (const void**)&note_section,
&note_size, &elfclass) ||
note_size == 0) &&
(!FindElfSection(elf_mapped_base, ".note.gnu.build-id", SHT_NOTE,
(const void**)&note_section, &note_size, &elfclass) ||
note_size == 0)) {
return false;
}
if (elfclass == ELFCLASS32) {
return ElfClassBuildIDNoteIdentifier<ElfClass32>(note_section, note_size,
identifier);
} else if (elfclass == ELFCLASS64) {
return ElfClassBuildIDNoteIdentifier<ElfClass64>(note_section, note_size,
identifier);
}
return false;
}
// Attempt to locate the .text section of an ELF binary and generate
// a simple hash by XORing the first page worth of bytes into |identifier|.
static bool HashElfTextSection(const void* elf_mapped_base,
uint8_t identifier[kMDGUIDSize]) {
void* text_section;
int text_size;
if (!FindElfSection(elf_mapped_base, ".text", SHT_PROGBITS,
(const void**)&text_section, &text_size, NULL) ||
text_size == 0) {
return false;
}
memset(identifier, 0, kMDGUIDSize);
const uint8_t* ptr = reinterpret_cast<const uint8_t*>(text_section);
const uint8_t* ptr_end = ptr + std::min(text_size, 4096);
while (ptr < ptr_end) {
for (unsigned i = 0; i < kMDGUIDSize; i++) identifier[i] ^= ptr[i];
ptr += kMDGUIDSize;
}
return true;
}
// static
bool FileID::ElfFileIdentifierFromMappedFile(const void* base,
uint8_t identifier[kMDGUIDSize]) {
// Look for a build id note first.
if (FindElfBuildIDNote(base, identifier)) return true;
// Fall back on hashing the first page of the text section.
return HashElfTextSection(base, identifier);
}
// static
void FileID::ConvertIdentifierToString(const uint8_t identifier[kMDGUIDSize],
char* buffer, int buffer_length) {
uint8_t identifier_swapped[kMDGUIDSize];
// Endian-ness swap to match dump processor expectation.
memcpy(identifier_swapped, identifier, kMDGUIDSize);
uint32_t* data1 = reinterpret_cast<uint32_t*>(identifier_swapped);
*data1 = htonl(*data1);
uint16_t* data2 = reinterpret_cast<uint16_t*>(identifier_swapped + 4);
*data2 = htons(*data2);
uint16_t* data3 = reinterpret_cast<uint16_t*>(identifier_swapped + 6);
*data3 = htons(*data3);
int buffer_idx = 0;
for (unsigned int idx = 0;
(buffer_idx < buffer_length) && (idx < kMDGUIDSize); ++idx) {
int hi = (identifier_swapped[idx] >> 4) & 0x0F;
int lo = (identifier_swapped[idx]) & 0x0F;
if (idx == 4 || idx == 6 || idx == 8 || idx == 10)
buffer[buffer_idx++] = '-';
buffer[buffer_idx++] = (hi >= 10) ? 'A' + hi - 10 : '0' + hi;
buffer[buffer_idx++] = (lo >= 10) ? 'A' + lo - 10 : '0' + lo;
}
// NULL terminate
buffer[(buffer_idx < buffer_length) ? buffer_idx : buffer_idx - 1] = 0;
}
} // namespace lul


@@ -0,0 +1,69 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
// Copyright (c) 2006, 2011, 2012 Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is derived from the following files in
// toolkit/crashreporter/google-breakpad:
// src/common/linux/dump_symbols.h
#ifndef LulElfExt_h
#define LulElfExt_h
// These two functions are the external interface to the
// ELF/Dwarf/EXIDX reader.
#include "LulMainInt.h"
using lul::SecMap;
namespace lul {
class UniqueStringUniverse;
// Find all the unwind information in OBJ_FILE, an ELF executable
// or shared library, and add it to SMAP.
bool ReadSymbolData(const std::string& obj_file,
const std::vector<std::string>& debug_dirs, SecMap* smap,
void* rx_avma, size_t rx_size, UniqueStringUniverse* usu,
void (*log)(const char*));
// The same as ReadSymbolData, except that OBJ_FILE is assumed to
// point to a mapped-in image of OBJ_FILENAME.
bool ReadSymbolDataInternal(const uint8_t* obj_file,
const std::string& obj_filename,
const std::vector<std::string>& debug_dirs,
SecMap* smap, void* rx_avma, size_t rx_size,
UniqueStringUniverse* usu,
void (*log)(const char*));
} // namespace lul
#endif // LulElfExt_h


@@ -0,0 +1,210 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
// Copyright (c) 2006, 2012, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is derived from the following files in
// toolkit/crashreporter/google-breakpad:
// src/common/android/include/elf.h
// src/common/linux/elfutils.h
// src/common/linux/file_id.h
// src/common/linux/elfutils-inl.h
#ifndef LulElfInt_h
#define LulElfInt_h
// This header defines functions etc internal to the ELF reader. It
// should not be included outside of LulElf.cpp.
#include <elf.h>
#include <stdlib.h>
#include "mozilla/Assertions.h"
#include "PlatformMacros.h"
// (derived from)
// elfutils.h: Utilities for dealing with ELF files.
//
#include <link.h>
#if defined(GP_OS_android)
// From toolkit/crashreporter/google-breakpad/src/common/android/include/elf.h
// The Android headers don't always define this constant.
# ifndef EM_X86_64
# define EM_X86_64 62
# endif
# ifndef EM_PPC64
# define EM_PPC64 21
# endif
# ifndef EM_S390
# define EM_S390 22
# endif
# ifndef NT_GNU_BUILD_ID
# define NT_GNU_BUILD_ID 3
# endif
# ifndef ElfW
# define ElfW(type) _ElfW(Elf, ELFSIZE, type)
# define _ElfW(e, w, t) _ElfW_1(e, w, _##t)
# define _ElfW_1(e, w, t) e##w##t
# endif
#endif
namespace lul {
// Traits classes so consumers can write templatized code to deal
// with specific ELF bits.
struct ElfClass32 {
typedef Elf32_Addr Addr;
typedef Elf32_Ehdr Ehdr;
typedef Elf32_Nhdr Nhdr;
typedef Elf32_Phdr Phdr;
typedef Elf32_Shdr Shdr;
typedef Elf32_Half Half;
typedef Elf32_Off Off;
typedef Elf32_Word Word;
static const int kClass = ELFCLASS32;
static const size_t kAddrSize = sizeof(Elf32_Addr);
};
struct ElfClass64 {
typedef Elf64_Addr Addr;
typedef Elf64_Ehdr Ehdr;
typedef Elf64_Nhdr Nhdr;
typedef Elf64_Phdr Phdr;
typedef Elf64_Shdr Shdr;
typedef Elf64_Half Half;
typedef Elf64_Off Off;
typedef Elf64_Word Word;
static const int kClass = ELFCLASS64;
static const size_t kAddrSize = sizeof(Elf64_Addr);
};
bool IsValidElf(const void* elf_header);
int ElfClass(const void* elf_base);
// Attempt to find a section named |section_name| of type |section_type|
// in the ELF binary data at |elf_mapped_base|. On success, returns true
// and sets |*section_start| to point to the start of the section data,
// and |*section_size| to the size of the section's data. If |elfclass|
// is not NULL, set |*elfclass| to the ELF file class.
bool FindElfSection(const void* elf_mapped_base, const char* section_name,
uint32_t section_type, const void** section_start,
int* section_size, int* elfclass);
// Internal helper method, exposed for convenience for callers
// that already have more info.
template <typename ElfClass>
const typename ElfClass::Shdr* FindElfSectionByName(
const char* name, typename ElfClass::Word section_type,
const typename ElfClass::Shdr* sections, const char* section_names,
const char* names_end, int nsection);
// Attempt to find the first segment of type |segment_type| in the ELF
// binary data at |elf_mapped_base|. On success, returns true and sets
// |*segment_start| to point to the start of the segment data, and
// |*segment_size| to the size of the segment's data. If |elfclass|
// is not NULL, set |*elfclass| to the ELF file class.
bool FindElfSegment(const void* elf_mapped_base, uint32_t segment_type,
const void** segment_start, int* segment_size,
int* elfclass);
// Convert an offset from an Elf header into a pointer to the mapped
// address in the current process. Takes an extra template parameter
// to specify the return type to avoid having to dynamic_cast the
// result.
template <typename ElfClass, typename T>
const T* GetOffset(const typename ElfClass::Ehdr* elf_header,
typename ElfClass::Off offset);
// (derived from)
// file_id.h: Return a unique identifier for a file
//
static const size_t kMDGUIDSize = sizeof(MDGUID);
class FileID {
public:
// Load the identifier for the elf file mapped into memory at |base| into
// |identifier|. Return false if the identifier could not be created for the
// file.
static bool ElfFileIdentifierFromMappedFile(const void* base,
uint8_t identifier[kMDGUIDSize]);
// Convert the |identifier| data to a NULL terminated string. The string will
// be formatted as a UUID (e.g., 22F065BB-FC9C-49F7-80FE-26A7CEBD7BCE).
// The |buffer| should be at least 37 bytes long to receive all of the data
// and termination. Shorter buffers will contain truncated data.
static void ConvertIdentifierToString(const uint8_t identifier[kMDGUIDSize],
char* buffer, int buffer_length);
};
template <typename ElfClass, typename T>
const T* GetOffset(const typename ElfClass::Ehdr* elf_header,
typename ElfClass::Off offset) {
return reinterpret_cast<const T*>(reinterpret_cast<uintptr_t>(elf_header) +
offset);
}
template <typename ElfClass>
const typename ElfClass::Shdr* FindElfSectionByName(
const char* name, typename ElfClass::Word section_type,
const typename ElfClass::Shdr* sections, const char* section_names,
const char* names_end, int nsection) {
MOZ_ASSERT(name != NULL);
MOZ_ASSERT(sections != NULL);
MOZ_ASSERT(nsection > 0);
int name_len = strlen(name);
if (name_len == 0) return NULL;
for (int i = 0; i < nsection; ++i) {
const char* section_name = section_names + sections[i].sh_name;
if (sections[i].sh_type == section_type &&
names_end - section_name >= name_len + 1 &&
strcmp(name, section_name) == 0) {
return sections + i;
}
}
return NULL;
}
} // namespace lul
// And finally, the external interface, offered to LulMain.cpp
#include "LulElfExt.h"
#endif // LulElfInt_h

(The diff for one file is not shown because of its large size.)

@@ -0,0 +1,377 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef LulMain_h
#define LulMain_h
#include "PlatformMacros.h"
#include "mozilla/Atomics.h"
#include "mozilla/MemoryReporting.h"
// LUL: A Lightweight Unwind Library.
// This file provides the end-user (external) interface for LUL.
// Some comments about naming in the implementation. These are safe
// to ignore if you are merely using LUL, but are important if you
// hack on its internals.
//
// Debuginfo readers in general have tended to use the word "address"
// to mean several different things. This sometimes makes them
// difficult to understand and maintain. LUL tries hard to avoid
// using the word "address" and instead uses the following more
// precise terms:
//
// * SVMA ("Stated Virtual Memory Address"): this is an address of a
// symbol (etc) as it is stated in the symbol table, or other
// metadata, of an object. Such values are typically small and
// start from zero or thereabouts, unless the object has been
// prelinked.
//
// * AVMA ("Actual Virtual Memory Address"): this is the address of a
// symbol (etc) in a running process, that is, once the associated
// object has been mapped into a process. Such values are typically
// much larger than SVMAs, since objects can get mapped arbitrarily
// far along the address space.
//
// * "Bias": the difference between AVMA and SVMA for a given symbol
// (specifically, AVMA - SVMA). The bias is always an integral
// number of pages. Once we know the bias for a given object's
// text section (for example), we can compute the AVMAs of all of
// its text symbols by adding the bias to their SVMAs.
//
// * "Image address": typically, to read debuginfo from an object we
// will temporarily mmap in the file so as to read symbol tables
// etc. Addresses in this temporary mapping are called "Image
// addresses". Note that the temporary mapping is entirely
// unrelated to the mappings of the file that the dynamic linker
// must perform merely in order to get the program to run. Hence
// image addresses are unrelated to either SVMAs or AVMAs.
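// A hypothetical example of the bias arithmetic (addresses invented): if
// a function's SVMA is 0x2f00 and its object's text has bias
// 0x7f55e0000000, its AVMA is 0x7f55e0002f00; conversely, an unwinder
// holding AVMA 0x7f55e0002f34 subtracts the bias to get SVMA 0x2f34
// before consulting the object's unwind info.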
namespace lul {
// A machine word plus validity tag.
class TaggedUWord {
public:
// RUNS IN NO-MALLOC CONTEXT
// Construct a valid one.
explicit TaggedUWord(uintptr_t w) : mValue(w), mValid(true) {}
// RUNS IN NO-MALLOC CONTEXT
// Construct an invalid one.
TaggedUWord() : mValue(0), mValid(false) {}
// RUNS IN NO-MALLOC CONTEXT
TaggedUWord operator+(TaggedUWord rhs) const {
return (Valid() && rhs.Valid()) ? TaggedUWord(Value() + rhs.Value())
: TaggedUWord();
}
// RUNS IN NO-MALLOC CONTEXT
TaggedUWord operator-(TaggedUWord rhs) const {
return (Valid() && rhs.Valid()) ? TaggedUWord(Value() - rhs.Value())
: TaggedUWord();
}
// RUNS IN NO-MALLOC CONTEXT
TaggedUWord operator&(TaggedUWord rhs) const {
return (Valid() && rhs.Valid()) ? TaggedUWord(Value() & rhs.Value())
: TaggedUWord();
}
// RUNS IN NO-MALLOC CONTEXT
TaggedUWord operator|(TaggedUWord rhs) const {
return (Valid() && rhs.Valid()) ? TaggedUWord(Value() | rhs.Value())
: TaggedUWord();
}
// RUNS IN NO-MALLOC CONTEXT
TaggedUWord CmpGEs(TaggedUWord rhs) const {
if (Valid() && rhs.Valid()) {
intptr_t s1 = (intptr_t)Value();
intptr_t s2 = (intptr_t)rhs.Value();
return TaggedUWord(s1 >= s2 ? 1 : 0);
}
return TaggedUWord();
}
// RUNS IN NO-MALLOC CONTEXT
TaggedUWord operator<<(TaggedUWord rhs) const {
if (Valid() && rhs.Valid()) {
uintptr_t shift = rhs.Value();
if (shift < 8 * sizeof(uintptr_t)) return TaggedUWord(Value() << shift);
}
return TaggedUWord();
}
// RUNS IN NO-MALLOC CONTEXT
// Is equal? Note: non-validity on either side gives non-equality.
bool operator==(TaggedUWord other) const {
return (mValid && other.Valid()) ? (mValue == other.Value()) : false;
}
// RUNS IN NO-MALLOC CONTEXT
// Is it word-aligned?
bool IsAligned() const {
return mValid && (mValue & (sizeof(uintptr_t) - 1)) == 0;
}
// RUNS IN NO-MALLOC CONTEXT
uintptr_t Value() const { return mValue; }
// RUNS IN NO-MALLOC CONTEXT
bool Valid() const { return mValid; }
private:
uintptr_t mValue;
bool mValid;
};
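// Usage sketch (hypothetical, for illustration): validity is sticky
// across arithmetic, so one invalid input poisons the whole computation.
//
//   TaggedUWord sp(0x7ffc1000);            // valid
//   TaggedUWord bad;                       // invalid
//   TaggedUWord a = sp + TaggedUWord(16);  // valid, value 0x7ffc1010
//   TaggedUWord b = sp + bad;              // invalid
//   bool eq = (b == b);                    // false: invalid never compares equal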
// The registers, with validity tags, that will be unwound.
struct UnwindRegs {
#if defined(GP_ARCH_arm)
TaggedUWord r7;
TaggedUWord r11;
TaggedUWord r12;
TaggedUWord r13;
TaggedUWord r14;
TaggedUWord r15;
#elif defined(GP_ARCH_arm64)
TaggedUWord x29;
TaggedUWord x30;
TaggedUWord sp;
TaggedUWord pc;
#elif defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
TaggedUWord xbp;
TaggedUWord xsp;
TaggedUWord xip;
#elif defined(GP_ARCH_mips64)
TaggedUWord sp;
TaggedUWord fp;
TaggedUWord pc;
#else
# error "Unknown plat"
#endif
};
// The maximum number of bytes in a stack snapshot. This value can be increased
// if necessary, but testing showed that 160k is enough to obtain good
// backtraces on x86_64 Linux. Most backtraces fit comfortably into 4-8k of
// stack space, but we do have some very deep stacks occasionally. Please see
// the comments in DoNativeBacktrace as to why it's OK to have this value be so
// large.
static const size_t N_STACK_BYTES = 160 * 1024;
// The stack chunk image that will be unwound.
struct StackImage {
// [start_avma, +len) specify the address range in the buffer.
// Obviously we require 0 <= len <= N_STACK_BYTES.
uintptr_t mStartAvma;
size_t mLen;
uint8_t mContents[N_STACK_BYTES];
};
// Statistics collection for the unwinder.
template <typename T>
class LULStats {
public:
LULStats() : mContext(0), mCFI(0), mFP(0) {}
template <typename S>
explicit LULStats(const LULStats<S>& aOther)
: mContext(aOther.mContext), mCFI(aOther.mCFI), mFP(aOther.mFP) {}
template <typename S>
LULStats<T>& operator=(const LULStats<S>& aOther) {
mContext = aOther.mContext;
mCFI = aOther.mCFI;
mFP = aOther.mFP;
return *this;
}
template <typename S>
uint32_t operator-(const LULStats<S>& aOther) {
return (mContext - aOther.mContext) + (mCFI - aOther.mCFI) +
(mFP - aOther.mFP);
}
T mContext; // Number of context frames
T mCFI; // Number of CFI/EXIDX frames
T mFP; // Number of frame-pointer recovered frames
};
// The core unwinder library class. Just one of these is needed, and
// it can be shared by multiple unwinder threads.
//
// The library operates in one of two modes.
//
// * Admin mode. The library is this state after creation. In Admin
// mode, no unwinding may be performed. It is however allowable to
// perform administrative tasks -- primarily, loading of unwind info
// -- in this mode. In particular, it is safe for the library to
// perform dynamic memory allocation in this mode. Safe in the
// sense that there is no risk of deadlock against unwinding threads
// that might -- because of where they have been sampled -- hold the
// system's malloc lock.
//
// * Unwind mode. In this mode, calls to ::Unwind may be made, but
// nothing else. ::Unwind guarantees not to make any dynamic memory
// requests, so as to guarantee that the calling thread won't
// deadlock in the case where it already holds the system's malloc lock.
//
// The library is created in Admin mode. After debuginfo is loaded,
// the caller must switch it into Unwind mode by calling
// ::EnableUnwinding. There is no way to switch it back to Admin mode
// after that. To safely switch back to Admin mode would require the
// caller (or other external agent) to guarantee that there are no
// pending ::Unwind calls.
class PriMap;
class SegArray;
class UniqueStringUniverse;
class LUL {
public:
// Create; supply a logging sink. Sets the object in Admin mode.
explicit LUL(void (*aLog)(const char*));
// Destroy. Caller is responsible for ensuring that no other
// threads are in Unwind calls. All resources are freed and all
// registered unwinder threads are deregistered. Can be called
// either in Admin or Unwind mode.
~LUL();
// Notify the library that unwinding is now allowed and so
// admin-mode calls are no longer allowed. The object is initially
// created in admin mode. The only possible transition is therefore
// admin->unwinding.
void EnableUnwinding();
// Notify of a new r-x mapping, and load the associated unwind info.
// The filename is strdup'd and used for debug printing. If
// aMappedImage is NULL, this function will mmap/munmap the file
// itself, so as to be able to read the unwind info. If
// aMappedImage is non-NULL then it is assumed to point to a
// caller-supplied and caller-managed mapped image of the file.
// May only be called in Admin mode.
void NotifyAfterMap(uintptr_t aRXavma, size_t aSize, const char* aFileName,
const void* aMappedImage);
// In rare cases we know an executable area exists but don't know
// what the associated file is. This call notifies LUL of such
// areas. This is important for correct functioning of stack
// scanning and of the x86-{linux,android} special-case
// __kernel_syscall function handling.
// This must be called only after the code area in
// question really has been mapped.
// May only be called in Admin mode.
void NotifyExecutableArea(uintptr_t aRXavma, size_t aSize);
// Notify that a mapped area has been unmapped; discard any
// associated unwind info. Acquires mRWlock for writing. Note that
// to avoid segfaulting the stack-scan unwinder, which inspects code
// areas, this must be called before the code area in question is
// really unmapped. Note that, unlike NotifyAfterMap(), this
// function takes the start and end addresses of the range to be
// unmapped, rather than a start and a length parameter. This is so
// as to make it possible to notify an unmap for the entire address
// space using a single call.
// May only be called in Admin mode.
void NotifyBeforeUnmap(uintptr_t aAvmaMin, uintptr_t aAvmaMax);
// Apply NotifyBeforeUnmap to the entire address space. This causes
// LUL to discard all unwind and executable-area information for the
// entire address space.
// May only be called in Admin mode.
void NotifyBeforeUnmapAll() { NotifyBeforeUnmap(0, UINTPTR_MAX); }
// Returns the number of mappings currently registered.
// May only be called in Admin mode.
size_t CountMappings();
// Unwind |aStackImg| starting with the context in |aStartRegs|.
// Write the number of frames recovered in *aFramesUsed. Put
// the PC values in aFramePCs[0 .. *aFramesUsed-1] and
// the SP values in aFrameSPs[0 .. *aFramesUsed-1].
// |aFramesAvail| is the size of the two output arrays and hence the
// largest possible value of *aFramesUsed. PC values are always
// valid, and the unwind will stop when the PC becomes invalid, but
// the SP values might be invalid, in which case the value zero will
// be written in the relevant frameSPs[] slot.
//
// This function assumes that the SP values increase as it unwinds
// away from the innermost frame -- that is, that the stack grows
// down. It monitors SP values as it unwinds to check they
// increase, so as to avoid looping on corrupted stacks.
//
// May only be called in Unwind mode. Multiple threads may unwind
// at once. The LUL user is responsible for ensuring that no thread makes
// any Admin calls whilst in Unwind mode.
// MOZ_CRASHes if the calling thread is not registered for unwinding.
//
// The calling thread must previously have been registered via a call to
// RegisterSampledThread.
void Unwind(/*OUT*/ uintptr_t* aFramePCs,
/*OUT*/ uintptr_t* aFrameSPs,
/*OUT*/ size_t* aFramesUsed,
/*OUT*/ size_t* aFramePointerFramesAcquired, size_t aFramesAvail,
UnwindRegs* aStartRegs, StackImage* aStackImg);
// The logging sink. Call to send debug strings to the caller-
// specified destination. Can only be called by the Admin thread.
void (*mLog)(const char*);
// Statistics relating to unwinding. These have to be atomic since
// unwinding can occur on different threads simultaneously.
LULStats<mozilla::Atomic<uint32_t>> mStats;
// Possibly show the statistics. This may not be called from any
// registered sampling thread, since it involves I/O.
void MaybeShowStats();
size_t SizeOfIncludingThis(mozilla::MallocSizeOf) const;
private:
// The statistics counters at the point where they were last printed.
LULStats<uint32_t> mStatsPrevious;
// Are we in admin mode? Initially |true| but changes to |false|
// once unwinding begins.
bool mAdminMode;
// The thread ID associated with admin mode. This is the only thread
// that is allowed to perform non-Unwind calls on this object. Conversely,
// no registered Unwinding thread may be the admin thread. This is so
// as to clearly partition the one thread that may do dynamic memory
// allocation from the threads that are being sampled, since the latter
// absolutely may not do dynamic memory allocation.
int mAdminThreadId;
// The top level mapping from code address ranges to postprocessed
// unwind info. Basically a sorted array of (addr, len, info)
// records. This field is updated by NotifyAfterMap and NotifyBeforeUnmap.
PriMap* mPriMap;
// An auxiliary structure that records which address ranges are
// mapped r-x, for the benefit of the stack scanner.
SegArray* mSegArray;
// A UniqueStringUniverse that holds all the strdup'd strings created
// whilst reading unwind information. This is included so as to make
// it possible to free them in ~LUL.
UniqueStringUniverse* mUSU;
};
// Run unit tests on an initialised, loaded-up LUL instance, and print
// summary results on |aLUL|'s logging sink. Also return the number
// of tests run in *aNTests and the number that passed in
// *aNTestsPassed.
void RunLulUnitTests(/*OUT*/ int* aNTests, /*OUT*/ int* aNTestsPassed,
LUL* aLUL);
} // namespace lul
#endif // LulMain_h


@@ -0,0 +1,419 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef LulMainInt_h
#define LulMainInt_h
#include "PlatformMacros.h"
#include "LulMain.h" // for TaggedUWord
#include <vector>
#include "mozilla/Assertions.h"
// This file provides the internal interface inside LUL. If you are an
// end-user of LUL, do not include it in your code. The end-user
// interface is in LulMain.h.
namespace lul {
using std::vector;
////////////////////////////////////////////////////////////////
// DW_REG_ constants //
////////////////////////////////////////////////////////////////
// These are the Dwarf CFI register numbers, as (presumably) defined
// in the ELF ABI supplements for each architecture.
enum DW_REG_NUMBER {
// No real register has this number. It's convenient to be able to
// treat the CFA (Canonical Frame Address) as "just another
// register", though.
DW_REG_CFA = -1,
#if defined(GP_ARCH_arm)
// ARM registers
DW_REG_ARM_R7 = 7,
DW_REG_ARM_R11 = 11,
DW_REG_ARM_R12 = 12,
DW_REG_ARM_R13 = 13,
DW_REG_ARM_R14 = 14,
DW_REG_ARM_R15 = 15,
#elif defined(GP_ARCH_arm64)
// aarch64 registers
DW_REG_AARCH64_X29 = 29,
DW_REG_AARCH64_X30 = 30,
DW_REG_AARCH64_SP = 31,
#elif defined(GP_ARCH_amd64)
// Because the X86 (32 bit) and AMD64 (64 bit) summarisers are
// combined, a merged set of register constants is needed.
DW_REG_INTEL_XBP = 6,
DW_REG_INTEL_XSP = 7,
DW_REG_INTEL_XIP = 16,
#elif defined(GP_ARCH_x86)
DW_REG_INTEL_XBP = 5,
DW_REG_INTEL_XSP = 4,
DW_REG_INTEL_XIP = 8,
#elif defined(GP_ARCH_mips64)
DW_REG_MIPS_SP = 29,
DW_REG_MIPS_FP = 30,
DW_REG_MIPS_PC = 34,
#else
# error "Unknown arch"
#endif
};
////////////////////////////////////////////////////////////////
// PfxExpr //
////////////////////////////////////////////////////////////////
enum PfxExprOp {
// meaning of mOperand effect on stack
PX_Start, // bool start-with-CFA? start, with CFA on stack, or not
PX_End, // none stop; result is at top of stack
PX_SImm32, // int32 push signed int32
PX_DwReg, // DW_REG_NUMBER push value of the specified reg
PX_Deref, // none pop X ; push *X
PX_Add, // none pop X ; pop Y ; push Y + X
PX_Sub, // none pop X ; pop Y ; push Y - X
PX_And, // none pop X ; pop Y ; push Y & X
PX_Or, // none pop X ; pop Y ; push Y | X
PX_CmpGES, // none pop X ; pop Y ; push (Y >=s X) ? 1 : 0
PX_Shl // none pop X ; pop Y ; push Y << X
};
struct PfxInstr {
PfxInstr(PfxExprOp opcode, int32_t operand)
: mOpcode(opcode), mOperand(operand) {}
explicit PfxInstr(PfxExprOp opcode) : mOpcode(opcode), mOperand(0) {}
bool operator==(const PfxInstr& other) const {
return mOpcode == other.mOpcode && mOperand == other.mOperand;
}
PfxExprOp mOpcode;
int32_t mOperand;
};
static_assert(sizeof(PfxInstr) <= 8, "PfxInstr size changed unexpectedly");
// Evaluate the prefix expression whose PfxInstrs start at aPfxInstrs[start].
// In the case of any mishap (stack over/underflow, running off the end of
// the instruction vector, obviously malformed sequences),
// return an invalid TaggedUWord.
// RUNS IN NO-MALLOC CONTEXT
TaggedUWord EvaluatePfxExpr(int32_t start, const UnwindRegs* aOldRegs,
TaggedUWord aCFA, const StackImage* aStackImg,
const vector<PfxInstr>& aPfxInstrs);
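// As a concrete (editor-added, illustrative) example, the expression
// *(CFA + 8) could be encoded as the following PfxInstr program, using the
// stack-machine semantics listed in the PfxExprOp comments above;
// |oldRegs|, |cfa| and |stackImg| are assumed to come from the unwinder.
//
//   vector<PfxInstr> instrs;
//   int32_t start = (int32_t)instrs.size();
//   instrs.push_back(PfxInstr(PX_Start, 1));   // begin, with CFA pushed
//   instrs.push_back(PfxInstr(PX_SImm32, 8));  // push 8
//   instrs.push_back(PfxInstr(PX_Add));        // pop 8, pop CFA; push CFA+8
//   instrs.push_back(PfxInstr(PX_Deref));      // pop X; push *X
//   instrs.push_back(PfxInstr(PX_End));        // result is at top of stack
//   TaggedUWord value =
//       EvaluatePfxExpr(start, oldRegs, cfa, stackImg, instrs);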
////////////////////////////////////////////////////////////////
// LExpr //
////////////////////////////////////////////////////////////////
// An expression -- very primitive. Denotes either "register +
// offset", a dereferenced version of the same, or a reference to a
// prefix expression stored elsewhere. So as to allow convenient
// handling of Dwarf-derived unwind info, the register may also denote
// the CFA. A large number of these need to be stored, so we ensure
// it fits into 8 bytes. See comment below on RuleSet to see how
// expressions fit into the bigger picture.
enum LExprHow {
UNKNOWN = 0, // This LExpr denotes no value.
NODEREF, // Value is (mReg + mOffset).
DEREF, // Value is *(mReg + mOffset).
PFXEXPR // Value is EvaluatePfxExpr(secMap->mPfxInstrs[mOffset])
};
inline static const char* NameOf_LExprHow(LExprHow how) {
switch (how) {
case UNKNOWN:
return "UNKNOWN";
case NODEREF:
return "NODEREF";
case DEREF:
return "DEREF";
case PFXEXPR:
return "PFXEXPR";
default:
return "LExpr-??";
}
}
struct LExpr {
// Denotes an expression with no value.
LExpr() : mHow(UNKNOWN), mReg(0), mOffset(0) {}
// Denotes any expressible expression.
LExpr(LExprHow how, int16_t reg, int32_t offset)
: mHow(how), mReg(reg), mOffset(offset) {
switch (how) {
case UNKNOWN:
MOZ_ASSERT(reg == 0 && offset == 0);
break;
case NODEREF:
break;
case DEREF:
break;
case PFXEXPR:
MOZ_ASSERT(reg == 0 && offset >= 0);
break;
default:
MOZ_ASSERT(0, "LExpr::LExpr: invalid how");
}
}
// Change the offset for an expression that references memory.
LExpr add_delta(long delta) {
MOZ_ASSERT(mHow == NODEREF);
// If this is a non-debug build and the above assertion would have
// failed, at least return LExpr() so that the machinery that uses
// the resulting expression fails in a repeatable way.
return (mHow == NODEREF) ? LExpr(mHow, mReg, mOffset + delta)
: LExpr(); // Gone bad
}
// Dereference an expression that denotes a memory address.
LExpr deref() {
MOZ_ASSERT(mHow == NODEREF);
// Same rationale as for add_delta().
return (mHow == NODEREF) ? LExpr(DEREF, mReg, mOffset)
: LExpr(); // Gone bad
}
// Print a rule for recovery of |aNewReg| whose recovered value
// is this LExpr.
std::string ShowRule(const char* aNewReg) const;
// Evaluate this expression, producing a TaggedUWord. |aOldRegs|
// holds register values that may be referred to by the expression.
// |aCFA| holds the CFA value, if any, that applies. |aStackImg|
// contains a chunk of stack that will be consulted if the expression
// references memory. |aPfxInstrs| holds the vector of PfxInstrs
// that will be consulted if this is a PFXEXPR.
// RUNS IN NO-MALLOC CONTEXT
TaggedUWord EvaluateExpr(const UnwindRegs* aOldRegs, TaggedUWord aCFA,
const StackImage* aStackImg,
const vector<PfxInstr>* aPfxInstrs) const;
// Representation of expressions. If |mReg| is DW_REG_CFA (-1) then
// it denotes the CFA. All other allowed values for |mReg| are
// nonnegative and are DW_REG_ values.
LExprHow mHow : 8;
int16_t mReg; // A DW_REG_ value
int32_t mOffset; // 32-bit signed offset should be more than enough.
};
static_assert(sizeof(LExpr) <= 8, "LExpr size changed unexpectedly");
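// To make the three useful forms concrete, here are some editor-added
// sketches of typical rules (the comments assume an x86_64 frame layout):
//
//   LExpr newSP(NODEREF, DW_REG_CFA, 0);   // new SP = CFA
//   LExpr newIP(DEREF, DW_REG_CFA, -8);    // new IP = *(CFA - 8)
//   LExpr viaPfx(PFXEXPR, 0, 42);          // run the PfxInstr program that
//                                          // starts at mPfxInstrs[42]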
////////////////////////////////////////////////////////////////
// RuleSet //
////////////////////////////////////////////////////////////////
// This is platform-dependent. For some address range, describes how
// to recover the CFA and then how to recover the registers for the
// previous frame.
//
// The set of LExprs contained in a given RuleSet describe a DAG which
// says how to compute the caller's registers ("new registers") from
// the callee's registers ("old registers"). The DAG can contain a
// single internal node, which is the value of the CFA for the callee.
// It would be possible to construct a DAG that omits the CFA, but
// including it makes the summarisers simpler, and the Dwarf CFI spec
// has the CFA as a central concept.
//
// For this to make sense, |mCfaExpr| can't have
// |mReg| == DW_REG_CFA since we have no previous value for the CFA.
// All of the other |Expr| fields can -- and usually do -- specify
// |mReg| == DW_REG_CFA.
//
// With that in place, the unwind algorithm proceeds as follows.
//
// (0) Initially: we have values for the old registers, and a memory
// image.
//
// (1) Compute the CFA by evaluating |mCfaExpr|. Add the computed
// value to the set of "old registers".
//
// (2) Compute values for the registers by evaluating all of the other
// |Expr| fields in the RuleSet. These can depend on both the old
// register values and the just-computed CFA.
//
// If we are unwinding without computing a CFA, perhaps because the
// RuleSets are derived from EXIDX instead of Dwarf, then
// |mCfaExpr.mHow| will be LExpr::UNKNOWN, so the computed value will
// be invalid -- that is, TaggedUWord() -- and so any attempt to use
// that will result in the same value. But that's OK because the
// RuleSet would make no sense if it depended on the CFA but specified no
// way to compute it.
//
// A RuleSet is not allowed to cover zero address range. Having zero
// length would break binary searching in SecMaps and PriMaps.
class RuleSet {
public:
RuleSet();
void Print(void (*aLog)(const char*)) const;
// Find the LExpr* for a given DW_REG_ value in this class.
LExpr* ExprForRegno(DW_REG_NUMBER aRegno);
uintptr_t mAddr;
uintptr_t mLen;
// How to compute the CFA.
LExpr mCfaExpr;
// How to compute caller register values. These may reference the
// value defined by |mCfaExpr|.
#if defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
LExpr mXipExpr; // return address
LExpr mXspExpr;
LExpr mXbpExpr;
#elif defined(GP_ARCH_arm)
LExpr mR15expr; // return address
LExpr mR14expr;
LExpr mR13expr;
LExpr mR12expr;
LExpr mR11expr;
LExpr mR7expr;
#elif defined(GP_ARCH_arm64)
LExpr mX29expr; // frame pointer register
LExpr mX30expr; // link register
LExpr mSPexpr;
#elif defined(GP_ARCH_mips64)
LExpr mPCexpr;
LExpr mFPexpr;
LExpr mSPexpr;
#else
# error "Unknown arch"
#endif
};
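// An editor-added sketch, for GP_ARCH_amd64 only, of the RuleSet a
// summariser might plausibly produce for a conventional frame that pushes
// and maintains a frame pointer. The address range is hypothetical.
//
//   RuleSet rs;
//   rs.mAddr = 0x400000;                                 // range start
//   rs.mLen = 0x80;                                      // range length
//   rs.mCfaExpr = LExpr(NODEREF, DW_REG_INTEL_XBP, 16);  // CFA = RBP + 16
//   rs.mXipExpr = LExpr(DEREF, DW_REG_CFA, -8);          // RIP = *(CFA - 8)
//   rs.mXbpExpr = LExpr(DEREF, DW_REG_CFA, -16);         // RBP = *(CFA - 16)
//   rs.mXspExpr = LExpr(NODEREF, DW_REG_CFA, 0);         // RSP = CFA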
// Returns |true| for Dwarf register numbers which are members
// of the set of registers that LUL unwinds on this target.
static inline bool registerIsTracked(DW_REG_NUMBER reg) {
switch (reg) {
#if defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
case DW_REG_INTEL_XBP:
case DW_REG_INTEL_XSP:
case DW_REG_INTEL_XIP:
return true;
#elif defined(GP_ARCH_arm)
case DW_REG_ARM_R7:
case DW_REG_ARM_R11:
case DW_REG_ARM_R12:
case DW_REG_ARM_R13:
case DW_REG_ARM_R14:
case DW_REG_ARM_R15:
return true;
#elif defined(GP_ARCH_arm64)
case DW_REG_AARCH64_X29:
case DW_REG_AARCH64_X30:
case DW_REG_AARCH64_SP:
return true;
#elif defined(GP_ARCH_mips64)
case DW_REG_MIPS_FP:
case DW_REG_MIPS_SP:
case DW_REG_MIPS_PC:
return true;
#else
# error "Unknown arch"
#endif
default:
return false;
}
}
////////////////////////////////////////////////////////////////
// SecMap //
////////////////////////////////////////////////////////////////
// A SecMap may have zero address range, temporarily, whilst RuleSets
// are being added to it. But adding a zero-range SecMap to a PriMap
// will make it impossible to maintain the total order of the PriMap
// entries, and so that can't be allowed to happen.
class SecMap {
public:
// These summarise the contained mRuleSets, in that they give
// exactly the lowest and highest addresses that any of the entries
// in this SecMap cover. Hence invariants:
//
// mRuleSets is nonempty
// <=> mSummaryMinAddr <= mSummaryMaxAddr
// && mSummaryMinAddr == mRuleSets[0].mAddr
// && mSummaryMaxAddr == mRuleSets[#rulesets-1].mAddr
// + mRuleSets[#rulesets-1].mLen - 1;
//
// This requires that no RuleSet has zero length.
//
// mRuleSets is empty
// <=> mSummaryMinAddr > mSummaryMaxAddr
//
// This doesn't constrain mSummaryMinAddr and mSummaryMaxAddr uniquely,
// so let's use mSummaryMinAddr == 1 and mSummaryMaxAddr == 0 to denote
// this case.
explicit SecMap(void (*aLog)(const char*));
~SecMap();
// Binary search mRuleSets to find one that brackets |ia|, or nullptr
// if none is found. It's not allowable to do this until PrepareRuleSets
// has been called first.
RuleSet* FindRuleSet(uintptr_t ia);
// Add a RuleSet to the collection. The rule is copied in. Calling
// this makes the map non-searchable.
void AddRuleSet(const RuleSet* rs);
// Add a PfxInstr to the vector of such instrs, and return the index
// in the vector. Calling this makes the map non-searchable.
uint32_t AddPfxInstr(PfxInstr pfxi);
// Returns the entire vector of PfxInstrs.
const vector<PfxInstr>* GetPfxInstrs() { return &mPfxInstrs; }
// Prepare the map for searching. Also, remove any rules for code
// address ranges which don't fall inside [start, +len). |len| may
// not be zero.
void PrepareRuleSets(uintptr_t start, size_t len);
bool IsEmpty();
size_t Size() { return mRuleSets.size(); }
size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
// The min and max addresses of the addresses in the contained
// RuleSets. See comment above for invariants.
uintptr_t mSummaryMinAddr;
uintptr_t mSummaryMaxAddr;
private:
// False whilst adding entries; true once it is safe to call FindRuleSet.
// Transition (false->true) is caused by calling PrepareRuleSets().
bool mUsable;
// A vector of RuleSets, sorted, nonoverlapping (post Prepare()).
vector<RuleSet> mRuleSets;
// A vector of PfxInstrs, which are referred to by the RuleSets.
// These are provided as a representation of Dwarf expressions
// (DW_CFA_val_expression, DW_CFA_expression, DW_CFA_def_cfa_expression),
// are relatively expensive to evaluate, and are therefore
// expected to be used only occasionally.
//
// The vector holds a bunch of separate PfxInstr programs, each one
// starting with a PX_Start and terminated by a PX_End, all
// concatenated together. When a RuleSet can't recover a value
// using a self-contained LExpr, it uses a PFXEXPR whose mOffset is
// the index in this vector of the start of the necessary PfxInstr program.
vector<PfxInstr> mPfxInstrs;
// A logging sink, for debugging.
void (*mLog)(const char*);
};
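// An editor-added sketch of the intended add/prepare/search lifecycle
// (|logSink| and MakeRuleSet() are illustrative assumptions):
//
//   SecMap sm(logSink);
//   RuleSet rs = MakeRuleSet();
//   sm.AddRuleSet(&rs);                     // copied in; now non-searchable
//   sm.PrepareRuleSets(0x400000, 0x10000);  // sort/trim; now searchable
//   if (RuleSet* found = sm.FindRuleSet(0x400010)) {
//     // use |found| to unwind one frame
//   }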
} // namespace lul
#endif // ndef LulMainInt_h


@@ -0,0 +1,78 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <stdio.h>
#include <signal.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include "platform.h"
#include "PlatformMacros.h"
#include "LulMain.h"
#include "BaseProfilerSharedLibraries.h"
#include "AutoObjectMapper.h"
// Contains miscellaneous helpers that are used to connect the Gecko Profiler
// and LUL.
// Find out, in a platform-dependent way, where the code modules got
// mapped in the process' virtual address space, and get |aLUL| to
// load unwind info for them.
void read_procmaps(lul::LUL* aLUL) {
MOZ_ASSERT(aLUL->CountMappings() == 0);
#if defined(GP_OS_linux) || defined(GP_OS_android)
SharedLibraryInfo info = SharedLibraryInfo::GetInfoForSelf();
for (size_t i = 0; i < info.GetSize(); i++) {
const SharedLibrary& lib = info.GetEntry(i);
std::string nativePath = lib.GetNativeDebugPath();
# if defined(GP_OS_android)
// We're using faulty.lib. Use a special-case object mapper.
AutoObjectMapperFaultyLib mapper(aLUL->mLog);
# else
// We can use the standard POSIX-based mapper.
AutoObjectMapperPOSIX mapper(aLUL->mLog);
# endif
// Ask |mapper| to map the object. Then hand its mapped address
// to NotifyAfterMap().
void* image = nullptr;
size_t size = 0;
bool ok = mapper.Map(&image, &size, nativePath);
if (ok && image && size > 0) {
aLUL->NotifyAfterMap(lib.GetStart(), lib.GetEnd() - lib.GetStart(),
nativePath.c_str(), image);
} else if (!ok && lib.GetDebugName().IsEmpty()) {
// The object has no name and (as a consequence) the mapper failed to map
// it. This happens on Linux, where GetInfoForSelf() produces such a
// mapping for the VDSO. This is a problem on x86-{linux,android} because
// lack of knowledge about the mapped area inhibits LUL's special
// __kernel_syscall handling. Hence notify |aLUL| at least of the
// mapping, even though it can't read any unwind information for the area.
aLUL->NotifyExecutableArea(lib.GetStart(), lib.GetEnd() - lib.GetStart());
}
// |mapper| goes out of scope at this point and so its destructor
// unmaps the object.
}
#else
# error "Unknown platform"
#endif
}
// LUL needs a callback for its logging sink.
void logging_sink_for_LUL(const char* str) {
// These are only printed when Verbose logging is enabled (e.g. with
// MOZ_LOG="prof:5"). This is because LUL's logging is much more verbose than
// the rest of the profiler's logging, which occurs at the Info (3) and Debug
// (4) levels.
MOZ_LOG(gProfilerLog, mozilla::LogLevel::Verbose,
("[%d] %s", profiler_current_process_id(), str));
}
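// An editor-added sketch of how these two helpers are meant to be wired
// together at startup, assuming (see LulMain.h) that the lul::LUL
// constructor takes the logging callback:
//
//   lul::LUL* lul = new lul::LUL(logging_sink_for_LUL);
//   read_procmaps(lul);  // load unwind info for all mapped modules
//   // ... later, once LUL leaves Admin mode, the sampler may call
//   // lul->Unwind(...).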


@@ -0,0 +1,19 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZ_PLATFORM_LINUX_LUL_H
#define MOZ_PLATFORM_LINUX_LUL_H
#include "platform.h"
// Find out, in a platform-dependent way, where the code modules got
// mapped in the process' virtual address space, and get |aLUL| to
// load unwind info for them.
void read_procmaps(lul::LUL* aLUL);
// LUL needs a callback for its logging sink.
void logging_sink_for_LUL(const char* str);
#endif /* ndef MOZ_PLATFORM_LINUX_LUL_H */


@@ -0,0 +1,102 @@
# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This is pretty much a copy from tools/profiler, cut down to exclude anything
# that cannot work in mozglue (because it is totally dependent on libxul-
# specific code).
# All exported headers now prefixed with "Base" to avoid #include name clashes.
if CONFIG['MOZ_GECKO_PROFILER']:
DEFINES['IMPL_MFBT'] = True
EXPORTS += [
'public/BaseProfileJSONWriter.h',
'public/BaseProfilerMarkerPayload.h',
'public/BaseProfilerSharedLibraries.h',
'public/BaseProfilingCategory.h',
'public/BaseProfilingStack.h',
]
UNIFIED_SOURCES += [
'core/PageInformation.cpp',
'core/platform.cpp',
'core/ProfileBuffer.cpp',
'core/ProfileBufferEntry.cpp',
'core/ProfiledThreadData.cpp',
'core/ProfileJSONWriter.cpp',
'core/ProfilerBacktrace.cpp',
'core/ProfilerMarkerPayload.cpp',
'core/ProfilingCategory.cpp',
'core/ProfilingStack.cpp',
'core/RegisteredThread.cpp',
]
if CONFIG['OS_TARGET'] in ('Android', 'Linux'):
if CONFIG['CPU_ARCH'] in ('arm', 'aarch64', 'x86', 'x86_64', 'mips64'):
UNIFIED_SOURCES += [
'lul/AutoObjectMapper.cpp',
'lul/LulCommon.cpp',
'lul/LulDwarf.cpp',
'lul/LulDwarfSummariser.cpp',
'lul/LulElf.cpp',
'lul/LulMain.cpp',
'lul/platform-linux-lul.cpp',
]
# These files cannot be built in unified mode because of name clashes
# with mozglue headers on Android.
SOURCES += [
'core/shared-libraries-linux.cc',
]
if CONFIG['CPU_ARCH'] == 'arm':
SOURCES += [
'core/EHABIStackWalk.cpp',
]
elif CONFIG['OS_TARGET'] == 'Darwin':
UNIFIED_SOURCES += [
'core/shared-libraries-macos.cc',
]
elif CONFIG['OS_TARGET'] == 'WINNT':
SOURCES += [
'core/shared-libraries-win32.cc',
]
LOCAL_INCLUDES += [
'/mozglue/baseprofiler/core/',
'/mozglue/linker',
]
if CONFIG['OS_TARGET'] == 'Android':
DEFINES['ANDROID_NDK_MAJOR_VERSION'] = CONFIG['ANDROID_NDK_MAJOR_VERSION']
DEFINES['ANDROID_NDK_MINOR_VERSION'] = CONFIG['ANDROID_NDK_MINOR_VERSION']
LOCAL_INCLUDES += [
'lul',
]
FINAL_LIBRARY = 'mozglue'
# BaseProfiler.h and BaseProfilerCounts.h are the only headers that are usable
# in non-MOZ_GECKO_PROFILER builds, and they contain only no-op macros in that
# case.
EXPORTS += [
'public/BaseProfiler.h',
]
EXPORTS.mozilla += [
'public/BaseProfilerCounts.h',
]
if CONFIG['MOZ_VTUNE']:
DEFINES['MOZ_VTUNE_INSTRUMENTATION'] = True
UNIFIED_SOURCES += [
'core/VTuneProfiler.cpp',
]
if CONFIG['CC_TYPE'] in ('clang', 'gcc'):
CXXFLAGS += [
'-Wno-error=shadow',
'-Wno-ignored-qualifiers', # due to use of breakpad headers
]
with Files('**'):
BUG_COMPONENT = ('Core', 'Gecko Profiler')


@@ -0,0 +1,141 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef PROFILEJSONWRITER_H
#define PROFILEJSONWRITER_H
#include "mozilla/JSONWriter.h"
#include "mozilla/UniquePtr.h"
#include <functional>
#include <ostream>
#include <string>
class SpliceableChunkedJSONWriter;
// On average, profile JSONs are large enough that we want to avoid
// reallocating the buffer when expanding. Additionally, the contents of the
// profile are not accessed until the profile is entirely written. For these
// reasons we use a chunked writer that keeps an array of chunks, which is
// concatenated together after writing is finished.
class ChunkedJSONWriteFunc : public mozilla::JSONWriteFunc {
public:
friend class SpliceableJSONWriter;
ChunkedJSONWriteFunc() : mChunkPtr{nullptr}, mChunkEnd{nullptr} {
AllocChunk(kChunkSize);
}
bool IsEmpty() const {
MOZ_ASSERT_IF(!mChunkPtr, !mChunkEnd && mChunkList.length() == 0 &&
mChunkLengths.length() == 0);
return !mChunkPtr;
}
void Write(const char* aStr) override;
void CopyDataIntoLazilyAllocatedBuffer(
const std::function<char*(size_t)>& aAllocator) const;
mozilla::UniquePtr<char[]> CopyData() const;
void Take(ChunkedJSONWriteFunc&& aOther);
// Returns the byte length of the complete combined string, including the
// null terminator byte.
size_t GetTotalLength() const;
private:
void AllocChunk(size_t aChunkSize);
static const size_t kChunkSize = 4096 * 512;
// Pointer for writing inside the current chunk.
//
// The current chunk is always at the back of mChunkList, i.e.,
// mChunkList.back() <= mChunkPtr <= mChunkEnd.
char* mChunkPtr;
// Pointer to the end of the current chunk.
//
// The current chunk is always at the back of mChunkList, i.e.,
// mChunkEnd >= mChunkList.back() + mChunkLengths.back().
char* mChunkEnd;
// List of chunks and their lengths.
//
// For all i, the length of the string in mChunkList[i] is
// mChunkLengths[i].
mozilla::Vector<mozilla::UniquePtr<char[]>> mChunkList;
mozilla::Vector<size_t> mChunkLengths;
};
struct OStreamJSONWriteFunc : public mozilla::JSONWriteFunc {
explicit OStreamJSONWriteFunc(std::ostream& aStream) : mStream(aStream) {}
void Write(const char* aStr) override { mStream << aStr; }
std::ostream& mStream;
};
class SpliceableJSONWriter : public mozilla::JSONWriter {
public:
explicit SpliceableJSONWriter(
mozilla::UniquePtr<mozilla::JSONWriteFunc> aWriter)
: JSONWriter(std::move(aWriter)) {}
void StartBareList(CollectionStyle aStyle = MultiLineStyle) {
StartCollection(nullptr, "", aStyle);
}
void EndBareList() { EndCollection(""); }
void NullElements(uint32_t aCount) {
for (uint32_t i = 0; i < aCount; i++) {
NullElement();
}
}
void Splice(const ChunkedJSONWriteFunc* aFunc);
void Splice(const char* aStr);
// Splice the given JSON directly in, without quoting.
void SplicedJSONProperty(const char* aMaybePropertyName,
const char* aJsonValue) {
Scalar(aMaybePropertyName, aJsonValue);
}
// Takes the chunks from aFunc and writes them. If move is not possible
// (e.g., using OStreamJSONWriteFunc), aFunc's chunks are copied and its
// storage cleared.
virtual void TakeAndSplice(ChunkedJSONWriteFunc* aFunc);
};
class SpliceableChunkedJSONWriter : public SpliceableJSONWriter {
public:
explicit SpliceableChunkedJSONWriter()
: SpliceableJSONWriter(mozilla::MakeUnique<ChunkedJSONWriteFunc>()) {}
ChunkedJSONWriteFunc* WriteFunc() const {
return static_cast<ChunkedJSONWriteFunc*>(JSONWriter::WriteFunc());
}
// Adopts the chunks from aFunc without copying.
virtual void TakeAndSplice(ChunkedJSONWriteFunc* aFunc) override;
};
class JSONSchemaWriter {
mozilla::JSONWriter& mWriter;
uint32_t mIndex;
public:
explicit JSONSchemaWriter(mozilla::JSONWriter& aWriter)
: mWriter(aWriter), mIndex(0) {
aWriter.StartObjectProperty("schema",
SpliceableJSONWriter::SingleLineStyle);
}
void WriteField(const char* aName) { mWriter.IntProperty(aName, mIndex++); }
~JSONSchemaWriter() { mWriter.EndObject(); }
};
#endif // PROFILEJSONWRITER_H
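// An editor-added usage sketch for the classes above: build a JSON object
// with one writer, then splice it into another without copying. (The
// profiler's real call sites differ; this only shows the moving parts.)
//
//   SpliceableChunkedJSONWriter inner;
//   inner.Start();
//   inner.IntProperty("pid", 1234);
//   inner.End();
//
//   SpliceableChunkedJSONWriter outer;
//   outer.Start();
//   outer.StartArrayProperty("processes");
//   outer.TakeAndSplice(inner.WriteFunc());  // adopts inner's chunks
//   outer.EndArray();
//   outer.End();
//   mozilla::UniquePtr<char[]> json = outer.WriteFunc()->CopyData();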

(Diff for one file not shown here because of its large size.)


@@ -0,0 +1,271 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef ProfilerCounts_h
#define ProfilerCounts_h
#ifndef MOZ_BASE_PROFILER
# define PROFILER_DEFINE_COUNT_TOTAL(label, category, description)
# define PROFILER_DEFINE_COUNT(label, category, description)
# define PROFILER_DEFINE_STATIC_COUNT_TOTAL(label, category, description)
# define AUTO_PROFILER_COUNT_TOTAL(label, count)
# define AUTO_PROFILER_COUNT(label)
# define AUTO_PROFILER_STATIC_COUNT(label, count)
# define AUTO_PROFILER_FORCE_ALLOCATION(label)
#else
# include "mozilla/Atomics.h"
class BaseProfilerCount;
void profiler_add_sampled_counter(BaseProfilerCount* aCounter);
void profiler_remove_sampled_counter(BaseProfilerCount* aCounter);
typedef mozilla::Atomic<int64_t, mozilla::MemoryOrdering::Relaxed>
ProfilerAtomicSigned;
typedef mozilla::Atomic<uint64_t, mozilla::MemoryOrdering::Relaxed>
ProfilerAtomicUnsigned;
// Counter support
// There are two types of counters:
// 1) a simple counter which can be added to or subtracted from. This could
// track the number of objects of a type, the number of calls to something
// (reflow, JIT, etc).
// 2) a combined counter which has the above, plus a number-of-calls counter
// that is incremented by 1 for each call to modify the count. This provides
// an optional source for a 'heatmap' of access. This can be used (for
// example) to track the amount of memory allocated, and provide a heatmap of
// memory operations (allocs/frees).
//
// Counters are sampled by the profiler once per sample-period. At this time,
// all counters are global to the process. In the future, there might be more
// versions with per-thread or other discriminators.
//
// Typical usage:
// There are two ways to use counters: With heap-created counter objects,
// or using macros. Note: the macros use statics, and will be slightly
// faster/smaller, and you need to care about creating them before using
// them. They're similar to the use-pattern for the other AUTO_PROFILER*
// macros, but they do need the PROFILER_DEFINE* to be used to instantiate
// the statics.
//
// PROFILER_DEFINE_COUNT(mything, "JIT", "Some JIT byte count")
// ...
// void foo() { ... AUTO_PROFILER_COUNT(mything); ... }
//
// or (to also get a heatmap)
//
// PROFILER_DEFINE_COUNT_TOTAL(mything, "JIT", "Some JIT byte count")
// ...
// void foo() {
// ...
// AUTO_PROFILER_COUNT_TOTAL(mything, number_of_bytes_generated);
// ...
// }
//
// To use without statics/macros:
//
// UniquePtr<ProfilerCounter> myCounter;
// ...
// myCounter =
// MakeUnique<ProfilerCounter>("mything", "JIT", "Some JIT byte count");
// ...
// void foo() { ... myCounter->Add(number_of_bytes_generated); ... }
class BaseProfilerCount {
public:
BaseProfilerCount(const char* aLabel, ProfilerAtomicSigned* aCounter,
ProfilerAtomicUnsigned* aNumber, const char* aCategory,
const char* aDescription)
: mLabel(aLabel),
mCategory(aCategory),
mDescription(aDescription),
mCounter(aCounter),
mNumber(aNumber) {
# define COUNTER_CANARY 0xDEADBEEF
# ifdef DEBUG
mCanary = COUNTER_CANARY;
mPrevNumber = 0;
# endif
// Can't call profiler_* here since this may be non-xul-library code.
}
# ifdef DEBUG
~BaseProfilerCount() { mCanary = 0; }
# endif
void Sample(int64_t& aCounter, uint64_t& aNumber) {
MOZ_ASSERT(mCanary == COUNTER_CANARY);
aCounter = *mCounter;
aNumber = mNumber ? *mNumber : 0;
# ifdef DEBUG
MOZ_ASSERT(aNumber >= mPrevNumber);
mPrevNumber = aNumber;
# endif
}
// We don't define ++ and Add() here, since the static defines directly
// increment the atomic counters, and the subclasses implement ++ and
// Add() directly.
// These typically are static strings (for example if you use the macros
// below)
const char* mLabel;
const char* mCategory;
const char* mDescription;
// We're ok with these being un-ordered in race conditions. These are
// pointers because we want to be able to use statics and increment them
// directly. Otherwise we could just have them inline, and not need the
// constructor args.
// These can be static globals (using the macros below), though they
// don't have to be - their lifetime must be longer than the use of them
// by the profiler (see profiler_add/remove_sampled_counter()). If you're
// using a lot of these, they probably should be allocated at runtime (see
// class ProfilerCountOnly below).
ProfilerAtomicSigned* mCounter;
ProfilerAtomicUnsigned* mNumber; // may be null
# ifdef DEBUG
uint32_t mCanary;
uint64_t mPrevNumber; // value of number from the last Sample()
# endif
};
// Designed to be allocated dynamically, and simply incremented with obj++
// or obj->Add(n)
class ProfilerCounter final : public BaseProfilerCount {
public:
ProfilerCounter(const char* aLabel, const char* aCategory,
const char* aDescription)
: BaseProfilerCount(aLabel, &mCounter, nullptr, aCategory, aDescription) {
// Assume we're in libxul
profiler_add_sampled_counter(this);
}
virtual ~ProfilerCounter() { profiler_remove_sampled_counter(this); }
BaseProfilerCount& operator++() {
Add(1);
return *this;
}
void Add(int64_t aNumber) { mCounter += aNumber; }
ProfilerAtomicSigned mCounter;
};
// Also keeps a heatmap (number of calls to ++/Add())
class ProfilerCounterTotal final : public BaseProfilerCount {
public:
ProfilerCounterTotal(const char* aLabel, const char* aCategory,
const char* aDescription)
: BaseProfilerCount(aLabel, &mCounter, &mNumber, aCategory,
aDescription) {
// Assume we're in libxul
profiler_add_sampled_counter(this);
}
virtual ~ProfilerCounterTotal() { profiler_remove_sampled_counter(this); }
BaseProfilerCount& operator++() {
Add(1);
return *this;
}
void Add(int64_t aNumber) {
mCounter += aNumber;
mNumber++;
}
ProfilerAtomicSigned mCounter;
ProfilerAtomicUnsigned mNumber;
};
// Defines a counter that is sampled on each profiler tick, with a running
// count (signed), and number-of-instances. Note that because these are two
// independent Atomics, there is a possibility that count will not include
// the last call, but number of uses will. I think this is not worth
// worrying about.
# define PROFILER_DEFINE_COUNT_TOTAL(label, category, description) \
ProfilerAtomicSigned profiler_count_##label(0); \
ProfilerAtomicUnsigned profiler_number_##label(0); \
const char profiler_category_##label[] = category; \
const char profiler_description_##label[] = description; \
mozilla::UniquePtr<BaseProfilerCount> AutoCount_##label;
// This counts, but doesn't keep track of the number of calls to
// AUTO_PROFILER_COUNT()
# define PROFILER_DEFINE_COUNT(label, category, description) \
ProfilerAtomicSigned profiler_count_##label(0); \
const char profiler_category_##label[] = category; \
const char profiler_description_##label[] = description; \
mozilla::UniquePtr<BaseProfilerCount> AutoCount_##label;
// This will create a static initializer if used, but avoids a possible
// allocation.
# define PROFILER_DEFINE_STATIC_COUNT_TOTAL(label, category, description) \
ProfilerAtomicSigned profiler_count_##label(0); \
ProfilerAtomicUnsigned profiler_number_##label(0); \
BaseProfilerCount AutoCount_##label(#label, &profiler_count_##label, \
&profiler_number_##label, category, \
description);
// If we didn't care about static initializers, we could avoid the need for
// a ptr to the BaseProfilerCount object.
// XXX It would be better to do this without the if() and without the
// theoretical race to set the UniquePtr (i.e. possible leak).
# define AUTO_PROFILER_COUNT_TOTAL(label, count) \
do { \
profiler_number_##label++; /* do this first*/ \
profiler_count_##label += count; \
if (!AutoCount_##label) { \
/* Ignore that we could call this twice in theory, and that we leak \
* them \
*/ \
AutoCount_##label.reset(new BaseProfilerCount( \
#label, &profiler_count_##label, &profiler_number_##label, \
profiler_category_##label, profiler_description_##label)); \
profiler_add_sampled_counter(AutoCount_##label.get()); \
} \
} while (0)
# define AUTO_PROFILER_COUNT(label) \
do { \
profiler_count_##label++; /* do this first*/ \
if (!AutoCount_##label) { \
/* Ignore that we could call this twice in theory, and that we leak \
* them \
*/ \
AutoCount_##label.reset(new BaseProfilerCount( \
#label, &profiler_count_##label, nullptr, \
profiler_category_##label, profiler_description_##label)); \
profiler_add_sampled_counter(AutoCount_##label.get()); \
} \
} while (0)
# define AUTO_PROFILER_STATIC_COUNT(label, count) \
do { \
profiler_number_##label++; /* do this first*/ \
profiler_count_##label += count; \
} while (0)
// if we need to force the allocation
# define AUTO_PROFILER_FORCE_ALLOCATION(label) \
do { \
if (!AutoCount_##label) { \
/* Ignore that we could call this twice in theory, and that we leak \
* them \
*/ \
AutoCount_##label.reset(new BaseProfilerCount( \
#label, &profiler_count_##label, &profiler_number_##label, \
profiler_category_##label, profiler_description_##label)); \
} \
} while (0)
#endif // !MOZ_BASE_PROFILER
#endif // ProfilerCounts_h
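// An editor-added sketch pulling the macro-based pattern above together
// (the names are illustrative, not from the original file):
//
//   // At file scope in some .cpp:
//   PROFILER_DEFINE_COUNT_TOTAL(malloc_bytes, "Memory", "Bytes allocated")
//
//   void RecordAllocation(size_t aBytes) {
//     // Adds aBytes to the running count, bumps the number-of-calls
//     // counter, and lazily registers the counter on first use.
//     AUTO_PROFILER_COUNT_TOTAL(malloc_bytes, aBytes);
//   }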


@@ -0,0 +1,416 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef ProfilerMarkerPayload_h
#define ProfilerMarkerPayload_h
#include "mozilla/Attributes.h"
#include "mozilla/Maybe.h"
#include "mozilla/RefPtr.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/UniquePtrExtensions.h"
#include "mozilla/net/TimingStruct.h"
#include "nsString.h"
#include "BaseProfiler.h"
#include "js/Utility.h"
#include "gfxASurface.h"
#include "mozilla/ServoTraversalStatistics.h"
namespace mozilla {
namespace layers {
class Layer;
} // namespace layers
} // namespace mozilla
class SpliceableJSONWriter;
class UniqueStacks;
// This is an abstract class that can be implemented to supply data to be
// attached with a profiler marker.
//
// When subclassing this, note that the destructor can be called on any thread,
// i.e. not necessarily on the thread that created the object.
class ProfilerMarkerPayload {
public:
explicit ProfilerMarkerPayload(
const mozilla::Maybe<nsID>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<uint32_t>& aDocShellHistoryId = mozilla::Nothing(),
UniqueProfilerBacktrace aStack = nullptr)
: mStack(std::move(aStack)),
mDocShellId(aDocShellId),
mDocShellHistoryId(aDocShellHistoryId) {}
ProfilerMarkerPayload(
const mozilla::TimeStamp& aStartTime, const mozilla::TimeStamp& aEndTime,
const mozilla::Maybe<nsID>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<uint32_t>& aDocShellHistoryId = mozilla::Nothing(),
UniqueProfilerBacktrace aStack = nullptr)
: mStartTime(aStartTime),
mEndTime(aEndTime),
mStack(std::move(aStack)),
mDocShellId(aDocShellId),
mDocShellHistoryId(aDocShellHistoryId) {}
virtual ~ProfilerMarkerPayload() {}
virtual void StreamPayload(SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) = 0;
mozilla::TimeStamp GetStartTime() const { return mStartTime; }
protected:
void StreamType(const char* aMarkerType, SpliceableJSONWriter& aWriter);
void StreamCommonProps(const char* aMarkerType, SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks);
void SetStack(UniqueProfilerBacktrace aStack) { mStack = std::move(aStack); }
void SetDocShellHistoryId(
const mozilla::Maybe<uint32_t>& aDocShellHistoryId) {
mDocShellHistoryId = aDocShellHistoryId;
}
void SetDocShellId(const mozilla::Maybe<nsID>& aDocShellId) {
mDocShellId = aDocShellId;
}
private:
mozilla::TimeStamp mStartTime;
mozilla::TimeStamp mEndTime;
UniqueProfilerBacktrace mStack;
mozilla::Maybe<nsID> mDocShellId;
mozilla::Maybe<uint32_t> mDocShellHistoryId;
};
#define DECL_STREAM_PAYLOAD \
virtual void StreamPayload(SpliceableJSONWriter& aWriter, \
const mozilla::TimeStamp& aProcessStartTime, \
UniqueStacks& aUniqueStacks) override;
// TODO: Increase the coverage of tracing markers that include DocShell
// information
class TracingMarkerPayload : public ProfilerMarkerPayload {
public:
TracingMarkerPayload(
const char* aCategory, TracingKind aKind,
const mozilla::Maybe<nsID>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<uint32_t>& aDocShellHistoryId = mozilla::Nothing(),
UniqueProfilerBacktrace aCause = nullptr)
: mCategory(aCategory), mKind(aKind) {
if (aCause) {
SetStack(std::move(aCause));
}
SetDocShellId(aDocShellId);
SetDocShellHistoryId(aDocShellHistoryId);
}
DECL_STREAM_PAYLOAD
private:
const char* mCategory;
TracingKind mKind;
};
class FileIOMarkerPayload : public ProfilerMarkerPayload {
public:
FileIOMarkerPayload(const char* aOperation, const char* aSource,
const char* aFilename,
const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime,
UniqueProfilerBacktrace aStack)
: ProfilerMarkerPayload(aStartTime, aEndTime, mozilla::Nothing(),
mozilla::Nothing(), std::move(aStack)),
mSource(aSource),
mOperation(aOperation ? strdup(aOperation) : nullptr),
mFilename(aFilename ? strdup(aFilename) : nullptr) {
MOZ_ASSERT(aSource);
}
DECL_STREAM_PAYLOAD
private:
const char* mSource;
mozilla::UniqueFreePtr<char> mOperation;
mozilla::UniqueFreePtr<char> mFilename;
};
class DOMEventMarkerPayload : public TracingMarkerPayload {
public:
DOMEventMarkerPayload(const nsAString& aEventType,
const mozilla::TimeStamp& aTimeStamp,
const char* aCategory, TracingKind aKind,
const mozilla::Maybe<nsID>& aDocShellId,
const mozilla::Maybe<uint32_t>& aDocShellHistoryId)
: TracingMarkerPayload(aCategory, aKind, aDocShellId, aDocShellHistoryId),
mTimeStamp(aTimeStamp),
mEventType(aEventType) {}
DECL_STREAM_PAYLOAD
private:
mozilla::TimeStamp mTimeStamp;
nsString mEventType;
};
class UserTimingMarkerPayload : public ProfilerMarkerPayload {
public:
UserTimingMarkerPayload(const nsAString& aName,
const mozilla::TimeStamp& aStartTime,
const mozilla::Maybe<nsID>& aDocShellId,
const mozilla::Maybe<uint32_t>& aDocShellHistoryId)
: ProfilerMarkerPayload(aStartTime, aStartTime, aDocShellId,
aDocShellHistoryId),
mEntryType("mark"),
mName(aName) {}
UserTimingMarkerPayload(const nsAString& aName,
const mozilla::Maybe<nsString>& aStartMark,
const mozilla::Maybe<nsString>& aEndMark,
const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime,
const mozilla::Maybe<nsID>& aDocShellId,
const mozilla::Maybe<uint32_t>& aDocShellHistoryId)
: ProfilerMarkerPayload(aStartTime, aEndTime, aDocShellId,
aDocShellHistoryId),
mEntryType("measure"),
mName(aName),
mStartMark(aStartMark),
mEndMark(aEndMark) {}
DECL_STREAM_PAYLOAD
private:
// Either "mark" or "measure".
const char* mEntryType;
nsString mName;
mozilla::Maybe<nsString> mStartMark;
mozilla::Maybe<nsString> mEndMark;
};
// Contains the translation applied to a 2d layer so we can track the layer
// position at each frame.
class LayerTranslationMarkerPayload : public ProfilerMarkerPayload {
public:
LayerTranslationMarkerPayload(mozilla::layers::Layer* aLayer,
mozilla::gfx::Point aPoint,
mozilla::TimeStamp aStartTime)
: ProfilerMarkerPayload(aStartTime, aStartTime),
mLayer(aLayer),
mPoint(aPoint) {}
DECL_STREAM_PAYLOAD
private:
mozilla::layers::Layer* mLayer;
mozilla::gfx::Point mPoint;
};
#include "Units.h" // For ScreenIntPoint
// Tracks when a vsync occurs according to the HardwareComposer.
class VsyncMarkerPayload : public ProfilerMarkerPayload {
public:
explicit VsyncMarkerPayload(mozilla::TimeStamp aVsyncTimestamp)
: ProfilerMarkerPayload(aVsyncTimestamp, aVsyncTimestamp) {}
DECL_STREAM_PAYLOAD
};
class NetworkMarkerPayload : public ProfilerMarkerPayload {
public:
NetworkMarkerPayload(int64_t aID, const char* aURI, NetworkLoadType aType,
const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime, int32_t aPri,
int64_t aCount,
mozilla::net::CacheDisposition aCacheDisposition,
const mozilla::net::TimingStruct* aTimings = nullptr,
const char* aRedirectURI = nullptr)
: ProfilerMarkerPayload(aStartTime, aEndTime, mozilla::Nothing()),
mID(aID),
mURI(aURI ? strdup(aURI) : nullptr),
mRedirectURI(aRedirectURI && (strlen(aRedirectURI) > 0)
? strdup(aRedirectURI)
: nullptr),
mType(aType),
mPri(aPri),
mCount(aCount),
mCacheDisposition(aCacheDisposition) {
if (aTimings) {
mTimings = *aTimings;
}
}
DECL_STREAM_PAYLOAD
private:
int64_t mID;
mozilla::UniqueFreePtr<char> mURI;
mozilla::UniqueFreePtr<char> mRedirectURI;
NetworkLoadType mType;
int32_t mPri;
int64_t mCount;
mozilla::net::TimingStruct mTimings;
mozilla::net::CacheDisposition mCacheDisposition;
};
class ScreenshotPayload : public ProfilerMarkerPayload {
public:
explicit ScreenshotPayload(mozilla::TimeStamp aTimeStamp,
nsCString&& aScreenshotDataURL,
const mozilla::gfx::IntSize& aWindowSize,
uintptr_t aWindowIdentifier)
: ProfilerMarkerPayload(aTimeStamp, mozilla::TimeStamp()),
mScreenshotDataURL(std::move(aScreenshotDataURL)),
mWindowSize(aWindowSize),
mWindowIdentifier(aWindowIdentifier) {}
DECL_STREAM_PAYLOAD
private:
nsCString mScreenshotDataURL;
mozilla::gfx::IntSize mWindowSize;
uintptr_t mWindowIdentifier;
};
class GCSliceMarkerPayload : public ProfilerMarkerPayload {
public:
GCSliceMarkerPayload(const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime,
JS::UniqueChars&& aTimingJSON)
: ProfilerMarkerPayload(aStartTime, aEndTime),
mTimingJSON(std::move(aTimingJSON)) {}
DECL_STREAM_PAYLOAD
private:
JS::UniqueChars mTimingJSON;
};
class GCMajorMarkerPayload : public ProfilerMarkerPayload {
public:
GCMajorMarkerPayload(const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime,
JS::UniqueChars&& aTimingJSON)
: ProfilerMarkerPayload(aStartTime, aEndTime),
mTimingJSON(std::move(aTimingJSON)) {}
DECL_STREAM_PAYLOAD
private:
JS::UniqueChars mTimingJSON;
};
class GCMinorMarkerPayload : public ProfilerMarkerPayload {
public:
GCMinorMarkerPayload(const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime,
JS::UniqueChars&& aTimingData)
: ProfilerMarkerPayload(aStartTime, aEndTime),
mTimingData(std::move(aTimingData)) {}
DECL_STREAM_PAYLOAD
private:
JS::UniqueChars mTimingData;
};
class HangMarkerPayload : public ProfilerMarkerPayload {
public:
HangMarkerPayload(const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime)
: ProfilerMarkerPayload(aStartTime, aEndTime) {}
DECL_STREAM_PAYLOAD
private:
};
class StyleMarkerPayload : public ProfilerMarkerPayload {
public:
StyleMarkerPayload(const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime,
UniqueProfilerBacktrace aCause,
const mozilla::ServoTraversalStatistics& aStats,
const mozilla::Maybe<nsID>& aDocShellId,
const mozilla::Maybe<uint32_t>& aDocShellHistoryId)
: ProfilerMarkerPayload(aStartTime, aEndTime, aDocShellId,
aDocShellHistoryId),
mStats(aStats) {
if (aCause) {
SetStack(std::move(aCause));
}
}
DECL_STREAM_PAYLOAD
private:
mozilla::ServoTraversalStatistics mStats;
};
class LongTaskMarkerPayload : public ProfilerMarkerPayload {
public:
LongTaskMarkerPayload(const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime)
: ProfilerMarkerPayload(aStartTime, aEndTime) {}
DECL_STREAM_PAYLOAD
};
class TextMarkerPayload : public ProfilerMarkerPayload {
public:
TextMarkerPayload(const nsACString& aText,
const mozilla::TimeStamp& aStartTime)
: ProfilerMarkerPayload(aStartTime, aStartTime), mText(aText) {}
TextMarkerPayload(const nsACString& aText,
const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime)
: ProfilerMarkerPayload(aStartTime, aEndTime), mText(aText) {}
TextMarkerPayload(const nsACString& aText,
const mozilla::TimeStamp& aStartTime,
const mozilla::Maybe<nsID>& aDocShellId,
const mozilla::Maybe<uint32_t>& aDocShellHistoryId)
: ProfilerMarkerPayload(aStartTime, aStartTime, aDocShellId,
aDocShellHistoryId),
mText(aText) {}
TextMarkerPayload(const nsACString& aText,
const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime,
const mozilla::Maybe<nsID>& aDocShellId,
const mozilla::Maybe<uint32_t>& aDocShellHistoryId,
UniqueProfilerBacktrace aCause = nullptr)
: ProfilerMarkerPayload(aStartTime, aEndTime, aDocShellId,
aDocShellHistoryId, std::move(aCause)),
mText(aText) {}
DECL_STREAM_PAYLOAD
private:
nsCString mText;
};
class LogMarkerPayload : public ProfilerMarkerPayload {
public:
LogMarkerPayload(const char* aModule, const char* aText,
const mozilla::TimeStamp& aStartTime)
: ProfilerMarkerPayload(aStartTime, aStartTime),
mModule(aModule),
mText(aText) {}
DECL_STREAM_PAYLOAD
private:
nsAutoCStringN<32> mModule; // longest known LazyLogModule name is ~24
nsCString mText;
};
#endif // ProfilerMarkerPayload_h
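// An editor-added sketch of constructing one of the payloads above; how a
// payload is then handed to the profiler (the add-marker entry points) is
// outside this header.
//
//   mozilla::TimeStamp start = mozilla::TimeStamp::Now();
//   // ... do the work being marked ...
//   mozilla::TimeStamp end = mozilla::TimeStamp::Now();
//   auto payload = mozilla::MakeUnique<TextMarkerPayload>(
//       NS_LITERAL_CSTRING("parsed 12 rules"), start, end);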


@@ -0,0 +1,155 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef BASE_PROFILER_SHARED_LIBRARIES_H_
#define BASE_PROFILER_SHARED_LIBRARIES_H_
#ifndef MOZ_BASE_PROFILER
# error This header does not have a useful implementation on your platform!
#endif
#include "nsNativeCharsetUtils.h"
#include "nsString.h"
#include <nsID.h>
#include <algorithm>
#include <stdint.h>
#include <stdlib.h>
#include <string>
#include <vector>
class SharedLibrary {
public:
SharedLibrary(uintptr_t aStart, uintptr_t aEnd, uintptr_t aOffset,
const nsCString& aBreakpadId, const nsString& aModuleName,
const nsString& aModulePath, const nsString& aDebugName,
const nsString& aDebugPath, const nsCString& aVersion,
const char* aArch)
: mStart(aStart),
mEnd(aEnd),
mOffset(aOffset),
mBreakpadId(aBreakpadId),
mModuleName(aModuleName),
mModulePath(aModulePath),
mDebugName(aDebugName),
mDebugPath(aDebugPath),
mVersion(aVersion),
mArch(aArch) {}
SharedLibrary(const SharedLibrary& aEntry)
: mStart(aEntry.mStart),
mEnd(aEntry.mEnd),
mOffset(aEntry.mOffset),
mBreakpadId(aEntry.mBreakpadId),
mModuleName(aEntry.mModuleName),
mModulePath(aEntry.mModulePath),
mDebugName(aEntry.mDebugName),
mDebugPath(aEntry.mDebugPath),
mVersion(aEntry.mVersion),
mArch(aEntry.mArch) {}
SharedLibrary& operator=(const SharedLibrary& aEntry) {
// Gracefully handle self assignment
if (this == &aEntry) return *this;
mStart = aEntry.mStart;
mEnd = aEntry.mEnd;
mOffset = aEntry.mOffset;
mBreakpadId = aEntry.mBreakpadId;
mModuleName = aEntry.mModuleName;
mModulePath = aEntry.mModulePath;
mDebugName = aEntry.mDebugName;
mDebugPath = aEntry.mDebugPath;
mVersion = aEntry.mVersion;
mArch = aEntry.mArch;
return *this;
}
bool operator==(const SharedLibrary& other) const {
return (mStart == other.mStart) && (mEnd == other.mEnd) &&
(mOffset == other.mOffset) && (mModuleName == other.mModuleName) &&
(mModulePath == other.mModulePath) &&
(mDebugName == other.mDebugName) &&
(mDebugPath == other.mDebugPath) &&
(mBreakpadId == other.mBreakpadId) && (mVersion == other.mVersion) &&
(mArch == other.mArch);
}
uintptr_t GetStart() const { return mStart; }
uintptr_t GetEnd() const { return mEnd; }
uintptr_t GetOffset() const { return mOffset; }
const nsCString& GetBreakpadId() const { return mBreakpadId; }
const nsString& GetModuleName() const { return mModuleName; }
const nsString& GetModulePath() const { return mModulePath; }
const std::string GetNativeDebugPath() const {
nsAutoCString debugPathStr;
NS_CopyUnicodeToNative(mDebugPath, debugPathStr);
return debugPathStr.get();
}
const nsString& GetDebugName() const { return mDebugName; }
const nsString& GetDebugPath() const { return mDebugPath; }
const nsCString& GetVersion() const { return mVersion; }
const std::string& GetArch() const { return mArch; }
private:
SharedLibrary() : mStart{0}, mEnd{0}, mOffset{0} {}
uintptr_t mStart;
uintptr_t mEnd;
uintptr_t mOffset;
nsCString mBreakpadId;
nsString mModuleName;
nsString mModulePath;
nsString mDebugName;
nsString mDebugPath;
nsCString mVersion;
std::string mArch;
};
static bool CompareAddresses(const SharedLibrary& first,
const SharedLibrary& second) {
return first.GetStart() < second.GetStart();
}
class SharedLibraryInfo {
public:
static SharedLibraryInfo GetInfoForSelf();
static void Initialize();
SharedLibraryInfo() {}
void AddSharedLibrary(SharedLibrary entry) { mEntries.push_back(entry); }
const SharedLibrary& GetEntry(size_t i) const { return mEntries[i]; }
SharedLibrary& GetMutableEntry(size_t i) { return mEntries[i]; }
// Removes items in the range [first, last)
// i.e. element at the "last" index is not removed
void RemoveEntries(size_t first, size_t last) {
mEntries.erase(mEntries.begin() + first, mEntries.begin() + last);
}
bool Contains(const SharedLibrary& searchItem) const {
return (mEntries.end() !=
std::find(mEntries.begin(), mEntries.end(), searchItem));
}
size_t GetSize() const { return mEntries.size(); }
void SortByAddress() {
std::sort(mEntries.begin(), mEntries.end(), CompareAddresses);
}
void Clear() { mEntries.clear(); }
private:
std::vector<SharedLibrary> mEntries;
};
#endif // BASE_PROFILER_SHARED_LIBRARIES_H_
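// An editor-added sketch of typical use: enumerate the process' modules,
// sort them, and map a hypothetical absolute |pc| to its containing module.
//
//   SharedLibraryInfo info = SharedLibraryInfo::GetInfoForSelf();
//   info.SortByAddress();
//   for (size_t i = 0; i < info.GetSize(); i++) {
//     const SharedLibrary& lib = info.GetEntry(i);
//     if (lib.GetStart() <= pc && pc < lib.GetEnd()) {
//       // |pc| falls inside this module.
//     }
//   }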


@@ -0,0 +1,123 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef js_ProfilingCategory_h
#define js_ProfilingCategory_h
#include "jstypes.h" // JS_FRIEND_API
// clang-format off
// This higher-order macro lists all categories with their subcategories.
//
// PROFILING_CATEGORY_LIST(BEGIN_CATEGORY, SUBCATEGORY, END_CATEGORY)
// BEGIN_CATEGORY(name, labelAsString, colorAsString)
// SUBCATEGORY(category, name, labelAsString)
// END_CATEGORY
//
// The list of available color names for categories is:
// transparent, grey, purple, yellow, orange, lightblue, green, blue, magenta
//
// Categories and subcategories are used for stack-based instrumentation. They
// are specified in label frames in the profiling stack, see ProfilingStack.h.
// At any point, the category pair of the topmost profiler label frame in the
// label stack determines the category pair of that stack.
// Each category describes a type of workload that the CPU can be busy with.
// Categories should be non-overlapping: the list of categories should be
// chosen in such a way that every possible stack can be mapped to a single
// category unambiguously.
#define PROFILING_CATEGORY_LIST(BEGIN_CATEGORY, SUBCATEGORY, END_CATEGORY) \
BEGIN_CATEGORY(IDLE, "Idle", "transparent") \
SUBCATEGORY(IDLE, IDLE, "Other") \
END_CATEGORY \
BEGIN_CATEGORY(OTHER, "Other", "grey") \
SUBCATEGORY(OTHER, OTHER, "Other") \
END_CATEGORY \
BEGIN_CATEGORY(LAYOUT, "Layout", "purple") \
SUBCATEGORY(LAYOUT, LAYOUT, "Other") \
SUBCATEGORY(LAYOUT, LAYOUT_FrameConstruction, "Frame construction") \
SUBCATEGORY(LAYOUT, LAYOUT_Reflow, "Reflow") \
SUBCATEGORY(LAYOUT, LAYOUT_CSSParsing, "CSS parsing") \
SUBCATEGORY(LAYOUT, LAYOUT_SelectorQuery, "Selector query") \
SUBCATEGORY(LAYOUT, LAYOUT_StyleComputation, "Style computation") \
END_CATEGORY \
BEGIN_CATEGORY(JS, "JavaScript", "yellow") \
SUBCATEGORY(JS, JS, "Other") \
SUBCATEGORY(JS, JS_Parsing, "JS Parsing") \
SUBCATEGORY(JS, JS_IonCompilation, "Ion JIT Compilation") \
SUBCATEGORY(JS, JS_BaselineCompilation, "Baseline JIT Compilation") \
END_CATEGORY \
BEGIN_CATEGORY(GCCC, "GC / CC", "orange") \
SUBCATEGORY(GCCC, GCCC, "Other") \
END_CATEGORY \
BEGIN_CATEGORY(NETWORK, "Network", "lightblue") \
SUBCATEGORY(NETWORK, NETWORK, "Other") \
END_CATEGORY \
BEGIN_CATEGORY(GRAPHICS, "Graphics", "green") \
SUBCATEGORY(GRAPHICS, GRAPHICS, "Other") \
SUBCATEGORY(GRAPHICS, GRAPHICS_DisplayListBuilding, "DisplayList building") \
SUBCATEGORY(GRAPHICS, GRAPHICS_DisplayListMerging, "DisplayList merging") \
SUBCATEGORY(GRAPHICS, GRAPHICS_LayerBuilding, "Layer building") \
SUBCATEGORY(GRAPHICS, GRAPHICS_TileAllocation, "Tile allocation") \
SUBCATEGORY(GRAPHICS, GRAPHICS_WRDisplayList, "WebRender display list") \
SUBCATEGORY(GRAPHICS, GRAPHICS_Rasterization, "Rasterization") \
SUBCATEGORY(GRAPHICS, GRAPHICS_FlushingAsyncPaints, "Flushing async paints") \
SUBCATEGORY(GRAPHICS, GRAPHICS_ImageDecoding, "Image decoding") \
END_CATEGORY \
BEGIN_CATEGORY(DOM, "DOM", "blue") \
SUBCATEGORY(DOM, DOM, "Other") \
END_CATEGORY
namespace JS {
// An enum that lists all possible category pairs in one list.
// This is the enum that is used in profiler stack labels. Having one list that
// includes subcategories from all categories in one list allows assigning the
// category pair to a stack label with just one number.
#define CATEGORY_ENUM_BEGIN_CATEGORY(name, labelAsString, color)
#define CATEGORY_ENUM_SUBCATEGORY(supercategory, name, labelAsString) name,
#define CATEGORY_ENUM_END_CATEGORY
enum class ProfilingCategoryPair : uint32_t {
PROFILING_CATEGORY_LIST(CATEGORY_ENUM_BEGIN_CATEGORY,
CATEGORY_ENUM_SUBCATEGORY,
CATEGORY_ENUM_END_CATEGORY)
COUNT,
LAST = COUNT - 1,
};
#undef CATEGORY_ENUM_BEGIN_CATEGORY
#undef CATEGORY_ENUM_SUBCATEGORY
#undef CATEGORY_ENUM_END_CATEGORY
// An enum that lists just the categories without their subcategories.
#define SUPERCATEGORY_ENUM_BEGIN_CATEGORY(name, labelAsString, color) name,
#define SUPERCATEGORY_ENUM_SUBCATEGORY(supercategory, name, labelAsString)
#define SUPERCATEGORY_ENUM_END_CATEGORY
enum class ProfilingCategory : uint32_t {
PROFILING_CATEGORY_LIST(SUPERCATEGORY_ENUM_BEGIN_CATEGORY,
SUPERCATEGORY_ENUM_SUBCATEGORY,
SUPERCATEGORY_ENUM_END_CATEGORY)
COUNT,
LAST = COUNT - 1,
};
#undef SUPERCATEGORY_ENUM_BEGIN_CATEGORY
#undef SUPERCATEGORY_ENUM_SUBCATEGORY
#undef SUPERCATEGORY_ENUM_END_CATEGORY
// clang-format on
struct ProfilingCategoryPairInfo {
ProfilingCategory mCategory;
uint32_t mSubcategoryIndex;
const char* mLabel;
};
JS_FRIEND_API const ProfilingCategoryPairInfo& GetProfilingCategoryPairInfo(
ProfilingCategoryPair aCategoryPair);
} // namespace JS
#endif /* js_ProfilingCategory_h */
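// An editor-added sketch: PROFILING_CATEGORY_LIST can be expanded with
// other argument macros too. For example, a table of subcategory labels
// indexed by ProfilingCategoryPair:
//
//   #define LABEL_BEGIN_CATEGORY(name, labelAsString, color)
//   #define LABEL_SUBCATEGORY(supercategory, name, labelAsString) \
//     labelAsString,
//   #define LABEL_END_CATEGORY
//   static const char* const kCategoryPairLabels[] = {
//       PROFILING_CATEGORY_LIST(LABEL_BEGIN_CATEGORY, LABEL_SUBCATEGORY,
//                               LABEL_END_CATEGORY)};
//   #undef LABEL_BEGIN_CATEGORY
//   #undef LABEL_SUBCATEGORY
//   #undef LABEL_END_CATEGORY
//
//   // kCategoryPairLabels[uint32_t(ProfilingCategoryPair::LAYOUT_Reflow)]
//   // is "Reflow".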


@@ -0,0 +1,563 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef js_ProfilingStack_h
#define js_ProfilingStack_h
#include <algorithm>
#include <stdint.h>
#include "jstypes.h"
#include "js/ProfilingCategory.h"
#include "js/TypeDecls.h"
#include "js/Utility.h"
#ifdef JS_BROKEN_GCC_ATTRIBUTE_WARNING
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wattributes"
#endif // JS_BROKEN_GCC_ATTRIBUTE_WARNING
class JS_PUBLIC_API JSTracer;
#ifdef JS_BROKEN_GCC_ATTRIBUTE_WARNING
# pragma GCC diagnostic pop
#endif // JS_BROKEN_GCC_ATTRIBUTE_WARNING
class ProfilingStack;
// This file defines the classes ProfilingStack and ProfilingStackFrame.
// The ProfilingStack manages an array of ProfilingStackFrames.
// It keeps track of the "label stack" and the JS interpreter stack.
// The two stack types are interleaved.
//
// Usage:
//
// ProfilingStack* profilingStack = ...;
//
// // For label frames:
// profilingStack->pushLabelFrame(...);
// // Execute some code. When finished, pop the frame:
// profilingStack->pop();
//
// // For JS stack frames:
// profilingStack->pushJSFrame(...);
// // Execute some code. When finished, pop the frame:
// profilingStack->pop();
//
//
// Concurrency considerations
//
// A thread's profiling stack (and the frames inside it) is only modified by
// that thread. However, the profiling stack can be *read* by a different
// thread, the sampler thread: Whenever the profiler wants to sample a given
// thread A, the following happens:
// (1) Thread A is suspended.
// (2) The sampler thread (thread S) reads the ProfilingStack of thread A,
// including all ProfilingStackFrames that are currently in that stack
// (profilingStack->frames[0..profilingStack->stackSize()]).
// (3) Thread A is resumed.
//
// Thread suspension is achieved using platform-specific APIs; refer to each
// platform's Sampler::SuspendAndSampleAndResumeThread implementation in
// platform-*.cpp for details.
//
// When the thread is suspended, the values in profilingStack->stackPointer and
// in the stack frame range
// profilingStack->frames[0..profilingStack->stackPointer] need to be in a
// consistent state, so that thread S does not read partially-constructed stack
// frames. More specifically, we have two requirements:
// (1) When adding a new frame at the top of the stack, its ProfilingStackFrame
// data needs to be put in place *before* the stackPointer is incremented,
// and the compiler + CPU need to know that this order matters.
// (2) When popping a frame from the stack and then preparing the
// ProfilingStackFrame data for the next frame that is about to be pushed,
// the decrement of the stackPointer in pop() needs to happen *before* the
// ProfilingStackFrame for the new frame is being populated, and the
// compiler + CPU need to know that this order matters.
//
// We can express the relevance of these orderings in multiple ways.
// Option A is to make stackPointer an atomic with SequentiallyConsistent
// memory ordering. This would ensure that no writes in thread A would be
// reordered across any writes to stackPointer, which satisfies requirements
// (1) and (2) at the same time. Option A is the simplest.
// Option B is to use ReleaseAcquire memory ordering both for writes to
// stackPointer *and* for writes to ProfilingStackFrame fields. Release-stores
// ensure that all writes that happened *before this write in program order* are
// not reordered to happen after this write. ReleaseAcquire ordering places no
// requirements on the ordering of writes that happen *after* this write in
// program order.
// Using release-stores for writes to stackPointer expresses requirement (1),
// and using release-stores for writes to the ProfilingStackFrame fields
// expresses requirement (2).
//
// Option B is more complicated than option A, but has much better performance
// on x86/64: In a microbenchmark run on a MacBook Pro from 2017, switching
// from option A to option B reduced the overhead of pushing+popping a
// ProfilingStackFrame by 10 nanoseconds.
// On x86/64, release-stores require no explicit hardware barriers or lock
// instructions.
// On ARM/64, option B may be slower than option A, because the compiler will
// generate hardware barriers for every single release-store instead of just
// for the writes to stackPointer. However, the actual performance impact of
// this has not yet been measured on ARM, so we're currently using option B
// everywhere. This is something that we may want to change in the future once
// we've done measurements.
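// To make the difference concrete, here is a minimal sketch in plain
// std::atomic terms (illustrative only; the real code uses mozilla::Atomic,
// and CAPACITY is a placeholder):
//
//   std::atomic<uint32_t> stackPointer;
//   ProfilingStackFrame frames[CAPACITY];
//
//   // Option A: a sequentially-consistent store orders all earlier writes
//   // before it, at the cost of a full barrier on x86/64.
//   frames[sp] = newFrame;
//   stackPointer.store(sp + 1, std::memory_order_seq_cst);
//
//   // Option B: a release store still keeps the earlier writes from being
//   // reordered after it, but compiles to a plain store on x86/64.
//   frames[sp] = newFrame;
//   stackPointer.store(sp + 1, std::memory_order_release);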
namespace js {
// A call stack can be specified to the JS engine such that all JS function
// entries/exits push/pop a stack frame to/from the specified stack.
//
// For more detailed information, see vm/GeckoProfiler.h.
//
class ProfilingStackFrame {
// A ProfilingStackFrame represents either a label frame or a JS frame.
// WARNING WARNING WARNING
//
// All the fields below are Atomic<...,ReleaseAcquire>. This is needed so
// that writes to these fields are release-writes, which ensures that
// earlier writes in this thread don't get reordered after the writes to
// these fields. In particular, the decrement of the stack pointer in
// ProfilingStack::pop() is a write that *must* happen before the values in
// this ProfilingStackFrame are changed. Otherwise, the sampler thread might
// see an inconsistent state where the stack pointer still points to a
// ProfilingStackFrame which has already been popped off the stack and whose
// fields have now been partially repopulated with new values.
// See the "Concurrency considerations" paragraph at the top of this file
// for more details.
// Descriptive label for this stack frame. Must be a static string! Can be
// an empty string, but not a null pointer.
mozilla::Atomic<const char*, mozilla::ReleaseAcquire,
mozilla::recordreplay::Behavior::DontPreserve>
label_;
// An additional descriptive string of this frame which is combined with
// |label_| in profiler output. Need not be (and usually isn't) static. Can
// be null.
mozilla::Atomic<const char*, mozilla::ReleaseAcquire,
mozilla::recordreplay::Behavior::DontPreserve>
dynamicString_;
// Stack pointer for non-JS stack frames, the script pointer otherwise.
mozilla::Atomic<void*, mozilla::ReleaseAcquire,
mozilla::recordreplay::Behavior::DontPreserve>
spOrScript;
// The bytecode offset for JS stack frames.
// Must not be used on non-JS frames; it'll contain either the default 0,
// or a leftover value from a previous JS stack frame that was using this
// ProfilingStackFrame object.
mozilla::Atomic<int32_t, mozilla::ReleaseAcquire,
mozilla::recordreplay::Behavior::DontPreserve>
pcOffsetIfJS_;
// Bits 0...8 hold the Flags. Bits 9...31 hold the category pair.
mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire,
mozilla::recordreplay::Behavior::DontPreserve>
flagsAndCategoryPair_;
static int32_t pcToOffset(JSScript* aScript, jsbytecode* aPc);
public:
ProfilingStackFrame() = default;
ProfilingStackFrame& operator=(const ProfilingStackFrame& other) {
label_ = other.label();
dynamicString_ = other.dynamicString();
void* spScript = other.spOrScript;
spOrScript = spScript;
int32_t offsetIfJS = other.pcOffsetIfJS_;
pcOffsetIfJS_ = offsetIfJS;
uint32_t flagsAndCategory = other.flagsAndCategoryPair_;
flagsAndCategoryPair_ = flagsAndCategory;
return *this;
}
// 9 bits for the flags.
// That leaves 32 - 9 = 23 bits for the category pair.
enum class Flags : uint32_t {
// The first three flags describe the kind of the frame and are
// mutually exclusive. (We still give them individual bits for
// simplicity.)
// A regular label frame. These usually come from AutoProfilerLabel.
IS_LABEL_FRAME = 1 << 0,
// A special frame indicating the start of a run of JS profiling stack
// frames. IS_SP_MARKER_FRAME frames are ignored, except for the sp
// field. These frames are needed to get correct ordering between JS
// and LABEL frames because JS frames don't carry sp information.
// SP is short for "stack pointer".
IS_SP_MARKER_FRAME = 1 << 1,
// A JS frame.
IS_JS_FRAME = 1 << 2,
// An interpreter JS frame that has OSR-ed into baseline. IS_JS_FRAME
// frames can have this flag set and unset during their lifetime.
// JS_OSR frames are ignored.
JS_OSR = 1 << 3,
// The next three are mutually exclusive.
// By default, for profiling stack frames that have both a label and a
// dynamic string, the two strings are combined into one string of the
// form "<label> <dynamicString>" during JSON serialization. The
// following flags can be used to change this default.
STRING_TEMPLATE_METHOD = 1 << 4, // "<label>.<dynamicString>"
STRING_TEMPLATE_GETTER = 1 << 5, // "get <label>.<dynamicString>"
STRING_TEMPLATE_SETTER = 1 << 6, // "set <label>.<dynamicString>"
// If set, causes this stack frame to be marked as "relevantForJS" in
// the profile JSON, which will make it show up in the "JS only" call
// tree view.
RELEVANT_FOR_JS = 1 << 7,
// If set, causes the label on this ProfilingStackFrame to be ignored
// and to be replaced by the subcategory's label.
LABEL_DETERMINED_BY_CATEGORY_PAIR = 1 << 8,
FLAGS_BITCOUNT = 9,
FLAGS_MASK = (1 << FLAGS_BITCOUNT) - 1
};
static_assert(
uint32_t(JS::ProfilingCategoryPair::LAST) <=
(UINT32_MAX >> uint32_t(Flags::FLAGS_BITCOUNT)),
"Too many category pairs to fit into u32 with together with the "
"reserved bits for the flags");
bool isLabelFrame() const {
return uint32_t(flagsAndCategoryPair_) & uint32_t(Flags::IS_LABEL_FRAME);
}
bool isSpMarkerFrame() const {
return uint32_t(flagsAndCategoryPair_) &
uint32_t(Flags::IS_SP_MARKER_FRAME);
}
bool isJsFrame() const {
return uint32_t(flagsAndCategoryPair_) & uint32_t(Flags::IS_JS_FRAME);
}
bool isOSRFrame() const {
return uint32_t(flagsAndCategoryPair_) & uint32_t(Flags::JS_OSR);
}
void setIsOSRFrame(bool isOSR) {
if (isOSR) {
flagsAndCategoryPair_ =
uint32_t(flagsAndCategoryPair_) | uint32_t(Flags::JS_OSR);
} else {
flagsAndCategoryPair_ =
uint32_t(flagsAndCategoryPair_) & ~uint32_t(Flags::JS_OSR);
}
}
const char* label() const {
uint32_t flagsAndCategoryPair = flagsAndCategoryPair_;
if (flagsAndCategoryPair &
uint32_t(Flags::LABEL_DETERMINED_BY_CATEGORY_PAIR)) {
auto categoryPair = JS::ProfilingCategoryPair(
flagsAndCategoryPair >> uint32_t(Flags::FLAGS_BITCOUNT));
return JS::GetProfilingCategoryPairInfo(categoryPair).mLabel;
}
return label_;
}
const char* dynamicString() const { return dynamicString_; }
void initLabelFrame(const char* aLabel, const char* aDynamicString, void* sp,
JS::ProfilingCategoryPair aCategoryPair,
uint32_t aFlags) {
label_ = aLabel;
dynamicString_ = aDynamicString;
spOrScript = sp;
// pcOffsetIfJS_ is not set and must not be used on label frames.
flagsAndCategoryPair_ =
uint32_t(Flags::IS_LABEL_FRAME) |
(uint32_t(aCategoryPair) << uint32_t(Flags::FLAGS_BITCOUNT)) | aFlags;
MOZ_ASSERT(isLabelFrame());
}
void initSpMarkerFrame(void* sp) {
label_ = "";
dynamicString_ = nullptr;
spOrScript = sp;
// pcOffsetIfJS_ is not set and must not be used on sp marker frames.
flagsAndCategoryPair_ = uint32_t(Flags::IS_SP_MARKER_FRAME) |
(uint32_t(JS::ProfilingCategoryPair::OTHER)
<< uint32_t(Flags::FLAGS_BITCOUNT));
MOZ_ASSERT(isSpMarkerFrame());
}
void initJsFrame(const char* aLabel, const char* aDynamicString,
JSScript* aScript, jsbytecode* aPc) {
label_ = aLabel;
dynamicString_ = aDynamicString;
spOrScript = aScript;
pcOffsetIfJS_ = pcToOffset(aScript, aPc);
flagsAndCategoryPair_ =
uint32_t(Flags::IS_JS_FRAME) | (uint32_t(JS::ProfilingCategoryPair::JS)
<< uint32_t(Flags::FLAGS_BITCOUNT));
MOZ_ASSERT(isJsFrame());
}
uint32_t flags() const {
return uint32_t(flagsAndCategoryPair_) & uint32_t(Flags::FLAGS_MASK);
}
JS::ProfilingCategoryPair categoryPair() const {
return JS::ProfilingCategoryPair(flagsAndCategoryPair_ >>
uint32_t(Flags::FLAGS_BITCOUNT));
}
void* stackAddress() const {
MOZ_ASSERT(!isJsFrame());
return spOrScript;
}
JS_PUBLIC_API JSScript* script() const;
// Note that the pointer returned might be invalid.
JSScript* rawScript() const {
MOZ_ASSERT(isJsFrame());
void* script = spOrScript;
return static_cast<JSScript*>(script);
}
// We can't know the layout of JSScript, so look in vm/GeckoProfiler.cpp.
JS_FRIEND_API jsbytecode* pc() const;
void setPC(jsbytecode* pc);
void trace(JSTracer* trc);
// The offset of a pc into a script's code can legitimately be 0, so a
// nullptr pc is signified by the offset -1. pc() and setPC() check for
// this value to get/set the right pc.
static const int32_t NullPCOffset = -1;
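// Encoding sketch (the real pc()/setPC() are defined out of line, since
// JSScript's layout is opaque in this header):
//   pcOffsetIfJS_ = pc ? pcToOffset(script, pc) : NullPCOffset;
//   // and pc() maps NullPCOffset back to nullptr, and any other offset
//   // back into the script's bytecode.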
};
JS_FRIEND_API void SetContextProfilingStack(JSContext* cx,
ProfilingStack* profilingStack);
// GetContextProfilingStack also exists, but it's defined in RootingAPI.h.
JS_FRIEND_API void EnableContextProfilingStack(JSContext* cx, bool enabled);
JS_FRIEND_API void RegisterContextProfilingEventMarker(JSContext* cx,
void (*fn)(const char*));
} // namespace js
namespace JS {
typedef ProfilingStack* (*RegisterThreadCallback)(const char* threadName,
void* stackBase);
typedef void (*UnregisterThreadCallback)();
JS_FRIEND_API void SetProfilingThreadCallbacks(
RegisterThreadCallback registerThread,
UnregisterThreadCallback unregisterThread);
} // namespace JS
// Each thread has its own ProfilingStack. That thread modifies the
// ProfilingStack, pushing and popping elements as necessary.
//
// The ProfilingStack is also read periodically by the profiler's sampler
// thread. This happens only when the thread that owns the ProfilingStack is
// suspended. So there are no genuine parallel accesses.
//
// However, it is possible for pushing/popping to be interrupted by a periodic
// sample. Because of this, we need pushing/popping to be effectively atomic.
//
// - When pushing a new frame, we increment the stack pointer -- making the new
// frame visible to the sampler thread -- only after the new frame has been
// fully written. The stack pointer is Atomic<uint32_t,ReleaseAcquire>, so
// the increment is a release-store, which ensures that this store is not
// reordered before the writes of the frame.
//
// - When popping an old frame, the only operation is the decrementing of the
// stack pointer, which is obviously atomic.
//
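// A minimal usage sketch (AutoLabelFrame is a hypothetical RAII helper;
// real callers go through AutoProfilerLabel or the JS engine's
// instrumentation):
//
//   class AutoLabelFrame {
//     ProfilingStack& stack_;
//
//    public:
//     AutoLabelFrame(ProfilingStack& stack, const char* label)
//         : stack_(stack) {
//       stack_.pushLabelFrame(label, /* dynamicString */ nullptr,
//                             /* sp */ this,
//                             JS::ProfilingCategoryPair::OTHER);
//     }
//     ~AutoLabelFrame() { stack_.pop(); }
//   };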
class ProfilingStack final {
public:
ProfilingStack() : stackPointer(0) {}
~ProfilingStack();
void pushLabelFrame(const char* label, const char* dynamicString, void* sp,
JS::ProfilingCategoryPair categoryPair,
uint32_t flags = 0) {
// This thread is the only one that ever changes the value of
// stackPointer.
// Store the value of the atomic in a non-atomic local variable so that
// the compiler won't generate two separate loads from the atomic for
// the size check and the frames[] array indexing operation.
uint32_t stackPointerVal = stackPointer;
if (MOZ_UNLIKELY(stackPointerVal >= capacity)) {
ensureCapacitySlow();
}
frames[stackPointerVal].initLabelFrame(label, dynamicString, sp,
categoryPair, flags);
// This must happen at the end! The compiler will not reorder this
// update because stackPointer is Atomic<..., ReleaseAcquire>, so the
// writes above will not be reordered below the stackPointer store.
// Do the read and the write as two separate statements, in order to
// make it clear that we don't need an atomic increment, which would be
// more expensive on x86 than the separate operations done here.
// However, don't use stackPointerVal here; instead, allow the compiler
// to turn this store into a non-atomic increment instruction which
// takes up less code size.
stackPointer = stackPointer + 1;
}
void pushSpMarkerFrame(void* sp) {
uint32_t oldStackPointer = stackPointer;
if (MOZ_UNLIKELY(oldStackPointer >= capacity)) {
ensureCapacitySlow();
}
frames[oldStackPointer].initSpMarkerFrame(sp);
// This must happen at the end, see the comment in pushLabelFrame.
stackPointer = oldStackPointer + 1;
}
void pushJsFrame(const char* label, const char* dynamicString,
JSScript* script, jsbytecode* pc) {
// This thread is the only one that ever changes the value of
// stackPointer. Only load the atomic once.
uint32_t oldStackPointer = stackPointer;
if (MOZ_UNLIKELY(oldStackPointer >= capacity)) {
ensureCapacitySlow();
}
frames[oldStackPointer].initJsFrame(label, dynamicString, script, pc);
// This must happen at the end, see the comment in pushLabelFrame.
stackPointer = stackPointer + 1;
}
void pop() {
MOZ_ASSERT(stackPointer > 0);
// Do the read and the write as two separate statements, in order to
// make it clear that we don't need an atomic decrement, which would be
// more expensive on x86 than the separate operations done here.
// This thread is the only one that ever changes the value of
// stackPointer.
uint32_t oldStackPointer = stackPointer;
stackPointer = oldStackPointer - 1;
}
uint32_t stackSize() const { return stackPointer; }
uint32_t stackCapacity() const { return capacity; }
private:
// Out-of-line path for expanding the buffer, since otherwise this would get
// inlined in every DOM WebIDL call.
MOZ_COLD void ensureCapacitySlow();
// No copying.
ProfilingStack(const ProfilingStack&) = delete;
void operator=(const ProfilingStack&) = delete;
// No moving either.
ProfilingStack(ProfilingStack&&) = delete;
void operator=(ProfilingStack&&) = delete;
uint32_t capacity = 0;
public:
// The pointer to the stack frames; this is read by the profiler thread and
// written by the current thread.
//
// This is effectively a unique pointer.
mozilla::Atomic<js::ProfilingStackFrame*, mozilla::SequentiallyConsistent,
mozilla::recordreplay::Behavior::DontPreserve>
frames{nullptr};
// This may exceed the capacity, so use the stackSize() method to determine
// the number of valid entries in frames[]. When this is less than
// stackCapacity(), it refers to the first free stack frame past the top of
// the in-use stack (i.e. frames[stackPointer - 1] is the top stack
// frame).
//
// WARNING WARNING WARNING
//
// This is an atomic variable that uses ReleaseAcquire memory ordering.
// See the "Concurrency considerations" paragraph at the top of this file
// for more details.
mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire,
mozilla::recordreplay::Behavior::DontPreserve>
stackPointer;
};
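// Sampler-side read sketch (illustrative; only valid while the owning thread
// is suspended, see Sampler::SuspendAndSampleAndResumeThread in
// platform-*.cpp):
//
//   uint32_t count = std::min(profilingStack->stackSize(),
//                             profilingStack->stackCapacity());
//   for (uint32_t i = 0; i < count; i++) {
//     const js::ProfilingStackFrame& frame = profilingStack->frames[i];
//     // ... copy the label/category information out of |frame| ...
//   }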
namespace js {
class AutoGeckoProfilerEntry;
class GeckoProfilerEntryMarker;
class GeckoProfilerBaselineOSRMarker;
class GeckoProfilerThread {
friend class AutoGeckoProfilerEntry;
friend class GeckoProfilerEntryMarker;
friend class GeckoProfilerBaselineOSRMarker;
ProfilingStack* profilingStack_;
// Same as profilingStack_ if the profiler is currently active, otherwise
// null.
ProfilingStack* profilingStackIfEnabled_;
public:
GeckoProfilerThread();
uint32_t stackPointer() {
MOZ_ASSERT(infraInstalled());
return profilingStack_->stackPointer;
}
ProfilingStackFrame* stack() { return profilingStack_->frames; }
ProfilingStack* getProfilingStack() { return profilingStack_; }
ProfilingStack* getProfilingStackIfEnabled() {
return profilingStackIfEnabled_;
}
/*
* True if the profiler infrastructure is set up. Should be true in builds
* that include profiler support, except during early startup or late
* shutdown. Unrelated to the presence of the Gecko Profiler addon.
*/
bool infraInstalled() { return profilingStack_ != nullptr; }
void setProfilingStack(ProfilingStack* profilingStack, bool enabled);
void enable(bool enable) {
profilingStackIfEnabled_ = enable ? profilingStack_ : nullptr;
}
void trace(JSTracer* trc);
/*
* Functions which are the actual instrumentation to track run information
*
* - enter: a function has started to execute
* - updatePC: updates the pc information about where a function
* is currently executing
* - exit: this function has ceased execution, and no further
* entries/exits will be made
*/
bool enter(JSContext* cx, JSScript* script);
void exit(JSContext* cx, JSScript* script);
inline void updatePC(JSContext* cx, JSScript* script, jsbytecode* pc);
};
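// Call-sequence sketch (illustrative; the real call sites live in the JS
// engine's interpreter and JITs):
//
//   js::GeckoProfilerThread& profiler = /* per-thread instance */;
//   if (!profiler.enter(cx, script)) {
//     return false;  // entering the frame failed
//   }
//   // ... execute the script, calling profiler.updatePC(cx, script, pc)
//   // whenever the profiler needs the current pc ...
//   profiler.exit(cx, script);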
} // namespace js
#endif /* js_ProfilingStack_h */