Mirror of https://github.com/mozilla/gecko-dev.git
Backed out 13 changesets (bug 1492121) for valgrind bustage
Backed out changeset e707f1890820 (bug 1492121) Backed out changeset 90aeaad4a4de (bug 1492121) Backed out changeset 2ffb6ccca437 (bug 1492121) Backed out changeset 4215fefb6ef3 (bug 1492121) Backed out changeset b54b813c4c6c (bug 1492121) Backed out changeset 46f57504c087 (bug 1492121) Backed out changeset a3fe26927b31 (bug 1492121) Backed out changeset 39c486afacec (bug 1492121) Backed out changeset bf1731627e07 (bug 1492121) Backed out changeset 77e7b13c6237 (bug 1492121) Backed out changeset 1f10b50f758f (bug 1492121) Backed out changeset db1506f94d0d (bug 1492121) Backed out changeset 72c4026e9455 (bug 1492121)
This commit is contained in:
Parent
2d8b3e71e4
Commit
31c85bd5fe
|
@ -38,7 +38,6 @@
|
|||
#include "mozilla/Sprintf.h"
|
||||
#include "mozilla/StartupTimeline.h"
|
||||
#include "mozilla/WindowsDllBlocklist.h"
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef LIBFUZZER
|
||||
# include "FuzzerDefs.h"
|
||||
|
@ -240,9 +239,6 @@ uint32_t gBlocklistInitFlags = eDllBlocklistInitFlagDefault;
|
|||
int main(int argc, char* argv[], char* envp[]) {
|
||||
mozilla::TimeStamp start = mozilla::TimeStamp::Now();
|
||||
|
||||
AUTO_BASE_PROFILER_INIT;
|
||||
AUTO_BASE_PROFILER_LABEL("nsBrowserApp main", OTHER);
|
||||
|
||||
#ifdef MOZ_BROWSER_CAN_BE_CONTENTPROC
|
||||
// We are launching as a content process, delegate to the appropriate
|
||||
// main
|
||||
|
|
|
@ -107,7 +107,6 @@ case $cmd in
|
|||
|
||||
${MKDIR} -p ${tgtpath}/mozglue
|
||||
cp -pPR \
|
||||
${TOPSRCDIR}/mozglue/baseprofiler \
|
||||
${TOPSRCDIR}/mozglue/build \
|
||||
${TOPSRCDIR}/mozglue/misc \
|
||||
${TOPSRCDIR}/mozglue/moz.build \
|
||||
|
|
|
@ -1,649 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
/*
|
||||
* This is an implementation of stack unwinding according to a subset
|
||||
* of the ARM Exception Handling ABI, as described in:
|
||||
* http://infocenter.arm.com/help/topic/com.arm.doc.ihi0038a/IHI0038A_ehabi.pdf
|
||||
*
|
||||
* This handles only the ARM-defined "personality routines" (chapter
|
||||
* 9), and don't track the value of FP registers, because profiling
|
||||
* needs only chain of PC/SP values.
|
||||
*
|
||||
* Because the exception handling info may not be accurate for all
|
||||
* possible places where an async signal could occur (e.g., in a
|
||||
* prologue or epilogue), this bounds-checks all stack accesses.
|
||||
*
|
||||
* This file uses "struct" for structures in the exception tables and
|
||||
* "class" otherwise. We should avoid violating the C++11
|
||||
* standard-layout rules in the former.
|
||||
*/
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include "EHABIStackWalk.h"
|
||||
|
||||
# include "BaseProfilerSharedLibraries.h"
|
||||
# include "platform.h"
|
||||
|
||||
# include "mozilla/Atomics.h"
|
||||
# include "mozilla/Attributes.h"
|
||||
# include "mozilla/DebugOnly.h"
|
||||
# include "mozilla/EndianUtils.h"
|
||||
|
||||
# include <algorithm>
|
||||
# include <elf.h>
|
||||
# include <stdint.h>
|
||||
# include <vector>
|
||||
# include <string>
|
||||
|
||||
# ifndef PT_ARM_EXIDX
|
||||
# define PT_ARM_EXIDX 0x70000001
|
||||
# endif
|
||||
|
||||
// Bug 1082817: ICS B2G has a buggy linker that doesn't always ensure
|
||||
// that the EXIDX is sorted by address, as the spec requires. So in
|
||||
// that case we build and sort an array of pointers into the index,
|
||||
// and binary-search that; otherwise, we search the index in place
|
||||
// (avoiding the time and space overhead of the indirection).
|
||||
# if defined(ANDROID_VERSION) && ANDROID_VERSION < 16
|
||||
# define HAVE_UNSORTED_EXIDX
|
||||
# endif
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
// A "prel31" field from the ARM EHABI tables: a 31-bit self-relative,
// sign-extended offset with the top bit reserved as a flag.  Instances
// are never constructed; they are only obtained by reinterpreting bytes
// of the loaded image, hence the deleted constructors and the
// standard-layout "struct" (see the file header comment).
struct PRel31 {
  uint32_t mBits;
  // The reserved top bit (meaning depends on which table this came from).
  bool topBit() const { return (mBits >> 31) != 0; }
  // The raw 31-bit payload, without sign extension.
  uint32_t value() const { return mBits & 0x7fffffff; }
  // The payload sign-extended from 31 to 32 bits.
  int32_t offset() const {
    // Shift the payload up as unsigned, then arithmetic-shift back down
    // to replicate bit 30 into the sign bit.
    uint32_t shifted = mBits << 1;
    return static_cast<int32_t>(shifted) >> 1;
  }
  // Resolve the offset relative to this field's own address.
  const void* compute() const {
    const char* self = reinterpret_cast<const char*>(this);
    return self + offset();
  }

 private:
  PRel31(const PRel31& copied) = delete;
  PRel31() = delete;
};
|
||||
|
||||
// One entry of the .ARM.exidx index section: the function's start
// address and its unwind information, both encoded as prel31 fields.
// Never constructed; only reinterpreted from the loaded image.
struct EHEntry {
  PRel31 startPC;
  PRel31 exidx;

 private:
  EHEntry(const EHEntry& copied) = delete;
  EHEntry() = delete;
};

// A snapshot of the ARM core registers, advanced one frame at a time by
// unwind().
class EHState {
  // Note that any core register can be used as a "frame pointer" to
  // influence the unwinding process, so this must track all of them.
  uint32_t mRegs[16];

 public:
  // Apply aEntry's unwind program to step to the caller's frame;
  // returns false if unwinding failed (see EHInterp below).
  bool unwind(const EHEntry* aEntry, const void* stackBase);
  uint32_t& operator[](int i) { return mRegs[i]; }
  const uint32_t& operator[](int i) const { return mRegs[i]; }
  explicit EHState(const mcontext_t&);
};

// Conventional ARM numbers for the registers treated specially here.
enum { R_SP = 13, R_LR = 14, R_PC = 15 };
|
||||
|
||||
# ifdef HAVE_UNSORTED_EXIDX
|
||||
class EHEntryHandle {
|
||||
const EHEntry* mValue;
|
||||
|
||||
public:
|
||||
EHEntryHandle(const EHEntry* aEntry) : mValue(aEntry) {}
|
||||
const EHEntry* value() const { return mValue; }
|
||||
};
|
||||
|
||||
bool operator<(const EHEntryHandle& lhs, const EHEntryHandle& rhs) {
|
||||
return lhs.value()->startPC.compute() < rhs.value()->startPC.compute();
|
||||
}
|
||||
# endif
|
||||
|
||||
// Unwind information for one loaded ELF object: the PC range of its
// executable segments and its .ARM.exidx index section.  Lookups are a
// binary search over the index, either in place or through a sorted
// vector of handles depending on HAVE_UNSORTED_EXIDX.
class EHTable {
  uint32_t mStartPC;
  uint32_t mEndPC;
  uint32_t mBaseAddress;
# ifdef HAVE_UNSORTED_EXIDX
  // In principle we should be able to binary-search the index section in
  // place, but the ICS toolchain's linker is noncompliant and produces
  // indices that aren't entirely sorted (e.g., libc).  So we have this:
  std::vector<EHEntryHandle> mEntries;
  typedef std::vector<EHEntryHandle>::const_iterator EntryIterator;
  EntryIterator entriesBegin() const { return mEntries.begin(); }
  EntryIterator entriesEnd() const { return mEntries.end(); }
  static const EHEntry* entryGet(EntryIterator aEntry) {
    return aEntry->value();
  }
# else
  typedef const EHEntry* EntryIterator;
  EntryIterator mEntriesBegin, mEntriesEnd;
  EntryIterator entriesBegin() const { return mEntriesBegin; }
  EntryIterator entriesEnd() const { return mEntriesEnd; }
  static const EHEntry* entryGet(EntryIterator aEntry) { return aEntry; }
# endif
  std::string mName;

 public:
  // Parses the ELF image at aELF; on any validation failure the table is
  // left with an empty entry range, so isValid() reports usability.
  EHTable(const void* aELF, size_t aSize, const std::string& aName);
  // Find the index entry covering aPC, or null if none.
  const EHEntry* lookup(uint32_t aPC) const;
  bool isValid() const { return entriesEnd() != entriesBegin(); }
  const std::string& name() const { return mName; }
  uint32_t startPC() const { return mStartPC; }
  uint32_t endPC() const { return mEndPC; }
  uint32_t baseAddress() const { return mBaseAddress; }
};

// The set of EHTables for all loaded objects, with a parallel array of
// start addresses for binary search.  A single immutable instance is
// published process-wide through the atomic sCurrent (see Update/Get).
class EHAddrSpace {
  std::vector<uint32_t> mStarts;
  std::vector<EHTable> mTables;
  static Atomic<const EHAddrSpace*> sCurrent;

 public:
  explicit EHAddrSpace(const std::vector<EHTable>& aTables);
  // Find the table whose PC range covers aPC, or null if none.
  const EHTable* lookup(uint32_t aPC) const;
  static void Update();
  static const EHAddrSpace* Get();
};
|
||||
|
||||
// One-time setup: snapshot unwind tables for the currently loaded objects.
void EHABIStackWalkInit() { EHAddrSpace::Update(); }

// Walk the stack described by aContext, recording up to aNumFrames
// SP/PC pairs into aSPs/aPCs.  Returns the number of frames recorded
// (at least 1: the interrupted frame itself).  Stops early if no
// address-space snapshot exists yet, if a PC has no unwind info, or if
// an unwind step fails.
size_t EHABIStackWalk(const mcontext_t& aContext, void* stackBase, void** aSPs,
                      void** aPCs, const size_t aNumFrames) {
  const EHAddrSpace* space = EHAddrSpace::Get();
  EHState state(aContext);
  size_t count = 0;

  while (count < aNumFrames) {
    uint32_t pc = state[R_PC], sp = state[R_SP];
    // Record the current frame before attempting to unwind past it.
    aPCs[count] = reinterpret_cast<void*>(pc);
    aSPs[count] = reinterpret_cast<void*>(sp);
    count++;

    if (!space) break;
    // TODO: cache these lookups.  Binary-searching libxul is
    // expensive (possibly more expensive than doing the actual
    // unwind), and even a small cache should help.
    const EHTable* table = space->lookup(pc);
    if (!table) break;
    const EHEntry* entry = table->lookup(pc);
    if (!entry) break;
    if (!state.unwind(entry, stackBase)) break;
  }

  return count;
}
|
||||
|
||||
// Interpreter for one EHABI "personality routine" unwind program
// (ARM-defined personalities 0, 1 and 2 only; see file header).  The
// constructor decodes the entry's exidx word into an instruction byte
// stream; unwind() then executes it against mState.
class EHInterp {
 public:
  // Note that stackLimit is exclusive and stackBase is inclusive
  // (i.e, stackLimit < SP <= stackBase), following the convention
  // set by the AAPCS spec.
  EHInterp(EHState& aState, const EHEntry* aEntry, uint32_t aStackLimit,
           uint32_t aStackBase)
      : mState(aState),
        mStackLimit(aStackLimit),
        mStackBase(aStackBase),
        mNextWord(0),
        mWordsLeft(0),
        mFailed(false) {
    const PRel31& exidx = aEntry->exidx;
    uint32_t firstWord;

    if (exidx.mBits == 1) {  // EXIDX_CANTUNWIND
      mFailed = true;
      return;
    }
    if (exidx.topBit()) {
      // Compact inline form: the program is packed into this word.
      firstWord = exidx.mBits;
    } else {
      // Out-of-line form: exidx points at the table entry's first word.
      mNextWord = reinterpret_cast<const uint32_t*>(exidx.compute());
      firstWord = *mNextWord++;
    }

    // The top byte of the first word selects the personality routine.
    switch (firstWord >> 24) {
      case 0x80:  // short
        mWord = firstWord << 8;
        mBytesLeft = 3;
        break;
      case 0x81:
      case 0x82:  // long; catch descriptor size ignored
        mWord = firstWord << 16;
        mBytesLeft = 2;
        mWordsLeft = (firstWord >> 16) & 0xff;
        break;
      default:
        // unknown personality
        mFailed = true;
    }
  }

  // Execute the unwind program; true on success (mState stepped to the
  // caller's frame), false on failure or CANTUNWIND.
  bool unwind();

 private:
  // TODO: GCC has been observed not CSEing repeated reads of
  // mState[R_SP] with writes to mFailed between them, suggesting that
  // it hasn't determined that they can't alias and is thus missing
  // optimization opportunities.  So, we may want to flatten EHState
  // into this class; this may also make the code simpler.
  EHState& mState;
  uint32_t mStackLimit;
  uint32_t mStackBase;
  const uint32_t* mNextWord;  // next 32-bit word of the out-of-line program
  uint32_t mWord;             // current word, consumed high byte first
  uint8_t mWordsLeft;         // full words remaining after mWord
  uint8_t mBytesLeft;         // unconsumed bytes remaining in mWord
  bool mFailed;               // sticky failure flag; checked by unwind()

  // Instruction opcodes (I_*) and the masks (M_*) that select them.
  enum {
    I_ADDSP = 0x00,  // 0sxxxxxx (subtract if s)
    M_ADDSP = 0x80,
    I_POPMASK = 0x80,  // 1000iiii iiiiiiii (if any i set)
    M_POPMASK = 0xf0,
    I_MOVSP = 0x90,  // 1001nnnn
    M_MOVSP = 0xf0,
    I_POPN = 0xa0,  // 1010lnnn
    M_POPN = 0xf0,
    I_FINISH = 0xb0,    // 10110000
    I_POPLO = 0xb1,     // 10110001 0000iiii (if any i set)
    I_ADDSPBIG = 0xb2,  // 10110010 uleb128
    I_POPFDX = 0xb3,    // 10110011 sssscccc
    I_POPFDX8 = 0xb8,   // 10111nnn
    M_POPFDX8 = 0xf8,
    // "Intel Wireless MMX" extensions omitted.
    I_POPFDD = 0xc8,  // 1100100h sssscccc
    M_POPFDD = 0xfe,
    I_POPFDD8 = 0xd0,  // 11010nnn
    M_POPFDD8 = 0xf8
  };

  // Fetch the next instruction byte, refilling mWord from the table as
  // needed.  When the program is exhausted this returns I_FINISH, which
  // unwind() treats as a terminator.
  uint8_t next() {
    if (mBytesLeft == 0) {
      if (mWordsLeft == 0) {
        return I_FINISH;
      }
      mWordsLeft--;
      mWord = *mNextWord++;
      mBytesLeft = 4;
    }
    mBytesLeft--;
    mWord = (mWord << 8) | (mWord >> 24);  // rotate
    // The uint8_t return type keeps only the low byte, i.e. the byte
    // just rotated down from the top of the word.
    return mWord;
  }

  // The virtual stack pointer being unwound, and a typed view of it.
  uint32_t& vSP() { return mState[R_SP]; }
  uint32_t* ptrSP() { return reinterpret_cast<uint32_t*>(vSP()); }

  // Bounds/alignment checks; each sets mFailed rather than trapping, so
  // this stays safe when the exception info is inaccurate (see header).
  void checkStackBase() {
    if (vSP() > mStackBase) mFailed = true;
  }
  void checkStackLimit() {
    if (vSP() <= mStackLimit) mFailed = true;
  }
  void checkStackAlign() {
    if ((vSP() & 3) != 0) mFailed = true;
  }
  void checkStack() {
    checkStackBase();
    checkStackLimit();
    checkStackAlign();
  }

  // Pop each register in [first, last] whose bit is set in mask (LSB
  // corresponds to `first`).  A popped SP value is applied only after
  // the rest of the range has been processed, so the remaining loads
  // still use the old stack pointer.
  void popRange(uint8_t first, uint8_t last, uint16_t mask) {
    bool hasSP = false;
    uint32_t tmpSP;
    if (mask == 0) mFailed = true;
    for (uint8_t r = first; r <= last; ++r) {
      if (mask & 1) {
        if (r == R_SP) {
          hasSP = true;
          tmpSP = *ptrSP();
        } else
          mState[r] = *ptrSP();
        vSP() += 4;
        checkStackBase();
        if (mFailed) return;
      }
      mask >>= 1;
    }
    if (hasSP) {
      vSP() = tmpSP;
      checkStack();
    }
  }
};
|
||||
|
||||
// Step this register state one frame up the stack using aEntry's unwind
// program.  stackBasePtr bounds how far the program may pop.
bool EHState::unwind(const EHEntry* aEntry, const void* stackBasePtr) {
  // The unwinding program cannot set SP to less than the initial value.
  uint32_t stackLimit = mRegs[R_SP] - 4;
  uint32_t stackBase = reinterpret_cast<uint32_t>(stackBasePtr);
  EHInterp interp(*this, aEntry, stackLimit, stackBase);
  return interp.unwind();
}

// Main instruction loop.  Returns true when a FINISH is reached with a
// valid state; false if any instruction fails a stack check, is
// malformed, or is unrecognized.
bool EHInterp::unwind() {
  // PC == 0 marks "not yet set by the program"; FINISH then falls back
  // to LR per the EHABI convention.
  mState[R_PC] = 0;
  checkStack();
  while (!mFailed) {
    uint8_t insn = next();
# if DEBUG_EHABI_UNWIND
    LOG("unwind insn = %02x", (unsigned)insn);
# endif
    // Try to put the common cases first.

    // 00xxxxxx: vsp = vsp + (xxxxxx << 2) + 4
    // 01xxxxxx: vsp = vsp - (xxxxxx << 2) - 4
    if ((insn & M_ADDSP) == I_ADDSP) {
      uint32_t offset = ((insn & 0x3f) << 2) + 4;
      if (insn & 0x40) {
        vSP() -= offset;
        checkStackLimit();
      } else {
        vSP() += offset;
        checkStackBase();
      }
      continue;
    }

    // 10100nnn: Pop r4-r[4+nnn]
    // 10101nnn: Pop r4-r[4+nnn], r14
    if ((insn & M_POPN) == I_POPN) {
      uint8_t n = (insn & 0x07) + 1;
      bool lr = insn & 0x08;
      uint32_t* ptr = ptrSP();
      // Advance and bounds-check SP before reading, so the loads below
      // are known to be within the stack.
      vSP() += (n + (lr ? 1 : 0)) * 4;
      checkStackBase();
      for (uint8_t r = 4; r < 4 + n; ++r) mState[r] = *ptr++;
      if (lr) mState[R_LR] = *ptr++;
      continue;
    }

    // 10110000: Finish
    if (insn == I_FINISH) {
      if (mState[R_PC] == 0) {
        mState[R_PC] = mState[R_LR];
        // Non-standard change (bug 916106): Prevent the caller from
        // re-using LR.  Since the caller is by definition not a leaf
        // routine, it will have to restore LR from somewhere to
        // return to its own caller, so we can safely zero it here.
        // This makes a difference only if an error in unwinding
        // (e.g., caused by starting from within a prologue/epilogue)
        // causes us to load a pointer to a leaf routine as LR; if we
        // don't do something, we'll go into an infinite loop of
        // "returning" to that same function.
        mState[R_LR] = 0;
      }
      return true;
    }

    // 1001nnnn: Set vsp = r[nnnn]
    if ((insn & M_MOVSP) == I_MOVSP) {
      vSP() = mState[insn & 0x0f];
      checkStack();
      continue;
    }

    // 11001000 sssscccc: Pop VFP regs D[16+ssss]-D[16+ssss+cccc] (as FLDMFDD)
    // 11001001 sssscccc: Pop VFP regs D[ssss]-D[ssss+cccc] (as FLDMFDD)
    if ((insn & M_POPFDD) == I_POPFDD) {
      uint8_t n = (next() & 0x0f) + 1;
      // Note: if the 16+ssss+cccc > 31, the encoding is reserved.
      // As the space is currently unused, we don't try to check.
      // FP register values aren't tracked (see file header), so only
      // SP is adjusted: 8 bytes per D register.
      vSP() += 8 * n;
      checkStackBase();
      continue;
    }

    // 11010nnn: Pop VFP regs D[8]-D[8+nnn] (as FLDMFDD)
    if ((insn & M_POPFDD8) == I_POPFDD8) {
      uint8_t n = (insn & 0x07) + 1;
      vSP() += 8 * n;
      checkStackBase();
      continue;
    }

    // 10110010 uleb128: vsp = vsp + 0x204 + (uleb128 << 2)
    if (insn == I_ADDSPBIG) {
      // Decode the ULEB128 operand, 7 bits per byte, low bits first.
      uint32_t acc = 0;
      uint8_t shift = 0;
      uint8_t byte;
      do {
        if (shift >= 32) return false;
        byte = next();
        acc |= (byte & 0x7f) << shift;
        shift += 7;
      } while (byte & 0x80);
      uint32_t offset = 0x204 + (acc << 2);
      // The calculations above could have overflowed.
      // But the one we care about is this:
      if (vSP() + offset < vSP()) mFailed = true;
      vSP() += offset;
      // ...so that this is the only other check needed:
      checkStackBase();
      continue;
    }

    // 1000iiii iiiiiiii (i not all 0): Pop under masks {r15-r12}, {r11-r4}
    if ((insn & M_POPMASK) == I_POPMASK) {
      popRange(4, 15, ((insn & 0x0f) << 8) | next());
      continue;
    }

    // 10110001 0000iiii (i not all 0): Pop under mask {r3-r0}
    if (insn == I_POPLO) {
      popRange(0, 3, next() & 0x0f);
      continue;
    }

    // 10110011 sssscccc: Pop VFP regs D[ssss]-D[ssss+cccc] (as FLDMFDX)
    if (insn == I_POPFDX) {
      uint8_t n = (next() & 0x0f) + 1;
      // FLDMFDX format carries an extra status word, hence the +4.
      vSP() += 8 * n + 4;
      checkStackBase();
      continue;
    }

    // 10111nnn: Pop VFP regs D[8]-D[8+nnn] (as FLDMFDX)
    if ((insn & M_POPFDX8) == I_POPFDX8) {
      uint8_t n = (insn & 0x07) + 1;
      vSP() += 8 * n + 4;
      checkStackBase();
      continue;
    }

    // unhandled instruction
# ifdef DEBUG_EHABI_UNWIND
    LOG("Unhandled EHABI instruction 0x%02x", insn);
# endif
    mFailed = true;
  }
  // Only reached when mFailed was set: the unwind did not complete.
  return false;
}
|
||||
|
||||
// Order tables by the start of their executable PC range (used by the
// sort in the EHAddrSpace constructor).
bool operator<(const EHTable& lhs, const EHTable& rhs) {
  return lhs.startPC() < rhs.startPC();
}

// Async signal unsafe.
EHAddrSpace::EHAddrSpace(const std::vector<EHTable>& aTables)
    : mTables(aTables) {
  std::sort(mTables.begin(), mTables.end());
  DebugOnly<uint32_t> lastEnd = 0;
  // Build the parallel array of start addresses used for binary search,
  // asserting (debug only) that the tables' PC ranges don't overlap.
  for (std::vector<EHTable>::iterator i = mTables.begin(); i != mTables.end();
       ++i) {
    MOZ_ASSERT(i->startPC() >= lastEnd);
    mStarts.push_back(i->startPC());
    lastEnd = i->endPC();
  }
}

// Binary-search for the table whose [startPC, endPC) range covers aPC;
// returns null if aPC falls before the first table or past the
// candidate table's end.
const EHTable* EHAddrSpace::lookup(uint32_t aPC) const {
  // Index of the last table whose start is <= aPC (or -1 if none).
  ptrdiff_t i = (std::upper_bound(mStarts.begin(), mStarts.end(), aPC) -
                 mStarts.begin()) -
                1;

  if (i < 0 || aPC >= mTables[i].endPC()) return 0;
  return &mTables[i];
}
|
||||
|
||||
// Binary-search the exidx index for the last entry whose startPC is
// <= aPC; returns null if aPC is outside this table's PC range or
// before the first entry.  (Entries have no explicit end address: each
// one covers up to the next entry's start.)
const EHEntry* EHTable::lookup(uint32_t aPC) const {
  MOZ_ASSERT(aPC >= mStartPC);
  if (aPC >= mEndPC) return nullptr;

  EntryIterator begin = entriesBegin();
  EntryIterator end = entriesEnd();
  MOZ_ASSERT(begin < end);
  if (aPC < reinterpret_cast<uint32_t>(entryGet(begin)->startPC.compute()))
    return nullptr;

  // Invariant: entryGet(begin)->startPC <= aPC < entryGet(end)->startPC.
  while (end - begin > 1) {
# ifdef EHABI_UNWIND_MORE_ASSERTS
    if (entryGet(end - 1)->startPC.compute() <
        entryGet(begin)->startPC.compute()) {
      MOZ_CRASH("unsorted exidx");
    }
# endif
    EntryIterator mid = begin + (end - begin) / 2;
    if (aPC < reinterpret_cast<uint32_t>(entryGet(mid)->startPC.compute()))
      end = mid;
    else
      begin = mid;
  }
  return entryGet(begin);
}

// Expected ELF data encoding for images loaded into this process.
# if MOZ_LITTLE_ENDIAN
static const unsigned char hostEndian = ELFDATA2LSB;
# elif MOZ_BIG_ENDIAN
static const unsigned char hostEndian = ELFDATA2MSB;
# else
#  error "No endian?"
# endif
|
||||
|
||||
// Async signal unsafe: std::vector::reserve, std::string copy ctor.
//
// Parse the in-memory ELF image at aELF: validate the file header,
// scan the program headers for the PT_ARM_EXIDX segment and the
// executable PT_LOAD segments (which determine [mStartPC, mEndPC)),
// and set up iteration over the exidx entries.  Any validation failure
// returns early, leaving the entry range empty so isValid() is false.
EHTable::EHTable(const void* aELF, size_t aSize, const std::string& aName)
    : mStartPC(~0),  // largest uint32_t
      mEndPC(0),
# ifndef HAVE_UNSORTED_EXIDX
      mEntriesBegin(nullptr),
      mEntriesEnd(nullptr),
# endif
      mName(aName) {
  const uint32_t fileHeaderAddr = reinterpret_cast<uint32_t>(aELF);

  if (aSize < sizeof(Elf32_Ehdr)) return;

  // Reject anything that isn't a 32-bit ARM ELF image in host byte order.
  const Elf32_Ehdr& file = *(reinterpret_cast<Elf32_Ehdr*>(fileHeaderAddr));
  if (memcmp(&file.e_ident[EI_MAG0], ELFMAG, SELFMAG) != 0 ||
      file.e_ident[EI_CLASS] != ELFCLASS32 ||
      file.e_ident[EI_DATA] != hostEndian ||
      file.e_ident[EI_VERSION] != EV_CURRENT || file.e_machine != EM_ARM ||
      file.e_version != EV_CURRENT)
    // e_flags?
    return;

  MOZ_ASSERT(file.e_phoff + file.e_phnum * file.e_phentsize <= aSize);
  const Elf32_Phdr *exidxHdr = 0, *zeroHdr = 0;
  for (unsigned i = 0; i < file.e_phnum; ++i) {
    const Elf32_Phdr& phdr = *(reinterpret_cast<Elf32_Phdr*>(
        fileHeaderAddr + file.e_phoff + i * file.e_phentsize));
    if (phdr.p_type == PT_ARM_EXIDX) {
      exidxHdr = &phdr;
    } else if (phdr.p_type == PT_LOAD) {
      // The segment mapped at file offset 0 anchors the load bias below.
      if (phdr.p_offset == 0) {
        zeroHdr = &phdr;
      }
      if (phdr.p_flags & PF_X) {
        mStartPC = std::min(mStartPC, phdr.p_vaddr);
        mEndPC = std::max(mEndPC, phdr.p_vaddr + phdr.p_memsz);
      }
    }
  }
  if (!exidxHdr) return;
  if (!zeroHdr) return;
  // Load bias: actual address of the file header minus its link-time vaddr.
  mBaseAddress = fileHeaderAddr - zeroHdr->p_vaddr;
  mStartPC += mBaseAddress;
  mEndPC += mBaseAddress;

  // Create a sorted index of the index to work around linker bugs.
  const EHEntry* startTable =
      reinterpret_cast<const EHEntry*>(mBaseAddress + exidxHdr->p_vaddr);
  const EHEntry* endTable = reinterpret_cast<const EHEntry*>(
      mBaseAddress + exidxHdr->p_vaddr + exidxHdr->p_memsz);
# ifdef HAVE_UNSORTED_EXIDX
  mEntries.reserve(endTable - startTable);
  for (const EHEntry* i = startTable; i < endTable; ++i) mEntries.push_back(i);
  std::sort(mEntries.begin(), mEntries.end());
# else
  mEntriesBegin = startTable;
  mEntriesEnd = endTable;
# endif
}
|
||||
|
||||
// The process-wide snapshot; null until the first Update() completes.
Atomic<const EHAddrSpace*> EHAddrSpace::sCurrent(nullptr);

// Async signal safe; can fail if Update() hasn't returned yet.
const EHAddrSpace* EHAddrSpace::Get() { return sCurrent; }

// Collect unwinding information from loaded objects.  Calls after the
// first have no effect.  Async signal unsafe.
void EHAddrSpace::Update() {
  const EHAddrSpace* space = sCurrent;
  if (space) return;

  SharedLibraryInfo info = SharedLibraryInfo::GetInfoForSelf();
  std::vector<EHTable> tables;

  for (size_t i = 0; i < info.GetSize(); ++i) {
    const SharedLibrary& lib = info.GetEntry(i);
    // FIXME: This isn't correct if the start address isn't p_offset 0, because
    // the start address will not point at the file header.  But this is worked
    // around by magic number checks in the EHTable constructor.
    EHTable tab(reinterpret_cast<const void*>(lib.GetStart()),
                lib.GetEnd() - lib.GetStart(), lib.GetNativeDebugPath());
    if (tab.isValid()) tables.push_back(tab);
  }
  space = new EHAddrSpace(tables);

  // If another thread published a snapshot first, discard ours and keep
  // the winner's; the compare-exchange makes the publication race-free.
  if (!sCurrent.compareExchange(nullptr, space)) {
    delete space;
    space = sCurrent;
  }
}
|
||||
|
||||
// Capture the 16 ARM core registers from a signal-handler machine
// context, in register-number order (r0-r10, fp=r11, ip=r12, sp=r13,
// lr=r14, pc=r15).
EHState::EHState(const mcontext_t& context) {
# ifdef linux
  mRegs[0] = context.arm_r0;
  mRegs[1] = context.arm_r1;
  mRegs[2] = context.arm_r2;
  mRegs[3] = context.arm_r3;
  mRegs[4] = context.arm_r4;
  mRegs[5] = context.arm_r5;
  mRegs[6] = context.arm_r6;
  mRegs[7] = context.arm_r7;
  mRegs[8] = context.arm_r8;
  mRegs[9] = context.arm_r9;
  mRegs[10] = context.arm_r10;
  mRegs[11] = context.arm_fp;
  mRegs[12] = context.arm_ip;
  mRegs[13] = context.arm_sp;
  mRegs[14] = context.arm_lr;
  mRegs[15] = context.arm_pc;
# else
#  error "Unhandled OS for ARM EHABI unwinding"
# endif
}
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,30 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
/*
|
||||
* This is an implementation of stack unwinding according to a subset
|
||||
* of the ARM Exception Handling ABI; see the comment at the top of
|
||||
* the .cpp file for details.
|
||||
*/
|
||||
|
||||
#ifndef mozilla_EHABIStackWalk_h__
#define mozilla_EHABIStackWalk_h__

#include <stddef.h>
#include <ucontext.h>

namespace mozilla {
namespace baseprofiler {

// One-time initialization; collects unwind tables for the loaded
// objects.  Must be called before EHABIStackWalk.
void EHABIStackWalkInit();

// Walk the stack described by aContext, writing up to aNumFrames SP/PC
// pairs into aSPs/aPCs; returns the number of frames recorded.
size_t EHABIStackWalk(const mcontext_t& aContext, void* stackBase, void** aSPs,
                      void** aPCs, size_t aNumFrames);

}  // namespace baseprofiler
}  // namespace mozilla

#endif
|
|
@ -1,49 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include "PageInformation.h"
|
||||
|
||||
# include "BaseProfileJSONWriter.h"
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
// See PageInformation.h for the class contract; the new object starts
// with a refcount of zero (callers AddRef it).
PageInformation::PageInformation(const std::string& aDocShellId,
                                 uint32_t aDocShellHistoryId,
                                 const std::string& aUrl, bool aIsSubFrame)
    : mDocShellId(aDocShellId),
      mDocShellHistoryId(aDocShellHistoryId),
      mUrl(aUrl),
      mIsSubFrame(aIsSubFrame),
      mRefCnt(0) {}

// Identity comparison for registered pages.
// NOTE(review): mUrl is not part of the comparison — presumably the
// (DocShellId, DocShellHistoryId, IsSubFrame) triple is the identity;
// confirm against callers.
bool PageInformation::Equals(PageInformation* aOtherPageInfo) {
  return DocShellHistoryId() == aOtherPageInfo->DocShellHistoryId() &&
         DocShellId() == aOtherPageInfo->DocShellId() &&
         IsSubFrame() == aOtherPageInfo->IsSubFrame();
}

// Emit this page as one JSON object element ({docshellId, historyId,
// url, isSubFrame}) into the writer's currently open array.
void PageInformation::StreamJSON(SpliceableJSONWriter& aWriter) {
  aWriter.StartObjectElement();
  aWriter.StringProperty("docshellId", DocShellId().c_str());
  aWriter.DoubleProperty("historyId", DocShellHistoryId());
  aWriter.StringProperty("url", Url().c_str());
  aWriter.BoolProperty("isSubFrame", IsSubFrame());
  aWriter.EndObject();
}

// Shallow size only: the std::string members' heap buffers are not
// included in the measurement.
size_t PageInformation::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
  return aMallocSizeOf(this);
}
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,79 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef PageInformation_h
|
||||
#define PageInformation_h
|
||||
|
||||
#include "mozilla/Atomics.h"
|
||||
#include "mozilla/Maybe.h"
|
||||
#include "mozilla/MemoryReporting.h"
|
||||
|
||||
#include <string>
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
class SpliceableJSONWriter;
|
||||
|
||||
// This class contains information that's relevant to a single page only
|
||||
// while the page information is important and registered with the profiler,
|
||||
// but regardless of whether the profiler is running. All accesses to it are
|
||||
// protected by the profiler state lock.
|
||||
// When the page gets unregistered, we keep the profiler buffer position
|
||||
// to determine if we are still using this page. If not, we unregister
|
||||
// it in the next page registration.
|
||||
class PageInformation final {
|
||||
public:
|
||||
PageInformation(const std::string& aDocShellId, uint32_t aDocShellHistoryId,
|
||||
const std::string& aUrl, bool aIsSubFrame);
|
||||
|
||||
// Using hand-rolled ref-counting, because RefCounted.h macros don't produce
|
||||
// the same code between mozglue and libxul, see bug 1536656.
|
||||
MFBT_API void AddRef() const { ++mRefCnt; }
|
||||
MFBT_API void Release() const {
|
||||
MOZ_ASSERT(int32_t(mRefCnt) > 0);
|
||||
if (--mRefCnt) {
|
||||
delete this;
|
||||
}
|
||||
}
|
||||
|
||||
size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
|
||||
bool Equals(PageInformation* aOtherDocShellInfo);
|
||||
void StreamJSON(SpliceableJSONWriter& aWriter);
|
||||
|
||||
uint32_t DocShellHistoryId() { return mDocShellHistoryId; }
|
||||
const std::string& DocShellId() { return mDocShellId; }
|
||||
const std::string& Url() { return mUrl; }
|
||||
bool IsSubFrame() { return mIsSubFrame; }
|
||||
|
||||
Maybe<uint64_t> BufferPositionWhenUnregistered() {
|
||||
return mBufferPositionWhenUnregistered;
|
||||
}
|
||||
|
||||
void NotifyUnregistered(uint64_t aBufferPosition) {
|
||||
mBufferPositionWhenUnregistered = Some(aBufferPosition);
|
||||
}
|
||||
|
||||
private:
|
||||
const std::string mDocShellId;
|
||||
const uint32_t mDocShellHistoryId;
|
||||
const std::string mUrl;
|
||||
const bool mIsSubFrame;
|
||||
|
||||
// Holds the buffer position when DocShell is unregistered.
|
||||
// It's used to determine if we still use this DocShell in the profiler or
|
||||
// not.
|
||||
Maybe<uint64_t> mBufferPositionWhenUnregistered;
|
||||
|
||||
mutable Atomic<int32_t, MemoryOrdering::ReleaseAcquire,
|
||||
recordreplay::Behavior::DontPreserve>
|
||||
mRefCnt;
|
||||
};
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // PageInformation_h
|
|
@ -1,114 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef PLATFORM_MACROS_H
#define PLATFORM_MACROS_H

// Define platform selection macros in a consistent way. Don't add anything
// else to this file, so it can remain freestanding. The primary factorisation
// is on (ARCH,OS) pairs ("PLATforms") but ARCH_ and OS_ macros are defined
// too, since they are sometimes convenient.
//
// Note: "GP" is short for "Gecko Profiler".

#undef GP_PLAT_x86_android
#undef GP_PLAT_amd64_android
#undef GP_PLAT_arm_android
#undef GP_PLAT_arm64_android
#undef GP_PLAT_x86_linux
#undef GP_PLAT_amd64_linux
#undef GP_PLAT_arm_linux
// GP_PLAT_arm64_linux was defined below but missing from this #undef
// list, unlike every other GP_PLAT_* macro; added for consistency.
#undef GP_PLAT_arm64_linux
#undef GP_PLAT_mips64_linux
#undef GP_PLAT_amd64_darwin
#undef GP_PLAT_x86_windows
#undef GP_PLAT_amd64_windows
#undef GP_PLAT_arm64_windows

#undef GP_ARCH_x86
#undef GP_ARCH_amd64
#undef GP_ARCH_arm
#undef GP_ARCH_arm64
#undef GP_ARCH_mips64

#undef GP_OS_android
#undef GP_OS_linux
#undef GP_OS_darwin
#undef GP_OS_windows

// We test __ANDROID__ before __linux__ because __linux__ is defined on both
// Android and Linux, whereas GP_OS_android is not defined on vanilla Linux.

#if defined(__ANDROID__) && defined(__i386__)
# define GP_PLAT_x86_android 1
# define GP_ARCH_x86 1
# define GP_OS_android 1

#elif defined(__ANDROID__) && defined(__x86_64__)
# define GP_PLAT_amd64_android 1
# define GP_ARCH_amd64 1
# define GP_OS_android 1

#elif defined(__ANDROID__) && defined(__arm__)
# define GP_PLAT_arm_android 1
# define GP_ARCH_arm 1
# define GP_OS_android 1

#elif defined(__ANDROID__) && defined(__aarch64__)
# define GP_PLAT_arm64_android 1
# define GP_ARCH_arm64 1
# define GP_OS_android 1

#elif defined(__linux__) && defined(__i386__)
# define GP_PLAT_x86_linux 1
# define GP_ARCH_x86 1
# define GP_OS_linux 1

#elif defined(__linux__) && defined(__x86_64__)
# define GP_PLAT_amd64_linux 1
# define GP_ARCH_amd64 1
# define GP_OS_linux 1

#elif defined(__linux__) && defined(__arm__)
# define GP_PLAT_arm_linux 1
# define GP_ARCH_arm 1
# define GP_OS_linux 1

#elif defined(__linux__) && defined(__aarch64__)
# define GP_PLAT_arm64_linux 1
# define GP_ARCH_arm64 1
# define GP_OS_linux 1

#elif defined(__linux__) && defined(__mips64)
# define GP_PLAT_mips64_linux 1
# define GP_ARCH_mips64 1
# define GP_OS_linux 1

#elif defined(__APPLE__) && defined(__x86_64__)
# define GP_PLAT_amd64_darwin 1
# define GP_ARCH_amd64 1
# define GP_OS_darwin 1

#elif (defined(_MSC_VER) || defined(__MINGW32__)) && \
    (defined(_M_IX86) || defined(__i386__))
# define GP_PLAT_x86_windows 1
# define GP_ARCH_x86 1
# define GP_OS_windows 1

#elif (defined(_MSC_VER) || defined(__MINGW32__)) && \
    (defined(_M_X64) || defined(__x86_64__))
# define GP_PLAT_amd64_windows 1
# define GP_ARCH_amd64 1
# define GP_OS_windows 1

#elif defined(_MSC_VER) && defined(_M_ARM64)
# define GP_PLAT_arm64_windows 1
# define GP_ARCH_arm64 1
# define GP_OS_windows 1

#else
# error "Unsupported platform"
#endif

#endif /* ndef PLATFORM_MACROS_H */
|
|
@ -1,155 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include "ProfileBuffer.h"
|
||||
|
||||
# include "ProfilerMarker.h"
|
||||
|
||||
# include "mozilla/MathAlgorithms.h"
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
ProfileBuffer::ProfileBuffer(uint32_t aCapacity)
|
||||
: mEntryIndexMask(0), mRangeStart(0), mRangeEnd(0), mCapacity(0) {
|
||||
// Round aCapacity up to the nearest power of two, so that we can index
|
||||
// mEntries with a simple mask and don't need to do a slow modulo operation.
|
||||
const uint32_t UINT32_MAX_POWER_OF_TWO = 1 << 31;
|
||||
MOZ_RELEASE_ASSERT(aCapacity <= UINT32_MAX_POWER_OF_TWO,
|
||||
"aCapacity is larger than what we support");
|
||||
mCapacity = RoundUpPow2(aCapacity);
|
||||
mEntryIndexMask = mCapacity - 1;
|
||||
mEntries = MakeUnique<ProfileBufferEntry[]>(mCapacity);
|
||||
}
|
||||
|
||||
ProfileBuffer::~ProfileBuffer() {
|
||||
while (mStoredMarkers.peek()) {
|
||||
delete mStoredMarkers.popHead();
|
||||
}
|
||||
}
|
||||
|
||||
// Called from signal, call only reentrant functions
|
||||
void ProfileBuffer::AddEntry(const ProfileBufferEntry& aEntry) {
|
||||
GetEntry(mRangeEnd++) = aEntry;
|
||||
|
||||
// The distance between mRangeStart and mRangeEnd must never exceed
|
||||
// mCapacity, so advance mRangeStart if necessary.
|
||||
if (mRangeEnd - mRangeStart > mCapacity) {
|
||||
mRangeStart++;
|
||||
}
|
||||
}
|
||||
|
||||
uint64_t ProfileBuffer::AddThreadIdEntry(int aThreadId) {
|
||||
uint64_t pos = mRangeEnd;
|
||||
AddEntry(ProfileBufferEntry::ThreadId(aThreadId));
|
||||
return pos;
|
||||
}
|
||||
|
||||
void ProfileBuffer::AddStoredMarker(ProfilerMarker* aStoredMarker) {
|
||||
aStoredMarker->SetPositionInBuffer(mRangeEnd);
|
||||
mStoredMarkers.insert(aStoredMarker);
|
||||
}
|
||||
|
||||
void ProfileBuffer::CollectCodeLocation(
|
||||
const char* aLabel, const char* aStr, uint32_t aFrameFlags,
|
||||
const Maybe<uint32_t>& aLineNumber, const Maybe<uint32_t>& aColumnNumber,
|
||||
const Maybe<ProfilingCategoryPair>& aCategoryPair) {
|
||||
AddEntry(ProfileBufferEntry::Label(aLabel));
|
||||
AddEntry(ProfileBufferEntry::FrameFlags(uint64_t(aFrameFlags)));
|
||||
|
||||
if (aStr) {
|
||||
// Store the string using one or more DynamicStringFragment entries.
|
||||
size_t strLen = strlen(aStr) + 1; // +1 for the null terminator
|
||||
for (size_t j = 0; j < strLen;) {
|
||||
// Store up to kNumChars characters in the entry.
|
||||
char chars[ProfileBufferEntry::kNumChars];
|
||||
size_t len = ProfileBufferEntry::kNumChars;
|
||||
if (j + len >= strLen) {
|
||||
len = strLen - j;
|
||||
}
|
||||
memcpy(chars, &aStr[j], len);
|
||||
j += ProfileBufferEntry::kNumChars;
|
||||
|
||||
AddEntry(ProfileBufferEntry::DynamicStringFragment(chars));
|
||||
}
|
||||
}
|
||||
|
||||
if (aLineNumber) {
|
||||
AddEntry(ProfileBufferEntry::LineNumber(*aLineNumber));
|
||||
}
|
||||
|
||||
if (aColumnNumber) {
|
||||
AddEntry(ProfileBufferEntry::ColumnNumber(*aColumnNumber));
|
||||
}
|
||||
|
||||
if (aCategoryPair.isSome()) {
|
||||
AddEntry(ProfileBufferEntry::CategoryPair(int(*aCategoryPair)));
|
||||
}
|
||||
}
|
||||
|
||||
void ProfileBuffer::DeleteExpiredStoredMarkers() {
|
||||
// Delete markers of samples that have been overwritten due to circular
|
||||
// buffer wraparound.
|
||||
while (mStoredMarkers.peek() &&
|
||||
mStoredMarkers.peek()->HasExpired(mRangeStart)) {
|
||||
delete mStoredMarkers.popHead();
|
||||
}
|
||||
}
|
||||
|
||||
size_t ProfileBuffer::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
|
||||
size_t n = aMallocSizeOf(this);
|
||||
n += aMallocSizeOf(mEntries.get());
|
||||
|
||||
// Measurement of the following members may be added later if DMD finds it
|
||||
// is worthwhile:
|
||||
// - memory pointed to by the elements within mEntries
|
||||
// - mStoredMarkers
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
/* ProfileBufferCollector */
|
||||
|
||||
void ProfileBufferCollector::CollectNativeLeafAddr(void* aAddr) {
|
||||
mBuf.AddEntry(ProfileBufferEntry::NativeLeafAddr(aAddr));
|
||||
}
|
||||
|
||||
void ProfileBufferCollector::CollectProfilingStackFrame(
|
||||
const ProfilingStackFrame& aFrame) {
|
||||
// WARNING: this function runs within the profiler's "critical section".
|
||||
|
||||
MOZ_ASSERT(aFrame.isLabelFrame() ||
|
||||
(aFrame.isJsFrame() && !aFrame.isOSRFrame()));
|
||||
|
||||
const char* label = aFrame.label();
|
||||
const char* dynamicString = aFrame.dynamicString();
|
||||
bool isChromeJSEntry = false;
|
||||
Maybe<uint32_t> line;
|
||||
Maybe<uint32_t> column;
|
||||
|
||||
MOZ_ASSERT(aFrame.isLabelFrame());
|
||||
|
||||
if (dynamicString) {
|
||||
// Adjust the dynamic string as necessary.
|
||||
if (ProfilerFeature::HasPrivacy(mFeatures) && !isChromeJSEntry) {
|
||||
dynamicString = "(private)";
|
||||
} else if (strlen(dynamicString) >= ProfileBuffer::kMaxFrameKeyLength) {
|
||||
dynamicString = "(too long)";
|
||||
}
|
||||
}
|
||||
|
||||
mBuf.CollectCodeLocation(label, dynamicString, aFrame.flags(), line, column,
|
||||
Some(aFrame.categoryPair()));
|
||||
}
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,167 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef MOZ_PROFILE_BUFFER_H
|
||||
#define MOZ_PROFILE_BUFFER_H
|
||||
|
||||
#include "ProfileBufferEntry.h"
|
||||
#include "ProfilerMarker.h"
|
||||
|
||||
#include "mozilla/Maybe.h"
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
// A fixed-capacity circular buffer.
|
||||
// This class is used as a queue of entries which, after construction, never
|
||||
// allocates. This makes it safe to use in the profiler's "critical section".
|
||||
// Entries are appended at the end. Once the queue capacity has been reached,
|
||||
// adding a new entry will evict an old entry from the start of the queue.
|
||||
// Positions in the queue are represented as 64-bit unsigned integers which
|
||||
// only increase and never wrap around.
|
||||
// mRangeStart and mRangeEnd describe the range in that uint64_t space which is
|
||||
// covered by the queue contents.
|
||||
// Internally, the buffer uses a fixed-size storage and applies a modulo
|
||||
// operation when accessing entries in that storage buffer. "Evicting" an entry
|
||||
// really just means that an existing entry in the storage buffer gets
|
||||
// overwritten and that mRangeStart gets incremented.
|
||||
class ProfileBuffer final {
|
||||
public:
|
||||
// ProfileBuffer constructor
|
||||
// @param aCapacity The minimum capacity of the buffer. The actual buffer
|
||||
// capacity will be rounded up to the next power of two.
|
||||
explicit ProfileBuffer(uint32_t aCapacity);
|
||||
|
||||
~ProfileBuffer();
|
||||
|
||||
// Add |aEntry| to the buffer, ignoring what kind of entry it is.
|
||||
void AddEntry(const ProfileBufferEntry& aEntry);
|
||||
|
||||
// Add to the buffer a sample start (ThreadId) entry for aThreadId.
|
||||
// Returns the position of the entry.
|
||||
uint64_t AddThreadIdEntry(int aThreadId);
|
||||
|
||||
void CollectCodeLocation(const char* aLabel, const char* aStr,
|
||||
uint32_t aFrameFlags,
|
||||
const Maybe<uint32_t>& aLineNumber,
|
||||
const Maybe<uint32_t>& aColumnNumber,
|
||||
const Maybe<ProfilingCategoryPair>& aCategoryPair);
|
||||
|
||||
// Maximum size of a frameKey string that we'll handle.
|
||||
static const size_t kMaxFrameKeyLength = 512;
|
||||
|
||||
// Stream JSON for samples in the buffer to aWriter, using the supplied
|
||||
// UniqueStacks object.
|
||||
// Only streams samples for the given thread ID and which were taken at or
|
||||
// after aSinceTime.
|
||||
void StreamSamplesToJSON(SpliceableJSONWriter& aWriter, int aThreadId,
|
||||
double aSinceTime,
|
||||
UniqueStacks& aUniqueStacks) const;
|
||||
|
||||
void StreamMarkersToJSON(SpliceableJSONWriter& aWriter, int aThreadId,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
double aSinceTime,
|
||||
UniqueStacks& aUniqueStacks) const;
|
||||
void StreamPausedRangesToJSON(SpliceableJSONWriter& aWriter,
|
||||
double aSinceTime) const;
|
||||
void StreamProfilerOverheadToJSON(SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
double aSinceTime) const;
|
||||
void StreamCountersToJSON(SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
double aSinceTime) const;
|
||||
void StreamMemoryToJSON(SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
double aSinceTime) const;
|
||||
|
||||
// Find (via |aLastSample|) the most recent sample for the thread denoted by
|
||||
// |aThreadId| and clone it, patching in the current time as appropriate.
|
||||
// Mutate |aLastSample| to point to the newly inserted sample.
|
||||
// Returns whether duplication was successful.
|
||||
bool DuplicateLastSample(int aThreadId, const TimeStamp& aProcessStartTime,
|
||||
Maybe<uint64_t>& aLastSample);
|
||||
|
||||
void DiscardSamplesBeforeTime(double aTime);
|
||||
|
||||
void AddStoredMarker(ProfilerMarker* aStoredMarker);
|
||||
|
||||
// The following method is not signal safe!
|
||||
void DeleteExpiredStoredMarkers();
|
||||
|
||||
// Access an entry in the buffer.
|
||||
ProfileBufferEntry& GetEntry(uint64_t aPosition) const {
|
||||
return mEntries[aPosition & mEntryIndexMask];
|
||||
}
|
||||
|
||||
size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
|
||||
|
||||
private:
|
||||
// The storage that backs our buffer. Holds mCapacity entries.
|
||||
// All accesses to entries in mEntries need to go through GetEntry(), which
|
||||
// translates the given buffer position from the near-infinite uint64_t space
|
||||
// into the entry storage space.
|
||||
UniquePtr<ProfileBufferEntry[]> mEntries;
|
||||
|
||||
// A mask such that pos & mEntryIndexMask == pos % mCapacity.
|
||||
uint32_t mEntryIndexMask;
|
||||
|
||||
public:
|
||||
// mRangeStart and mRangeEnd are uint64_t values that strictly advance and
|
||||
// never wrap around. mRangeEnd is always greater than or equal to
|
||||
// mRangeStart, but never gets more than mCapacity steps ahead of
|
||||
// mRangeStart, because we can only store a fixed number of entries in the
|
||||
// buffer. Once the entire buffer is in use, adding a new entry will evict an
|
||||
// entry from the front of the buffer (and increase mRangeStart).
|
||||
// In other words, the following conditions hold true at all times:
|
||||
// (1) mRangeStart <= mRangeEnd
|
||||
// (2) mRangeEnd - mRangeStart <= mCapacity
|
||||
//
|
||||
// If there are no live entries, then mRangeStart == mRangeEnd.
|
||||
// Otherwise, mRangeStart is the first live entry and mRangeEnd is one past
|
||||
// the last live entry, and also the position at which the next entry will be
|
||||
// added.
|
||||
// (mRangeEnd - mRangeStart) always gives the number of live entries.
|
||||
uint64_t mRangeStart;
|
||||
uint64_t mRangeEnd;
|
||||
|
||||
// The number of entries in our buffer. Always a power of two.
|
||||
uint32_t mCapacity;
|
||||
|
||||
// Markers that marker entries in the buffer might refer to.
|
||||
ProfilerMarkerLinkedList mStoredMarkers;
|
||||
};
|
||||
|
||||
/**
|
||||
* Helper type used to implement ProfilerStackCollector. This type is used as
|
||||
* the collector for MergeStacks by ProfileBuffer. It holds a reference to the
|
||||
* buffer, as well as additional feature flags which are needed to control the
|
||||
* data collection strategy
|
||||
*/
|
||||
class ProfileBufferCollector final : public ProfilerStackCollector {
|
||||
public:
|
||||
ProfileBufferCollector(ProfileBuffer& aBuf, uint32_t aFeatures,
|
||||
uint64_t aSamplePos)
|
||||
: mBuf(aBuf), mSamplePositionInBuffer(aSamplePos), mFeatures(aFeatures) {}
|
||||
|
||||
Maybe<uint64_t> SamplePositionInBuffer() override {
|
||||
return Some(mSamplePositionInBuffer);
|
||||
}
|
||||
|
||||
Maybe<uint64_t> BufferRangeStart() override { return Some(mBuf.mRangeStart); }
|
||||
|
||||
virtual void CollectNativeLeafAddr(void* aAddr) override;
|
||||
virtual void CollectProfilingStackFrame(
|
||||
const ProfilingStackFrame& aFrame) override;
|
||||
|
||||
private:
|
||||
ProfileBuffer& mBuf;
|
||||
uint64_t mSamplePositionInBuffer;
|
||||
uint32_t mFeatures;
|
||||
};
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -1,432 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef ProfileBufferEntry_h
|
||||
#define ProfileBufferEntry_h
|
||||
|
||||
#include "BaseProfileJSONWriter.h"
|
||||
|
||||
#include "gtest/MozGtestFriend.h"
|
||||
#include "BaseProfilingCategory.h"
|
||||
#include "mozilla/HashFunctions.h"
|
||||
#include "mozilla/HashTable.h"
|
||||
#include "mozilla/Maybe.h"
|
||||
#include "mozilla/UniquePtr.h"
|
||||
#include "mozilla/Variant.h"
|
||||
#include "mozilla/Vector.h"
|
||||
|
||||
#include <string>
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
class ProfilerMarker;
|
||||
|
||||
// NOTE! If you add entries, you need to verify if they need to be added to the
|
||||
// switch statement in DuplicateLastSample!
|
||||
#define FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(MACRO) \
|
||||
MACRO(CategoryPair, int) \
|
||||
MACRO(CollectionStart, double) \
|
||||
MACRO(CollectionEnd, double) \
|
||||
MACRO(Label, const char*) \
|
||||
MACRO(FrameFlags, uint64_t) \
|
||||
MACRO(DynamicStringFragment, char*) /* char[kNumChars], really */ \
|
||||
MACRO(JitReturnAddr, void*) \
|
||||
MACRO(LineNumber, int) \
|
||||
MACRO(ColumnNumber, int) \
|
||||
MACRO(NativeLeafAddr, void*) \
|
||||
MACRO(Marker, ProfilerMarker*) \
|
||||
MACRO(Pause, double) \
|
||||
MACRO(Responsiveness, double) \
|
||||
MACRO(Resume, double) \
|
||||
MACRO(ThreadId, int) \
|
||||
MACRO(Time, double) \
|
||||
MACRO(ResidentMemory, uint64_t) \
|
||||
MACRO(UnsharedMemory, uint64_t) \
|
||||
MACRO(CounterId, void*) \
|
||||
MACRO(CounterKey, uint64_t) \
|
||||
MACRO(Number, uint64_t) \
|
||||
MACRO(Count, int64_t) \
|
||||
MACRO(ProfilerOverheadTime, double) \
|
||||
MACRO(ProfilerOverheadDuration, double)
|
||||
|
||||
class ProfileBufferEntry {
|
||||
public:
|
||||
enum class Kind : uint8_t {
|
||||
INVALID = 0,
|
||||
#define KIND(k, t) k,
|
||||
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(KIND)
|
||||
#undef KIND
|
||||
LIMIT
|
||||
};
|
||||
|
||||
ProfileBufferEntry();
|
||||
|
||||
// This is equal to sizeof(double), which is the largest non-char variant in
|
||||
// |u|.
|
||||
static const size_t kNumChars = 8;
|
||||
|
||||
private:
|
||||
// aString must be a static string.
|
||||
ProfileBufferEntry(Kind aKind, const char* aString);
|
||||
ProfileBufferEntry(Kind aKind, char aChars[kNumChars]);
|
||||
ProfileBufferEntry(Kind aKind, void* aPtr);
|
||||
ProfileBufferEntry(Kind aKind, ProfilerMarker* aMarker);
|
||||
ProfileBufferEntry(Kind aKind, double aDouble);
|
||||
ProfileBufferEntry(Kind aKind, int64_t aInt64);
|
||||
ProfileBufferEntry(Kind aKind, uint64_t aUint64);
|
||||
ProfileBufferEntry(Kind aKind, int aInt);
|
||||
|
||||
public:
|
||||
#define CTOR(k, t) \
|
||||
static ProfileBufferEntry k(t aVal) { \
|
||||
return ProfileBufferEntry(Kind::k, aVal); \
|
||||
}
|
||||
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(CTOR)
|
||||
#undef CTOR
|
||||
|
||||
Kind GetKind() const { return mKind; }
|
||||
|
||||
#define IS_KIND(k, t) \
|
||||
bool Is##k() const { return mKind == Kind::k; }
|
||||
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(IS_KIND)
|
||||
#undef IS_KIND
|
||||
|
||||
private:
|
||||
FRIEND_TEST(ThreadProfile, InsertOneEntry);
|
||||
FRIEND_TEST(ThreadProfile, InsertOneEntryWithTinyBuffer);
|
||||
FRIEND_TEST(ThreadProfile, InsertEntriesNoWrap);
|
||||
FRIEND_TEST(ThreadProfile, InsertEntriesWrap);
|
||||
FRIEND_TEST(ThreadProfile, MemoryMeasure);
|
||||
friend class ProfileBuffer;
|
||||
|
||||
Kind mKind;
|
||||
uint8_t mStorage[kNumChars];
|
||||
|
||||
const char* GetString() const;
|
||||
void* GetPtr() const;
|
||||
ProfilerMarker* GetMarker() const;
|
||||
double GetDouble() const;
|
||||
int GetInt() const;
|
||||
int64_t GetInt64() const;
|
||||
uint64_t GetUint64() const;
|
||||
void CopyCharsInto(char (&aOutArray)[kNumChars]) const;
|
||||
};
|
||||
|
||||
// Packed layout: 1 byte for the tag + 8 bytes for the value.
|
||||
static_assert(sizeof(ProfileBufferEntry) == 9, "bad ProfileBufferEntry size");
|
||||
|
||||
class UniqueJSONStrings {
|
||||
public:
|
||||
UniqueJSONStrings();
|
||||
explicit UniqueJSONStrings(const UniqueJSONStrings& aOther);
|
||||
|
||||
void SpliceStringTableElements(SpliceableJSONWriter& aWriter) {
|
||||
aWriter.TakeAndSplice(mStringTableWriter.WriteFunc());
|
||||
}
|
||||
|
||||
void WriteProperty(JSONWriter& aWriter, const char* aName, const char* aStr) {
|
||||
aWriter.IntProperty(aName, GetOrAddIndex(aStr));
|
||||
}
|
||||
|
||||
void WriteElement(JSONWriter& aWriter, const char* aStr) {
|
||||
aWriter.IntElement(GetOrAddIndex(aStr));
|
||||
}
|
||||
|
||||
uint32_t GetOrAddIndex(const char* aStr);
|
||||
|
||||
private:
|
||||
SpliceableChunkedJSONWriter mStringTableWriter;
|
||||
HashMap<HashNumber, uint32_t> mStringHashToIndexMap;
|
||||
};
|
||||
|
||||
class UniqueStacks {
|
||||
public:
|
||||
struct FrameKey {
|
||||
explicit FrameKey(const char* aLocation)
|
||||
: mData(NormalFrameData{std::string(aLocation), false, Nothing(),
|
||||
Nothing()}) {}
|
||||
|
||||
FrameKey(std::string&& aLocation, bool aRelevantForJS,
|
||||
const Maybe<unsigned>& aLine, const Maybe<unsigned>& aColumn,
|
||||
const Maybe<ProfilingCategoryPair>& aCategoryPair)
|
||||
: mData(NormalFrameData{aLocation, aRelevantForJS, aLine, aColumn,
|
||||
aCategoryPair}) {}
|
||||
|
||||
FrameKey(const FrameKey& aToCopy) = default;
|
||||
|
||||
uint32_t Hash() const;
|
||||
bool operator==(const FrameKey& aOther) const {
|
||||
return mData == aOther.mData;
|
||||
}
|
||||
|
||||
struct NormalFrameData {
|
||||
bool operator==(const NormalFrameData& aOther) const;
|
||||
|
||||
std::string mLocation;
|
||||
bool mRelevantForJS;
|
||||
Maybe<unsigned> mLine;
|
||||
Maybe<unsigned> mColumn;
|
||||
Maybe<ProfilingCategoryPair> mCategoryPair;
|
||||
};
|
||||
Variant<NormalFrameData> mData;
|
||||
};
|
||||
|
||||
struct FrameKeyHasher {
|
||||
using Lookup = FrameKey;
|
||||
|
||||
static HashNumber hash(const FrameKey& aLookup) {
|
||||
HashNumber hash = 0;
|
||||
if (aLookup.mData.is<FrameKey::NormalFrameData>()) {
|
||||
const FrameKey::NormalFrameData& data =
|
||||
aLookup.mData.as<FrameKey::NormalFrameData>();
|
||||
if (!data.mLocation.empty()) {
|
||||
hash = AddToHash(hash, HashString(data.mLocation.c_str()));
|
||||
}
|
||||
hash = AddToHash(hash, data.mRelevantForJS);
|
||||
if (data.mLine.isSome()) {
|
||||
hash = AddToHash(hash, *data.mLine);
|
||||
}
|
||||
if (data.mColumn.isSome()) {
|
||||
hash = AddToHash(hash, *data.mColumn);
|
||||
}
|
||||
if (data.mCategoryPair.isSome()) {
|
||||
hash = AddToHash(hash, static_cast<uint32_t>(*data.mCategoryPair));
|
||||
}
|
||||
}
|
||||
return hash;
|
||||
}
|
||||
|
||||
static bool match(const FrameKey& aKey, const FrameKey& aLookup) {
|
||||
return aKey == aLookup;
|
||||
}
|
||||
|
||||
static void rekey(FrameKey& aKey, const FrameKey& aNewKey) {
|
||||
aKey = aNewKey;
|
||||
}
|
||||
};
|
||||
|
||||
struct StackKey {
|
||||
Maybe<uint32_t> mPrefixStackIndex;
|
||||
uint32_t mFrameIndex;
|
||||
|
||||
explicit StackKey(uint32_t aFrame)
|
||||
: mFrameIndex(aFrame), mHash(HashGeneric(aFrame)) {}
|
||||
|
||||
StackKey(const StackKey& aPrefix, uint32_t aPrefixStackIndex,
|
||||
uint32_t aFrame)
|
||||
: mPrefixStackIndex(Some(aPrefixStackIndex)),
|
||||
mFrameIndex(aFrame),
|
||||
mHash(AddToHash(aPrefix.mHash, aFrame)) {}
|
||||
|
||||
HashNumber Hash() const { return mHash; }
|
||||
|
||||
bool operator==(const StackKey& aOther) const {
|
||||
return mPrefixStackIndex == aOther.mPrefixStackIndex &&
|
||||
mFrameIndex == aOther.mFrameIndex;
|
||||
}
|
||||
|
||||
private:
|
||||
HashNumber mHash;
|
||||
};
|
||||
|
||||
struct StackKeyHasher {
|
||||
using Lookup = StackKey;
|
||||
|
||||
static HashNumber hash(const StackKey& aLookup) { return aLookup.Hash(); }
|
||||
|
||||
static bool match(const StackKey& aKey, const StackKey& aLookup) {
|
||||
return aKey == aLookup;
|
||||
}
|
||||
|
||||
static void rekey(StackKey& aKey, const StackKey& aNewKey) {
|
||||
aKey = aNewKey;
|
||||
}
|
||||
};
|
||||
|
||||
UniqueStacks();
|
||||
|
||||
// Return a StackKey for aFrame as the stack's root frame (no prefix).
|
||||
MOZ_MUST_USE StackKey BeginStack(const FrameKey& aFrame);
|
||||
|
||||
// Return a new StackKey that is obtained by appending aFrame to aStack.
|
||||
MOZ_MUST_USE StackKey AppendFrame(const StackKey& aStack,
|
||||
const FrameKey& aFrame);
|
||||
|
||||
MOZ_MUST_USE uint32_t GetOrAddFrameIndex(const FrameKey& aFrame);
|
||||
MOZ_MUST_USE uint32_t GetOrAddStackIndex(const StackKey& aStack);
|
||||
|
||||
void SpliceFrameTableElements(SpliceableJSONWriter& aWriter);
|
||||
void SpliceStackTableElements(SpliceableJSONWriter& aWriter);
|
||||
|
||||
private:
|
||||
void StreamNonJITFrame(const FrameKey& aFrame);
|
||||
void StreamStack(const StackKey& aStack);
|
||||
|
||||
public:
|
||||
UniquePtr<UniqueJSONStrings> mUniqueStrings;
|
||||
|
||||
private:
|
||||
SpliceableChunkedJSONWriter mFrameTableWriter;
|
||||
HashMap<FrameKey, uint32_t, FrameKeyHasher> mFrameToIndexMap;
|
||||
|
||||
SpliceableChunkedJSONWriter mStackTableWriter;
|
||||
HashMap<StackKey, uint32_t, StackKeyHasher> mStackToIndexMap;
|
||||
};
|
||||
|
||||
//
|
||||
// Thread profile JSON Format
|
||||
// --------------------------
|
||||
//
|
||||
// The profile contains much duplicate information. The output JSON of the
|
||||
// profile attempts to deduplicate strings, frames, and stack prefixes, to cut
|
||||
// down on size and to increase JSON streaming speed. Deduplicated values are
|
||||
// streamed as indices into their respective tables.
|
||||
//
|
||||
// Further, arrays of objects with the same set of properties (e.g., samples,
|
||||
// frames) are output as arrays according to a schema instead of an object
|
||||
// with property names. A property that is not present is represented in the
|
||||
// array as null or undefined.
|
||||
//
|
||||
// The format of the thread profile JSON is shown by the following example
|
||||
// with 1 sample and 1 marker:
|
||||
//
|
||||
// {
|
||||
// "name": "Foo",
|
||||
// "tid": 42,
|
||||
// "samples":
|
||||
// {
|
||||
// "schema":
|
||||
// {
|
||||
// "stack": 0, /* index into stackTable */
|
||||
// "time": 1, /* number */
|
||||
// "responsiveness": 2, /* number */
|
||||
// },
|
||||
// "data":
|
||||
// [
|
||||
// [ 1, 0.0, 0.0 ] /* { stack: 1, time: 0.0, responsiveness: 0.0 } */
|
||||
// ]
|
||||
// },
|
||||
//
|
||||
// "markers":
|
||||
// {
|
||||
// "schema":
|
||||
// {
|
||||
// "name": 0, /* index into stringTable */
|
||||
// "time": 1, /* number */
|
||||
// "data": 2 /* arbitrary JSON */
|
||||
// },
|
||||
// "data":
|
||||
// [
|
||||
// [ 3, 0.1 ] /* { name: 'example marker', time: 0.1 } */
|
||||
// ]
|
||||
// },
|
||||
//
|
||||
// "stackTable":
|
||||
// {
|
||||
// "schema":
|
||||
// {
|
||||
// "prefix": 0, /* index into stackTable */
|
||||
// "frame": 1 /* index into frameTable */
|
||||
// },
|
||||
// "data":
|
||||
// [
|
||||
// [ null, 0 ], /* (root) */
|
||||
// [ 0, 1 ] /* (root) > foo.js */
|
||||
// ]
|
||||
// },
|
||||
//
|
||||
// "frameTable":
|
||||
// {
|
||||
// "schema":
|
||||
// {
|
||||
// "location": 0, /* index into stringTable */
|
||||
// "implementation": 1, /* index into stringTable */
|
||||
// "optimizations": 2, /* arbitrary JSON */
|
||||
// "line": 3, /* number */
|
||||
// "column": 4, /* number */
|
||||
// "category": 5 /* number */
|
||||
// },
|
||||
// "data":
|
||||
// [
|
||||
// [ 0 ], /* { location: '(root)' } */
|
||||
// [ 1, 2 ] /* { location: 'foo.js',
|
||||
// implementation: 'baseline' } */
|
||||
// ]
|
||||
// },
|
||||
//
|
||||
// "stringTable":
|
||||
// [
|
||||
// "(root)",
|
||||
// "foo.js",
|
||||
// "baseline",
|
||||
// "example marker"
|
||||
// ]
|
||||
// }
|
||||
//
|
||||
// Process:
|
||||
// {
|
||||
// "name": "Bar",
|
||||
// "pid": 24,
|
||||
// "threads":
|
||||
// [
|
||||
// <0-N threads from above>
|
||||
// ],
|
||||
// "counters": /* includes the memory counter */
|
||||
// [
|
||||
// {
|
||||
// "name": "qwerty",
|
||||
// "category": "uiop",
|
||||
// "description": "this is qwerty uiop",
|
||||
// "sample_groups:
|
||||
// [
|
||||
// {
|
||||
// "id": 42, /* number (thread id, or object identifier (tab), etc) */
|
||||
// "samples:
|
||||
// {
|
||||
// "schema":
|
||||
// {
|
||||
// "time": 1, /* number */
|
||||
// "number": 2, /* number (of times the counter was touched) */
|
||||
// "count": 3 /* number (total for the counter) */
|
||||
// },
|
||||
// "data":
|
||||
// [
|
||||
// [ 0.1, 1824,
|
||||
// 454622 ] /* { time: 0.1, number: 1824, count: 454622 } */
|
||||
// ]
|
||||
// },
|
||||
// },
|
||||
// /* more sample-group objects with different id's */
|
||||
// ]
|
||||
// },
|
||||
// /* more counters */
|
||||
// ],
|
||||
// "memory":
|
||||
// {
|
||||
// "initial_heap": 12345678,
|
||||
// "samples:
|
||||
// {
|
||||
// "schema":
|
||||
// {
|
||||
// "time": 1, /* number */
|
||||
// "rss": 2, /* number */
|
||||
// "uss": 3 /* number */
|
||||
// },
|
||||
// "data":
|
||||
// [
|
||||
// /* { time: 0.1, rss: 12345678, uss: 87654321} */
|
||||
// [ 0.1, 12345678, 87654321 ]
|
||||
// ]
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
//
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif /* ndef ProfileBufferEntry_h */
|
|
@ -1,126 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include "BaseProfileJSONWriter.h"
|
||||
|
||||
# include "mozilla/HashFunctions.h"
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
void ChunkedJSONWriteFunc::Write(const char* aStr) {
|
||||
MOZ_ASSERT(mChunkPtr >= mChunkList.back().get() && mChunkPtr <= mChunkEnd);
|
||||
MOZ_ASSERT(mChunkEnd >= mChunkList.back().get() + mChunkLengths.back());
|
||||
MOZ_ASSERT(*mChunkPtr == '\0');
|
||||
|
||||
size_t len = strlen(aStr);
|
||||
|
||||
// Most strings to be written are small, but subprocess profiles (e.g.,
|
||||
// from the content process in e10s) may be huge. If the string is larger
|
||||
// than a chunk, allocate its own chunk.
|
||||
char* newPtr;
|
||||
if (len >= kChunkSize) {
|
||||
AllocChunk(len + 1);
|
||||
newPtr = mChunkPtr + len;
|
||||
} else {
|
||||
newPtr = mChunkPtr + len;
|
||||
if (newPtr >= mChunkEnd) {
|
||||
AllocChunk(kChunkSize);
|
||||
newPtr = mChunkPtr + len;
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(mChunkPtr, aStr, len);
|
||||
*newPtr = '\0';
|
||||
mChunkPtr = newPtr;
|
||||
mChunkLengths.back() += len;
|
||||
}
|
||||
|
||||
size_t ChunkedJSONWriteFunc::GetTotalLength() const {
|
||||
MOZ_ASSERT(mChunkLengths.length() == mChunkList.length());
|
||||
size_t totalLen = 1;
|
||||
for (size_t i = 0; i < mChunkLengths.length(); i++) {
|
||||
MOZ_ASSERT(strlen(mChunkList[i].get()) == mChunkLengths[i]);
|
||||
totalLen += mChunkLengths[i];
|
||||
}
|
||||
return totalLen;
|
||||
}
|
||||
|
||||
void ChunkedJSONWriteFunc::CopyDataIntoLazilyAllocatedBuffer(
|
||||
const std::function<char*(size_t)>& aAllocator) const {
|
||||
size_t totalLen = GetTotalLength();
|
||||
char* ptr = aAllocator(totalLen);
|
||||
for (size_t i = 0; i < mChunkList.length(); i++) {
|
||||
size_t len = mChunkLengths[i];
|
||||
memcpy(ptr, mChunkList[i].get(), len);
|
||||
ptr += len;
|
||||
}
|
||||
*ptr = '\0';
|
||||
}
|
||||
|
||||
UniquePtr<char[]> ChunkedJSONWriteFunc::CopyData() const {
|
||||
UniquePtr<char[]> c;
|
||||
CopyDataIntoLazilyAllocatedBuffer([&](size_t allocationSize) {
|
||||
c = MakeUnique<char[]>(allocationSize);
|
||||
return c.get();
|
||||
});
|
||||
return c;
|
||||
}
|
||||
|
||||
void ChunkedJSONWriteFunc::Take(ChunkedJSONWriteFunc&& aOther) {
|
||||
for (size_t i = 0; i < aOther.mChunkList.length(); i++) {
|
||||
MOZ_ALWAYS_TRUE(mChunkLengths.append(aOther.mChunkLengths[i]));
|
||||
MOZ_ALWAYS_TRUE(mChunkList.append(std::move(aOther.mChunkList[i])));
|
||||
}
|
||||
mChunkPtr = mChunkList.back().get() + mChunkLengths.back();
|
||||
mChunkEnd = mChunkPtr;
|
||||
aOther.mChunkPtr = nullptr;
|
||||
aOther.mChunkEnd = nullptr;
|
||||
aOther.mChunkList.clear();
|
||||
aOther.mChunkLengths.clear();
|
||||
}
|
||||
|
||||
void ChunkedJSONWriteFunc::AllocChunk(size_t aChunkSize) {
|
||||
MOZ_ASSERT(mChunkLengths.length() == mChunkList.length());
|
||||
UniquePtr<char[]> newChunk = MakeUnique<char[]>(aChunkSize);
|
||||
mChunkPtr = newChunk.get();
|
||||
mChunkEnd = mChunkPtr + aChunkSize;
|
||||
*mChunkPtr = '\0';
|
||||
MOZ_ALWAYS_TRUE(mChunkLengths.append(0));
|
||||
MOZ_ALWAYS_TRUE(mChunkList.append(std::move(newChunk)));
|
||||
}
|
||||
|
||||
void SpliceableJSONWriter::TakeAndSplice(ChunkedJSONWriteFunc* aFunc) {
|
||||
Separator();
|
||||
for (size_t i = 0; i < aFunc->mChunkList.length(); i++) {
|
||||
WriteFunc()->Write(aFunc->mChunkList[i].get());
|
||||
}
|
||||
aFunc->mChunkPtr = nullptr;
|
||||
aFunc->mChunkEnd = nullptr;
|
||||
aFunc->mChunkList.clear();
|
||||
aFunc->mChunkLengths.clear();
|
||||
mNeedComma[mDepth] = true;
|
||||
}
|
||||
|
||||
void SpliceableJSONWriter::Splice(const char* aStr) {
|
||||
Separator();
|
||||
WriteFunc()->Write(aStr);
|
||||
mNeedComma[mDepth] = true;
|
||||
}
|
||||
|
||||
void SpliceableChunkedJSONWriter::TakeAndSplice(ChunkedJSONWriteFunc* aFunc) {
|
||||
Separator();
|
||||
WriteFunc()->Take(std::move(*aFunc));
|
||||
mNeedComma[mDepth] = true;
|
||||
}
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,164 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include "ProfiledThreadData.h"
|
||||
|
||||
# include "ProfileBuffer.h"
|
||||
# include "BaseProfileJSONWriter.h"
|
||||
|
||||
# if defined(GP_OS_darwin)
|
||||
# include <pthread.h>
|
||||
# endif
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
ProfiledThreadData::ProfiledThreadData(ThreadInfo* aThreadInfo)
|
||||
: mThreadInfo(aThreadInfo) {}
|
||||
|
||||
ProfiledThreadData::~ProfiledThreadData() {}
|
||||
|
||||
void ProfiledThreadData::StreamJSON(const ProfileBuffer& aBuffer,
|
||||
SpliceableJSONWriter& aWriter,
|
||||
const std::string& aProcessName,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
double aSinceTime) {
|
||||
UniqueStacks uniqueStacks;
|
||||
|
||||
aWriter.Start();
|
||||
{
|
||||
StreamSamplesAndMarkers(mThreadInfo->Name(), mThreadInfo->ThreadId(),
|
||||
aBuffer, aWriter, aProcessName, aProcessStartTime,
|
||||
mThreadInfo->RegisterTime(), mUnregisterTime,
|
||||
aSinceTime, uniqueStacks);
|
||||
|
||||
aWriter.StartObjectProperty("stackTable");
|
||||
{
|
||||
{
|
||||
JSONSchemaWriter schema(aWriter);
|
||||
schema.WriteField("prefix");
|
||||
schema.WriteField("frame");
|
||||
}
|
||||
|
||||
aWriter.StartArrayProperty("data");
|
||||
{ uniqueStacks.SpliceStackTableElements(aWriter); }
|
||||
aWriter.EndArray();
|
||||
}
|
||||
aWriter.EndObject();
|
||||
|
||||
aWriter.StartObjectProperty("frameTable");
|
||||
{
|
||||
{
|
||||
JSONSchemaWriter schema(aWriter);
|
||||
schema.WriteField("location");
|
||||
schema.WriteField("relevantForJS");
|
||||
schema.WriteField("implementation");
|
||||
schema.WriteField("optimizations");
|
||||
schema.WriteField("line");
|
||||
schema.WriteField("column");
|
||||
schema.WriteField("category");
|
||||
}
|
||||
|
||||
aWriter.StartArrayProperty("data");
|
||||
{ uniqueStacks.SpliceFrameTableElements(aWriter); }
|
||||
aWriter.EndArray();
|
||||
}
|
||||
aWriter.EndObject();
|
||||
|
||||
aWriter.StartArrayProperty("stringTable");
|
||||
{ uniqueStacks.mUniqueStrings->SpliceStringTableElements(aWriter); }
|
||||
aWriter.EndArray();
|
||||
}
|
||||
|
||||
aWriter.End();
|
||||
}
|
||||
|
||||
void StreamSamplesAndMarkers(const char* aName, int aThreadId,
|
||||
const ProfileBuffer& aBuffer,
|
||||
SpliceableJSONWriter& aWriter,
|
||||
const std::string& aProcessName,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
const TimeStamp& aRegisterTime,
|
||||
const TimeStamp& aUnregisterTime,
|
||||
double aSinceTime, UniqueStacks& aUniqueStacks) {
|
||||
aWriter.StringProperty(
|
||||
"processType",
|
||||
"(unknown)" /* XRE_ChildProcessTypeToString(XRE_GetProcessType()) */);
|
||||
|
||||
aWriter.StringProperty("name", aName);
|
||||
|
||||
// Use given process name (if any).
|
||||
if (!aProcessName.empty()) {
|
||||
aWriter.StringProperty("processName", aProcessName.c_str());
|
||||
}
|
||||
|
||||
aWriter.IntProperty("tid", static_cast<int64_t>(aThreadId));
|
||||
aWriter.IntProperty("pid",
|
||||
static_cast<int64_t>(profiler_current_process_id()));
|
||||
|
||||
if (aRegisterTime) {
|
||||
aWriter.DoubleProperty(
|
||||
"registerTime", (aRegisterTime - aProcessStartTime).ToMilliseconds());
|
||||
} else {
|
||||
aWriter.NullProperty("registerTime");
|
||||
}
|
||||
|
||||
if (aUnregisterTime) {
|
||||
aWriter.DoubleProperty(
|
||||
"unregisterTime",
|
||||
(aUnregisterTime - aProcessStartTime).ToMilliseconds());
|
||||
} else {
|
||||
aWriter.NullProperty("unregisterTime");
|
||||
}
|
||||
|
||||
aWriter.StartObjectProperty("samples");
|
||||
{
|
||||
{
|
||||
JSONSchemaWriter schema(aWriter);
|
||||
schema.WriteField("stack");
|
||||
schema.WriteField("time");
|
||||
schema.WriteField("responsiveness");
|
||||
schema.WriteField("rss");
|
||||
schema.WriteField("uss");
|
||||
}
|
||||
|
||||
aWriter.StartArrayProperty("data");
|
||||
{
|
||||
aBuffer.StreamSamplesToJSON(aWriter, aThreadId, aSinceTime,
|
||||
aUniqueStacks);
|
||||
}
|
||||
aWriter.EndArray();
|
||||
}
|
||||
aWriter.EndObject();
|
||||
|
||||
aWriter.StartObjectProperty("markers");
|
||||
{
|
||||
{
|
||||
JSONSchemaWriter schema(aWriter);
|
||||
schema.WriteField("name");
|
||||
schema.WriteField("time");
|
||||
schema.WriteField("category");
|
||||
schema.WriteField("data");
|
||||
}
|
||||
|
||||
aWriter.StartArrayProperty("data");
|
||||
{
|
||||
aBuffer.StreamMarkersToJSON(aWriter, aThreadId, aProcessStartTime,
|
||||
aSinceTime, aUniqueStacks);
|
||||
}
|
||||
aWriter.EndArray();
|
||||
}
|
||||
aWriter.EndObject();
|
||||
}
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,117 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef ProfiledThreadData_h
|
||||
#define ProfiledThreadData_h
|
||||
|
||||
#include "BaseProfilingStack.h"
|
||||
#include "platform.h"
|
||||
#include "ProfileBufferEntry.h"
|
||||
#include "ThreadInfo.h"
|
||||
|
||||
#include "mozilla/RefPtr.h"
|
||||
#include "mozilla/TimeStamp.h"
|
||||
#include "mozilla/UniquePtr.h"
|
||||
|
||||
#include <string>
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
class ProfileBuffer;
|
||||
|
||||
// This class contains information about a thread that is only relevant while
|
||||
// the profiler is running, for any threads (both alive and dead) whose thread
|
||||
// name matches the "thread filter" in the current profiler run.
|
||||
// ProfiledThreadData objects may be kept alive even after the thread is
|
||||
// unregistered, as long as there is still data for that thread in the profiler
|
||||
// buffer.
|
||||
//
|
||||
// Accesses to this class are protected by the profiler state lock.
|
||||
//
|
||||
// Created as soon as the following are true for the thread:
|
||||
// - The profiler is running, and
|
||||
// - the thread matches the profiler's thread filter, and
|
||||
// - the thread is registered with the profiler.
|
||||
// So it gets created in response to either (1) the profiler being started (for
|
||||
// an existing registered thread) or (2) the thread being registered (if the
|
||||
// profiler is already running).
|
||||
//
|
||||
// The thread may be unregistered during the lifetime of ProfiledThreadData.
|
||||
// If that happens, NotifyUnregistered() is called.
|
||||
//
|
||||
// This class is the right place to store buffer positions. Profiler buffer
|
||||
// positions become invalid if the profiler buffer is destroyed, which happens
|
||||
// when the profiler is stopped.
|
||||
class ProfiledThreadData final {
|
||||
public:
|
||||
explicit ProfiledThreadData(ThreadInfo* aThreadInfo);
|
||||
~ProfiledThreadData();
|
||||
|
||||
void NotifyUnregistered(uint64_t aBufferPosition) {
|
||||
mLastSample = Nothing();
|
||||
MOZ_ASSERT(!mBufferPositionWhenReceivedJSContext,
|
||||
"JSContext should have been cleared before the thread was "
|
||||
"unregistered");
|
||||
mUnregisterTime = TimeStamp::Now();
|
||||
mBufferPositionWhenUnregistered = Some(aBufferPosition);
|
||||
}
|
||||
Maybe<uint64_t> BufferPositionWhenUnregistered() {
|
||||
return mBufferPositionWhenUnregistered;
|
||||
}
|
||||
|
||||
Maybe<uint64_t>& LastSample() { return mLastSample; }
|
||||
|
||||
void StreamJSON(const ProfileBuffer& aBuffer, SpliceableJSONWriter& aWriter,
|
||||
const std::string& aProcessName,
|
||||
const TimeStamp& aProcessStartTime, double aSinceTime);
|
||||
|
||||
const RefPtr<ThreadInfo> Info() const { return mThreadInfo; }
|
||||
|
||||
void NotifyReceivedJSContext(uint64_t aCurrentBufferPosition) {
|
||||
mBufferPositionWhenReceivedJSContext = Some(aCurrentBufferPosition);
|
||||
}
|
||||
|
||||
private:
|
||||
// Group A:
|
||||
// The following fields are interesting for the entire lifetime of a
|
||||
// ProfiledThreadData object.
|
||||
|
||||
// This thread's thread info.
|
||||
const RefPtr<ThreadInfo> mThreadInfo;
|
||||
|
||||
// Group B:
|
||||
// The following fields are only used while this thread is alive and
|
||||
// registered. They become Nothing() once the thread is unregistered.
|
||||
|
||||
// When sampling, this holds the position in ActivePS::mBuffer of the most
|
||||
// recent sample for this thread, or Nothing() if there is no sample for this
|
||||
// thread in the buffer.
|
||||
Maybe<uint64_t> mLastSample;
|
||||
|
||||
// Only non-Nothing() if the thread currently has a JSContext.
|
||||
Maybe<uint64_t> mBufferPositionWhenReceivedJSContext;
|
||||
|
||||
// Group C:
|
||||
// The following fields are only used once this thread has been unregistered.
|
||||
|
||||
Maybe<uint64_t> mBufferPositionWhenUnregistered;
|
||||
TimeStamp mUnregisterTime;
|
||||
};
|
||||
|
||||
void StreamSamplesAndMarkers(const char* aName, int aThreadId,
|
||||
const ProfileBuffer& aBuffer,
|
||||
SpliceableJSONWriter& aWriter,
|
||||
const std::string& aProcessName,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
const TimeStamp& aRegisterTime,
|
||||
const TimeStamp& aUnregisterTime,
|
||||
double aSinceTime, UniqueStacks& aUniqueStacks);
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // ProfiledThreadData_h
|
|
@ -1,44 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include "ProfilerBacktrace.h"
|
||||
|
||||
# include "ProfileBuffer.h"
|
||||
# include "ProfiledThreadData.h"
|
||||
# include "BaseProfileJSONWriter.h"
|
||||
# include "ThreadInfo.h"
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
ProfilerBacktrace::ProfilerBacktrace(const char* aName, int aThreadId,
|
||||
UniquePtr<ProfileBuffer> aBuffer)
|
||||
: mName(strdup(aName)), mThreadId(aThreadId), mBuffer(std::move(aBuffer)) {}
|
||||
|
||||
ProfilerBacktrace::~ProfilerBacktrace() {}
|
||||
|
||||
void ProfilerBacktrace::StreamJSON(SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
UniqueStacks& aUniqueStacks) {
|
||||
// Unlike ProfiledThreadData::StreamJSON, we don't need to call
|
||||
// ProfileBuffer::AddJITInfoForRange because mBuffer does not contain any
|
||||
// JitReturnAddr entries. For synchronous samples, JIT frames get expanded
|
||||
// at sample time.
|
||||
StreamSamplesAndMarkers(mName.get(), mThreadId, *mBuffer.get(), aWriter, "",
|
||||
aProcessStartTime,
|
||||
/* aRegisterTime */ TimeStamp(),
|
||||
/* aUnregisterTime */ TimeStamp(),
|
||||
/* aSinceTime */ 0, aUniqueStacks);
|
||||
}
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,52 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef __PROFILER_BACKTRACE_H
|
||||
#define __PROFILER_BACKTRACE_H
|
||||
|
||||
#include "mozilla/UniquePtrExtensions.h"
|
||||
|
||||
namespace mozilla {
|
||||
|
||||
class TimeStamp;
|
||||
|
||||
namespace baseprofiler {
|
||||
|
||||
class ProfileBuffer;
|
||||
class SpliceableJSONWriter;
|
||||
class ThreadInfo;
|
||||
class UniqueStacks;
|
||||
|
||||
// ProfilerBacktrace encapsulates a synchronous sample.
|
||||
class ProfilerBacktrace {
|
||||
public:
|
||||
ProfilerBacktrace(const char* aName, int aThreadId,
|
||||
UniquePtr<ProfileBuffer> aBuffer);
|
||||
~ProfilerBacktrace();
|
||||
|
||||
// ProfilerBacktraces' stacks are deduplicated in the context of the
|
||||
// profile that contains the backtrace as a marker payload.
|
||||
//
|
||||
// That is, markers that contain backtraces should not need their own stack,
|
||||
// frame, and string tables. They should instead reuse their parent
|
||||
// profile's tables.
|
||||
void StreamJSON(SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
UniqueStacks& aUniqueStacks);
|
||||
|
||||
private:
|
||||
ProfilerBacktrace(const ProfilerBacktrace&);
|
||||
ProfilerBacktrace& operator=(const ProfilerBacktrace&);
|
||||
|
||||
UniqueFreePtr<char> mName;
|
||||
int mThreadId;
|
||||
UniquePtr<ProfileBuffer> mBuffer;
|
||||
};
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // __PROFILER_BACKTRACE_H
|
|
@ -1,175 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef ProfilerMarker_h
|
||||
#define ProfilerMarker_h
|
||||
|
||||
#include "ProfileBufferEntry.h"
|
||||
#include "BaseProfileJSONWriter.h"
|
||||
#include "BaseProfilerMarkerPayload.h"
|
||||
|
||||
#include "mozilla/UniquePtrExtensions.h"
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
template <typename T>
|
||||
class ProfilerLinkedList;
|
||||
|
||||
class ProfilerMarker {
|
||||
friend class ProfilerLinkedList<ProfilerMarker>;
|
||||
|
||||
public:
|
||||
explicit ProfilerMarker(const char* aMarkerName,
|
||||
ProfilingCategoryPair aCategoryPair, int aThreadId,
|
||||
UniquePtr<ProfilerMarkerPayload> aPayload = nullptr,
|
||||
double aTime = 0)
|
||||
: mMarkerName(strdup(aMarkerName)),
|
||||
mPayload(std::move(aPayload)),
|
||||
mNext{nullptr},
|
||||
mTime(aTime),
|
||||
mPositionInBuffer{0},
|
||||
mThreadId{aThreadId},
|
||||
mCategoryPair{aCategoryPair} {}
|
||||
|
||||
void SetPositionInBuffer(uint64_t aPosition) {
|
||||
mPositionInBuffer = aPosition;
|
||||
}
|
||||
|
||||
bool HasExpired(uint64_t aBufferRangeStart) const {
|
||||
return mPositionInBuffer < aBufferRangeStart;
|
||||
}
|
||||
|
||||
double GetTime() const { return mTime; }
|
||||
|
||||
int GetThreadId() const { return mThreadId; }
|
||||
|
||||
void StreamJSON(SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
UniqueStacks& aUniqueStacks) const {
|
||||
// Schema:
|
||||
// [name, time, category, data]
|
||||
|
||||
aWriter.StartArrayElement();
|
||||
{
|
||||
aUniqueStacks.mUniqueStrings->WriteElement(aWriter, mMarkerName.get());
|
||||
aWriter.DoubleElement(mTime);
|
||||
const ProfilingCategoryPairInfo& info =
|
||||
GetProfilingCategoryPairInfo(mCategoryPair);
|
||||
aWriter.IntElement(unsigned(info.mCategory));
|
||||
// TODO: Store the callsite for this marker if available:
|
||||
// if have location data
|
||||
// b.NameValue(marker, "location", ...);
|
||||
if (mPayload) {
|
||||
aWriter.StartObjectElement(SpliceableJSONWriter::SingleLineStyle);
|
||||
{ mPayload->StreamPayload(aWriter, aProcessStartTime, aUniqueStacks); }
|
||||
aWriter.EndObject();
|
||||
}
|
||||
}
|
||||
aWriter.EndArray();
|
||||
}
|
||||
|
||||
private:
|
||||
UniqueFreePtr<char> mMarkerName;
|
||||
UniquePtr<ProfilerMarkerPayload> mPayload;
|
||||
ProfilerMarker* mNext;
|
||||
double mTime;
|
||||
uint64_t mPositionInBuffer;
|
||||
int mThreadId;
|
||||
ProfilingCategoryPair mCategoryPair;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
class ProfilerLinkedList {
|
||||
public:
|
||||
ProfilerLinkedList() : mHead(nullptr), mTail(nullptr) {}
|
||||
|
||||
void insert(T* aElem) {
|
||||
if (!mTail) {
|
||||
mHead = aElem;
|
||||
mTail = aElem;
|
||||
} else {
|
||||
mTail->mNext = aElem;
|
||||
mTail = aElem;
|
||||
}
|
||||
aElem->mNext = nullptr;
|
||||
}
|
||||
|
||||
T* popHead() {
|
||||
if (!mHead) {
|
||||
MOZ_ASSERT(false);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
T* head = mHead;
|
||||
|
||||
mHead = head->mNext;
|
||||
if (!mHead) {
|
||||
mTail = nullptr;
|
||||
}
|
||||
|
||||
return head;
|
||||
}
|
||||
|
||||
const T* peek() { return mHead; }
|
||||
|
||||
private:
|
||||
T* mHead;
|
||||
T* mTail;
|
||||
};
|
||||
|
||||
typedef ProfilerLinkedList<ProfilerMarker> ProfilerMarkerLinkedList;
|
||||
|
||||
template <typename T>
|
||||
class ProfilerSignalSafeLinkedList {
|
||||
public:
|
||||
ProfilerSignalSafeLinkedList() : mSignalLock(false) {}
|
||||
|
||||
~ProfilerSignalSafeLinkedList() {
|
||||
if (mSignalLock) {
|
||||
// Some thread is modifying the list. We should only be released on that
|
||||
// thread.
|
||||
abort();
|
||||
}
|
||||
|
||||
while (mList.peek()) {
|
||||
delete mList.popHead();
|
||||
}
|
||||
}
|
||||
|
||||
// Insert an item into the list. Must only be called from the owning thread.
|
||||
// Must not be called while the list from accessList() is being accessed.
|
||||
// In the profiler, we ensure that by interrupting the profiled thread
|
||||
// (which is the one that owns this list and calls insert() on it) until
|
||||
// we're done reading the list from the signal handler.
|
||||
void insert(T* aElement) {
|
||||
MOZ_ASSERT(aElement);
|
||||
|
||||
mSignalLock = true;
|
||||
|
||||
mList.insert(aElement);
|
||||
|
||||
mSignalLock = false;
|
||||
}
|
||||
|
||||
// Called within signal, from any thread, possibly while insert() is in the
|
||||
// middle of modifying the list (on the owning thread). Will return null if
|
||||
// that is the case.
|
||||
// Function must be reentrant.
|
||||
ProfilerLinkedList<T>* accessList() { return mSignalLock ? nullptr : &mList; }
|
||||
|
||||
private:
|
||||
ProfilerLinkedList<T> mList;
|
||||
|
||||
// If this is set, then it's not safe to read the list because its contents
|
||||
// are being changed.
|
||||
Atomic<bool> mSignalLock;
|
||||
};
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // ProfilerMarker_h
|
|
@ -1,137 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include "BaseProfilerMarkerPayload.h"
|
||||
|
||||
# include "ProfileBufferEntry.h"
|
||||
# include "BaseProfileJSONWriter.h"
|
||||
# include "ProfilerBacktrace.h"
|
||||
|
||||
# include "mozilla/Maybe.h"
|
||||
# include "mozilla/Sprintf.h"
|
||||
|
||||
# include <inttypes.h>
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
static void MOZ_ALWAYS_INLINE WriteTime(SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
const TimeStamp& aTime,
|
||||
const char* aName) {
|
||||
if (!aTime.IsNull()) {
|
||||
aWriter.DoubleProperty(aName, (aTime - aProcessStartTime).ToMilliseconds());
|
||||
}
|
||||
}
|
||||
|
||||
void ProfilerMarkerPayload::StreamType(const char* aMarkerType,
|
||||
SpliceableJSONWriter& aWriter) {
|
||||
MOZ_ASSERT(aMarkerType);
|
||||
aWriter.StringProperty("type", aMarkerType);
|
||||
}
|
||||
|
||||
void ProfilerMarkerPayload::StreamCommonProps(
|
||||
const char* aMarkerType, SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime, UniqueStacks& aUniqueStacks) {
|
||||
StreamType(aMarkerType, aWriter);
|
||||
WriteTime(aWriter, aProcessStartTime, mStartTime, "startTime");
|
||||
WriteTime(aWriter, aProcessStartTime, mEndTime, "endTime");
|
||||
if (mDocShellId) {
|
||||
aWriter.StringProperty("docShellId", mDocShellId->c_str());
|
||||
}
|
||||
if (mDocShellHistoryId) {
|
||||
aWriter.DoubleProperty("docshellHistoryId", mDocShellHistoryId.ref());
|
||||
}
|
||||
if (mStack) {
|
||||
aWriter.StartObjectProperty("stack");
|
||||
{ mStack->StreamJSON(aWriter, aProcessStartTime, aUniqueStacks); }
|
||||
aWriter.EndObject();
|
||||
}
|
||||
}
|
||||
|
||||
void TracingMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
UniqueStacks& aUniqueStacks) {
|
||||
StreamCommonProps("tracing", aWriter, aProcessStartTime, aUniqueStacks);
|
||||
|
||||
if (mCategory) {
|
||||
aWriter.StringProperty("category", mCategory);
|
||||
}
|
||||
|
||||
if (mKind == TRACING_INTERVAL_START) {
|
||||
aWriter.StringProperty("interval", "start");
|
||||
} else if (mKind == TRACING_INTERVAL_END) {
|
||||
aWriter.StringProperty("interval", "end");
|
||||
}
|
||||
}
|
||||
|
||||
void FileIOMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
UniqueStacks& aUniqueStacks) {
|
||||
StreamCommonProps("FileIO", aWriter, aProcessStartTime, aUniqueStacks);
|
||||
aWriter.StringProperty("operation", mOperation.get());
|
||||
aWriter.StringProperty("source", mSource);
|
||||
if (mFilename) {
|
||||
aWriter.StringProperty("filename", mFilename.get());
|
||||
}
|
||||
}
|
||||
|
||||
void UserTimingMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
UniqueStacks& aUniqueStacks) {
|
||||
StreamCommonProps("UserTiming", aWriter, aProcessStartTime, aUniqueStacks);
|
||||
aWriter.StringProperty("name", mName.c_str());
|
||||
aWriter.StringProperty("entryType", mEntryType);
|
||||
|
||||
if (mStartMark.isSome()) {
|
||||
aWriter.StringProperty("startMark", mStartMark.value().c_str());
|
||||
} else {
|
||||
aWriter.NullProperty("startMark");
|
||||
}
|
||||
if (mEndMark.isSome()) {
|
||||
aWriter.StringProperty("endMark", mEndMark.value().c_str());
|
||||
} else {
|
||||
aWriter.NullProperty("endMark");
|
||||
}
|
||||
}
|
||||
|
||||
void TextMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
UniqueStacks& aUniqueStacks) {
|
||||
StreamCommonProps("Text", aWriter, aProcessStartTime, aUniqueStacks);
|
||||
aWriter.StringProperty("name", mText.c_str());
|
||||
}
|
||||
|
||||
void LogMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
UniqueStacks& aUniqueStacks) {
|
||||
StreamCommonProps("Log", aWriter, aProcessStartTime, aUniqueStacks);
|
||||
aWriter.StringProperty("name", mText.c_str());
|
||||
aWriter.StringProperty("module", mModule.c_str());
|
||||
}
|
||||
|
||||
void HangMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
UniqueStacks& aUniqueStacks) {
|
||||
StreamCommonProps("BHR-detected hang", aWriter, aProcessStartTime,
|
||||
aUniqueStacks);
|
||||
}
|
||||
|
||||
void LongTaskMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
UniqueStacks& aUniqueStacks) {
|
||||
StreamCommonProps("MainThreadLongTask", aWriter, aProcessStartTime,
|
||||
aUniqueStacks);
|
||||
aWriter.StringProperty("category", "LongTask");
|
||||
}
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,75 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
|
||||
* vim: set ts=8 sts=2 et sw=2 tw=80:
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include "BaseProfilingCategory.h"
|
||||
|
||||
# include "mozilla/ArrayUtils.h"
|
||||
# include "mozilla/Assertions.h"
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
// clang-format off
|
||||
|
||||
// ProfilingSubcategory_X:
|
||||
// One enum for each category X, listing that category's subcategories. This
|
||||
// allows the sProfilingCategoryInfo macro construction below to look up a
|
||||
// per-category index for a subcategory.
|
||||
#define SUBCATEGORY_ENUMS_BEGIN_CATEGORY(name, labelAsString, color) \
|
||||
enum class ProfilingSubcategory_##name : uint32_t {
|
||||
#define SUBCATEGORY_ENUMS_SUBCATEGORY(category, name, labelAsString) \
|
||||
name,
|
||||
#define SUBCATEGORY_ENUMS_END_CATEGORY \
|
||||
};
|
||||
BASE_PROFILING_CATEGORY_LIST(SUBCATEGORY_ENUMS_BEGIN_CATEGORY,
|
||||
SUBCATEGORY_ENUMS_SUBCATEGORY,
|
||||
SUBCATEGORY_ENUMS_END_CATEGORY)
|
||||
#undef SUBCATEGORY_ENUMS_BEGIN_CATEGORY
|
||||
#undef SUBCATEGORY_ENUMS_SUBCATEGORY
|
||||
#undef SUBCATEGORY_ENUMS_END_CATEGORY
|
||||
|
||||
// sProfilingCategoryPairInfo:
|
||||
// A list of ProfilingCategoryPairInfos with the same order as
|
||||
// ProfilingCategoryPair, which can be used to map a ProfilingCategoryPair to
|
||||
// its information.
|
||||
#define CATEGORY_INFO_BEGIN_CATEGORY(name, labelAsString, color)
|
||||
#define CATEGORY_INFO_SUBCATEGORY(category, name, labelAsString) \
|
||||
{ProfilingCategory::category, \
|
||||
uint32_t(ProfilingSubcategory_##category::name), labelAsString},
|
||||
#define CATEGORY_INFO_END_CATEGORY
|
||||
const ProfilingCategoryPairInfo sProfilingCategoryPairInfo[] = {
|
||||
BASE_PROFILING_CATEGORY_LIST(CATEGORY_INFO_BEGIN_CATEGORY,
|
||||
CATEGORY_INFO_SUBCATEGORY,
|
||||
CATEGORY_INFO_END_CATEGORY)
|
||||
};
|
||||
#undef CATEGORY_INFO_BEGIN_CATEGORY
|
||||
#undef CATEGORY_INFO_SUBCATEGORY
|
||||
#undef CATEGORY_INFO_END_CATEGORY
|
||||
|
||||
// clang-format on
|
||||
|
||||
const ProfilingCategoryPairInfo& GetProfilingCategoryPairInfo(
|
||||
ProfilingCategoryPair aCategoryPair) {
|
||||
static_assert(
|
||||
MOZ_ARRAY_LENGTH(sProfilingCategoryPairInfo) ==
|
||||
uint32_t(ProfilingCategoryPair::COUNT),
|
||||
"sProfilingCategoryPairInfo and ProfilingCategory need to have the "
|
||||
"same order and the same length");
|
||||
|
||||
uint32_t categoryPairIndex = uint32_t(aCategoryPair);
|
||||
MOZ_RELEASE_ASSERT(categoryPairIndex <=
|
||||
uint32_t(ProfilingCategoryPair::LAST));
|
||||
return sProfilingCategoryPairInfo[categoryPairIndex];
|
||||
}
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,56 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
|
||||
* vim: set ts=8 sts=2 et sw=2 tw=80:
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include "BaseProfilingStack.h"
|
||||
|
||||
# include "mozilla/IntegerRange.h"
|
||||
# include "mozilla/UniquePtr.h"
|
||||
# include "mozilla/UniquePtrExtensions.h"
|
||||
|
||||
# include <algorithm>
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
ProfilingStack::~ProfilingStack() {
|
||||
// The label macros keep a reference to the ProfilingStack to avoid a TLS
|
||||
// access. If these are somehow not all cleared we will get a
|
||||
// use-after-free so better to crash now.
|
||||
MOZ_RELEASE_ASSERT(stackPointer == 0);
|
||||
|
||||
delete[] frames;
|
||||
}
|
||||
|
||||
void ProfilingStack::ensureCapacitySlow() {
|
||||
MOZ_ASSERT(stackPointer >= capacity);
|
||||
const uint32_t kInitialCapacity = 128;
|
||||
|
||||
uint32_t sp = stackPointer;
|
||||
auto newCapacity =
|
||||
std::max(sp + 1, capacity ? capacity * 2 : kInitialCapacity);
|
||||
|
||||
auto* newFrames = new ProfilingStackFrame[newCapacity];
|
||||
|
||||
// It's important that `frames` / `capacity` / `stackPointer` remain
|
||||
// consistent here at all times.
|
||||
for (auto i : IntegerRange(capacity)) {
|
||||
newFrames[i] = frames[i];
|
||||
}
|
||||
|
||||
ProfilingStackFrame* oldFrames = frames;
|
||||
frames = newFrames;
|
||||
capacity = newCapacity;
|
||||
delete[] oldFrames;
|
||||
}
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,47 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include "RegisteredThread.h"
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
RegisteredThread::RegisteredThread(ThreadInfo* aInfo, void* aStackTop)
|
||||
: mRacyRegisteredThread(aInfo->ThreadId()),
|
||||
mPlatformData(AllocPlatformData(aInfo->ThreadId())),
|
||||
mStackTop(aStackTop),
|
||||
mThreadInfo(aInfo) {
|
||||
// We don't have to guess on mac
|
||||
# if defined(GP_OS_darwin)
|
||||
pthread_t self = pthread_self();
|
||||
mStackTop = pthread_get_stackaddr_np(self);
|
||||
# endif
|
||||
}
|
||||
|
||||
RegisteredThread::~RegisteredThread() {}
|
||||
|
||||
size_t RegisteredThread::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
|
||||
size_t n = aMallocSizeOf(this);
|
||||
|
||||
// Measurement of the following members may be added later if DMD finds it
|
||||
// is worthwhile:
|
||||
// - mPlatformData
|
||||
// - mRacyRegisteredThread.mPendingMarkers
|
||||
//
|
||||
// The following members are not measured:
|
||||
// - mThreadInfo: because it is non-owning
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,193 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef RegisteredThread_h
|
||||
#define RegisteredThread_h
|
||||
|
||||
#include "platform.h"
|
||||
#include "ProfilerMarker.h"
|
||||
#include "BaseProfilerMarkerPayload.h"
|
||||
#include "ThreadInfo.h"
|
||||
|
||||
#include "mozilla/UniquePtr.h"
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
// This class contains the state for a single thread that is accessible without
|
||||
// protection from gPSMutex in platform.cpp. Because there is no external
|
||||
// protection against data races, it must provide internal protection. Hence
|
||||
// the "Racy" prefix.
|
||||
//
|
||||
class RacyRegisteredThread final {
|
||||
public:
|
||||
explicit RacyRegisteredThread(int aThreadId)
|
||||
: mThreadId(aThreadId), mSleep(AWAKE), mIsBeingProfiled(false) {}
|
||||
|
||||
~RacyRegisteredThread() {}
|
||||
|
||||
void SetIsBeingProfiled(bool aIsBeingProfiled) {
|
||||
mIsBeingProfiled = aIsBeingProfiled;
|
||||
}
|
||||
|
||||
bool IsBeingProfiled() const { return mIsBeingProfiled; }
|
||||
|
||||
void AddPendingMarker(const char* aMarkerName,
|
||||
ProfilingCategoryPair aCategoryPair,
|
||||
UniquePtr<ProfilerMarkerPayload> aPayload,
|
||||
double aTime) {
|
||||
// Note: We don't assert on mIsBeingProfiled, because it could have changed
|
||||
// between the check in the caller and now.
|
||||
ProfilerMarker* marker = new ProfilerMarker(
|
||||
aMarkerName, aCategoryPair, mThreadId, std::move(aPayload), aTime);
|
||||
mPendingMarkers.insert(marker);
|
||||
}
|
||||
|
||||
// Called within signal. Function must be reentrant.
|
||||
ProfilerMarkerLinkedList* GetPendingMarkers() {
|
||||
// The profiled thread is interrupted, so we can access the list safely.
|
||||
// Unless the profiled thread was in the middle of changing the list when
|
||||
// we interrupted it - in that case, accessList() will return null.
|
||||
return mPendingMarkers.accessList();
|
||||
}
|
||||
|
||||
// This is called on every profiler restart. Put things that should happen at
|
||||
// that time here.
|
||||
void ReinitializeOnResume() {
|
||||
// This is needed to cause an initial sample to be taken from sleeping
|
||||
// threads that had been observed prior to the profiler stopping and
|
||||
// restarting. Otherwise sleeping threads would not have any samples to
|
||||
// copy forward while sleeping.
|
||||
(void)mSleep.compareExchange(SLEEPING_OBSERVED, SLEEPING_NOT_OBSERVED);
|
||||
}
|
||||
|
||||
// This returns true for the second and subsequent calls in each sleep cycle.
|
||||
bool CanDuplicateLastSampleDueToSleep() {
|
||||
if (mSleep == AWAKE) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (mSleep.compareExchange(SLEEPING_NOT_OBSERVED, SLEEPING_OBSERVED)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Call this whenever the current thread sleeps. Calling it twice in a row
|
||||
// without an intervening setAwake() call is an error.
|
||||
void SetSleeping() {
|
||||
MOZ_ASSERT(mSleep == AWAKE);
|
||||
mSleep = SLEEPING_NOT_OBSERVED;
|
||||
}
|
||||
|
||||
// Call this whenever the current thread wakes. Calling it twice in a row
|
||||
// without an intervening setSleeping() call is an error.
|
||||
void SetAwake() {
|
||||
MOZ_ASSERT(mSleep != AWAKE);
|
||||
mSleep = AWAKE;
|
||||
}
|
||||
|
||||
bool IsSleeping() { return mSleep != AWAKE; }
|
||||
|
||||
int ThreadId() const { return mThreadId; }
|
||||
|
||||
class ProfilingStack& ProfilingStack() {
|
||||
return mProfilingStack;
|
||||
}
|
||||
const class ProfilingStack& ProfilingStack() const { return mProfilingStack; }
|
||||
|
||||
private:
|
||||
class ProfilingStack mProfilingStack;
|
||||
|
||||
// A list of pending markers that must be moved to the circular buffer.
|
||||
ProfilerSignalSafeLinkedList<ProfilerMarker> mPendingMarkers;
|
||||
|
||||
// mThreadId contains the thread ID of the current thread. It is safe to read
|
||||
// this from multiple threads concurrently, as it will never be mutated.
|
||||
const int mThreadId;
|
||||
|
||||
// mSleep tracks whether the thread is sleeping, and if so, whether it has
|
||||
// been previously observed. This is used for an optimization: in some cases,
|
||||
// when a thread is asleep, we duplicate the previous sample, which is
|
||||
// cheaper than taking a new sample.
|
||||
//
|
||||
// mSleep is atomic because it is accessed from multiple threads.
|
||||
//
|
||||
// - It is written only by this thread, via setSleeping() and setAwake().
|
||||
//
|
||||
// - It is read by SamplerThread::Run().
|
||||
//
|
||||
// There are two cases where racing between threads can cause an issue.
|
||||
//
|
||||
// - If CanDuplicateLastSampleDueToSleep() returns false but that result is
|
||||
// invalidated before being acted upon, we will take a full sample
|
||||
// unnecessarily. This is additional work but won't cause any correctness
|
||||
// issues. (In actual fact, this case is impossible. In order to go from
|
||||
// CanDuplicateLastSampleDueToSleep() returning false to it returning true
|
||||
// requires an intermediate call to it in order for mSleep to go from
|
||||
// SLEEPING_NOT_OBSERVED to SLEEPING_OBSERVED.)
|
||||
//
|
||||
// - If CanDuplicateLastSampleDueToSleep() returns true but that result is
|
||||
// invalidated before being acted upon -- i.e. the thread wakes up before
|
||||
// DuplicateLastSample() is called -- we will duplicate the previous
|
||||
// sample. This is inaccurate, but only slightly... we will effectively
|
||||
// treat the thread as having slept a tiny bit longer than it really did.
|
||||
//
|
||||
// This latter inaccuracy could be avoided by moving the
|
||||
// CanDuplicateLastSampleDueToSleep() check within the thread-freezing code,
|
||||
// e.g. the section where Tick() is called. But that would reduce the
|
||||
// effectiveness of the optimization because more code would have to be run
|
||||
// before we can tell that duplication is allowed.
|
||||
//
|
||||
static const int AWAKE = 0;
|
||||
static const int SLEEPING_NOT_OBSERVED = 1;
|
||||
static const int SLEEPING_OBSERVED = 2;
|
||||
Atomic<int> mSleep;
|
||||
|
||||
// Is this thread being profiled? (e.g., should markers be recorded?)
|
||||
// Accesses to this atomic are not recorded by web replay as they may occur
|
||||
// at non-deterministic points.
|
||||
Atomic<bool, MemoryOrdering::Relaxed, recordreplay::Behavior::DontPreserve>
|
||||
mIsBeingProfiled;
|
||||
};
|
||||
|
||||
// This class contains information that's relevant to a single thread only
|
||||
// while that thread is running and registered with the profiler, but
|
||||
// regardless of whether the profiler is running. All accesses to it are
|
||||
// protected by the profiler state lock.
|
||||
class RegisteredThread final {
|
||||
public:
|
||||
RegisteredThread(ThreadInfo* aInfo, void* aStackTop);
|
||||
~RegisteredThread();
|
||||
|
||||
class RacyRegisteredThread& RacyRegisteredThread() {
|
||||
return mRacyRegisteredThread;
|
||||
}
|
||||
const class RacyRegisteredThread& RacyRegisteredThread() const {
|
||||
return mRacyRegisteredThread;
|
||||
}
|
||||
|
||||
PlatformData* GetPlatformData() const { return mPlatformData.get(); }
|
||||
const void* StackTop() const { return mStackTop; }
|
||||
|
||||
size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
|
||||
|
||||
const RefPtr<ThreadInfo> Info() const { return mThreadInfo; }
|
||||
|
||||
private:
|
||||
class RacyRegisteredThread mRacyRegisteredThread;
|
||||
|
||||
const UniquePlatformData mPlatformData;
|
||||
const void* mStackTop;
|
||||
|
||||
const RefPtr<ThreadInfo> mThreadInfo;
|
||||
};
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // RegisteredThread_h
|
|
@ -1,63 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef ThreadInfo_h
|
||||
#define ThreadInfo_h
|
||||
|
||||
#include "mozilla/Atomics.h"
|
||||
#include "mozilla/TimeStamp.h"
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
// This class contains information about a thread which needs to be stored
|
||||
// across restarts of the profiler and which can be useful even after the
|
||||
// thread has stopped running.
|
||||
// It uses threadsafe refcounting and only contains immutable data.
|
||||
class ThreadInfo final {
|
||||
public:
|
||||
ThreadInfo(const char* aName, int aThreadId, bool aIsMainThread,
|
||||
const TimeStamp& aRegisterTime = TimeStamp::Now())
|
||||
: mName(aName),
|
||||
mRegisterTime(aRegisterTime),
|
||||
mThreadId(aThreadId),
|
||||
mIsMainThread(aIsMainThread),
|
||||
mRefCnt(0) {
|
||||
// I don't know if we can assert this. But we should warn.
|
||||
MOZ_ASSERT(aThreadId >= 0, "native thread ID is < 0");
|
||||
MOZ_ASSERT(aThreadId <= INT32_MAX, "native thread ID is > INT32_MAX");
|
||||
}
|
||||
|
||||
// Using hand-rolled ref-counting, because RefCounted.h macros don't produce
|
||||
// the same code between mozglue and libxul, see bug 1536656.
|
||||
MFBT_API void AddRef() const { ++mRefCnt; }
|
||||
MFBT_API void Release() const {
|
||||
MOZ_ASSERT(int32_t(mRefCnt) > 0);
|
||||
if (--mRefCnt == 0) {
|
||||
delete this;
|
||||
}
|
||||
}
|
||||
|
||||
const char* Name() const { return mName.c_str(); }
|
||||
TimeStamp RegisterTime() const { return mRegisterTime; }
|
||||
int ThreadId() const { return mThreadId; }
|
||||
bool IsMainThread() const { return mIsMainThread; }
|
||||
|
||||
private:
|
||||
const std::string mName;
|
||||
const TimeStamp mRegisterTime;
|
||||
const int mThreadId;
|
||||
const bool mIsMainThread;
|
||||
|
||||
mutable Atomic<int32_t, MemoryOrdering::ReleaseAcquire,
|
||||
recordreplay::Behavior::DontPreserve>
|
||||
mRefCnt;
|
||||
};
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // ThreadInfo_h
|
|
@ -1,98 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# ifdef XP_WIN
|
||||
# undef UNICODE
|
||||
# undef _UNICODE
|
||||
# endif
|
||||
|
||||
# include "VTuneProfiler.h"
|
||||
|
||||
# include <memory>
|
||||
|
||||
using namespace std;
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
VTuneProfiler* VTuneProfiler::mInstance = nullptr;
|
||||
|
||||
void VTuneProfiler::Initialize() {
|
||||
// This is just a 'dirty trick' to find out if the ittnotify DLL was found.
|
||||
// If it wasn't this function always returns 0, otherwise it returns
|
||||
// incrementing numbers, if the library was found this wastes 2 events but
|
||||
// that should be okay.
|
||||
// TODO re-implement here if vtune is needed
|
||||
// __itt_event testEvent =
|
||||
// __itt_event_create("Test event", strlen("Test event"));
|
||||
// testEvent = __itt_event_create("Test event 2", strlen("Test event 2"));
|
||||
|
||||
// if (testEvent) {
|
||||
// mInstance = new VTuneProfiler();
|
||||
// }
|
||||
}
|
||||
|
||||
void VTuneProfiler::Shutdown() {}
|
||||
|
||||
void VTuneProfiler::TraceInternal(const char* aName, TracingKind aKind) {
|
||||
// TODO re-implement here if vtune is needed
|
||||
// string str(aName);
|
||||
|
||||
// auto iter = mStrings.find(str);
|
||||
|
||||
// __itt_event event;
|
||||
// if (iter != mStrings.end()) {
|
||||
// event = iter->second;
|
||||
// } else {
|
||||
// event = __itt_event_create(aName, str.length());
|
||||
// mStrings.insert({str, event});
|
||||
// }
|
||||
|
||||
// if (aKind == TRACING_INTERVAL_START || aKind == TRACING_EVENT) {
|
||||
// // VTune will consider starts not matched with an end to be single point
|
||||
// in
|
||||
// // time events.
|
||||
// __itt_event_start(event);
|
||||
// } else {
|
||||
// __itt_event_end(event);
|
||||
// }
|
||||
}
|
||||
|
||||
void VTuneProfiler::RegisterThreadInternal(const char* aName) {
|
||||
// TODO re-implement here if vtune is needed
|
||||
// string str(aName);
|
||||
|
||||
// if (!str.compare("Main Thread (Base Profiler)")) {
|
||||
// // Process main thread.
|
||||
// switch (XRE_GetProcessType()) {
|
||||
// case GeckoProcessType::GeckoProcessType_Default:
|
||||
// __itt_thread_set_name("Main Process");
|
||||
// break;
|
||||
// case GeckoProcessType::GeckoProcessType_Content:
|
||||
// __itt_thread_set_name("Content Process");
|
||||
// break;
|
||||
// case GeckoProcessType::GeckoProcessType_GMPlugin:
|
||||
// __itt_thread_set_name("Plugin Process");
|
||||
// break;
|
||||
// case GeckoProcessType::GeckoProcessType_GPU:
|
||||
// __itt_thread_set_name("GPU Process");
|
||||
// break;
|
||||
// default:
|
||||
// __itt_thread_set_name("Unknown Process");
|
||||
// }
|
||||
// return;
|
||||
// }
|
||||
// __itt_thread_set_name(aName);
|
||||
}
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,78 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef VTuneProfiler_h
|
||||
#define VTuneProfiler_h
|
||||
|
||||
// The intent here is to add 0 overhead for regular users. In order to build
|
||||
// the VTune profiler code at all --enable-vtune-instrumentation needs to be
|
||||
// set as a build option. Even then, when none of the environment variables
|
||||
// is specified that allow us to find the ittnotify DLL, these functions
|
||||
// should be minimal overhead. When starting Firefox under VTune, these
|
||||
// env vars will be automatically defined, otherwise INTEL_LIBITTNOTIFY32/64
|
||||
// should be set to point at the ittnotify DLL.
|
||||
#ifndef MOZ_VTUNE_INSTRUMENTATION
|
||||
|
||||
# define VTUNE_INIT()
|
||||
# define VTUNE_SHUTDOWN()
|
||||
|
||||
# define VTUNE_TRACING(name, kind)
|
||||
# define VTUNE_REGISTER_THREAD(name)
|
||||
|
||||
#else
|
||||
|
||||
# include "BaseProfiler.h"
|
||||
|
||||
// This is the regular Intel header, these functions are actually defined for
|
||||
// us inside js/src/vtune by an intel C file which actually dynamically resolves
|
||||
// them to the correct DLL. Through libxul these will 'magically' resolve.
|
||||
# include "vtune/ittnotify.h"
|
||||
|
||||
# include <stddef.h>
|
||||
# include <unordered_map>
|
||||
# include <string>
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
class VTuneProfiler {
|
||||
public:
|
||||
static void Initialize();
|
||||
static void Shutdown();
|
||||
|
||||
static void Trace(const char* aName, TracingKind aKind) {
|
||||
if (mInstance) {
|
||||
mInstance->TraceInternal(aName, aKind);
|
||||
}
|
||||
}
|
||||
static void RegisterThread(const char* aName) {
|
||||
if (mInstance) {
|
||||
mInstance->RegisterThreadInternal(aName);
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
void TraceInternal(const char* aName, TracingKind aKind);
|
||||
void RegisterThreadInternal(const char* aName);
|
||||
|
||||
// This is null when the ittnotify DLL could not be found.
|
||||
static VTuneProfiler* mInstance;
|
||||
|
||||
std::unordered_map<std::string, __itt_event> mStrings;
|
||||
};
|
||||
|
||||
# define VTUNE_INIT() VTuneProfiler::Initialize()
|
||||
# define VTUNE_SHUTDOWN() VTuneProfiler::Shutdown()
|
||||
|
||||
# define VTUNE_TRACING(name, kind) VTuneProfiler::Trace(name, kind)
|
||||
# define VTUNE_REGISTER_THREAD(name) VTuneProfiler::RegisterThread(name)
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* VTuneProfiler_h */
|
|
@ -1,527 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
// Copyright (c) 2006-2011 The Chromium Authors. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions
|
||||
// are met:
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in
|
||||
// the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google, Inc. nor the names of its contributors
|
||||
// may be used to endorse or promote products derived from this
|
||||
// software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
||||
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||||
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
||||
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
|
||||
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
|
||||
// OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
// SUCH DAMAGE.
|
||||
|
||||
// This file is used for both Linux and Android.
|
||||
|
||||
#include <stdio.h>
|
||||
#include <math.h>
|
||||
|
||||
#include <pthread.h>
|
||||
#include <semaphore.h>
|
||||
#include <signal.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/resource.h>
|
||||
#include <sys/syscall.h>
|
||||
#include <sys/types.h>
|
||||
#include <stdlib.h>
|
||||
#include <sched.h>
|
||||
#include <ucontext.h>
|
||||
// Ubuntu Dapper requires memory pages to be marked as
|
||||
// executable. Otherwise, OS raises an exception when executing code
|
||||
// in that page.
|
||||
#include <sys/types.h> // mmap & munmap
|
||||
#include <sys/mman.h> // mmap & munmap
|
||||
#include <sys/stat.h> // open
|
||||
#include <fcntl.h> // open
|
||||
#include <unistd.h> // sysconf
|
||||
#include <semaphore.h>
|
||||
#ifdef __GLIBC__
|
||||
# include <execinfo.h> // backtrace, backtrace_symbols
|
||||
#endif // def __GLIBC__
|
||||
#include <strings.h> // index
|
||||
#include <errno.h>
|
||||
#include <stdarg.h>
|
||||
|
||||
#include "prenv.h"
|
||||
#include "mozilla/LinuxSignal.h"
|
||||
#include "mozilla/PodOperations.h"
|
||||
#include "mozilla/DebugOnly.h"
|
||||
|
||||
#include <string.h>
|
||||
#include <list>
|
||||
|
||||
using namespace mozilla;
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
int profiler_current_process_id() { return getpid(); }
|
||||
|
||||
int profiler_current_thread_id() {
|
||||
// glibc doesn't provide a wrapper for gettid().
|
||||
#if defined(__GLIBC__)
|
||||
return static_cast<int>(static_cast<pid_t>(syscall(SYS_gettid)));
|
||||
#else
|
||||
return static_cast<int>(gettid());
|
||||
#endif
|
||||
}
|
||||
|
||||
static int64_t MicrosecondsSince1970() {
|
||||
struct timeval tv;
|
||||
gettimeofday(&tv, NULL);
|
||||
return int64_t(tv.tv_sec) * 1000000 + int64_t(tv.tv_usec);
|
||||
}
|
||||
|
||||
void* GetStackTop(void* aGuess) { return aGuess; }
|
||||
|
||||
static void PopulateRegsFromContext(Registers& aRegs, ucontext_t* aContext) {
|
||||
aRegs.mContext = aContext;
|
||||
mcontext_t& mcontext = aContext->uc_mcontext;
|
||||
|
||||
// Extracting the sample from the context is extremely machine dependent.
|
||||
#if defined(GP_ARCH_x86)
|
||||
aRegs.mPC = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
|
||||
aRegs.mSP = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
|
||||
aRegs.mFP = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
|
||||
aRegs.mLR = 0;
|
||||
#elif defined(GP_ARCH_amd64)
|
||||
aRegs.mPC = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
|
||||
aRegs.mSP = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
|
||||
aRegs.mFP = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
|
||||
aRegs.mLR = 0;
|
||||
#elif defined(GP_ARCH_arm)
|
||||
aRegs.mPC = reinterpret_cast<Address>(mcontext.arm_pc);
|
||||
aRegs.mSP = reinterpret_cast<Address>(mcontext.arm_sp);
|
||||
aRegs.mFP = reinterpret_cast<Address>(mcontext.arm_fp);
|
||||
aRegs.mLR = reinterpret_cast<Address>(mcontext.arm_lr);
|
||||
#elif defined(GP_ARCH_arm64)
|
||||
aRegs.mPC = reinterpret_cast<Address>(mcontext.pc);
|
||||
aRegs.mSP = reinterpret_cast<Address>(mcontext.sp);
|
||||
aRegs.mFP = reinterpret_cast<Address>(mcontext.regs[29]);
|
||||
aRegs.mLR = reinterpret_cast<Address>(mcontext.regs[30]);
|
||||
#elif defined(GP_ARCH_mips64)
|
||||
aRegs.mPC = reinterpret_cast<Address>(mcontext.pc);
|
||||
aRegs.mSP = reinterpret_cast<Address>(mcontext.gregs[29]);
|
||||
aRegs.mFP = reinterpret_cast<Address>(mcontext.gregs[30]);
|
||||
|
||||
#else
|
||||
# error "bad platform"
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(GP_OS_android)
|
||||
# define SYS_tgkill __NR_tgkill
|
||||
#endif
|
||||
|
||||
int tgkill(pid_t tgid, pid_t tid, int signalno) {
|
||||
return syscall(SYS_tgkill, tgid, tid, signalno);
|
||||
}
|
||||
|
||||
class PlatformData {
|
||||
public:
|
||||
explicit PlatformData(int aThreadId) {}
|
||||
|
||||
~PlatformData() {}
|
||||
};
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// BEGIN Sampler target specifics
|
||||
|
||||
// The only way to reliably interrupt a Linux thread and inspect its register
|
||||
// and stack state is by sending a signal to it, and doing the work inside the
|
||||
// signal handler. But we don't want to run much code inside the signal
|
||||
// handler, since POSIX severely restricts what we can do in signal handlers.
|
||||
// So we use a system of semaphores to suspend the thread and allow the
|
||||
// sampler thread to do all the work of unwinding and copying out whatever
|
||||
// data it wants.
|
||||
//
|
||||
// A four-message protocol is used to reliably suspend and later resume the
|
||||
// thread to be sampled (the samplee):
|
||||
//
|
||||
// Sampler (signal sender) thread Samplee (thread to be sampled)
|
||||
//
|
||||
// Prepare the SigHandlerCoordinator
|
||||
// and point sSigHandlerCoordinator at it
|
||||
//
|
||||
// send SIGPROF to samplee ------- MSG 1 ----> (enter signal handler)
|
||||
// wait(mMessage2) Copy register state
|
||||
// into sSigHandlerCoordinator
|
||||
// <------ MSG 2 ----- post(mMessage2)
|
||||
// Samplee is now suspended. wait(mMessage3)
|
||||
// Examine its stack/register
|
||||
// state at leisure
|
||||
//
|
||||
// Release samplee:
|
||||
// post(mMessage3) ------- MSG 3 ----->
|
||||
// wait(mMessage4) Samplee now resumes. Tell
|
||||
// the sampler that we are done.
|
||||
// <------ MSG 4 ------ post(mMessage4)
|
||||
// Now we know the samplee's signal (leave signal handler)
|
||||
// handler has finished using
|
||||
// sSigHandlerCoordinator. We can
|
||||
// safely reuse it for some other thread.
|
||||
//
|
||||
|
||||
// A type used to coordinate between the sampler (signal sending) thread and
|
||||
// the thread currently being sampled (the samplee, which receives the
|
||||
// signals).
|
||||
//
|
||||
// The first message is sent using a SIGPROF signal delivery. The subsequent
|
||||
// three are sent using sem_wait/sem_post pairs. They are named accordingly
|
||||
// in the following struct.
|
||||
struct SigHandlerCoordinator {
|
||||
SigHandlerCoordinator() {
|
||||
PodZero(&mUContext);
|
||||
int r = sem_init(&mMessage2, /* pshared */ 0, 0);
|
||||
r |= sem_init(&mMessage3, /* pshared */ 0, 0);
|
||||
r |= sem_init(&mMessage4, /* pshared */ 0, 0);
|
||||
MOZ_ASSERT(r == 0);
|
||||
}
|
||||
|
||||
~SigHandlerCoordinator() {
|
||||
int r = sem_destroy(&mMessage2);
|
||||
r |= sem_destroy(&mMessage3);
|
||||
r |= sem_destroy(&mMessage4);
|
||||
MOZ_ASSERT(r == 0);
|
||||
}
|
||||
|
||||
sem_t mMessage2; // To sampler: "context is in sSigHandlerCoordinator"
|
||||
sem_t mMessage3; // To samplee: "resume"
|
||||
sem_t mMessage4; // To sampler: "finished with sSigHandlerCoordinator"
|
||||
ucontext_t mUContext; // Context at signal
|
||||
};
|
||||
|
||||
struct SigHandlerCoordinator* Sampler::sSigHandlerCoordinator = nullptr;
|
||||
|
||||
static void SigprofHandler(int aSignal, siginfo_t* aInfo, void* aContext) {
|
||||
// Avoid TSan warning about clobbering errno.
|
||||
int savedErrno = errno;
|
||||
|
||||
MOZ_ASSERT(aSignal == SIGPROF);
|
||||
MOZ_ASSERT(Sampler::sSigHandlerCoordinator);
|
||||
|
||||
// By sending us this signal, the sampler thread has sent us message 1 in
|
||||
// the comment above, with the meaning "|sSigHandlerCoordinator| is ready
|
||||
// for use, please copy your register context into it."
|
||||
Sampler::sSigHandlerCoordinator->mUContext =
|
||||
*static_cast<ucontext_t*>(aContext);
|
||||
|
||||
// Send message 2: tell the sampler thread that the context has been copied
|
||||
// into |sSigHandlerCoordinator->mUContext|. sem_post can never fail by
|
||||
// being interrupted by a signal, so there's no loop around this call.
|
||||
int r = sem_post(&Sampler::sSigHandlerCoordinator->mMessage2);
|
||||
MOZ_ASSERT(r == 0);
|
||||
|
||||
// At this point, the sampler thread assumes we are suspended, so we must
|
||||
// not touch any global state here.
|
||||
|
||||
// Wait for message 3: the sampler thread tells us to resume.
|
||||
while (true) {
|
||||
r = sem_wait(&Sampler::sSigHandlerCoordinator->mMessage3);
|
||||
if (r == -1 && errno == EINTR) {
|
||||
// Interrupted by a signal. Try again.
|
||||
continue;
|
||||
}
|
||||
// We don't expect any other kind of failure
|
||||
MOZ_ASSERT(r == 0);
|
||||
break;
|
||||
}
|
||||
|
||||
// Send message 4: tell the sampler thread that we are finished accessing
|
||||
// |sSigHandlerCoordinator|. After this point it is not safe to touch
|
||||
// |sSigHandlerCoordinator|.
|
||||
r = sem_post(&Sampler::sSigHandlerCoordinator->mMessage4);
|
||||
MOZ_ASSERT(r == 0);
|
||||
|
||||
errno = savedErrno;
|
||||
}
|
||||
|
||||
Sampler::Sampler(PSLockRef aLock)
|
||||
: mMyPid(profiler_current_process_id())
|
||||
// We don't know what the sampler thread's ID will be until it runs, so
|
||||
// set mSamplerTid to a dummy value and fill it in for real in
|
||||
// SuspendAndSampleAndResumeThread().
|
||||
,
|
||||
mSamplerTid(-1) {
|
||||
#if defined(USE_EHABI_STACKWALK)
|
||||
EHABIStackWalkInit();
|
||||
#endif
|
||||
|
||||
// NOTE: We don't initialize LUL here, instead initializing it in
|
||||
// SamplerThread's constructor. This is because with the
|
||||
// profiler_suspend_and_sample_thread entry point, we want to be able to
|
||||
// sample without waiting for LUL to be initialized.
|
||||
|
||||
// Request profiling signals.
|
||||
struct sigaction sa;
|
||||
sa.sa_sigaction = MOZ_SIGNAL_TRAMPOLINE(SigprofHandler);
|
||||
sigemptyset(&sa.sa_mask);
|
||||
sa.sa_flags = SA_RESTART | SA_SIGINFO;
|
||||
if (sigaction(SIGPROF, &sa, &mOldSigprofHandler) != 0) {
|
||||
MOZ_CRASH("Error installing SIGPROF handler in the profiler");
|
||||
}
|
||||
}
|
||||
|
||||
void Sampler::Disable(PSLockRef aLock) {
|
||||
// Restore old signal handler. This is global state so it's important that
|
||||
// we do it now, while gPSMutex is locked.
|
||||
sigaction(SIGPROF, &mOldSigprofHandler, 0);
|
||||
}
|
||||
|
||||
template <typename Func>
|
||||
void Sampler::SuspendAndSampleAndResumeThread(
|
||||
PSLockRef aLock, const RegisteredThread& aRegisteredThread,
|
||||
const Func& aProcessRegs) {
|
||||
// Only one sampler thread can be sampling at once. So we expect to have
|
||||
// complete control over |sSigHandlerCoordinator|.
|
||||
MOZ_ASSERT(!sSigHandlerCoordinator);
|
||||
|
||||
if (mSamplerTid == -1) {
|
||||
mSamplerTid = profiler_current_thread_id();
|
||||
}
|
||||
int sampleeTid = aRegisteredThread.Info()->ThreadId();
|
||||
MOZ_RELEASE_ASSERT(sampleeTid != mSamplerTid);
|
||||
|
||||
//----------------------------------------------------------------//
|
||||
// Suspend the samplee thread and get its context.
|
||||
|
||||
SigHandlerCoordinator coord; // on sampler thread's stack
|
||||
sSigHandlerCoordinator = &coord;
|
||||
|
||||
// Send message 1 to the samplee (the thread to be sampled), by
|
||||
// signalling at it.
|
||||
int r = tgkill(mMyPid, sampleeTid, SIGPROF);
|
||||
MOZ_ASSERT(r == 0);
|
||||
|
||||
// Wait for message 2 from the samplee, indicating that the context
|
||||
// is available and that the thread is suspended.
|
||||
while (true) {
|
||||
r = sem_wait(&sSigHandlerCoordinator->mMessage2);
|
||||
if (r == -1 && errno == EINTR) {
|
||||
// Interrupted by a signal. Try again.
|
||||
continue;
|
||||
}
|
||||
// We don't expect any other kind of failure.
|
||||
MOZ_ASSERT(r == 0);
|
||||
break;
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------//
|
||||
// Sample the target thread.
|
||||
|
||||
// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
|
||||
//
|
||||
// The profiler's "critical section" begins here. In the critical section,
|
||||
// we must not do any dynamic memory allocation, nor try to acquire any lock
|
||||
// or any other unshareable resource. This is because the thread to be
|
||||
// sampled has been suspended at some entirely arbitrary point, and we have
|
||||
// no idea which unsharable resources (locks, essentially) it holds. So any
|
||||
// attempt to acquire any lock, including the implied locks used by the
|
||||
// malloc implementation, risks deadlock. This includes TimeStamp::Now(),
|
||||
// which gets a lock on Windows.
|
||||
|
||||
// The samplee thread is now frozen and sSigHandlerCoordinator->mUContext is
|
||||
// valid. We can poke around in it and unwind its stack as we like.
|
||||
|
||||
// Extract the current register values.
|
||||
Registers regs;
|
||||
PopulateRegsFromContext(regs, &sSigHandlerCoordinator->mUContext);
|
||||
aProcessRegs(regs);
|
||||
|
||||
//----------------------------------------------------------------//
|
||||
// Resume the target thread.
|
||||
|
||||
// Send message 3 to the samplee, which tells it to resume.
|
||||
r = sem_post(&sSigHandlerCoordinator->mMessage3);
|
||||
MOZ_ASSERT(r == 0);
|
||||
|
||||
// Wait for message 4 from the samplee, which tells us that it has
|
||||
// finished with |sSigHandlerCoordinator|.
|
||||
while (true) {
|
||||
r = sem_wait(&sSigHandlerCoordinator->mMessage4);
|
||||
if (r == -1 && errno == EINTR) {
|
||||
continue;
|
||||
}
|
||||
MOZ_ASSERT(r == 0);
|
||||
break;
|
||||
}
|
||||
|
||||
// The profiler's critical section ends here. After this point, none of the
|
||||
// critical section limitations documented above apply.
|
||||
//
|
||||
// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
|
||||
|
||||
// This isn't strictly necessary, but doing so does help pick up anomalies
|
||||
// in which the signal handler is running when it shouldn't be.
|
||||
sSigHandlerCoordinator = nullptr;
|
||||
}
|
||||
|
||||
// END Sampler target specifics
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// BEGIN SamplerThread target specifics
|
||||
|
||||
static void* ThreadEntry(void* aArg) {
|
||||
auto thread = static_cast<SamplerThread*>(aArg);
|
||||
thread->Run();
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
SamplerThread::SamplerThread(PSLockRef aLock, uint32_t aActivityGeneration,
|
||||
double aIntervalMilliseconds)
|
||||
: Sampler(aLock),
|
||||
mActivityGeneration(aActivityGeneration),
|
||||
mIntervalMicroseconds(
|
||||
std::max(1, int(floor(aIntervalMilliseconds * 1000 + 0.5)))) {
|
||||
#if defined(USE_LUL_STACKWALK)
|
||||
lul::LUL* lul = CorePS::Lul(aLock);
|
||||
if (!lul) {
|
||||
CorePS::SetLul(aLock, MakeUnique<lul::LUL>(logging_sink_for_LUL));
|
||||
// Read all the unwind info currently available.
|
||||
lul = CorePS::Lul(aLock);
|
||||
read_procmaps(lul);
|
||||
|
||||
// Switch into unwind mode. After this point, we can't add or remove any
|
||||
// unwind info to/from this LUL instance. The only thing we can do with
|
||||
// it is Unwind() calls.
|
||||
lul->EnableUnwinding();
|
||||
|
||||
// Has a test been requested?
|
||||
if (getenv("MOZ_PROFILER_LUL_TEST")) {
|
||||
int nTests = 0, nTestsPassed = 0;
|
||||
RunLulUnitTests(&nTests, &nTestsPassed, lul);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
// Start the sampling thread. It repeatedly sends a SIGPROF signal. Sending
|
||||
// the signal ourselves instead of relying on itimer provides much better
|
||||
// accuracy.
|
||||
if (pthread_create(&mThread, nullptr, ThreadEntry, this) != 0) {
|
||||
MOZ_CRASH("pthread_create failed");
|
||||
}
|
||||
}
|
||||
|
||||
SamplerThread::~SamplerThread() { pthread_join(mThread, nullptr); }
|
||||
|
||||
void SamplerThread::SleepMicro(uint32_t aMicroseconds) {
|
||||
if (aMicroseconds >= 1000000) {
|
||||
// Use usleep for larger intervals, because the nanosleep
|
||||
// code below only supports intervals < 1 second.
|
||||
MOZ_ALWAYS_TRUE(!::usleep(aMicroseconds));
|
||||
return;
|
||||
}
|
||||
|
||||
struct timespec ts;
|
||||
ts.tv_sec = 0;
|
||||
ts.tv_nsec = aMicroseconds * 1000UL;
|
||||
|
||||
int rv = ::nanosleep(&ts, &ts);
|
||||
|
||||
while (rv != 0 && errno == EINTR) {
|
||||
// Keep waiting in case of interrupt.
|
||||
// nanosleep puts the remaining time back into ts.
|
||||
rv = ::nanosleep(&ts, &ts);
|
||||
}
|
||||
|
||||
MOZ_ASSERT(!rv, "nanosleep call failed");
|
||||
}
|
||||
|
||||
void SamplerThread::Stop(PSLockRef aLock) {
|
||||
// Restore old signal handler. This is global state so it's important that
|
||||
// we do it now, while gPSMutex is locked. It's safe to do this now even
|
||||
// though this SamplerThread is still alive, because the next time the main
|
||||
// loop of Run() iterates it won't get past the mActivityGeneration check,
|
||||
// and so won't send any signals.
|
||||
Sampler::Disable(aLock);
|
||||
}
|
||||
|
||||
// END SamplerThread target specifics
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#if defined(GP_OS_linux)
|
||||
|
||||
// We use pthread_atfork() to temporarily disable signal delivery during any
|
||||
// fork() call. Without that, fork() can be repeatedly interrupted by signal
|
||||
// delivery, requiring it to be repeatedly restarted, which can lead to *long*
|
||||
// delays. See bug 837390.
|
||||
//
|
||||
// We provide no paf_child() function to run in the child after forking. This
|
||||
// is fine because we always immediately exec() after fork(), and exec()
|
||||
// clobbers all process state. (At one point we did have a paf_child()
|
||||
// function, but it caused problems related to locking gPSMutex. See bug
|
||||
// 1348374.)
|
||||
//
|
||||
// Unfortunately all this is only doable on non-Android because Bionic doesn't
|
||||
// have pthread_atfork.
|
||||
|
||||
// In the parent, before the fork, record IsPaused, and then pause.
|
||||
static void paf_prepare() {
|
||||
MOZ_RELEASE_ASSERT(CorePS::Exists());
|
||||
|
||||
PSAutoLock lock;
|
||||
|
||||
if (ActivePS::Exists(lock)) {
|
||||
ActivePS::SetWasPaused(lock, ActivePS::IsPaused(lock));
|
||||
ActivePS::SetIsPaused(lock, true);
|
||||
}
|
||||
}
|
||||
|
||||
// In the parent, after the fork, return IsPaused to the pre-fork state.
|
||||
static void paf_parent() {
|
||||
MOZ_RELEASE_ASSERT(CorePS::Exists());
|
||||
|
||||
PSAutoLock lock;
|
||||
|
||||
if (ActivePS::Exists(lock)) {
|
||||
ActivePS::SetIsPaused(lock, ActivePS::WasPaused(lock));
|
||||
ActivePS::SetWasPaused(lock, false);
|
||||
}
|
||||
}
|
||||
|
||||
static void PlatformInit(PSLockRef aLock) {
|
||||
// Set up the fork handlers.
|
||||
pthread_atfork(paf_prepare, paf_parent, nullptr);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static void PlatformInit(PSLockRef aLock) {}
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_NATIVE_UNWIND)
|
||||
// Context used by synchronous samples. It's safe to have a single one because
|
||||
// only one synchronous sample can be taken at a time (due to
|
||||
// profiler_get_backtrace()'s PSAutoLock).
|
||||
// ucontext_t sSyncUContext;
|
||||
|
||||
void Registers::SyncPopulate() {
|
||||
// TODO port getcontext from breakpad, if profiler_get_backtrace is needed.
|
||||
MOZ_CRASH("profiler_get_backtrace() unsupported");
|
||||
// if (!getcontext(&sSyncUContext)) {
|
||||
// PopulateRegsFromContext(*this, &sSyncUContext);
|
||||
// }
|
||||
}
|
||||
#endif
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
|
@ -1,200 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include <unistd.h>
|
||||
#include <sys/mman.h>
|
||||
#include <mach/mach_init.h>
|
||||
#include <mach-o/getsect.h>
|
||||
|
||||
#include <AvailabilityMacros.h>
|
||||
|
||||
#include <pthread.h>
|
||||
#include <semaphore.h>
|
||||
#include <signal.h>
|
||||
#include <libkern/OSAtomic.h>
|
||||
#include <mach/mach.h>
|
||||
#include <mach/semaphore.h>
|
||||
#include <mach/task.h>
|
||||
#include <mach/thread_act.h>
|
||||
#include <mach/vm_statistics.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/resource.h>
|
||||
#include <sys/syscall.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <errno.h>
|
||||
#include <math.h>
|
||||
|
||||
// this port is based off of v8 svn revision 9837
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
int profiler_current_process_id() { return getpid(); }
|
||||
|
||||
int profiler_current_thread_id() {
|
||||
return static_cast<int>(static_cast<pid_t>(syscall(SYS_thread_selfid)));
|
||||
}
|
||||
|
||||
static int64_t MicrosecondsSince1970() {
|
||||
struct timeval tv;
|
||||
gettimeofday(&tv, NULL);
|
||||
return int64_t(tv.tv_sec) * 1000000 + int64_t(tv.tv_usec);
|
||||
}
|
||||
|
||||
void* GetStackTop(void* aGuess) {
|
||||
pthread_t thread = pthread_self();
|
||||
return pthread_get_stackaddr_np(thread);
|
||||
}
|
||||
|
||||
class PlatformData {
|
||||
public:
|
||||
explicit PlatformData(int aThreadId) : mProfiledThread(mach_thread_self()) {}
|
||||
|
||||
~PlatformData() {
|
||||
// Deallocate Mach port for thread.
|
||||
mach_port_deallocate(mach_task_self(), mProfiledThread);
|
||||
}
|
||||
|
||||
thread_act_t ProfiledThread() { return mProfiledThread; }
|
||||
|
||||
private:
|
||||
// Note: for mProfiledThread Mach primitives are used instead of pthread's
|
||||
// because the latter doesn't provide thread manipulation primitives required.
|
||||
// For details, consult "Mac OS X Internals" book, Section 7.3.
|
||||
thread_act_t mProfiledThread;
|
||||
};
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// BEGIN Sampler target specifics
|
||||
|
||||
Sampler::Sampler(PSLockRef aLock) {}
|
||||
|
||||
void Sampler::Disable(PSLockRef aLock) {}
|
||||
|
||||
template <typename Func>
|
||||
void Sampler::SuspendAndSampleAndResumeThread(
|
||||
PSLockRef aLock, const RegisteredThread& aRegisteredThread,
|
||||
const Func& aProcessRegs) {
|
||||
thread_act_t samplee_thread =
|
||||
aRegisteredThread.GetPlatformData()->ProfiledThread();
|
||||
|
||||
//----------------------------------------------------------------//
|
||||
// Suspend the samplee thread and get its context.
|
||||
|
||||
// We're using thread_suspend on OS X because pthread_kill (which is what we
|
||||
// at one time used on Linux) has less consistent performance and causes
|
||||
// strange crashes, see bug 1166778 and bug 1166808. thread_suspend
|
||||
// is also just a lot simpler to use.
|
||||
|
||||
if (KERN_SUCCESS != thread_suspend(samplee_thread)) {
|
||||
return;
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------//
|
||||
// Sample the target thread.
|
||||
|
||||
// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
|
||||
//
|
||||
// The profiler's "critical section" begins here. We must be very careful
|
||||
// what we do here, or risk deadlock. See the corresponding comment in
|
||||
// platform-linux-android.cpp for details.
|
||||
|
||||
thread_state_flavor_t flavor = x86_THREAD_STATE64;
|
||||
x86_thread_state64_t state;
|
||||
mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
|
||||
#if __DARWIN_UNIX03
|
||||
# define REGISTER_FIELD(name) __r##name
|
||||
#else
|
||||
# define REGISTER_FIELD(name) r##name
|
||||
#endif // __DARWIN_UNIX03
|
||||
|
||||
if (thread_get_state(samplee_thread, flavor,
|
||||
reinterpret_cast<natural_t*>(&state),
|
||||
&count) == KERN_SUCCESS) {
|
||||
Registers regs;
|
||||
regs.mPC = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
|
||||
regs.mSP = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
|
||||
regs.mFP = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
|
||||
regs.mLR = 0;
|
||||
|
||||
aProcessRegs(regs);
|
||||
}
|
||||
|
||||
#undef REGISTER_FIELD
|
||||
|
||||
//----------------------------------------------------------------//
|
||||
// Resume the target thread.
|
||||
|
||||
thread_resume(samplee_thread);
|
||||
|
||||
// The profiler's critical section ends here.
|
||||
//
|
||||
// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
|
||||
}
|
||||
|
||||
// END Sampler target specifics
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// BEGIN SamplerThread target specifics
|
||||
|
||||
static void* ThreadEntry(void* aArg) {
|
||||
auto thread = static_cast<SamplerThread*>(aArg);
|
||||
thread->Run();
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
SamplerThread::SamplerThread(PSLockRef aLock, uint32_t aActivityGeneration,
|
||||
double aIntervalMilliseconds)
|
||||
: Sampler(aLock),
|
||||
mActivityGeneration(aActivityGeneration),
|
||||
mIntervalMicroseconds(
|
||||
std::max(1, int(floor(aIntervalMilliseconds * 1000 + 0.5)))),
|
||||
mThread{nullptr} {
|
||||
pthread_attr_t* attr_ptr = nullptr;
|
||||
if (pthread_create(&mThread, attr_ptr, ThreadEntry, this) != 0) {
|
||||
MOZ_CRASH("pthread_create failed");
|
||||
}
|
||||
}
|
||||
|
||||
SamplerThread::~SamplerThread() { pthread_join(mThread, nullptr); }
|
||||
|
||||
void SamplerThread::SleepMicro(uint32_t aMicroseconds) {
|
||||
usleep(aMicroseconds);
|
||||
// FIXME: the OSX 10.12 page for usleep says "The usleep() function is
|
||||
// obsolescent. Use nanosleep(2) instead." This implementation could be
|
||||
// merged with the linux-android version. Also, this doesn't handle the
|
||||
// case where the usleep call is interrupted by a signal.
|
||||
}
|
||||
|
||||
void SamplerThread::Stop(PSLockRef aLock) { Sampler::Disable(aLock); }
|
||||
|
||||
// END SamplerThread target specifics
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
|
||||
static void PlatformInit(PSLockRef aLock) {}
|
||||
|
||||
#if defined(HAVE_NATIVE_UNWIND)
|
||||
void Registers::SyncPopulate() {
|
||||
asm(
|
||||
// Compute caller's %rsp by adding to %rbp:
|
||||
// 8 bytes for previous %rbp, 8 bytes for return address
|
||||
"leaq 0x10(%%rbp), %0\n\t"
|
||||
// Dereference %rbp to get previous %rbp
|
||||
"movq (%%rbp), %1\n\t"
|
||||
: "=r"(mSP), "=r"(mFP));
|
||||
mPC = reinterpret_cast<Address>(
|
||||
__builtin_extract_return_addr(__builtin_return_address(0)));
|
||||
mLR = 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
|
@ -1,337 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
// Copyright (c) 2006-2011 The Chromium Authors. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions
|
||||
// are met:
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in
|
||||
// the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google, Inc. nor the names of its contributors
|
||||
// may be used to endorse or promote products derived from this
|
||||
// software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
||||
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||||
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
||||
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
|
||||
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
|
||||
// OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
// SUCH DAMAGE.
|
||||
|
||||
#include <windows.h>
|
||||
#include <mmsystem.h>
|
||||
#include <process.h>
|
||||
|
||||
#include "nsWindowsDllInterceptor.h"
|
||||
#include "mozilla/StackWalk_windows.h"
|
||||
#include "mozilla/WindowsVersion.h"
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
int profiler_current_process_id() { return _getpid(); }
|
||||
|
||||
int profiler_current_thread_id() {
|
||||
DWORD threadId = GetCurrentThreadId();
|
||||
MOZ_ASSERT(threadId <= INT32_MAX, "native thread ID is > INT32_MAX");
|
||||
return int(threadId);
|
||||
}
|
||||
|
||||
static int64_t MicrosecondsSince1970() {
|
||||
int64_t prt;
|
||||
FILETIME ft;
|
||||
SYSTEMTIME st;
|
||||
|
||||
GetSystemTime(&st);
|
||||
SystemTimeToFileTime(&st, &ft);
|
||||
static_assert(sizeof(ft) == sizeof(prt), "Expect FILETIME to be 64 bits");
|
||||
memcpy(&prt, &ft, sizeof(prt));
|
||||
const int64_t epochBias = 116444736000000000LL;
|
||||
prt = (prt - epochBias) / 10;
|
||||
|
||||
return prt;
|
||||
}
|
||||
|
||||
void* GetStackTop(void* aGuess) {
|
||||
PNT_TIB pTib = reinterpret_cast<PNT_TIB>(NtCurrentTeb());
|
||||
return reinterpret_cast<void*>(pTib->StackBase);
|
||||
}
|
||||
|
||||
static void PopulateRegsFromContext(Registers& aRegs, CONTEXT* aContext) {
|
||||
#if defined(GP_ARCH_amd64)
|
||||
aRegs.mPC = reinterpret_cast<Address>(aContext->Rip);
|
||||
aRegs.mSP = reinterpret_cast<Address>(aContext->Rsp);
|
||||
aRegs.mFP = reinterpret_cast<Address>(aContext->Rbp);
|
||||
#elif defined(GP_ARCH_x86)
|
||||
aRegs.mPC = reinterpret_cast<Address>(aContext->Eip);
|
||||
aRegs.mSP = reinterpret_cast<Address>(aContext->Esp);
|
||||
aRegs.mFP = reinterpret_cast<Address>(aContext->Ebp);
|
||||
#elif defined(GP_ARCH_arm64)
|
||||
aRegs.mPC = reinterpret_cast<Address>(aContext->Pc);
|
||||
aRegs.mSP = reinterpret_cast<Address>(aContext->Sp);
|
||||
aRegs.mFP = reinterpret_cast<Address>(aContext->Fp);
|
||||
#else
|
||||
# error "bad arch"
|
||||
#endif
|
||||
aRegs.mLR = 0;
|
||||
}
|
||||
|
||||
class PlatformData {
|
||||
public:
|
||||
// Get a handle to the calling thread. This is the thread that we are
|
||||
// going to profile. We need to make a copy of the handle because we are
|
||||
// going to use it in the sampler thread. Using GetThreadHandle() will
|
||||
// not work in this case. We're using OpenThread because DuplicateHandle
|
||||
// for some reason doesn't work in Chrome's sandbox.
|
||||
explicit PlatformData(int aThreadId)
|
||||
: mProfiledThread(OpenThread(THREAD_GET_CONTEXT | THREAD_SUSPEND_RESUME |
|
||||
THREAD_QUERY_INFORMATION,
|
||||
false, aThreadId)) {}
|
||||
|
||||
~PlatformData() {
|
||||
if (mProfiledThread != nullptr) {
|
||||
CloseHandle(mProfiledThread);
|
||||
mProfiledThread = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
HANDLE ProfiledThread() { return mProfiledThread; }
|
||||
|
||||
private:
|
||||
HANDLE mProfiledThread;
|
||||
};
|
||||
|
||||
#if defined(USE_MOZ_STACK_WALK)
|
||||
HANDLE
|
||||
GetThreadHandle(PlatformData* aData) { return aData->ProfiledThread(); }
|
||||
#endif
|
||||
|
||||
static const HANDLE kNoThread = INVALID_HANDLE_VALUE;
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// BEGIN Sampler target specifics
|
||||
|
||||
Sampler::Sampler(PSLockRef aLock) {}
|
||||
|
||||
void Sampler::Disable(PSLockRef aLock) {}
|
||||
|
||||
template <typename Func>
|
||||
void Sampler::SuspendAndSampleAndResumeThread(
|
||||
PSLockRef aLock, const RegisteredThread& aRegisteredThread,
|
||||
const Func& aProcessRegs) {
|
||||
HANDLE profiled_thread =
|
||||
aRegisteredThread.GetPlatformData()->ProfiledThread();
|
||||
if (profiled_thread == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Context used for sampling the register state of the profiled thread.
|
||||
CONTEXT context;
|
||||
memset(&context, 0, sizeof(context));
|
||||
|
||||
//----------------------------------------------------------------//
|
||||
// Suspend the samplee thread and get its context.
|
||||
|
||||
static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
|
||||
if (SuspendThread(profiled_thread) == kSuspendFailed) {
|
||||
return;
|
||||
}
|
||||
|
||||
// SuspendThread is asynchronous, so the thread may still be running.
|
||||
// Call GetThreadContext first to ensure the thread is really suspended.
|
||||
// See https://blogs.msdn.microsoft.com/oldnewthing/20150205-00/?p=44743.
|
||||
|
||||
// Using only CONTEXT_CONTROL is faster but on 64-bit it causes crashes in
|
||||
// RtlVirtualUnwind (see bug 1120126) so we set all the flags.
|
||||
#if defined(GP_ARCH_amd64)
|
||||
context.ContextFlags = CONTEXT_FULL;
|
||||
#else
|
||||
context.ContextFlags = CONTEXT_CONTROL;
|
||||
#endif
|
||||
if (!GetThreadContext(profiled_thread, &context)) {
|
||||
ResumeThread(profiled_thread);
|
||||
return;
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------//
|
||||
// Sample the target thread.
|
||||
|
||||
// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
|
||||
//
|
||||
// The profiler's "critical section" begins here. We must be very careful
|
||||
// what we do here, or risk deadlock. See the corresponding comment in
|
||||
// platform-linux-android.cpp for details.
|
||||
|
||||
Registers regs;
|
||||
PopulateRegsFromContext(regs, &context);
|
||||
aProcessRegs(regs);
|
||||
|
||||
//----------------------------------------------------------------//
|
||||
// Resume the target thread.
|
||||
|
||||
ResumeThread(profiled_thread);
|
||||
|
||||
// The profiler's critical section ends here.
|
||||
//
|
||||
// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
|
||||
}
|
||||
|
||||
// END Sampler target specifics
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// BEGIN SamplerThread target specifics
|
||||
|
||||
static unsigned int __stdcall ThreadEntry(void* aArg) {
|
||||
auto thread = static_cast<SamplerThread*>(aArg);
|
||||
thread->Run();
|
||||
return 0;
|
||||
}
|
||||
|
||||
SamplerThread::SamplerThread(PSLockRef aLock, uint32_t aActivityGeneration,
|
||||
double aIntervalMilliseconds)
|
||||
: Sampler(aLock),
|
||||
mActivityGeneration(aActivityGeneration),
|
||||
mIntervalMicroseconds(
|
||||
std::max(1, int(floor(aIntervalMilliseconds * 1000 + 0.5)))) {
|
||||
// By default we'll not adjust the timer resolution which tends to be
|
||||
// around 16ms. However, if the requested interval is sufficiently low
|
||||
// we'll try to adjust the resolution to match.
|
||||
if (mIntervalMicroseconds < 10 * 1000) {
|
||||
::timeBeginPeriod(mIntervalMicroseconds / 1000);
|
||||
}
|
||||
|
||||
// Create a new thread. It is important to use _beginthreadex() instead of
|
||||
// the Win32 function CreateThread(), because the CreateThread() does not
|
||||
// initialize thread-specific structures in the C runtime library.
|
||||
mThread = reinterpret_cast<HANDLE>(_beginthreadex(nullptr,
|
||||
/* stack_size */ 0,
|
||||
ThreadEntry, this,
|
||||
/* initflag */ 0, nullptr));
|
||||
if (mThread == 0) {
|
||||
MOZ_CRASH("_beginthreadex failed");
|
||||
}
|
||||
}
|
||||
|
||||
SamplerThread::~SamplerThread() {
|
||||
WaitForSingleObject(mThread, INFINITE);
|
||||
|
||||
// Close our own handle for the thread.
|
||||
if (mThread != kNoThread) {
|
||||
CloseHandle(mThread);
|
||||
}
|
||||
}
|
||||
|
||||
void SamplerThread::SleepMicro(uint32_t aMicroseconds) {
|
||||
// For now, keep the old behaviour of minimum Sleep(1), even for
|
||||
// smaller-than-usual sleeps after an overshoot, unless the user has
|
||||
// explicitly opted into a sub-millisecond profiler interval.
|
||||
if (mIntervalMicroseconds >= 1000) {
|
||||
::Sleep(std::max(1u, aMicroseconds / 1000));
|
||||
} else {
|
||||
TimeStamp start = TimeStamp::Now();
|
||||
TimeStamp end = start + TimeDuration::FromMicroseconds(aMicroseconds);
|
||||
|
||||
// First, sleep for as many whole milliseconds as possible.
|
||||
if (aMicroseconds >= 1000) {
|
||||
::Sleep(aMicroseconds / 1000);
|
||||
}
|
||||
|
||||
// Then, spin until enough time has passed.
|
||||
while (TimeStamp::Now() < end) {
|
||||
YieldProcessor();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void SamplerThread::Stop(PSLockRef aLock) {
|
||||
// Disable any timer resolution changes we've made. Do it now while
|
||||
// gPSMutex is locked, i.e. before any other SamplerThread can be created
|
||||
// and call ::timeBeginPeriod().
|
||||
//
|
||||
// It's safe to do this now even though this SamplerThread is still alive,
|
||||
// because the next time the main loop of Run() iterates it won't get past
|
||||
// the mActivityGeneration check, and so it won't make any more ::Sleep()
|
||||
// calls.
|
||||
if (mIntervalMicroseconds < 10 * 1000) {
|
||||
::timeEndPeriod(mIntervalMicroseconds / 1000);
|
||||
}
|
||||
|
||||
Sampler::Disable(aLock);
|
||||
}
|
||||
|
||||
// END SamplerThread target specifics
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
|
||||
static void PlatformInit(PSLockRef aLock) {}
|
||||
|
||||
#if defined(HAVE_NATIVE_UNWIND)
|
||||
void Registers::SyncPopulate() {
|
||||
CONTEXT context;
|
||||
RtlCaptureContext(&context);
|
||||
PopulateRegsFromContext(*this, &context);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(GP_PLAT_amd64_windows)
|
||||
static WindowsDllInterceptor NtDllIntercept;
|
||||
|
||||
typedef NTSTATUS(NTAPI* LdrUnloadDll_func)(HMODULE module);
|
||||
static WindowsDllInterceptor::FuncHookType<LdrUnloadDll_func> stub_LdrUnloadDll;
|
||||
|
||||
static NTSTATUS NTAPI patched_LdrUnloadDll(HMODULE module) {
|
||||
// Prevent the stack walker from suspending this thread when LdrUnloadDll
|
||||
// holds the RtlLookupFunctionEntry lock.
|
||||
AutoSuppressStackWalking suppress;
|
||||
return stub_LdrUnloadDll(module);
|
||||
}
|
||||
|
||||
// These pointers are disguised as PVOID to avoid pulling in obscure headers
|
||||
typedef PVOID(WINAPI* LdrResolveDelayLoadedAPI_func)(
|
||||
PVOID ParentModuleBase, PVOID DelayloadDescriptor, PVOID FailureDllHook,
|
||||
PVOID FailureSystemHook, PVOID ThunkAddress, ULONG Flags);
|
||||
static WindowsDllInterceptor::FuncHookType<LdrResolveDelayLoadedAPI_func>
|
||||
stub_LdrResolveDelayLoadedAPI;
|
||||
|
||||
static PVOID WINAPI patched_LdrResolveDelayLoadedAPI(
|
||||
PVOID ParentModuleBase, PVOID DelayloadDescriptor, PVOID FailureDllHook,
|
||||
PVOID FailureSystemHook, PVOID ThunkAddress, ULONG Flags) {
|
||||
// Prevent the stack walker from suspending this thread when
|
||||
// LdrResolveDelayLoadAPI holds the RtlLookupFunctionEntry lock.
|
||||
AutoSuppressStackWalking suppress;
|
||||
return stub_LdrResolveDelayLoadedAPI(ParentModuleBase, DelayloadDescriptor,
|
||||
FailureDllHook, FailureSystemHook,
|
||||
ThunkAddress, Flags);
|
||||
}
|
||||
|
||||
MFBT_API void InitializeWin64ProfilerHooks() {
|
||||
// This function could be called by both profilers, but we only want to run
|
||||
// it once.
|
||||
static bool ran = false;
|
||||
if (ran) {
|
||||
return;
|
||||
}
|
||||
ran = true;
|
||||
|
||||
NtDllIntercept.Init("ntdll.dll");
|
||||
stub_LdrUnloadDll.Set(NtDllIntercept, "LdrUnloadDll", &patched_LdrUnloadDll);
|
||||
if (IsWin8OrLater()) { // LdrResolveDelayLoadedAPI was introduced in Win8
|
||||
stub_LdrResolveDelayLoadedAPI.Set(NtDllIntercept,
|
||||
"LdrResolveDelayLoadedAPI",
|
||||
&patched_LdrResolveDelayLoadedAPI);
|
||||
}
|
||||
}
|
||||
#endif // defined(GP_PLAT_amd64_windows)
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -1,136 +0,0 @@
|
|||
// Copyright (c) 2006-2011 The Chromium Authors. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions
|
||||
// are met:
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in
|
||||
// the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google, Inc. nor the names of its contributors
|
||||
// may be used to endorse or promote products derived from this
|
||||
// software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
||||
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||||
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
||||
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
|
||||
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
|
||||
// OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
// SUCH DAMAGE.
|
||||
|
||||
#ifndef TOOLS_PLATFORM_H_
|
||||
#define TOOLS_PLATFORM_H_
|
||||
|
||||
#include "PlatformMacros.h"
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#include "mozilla/Logging.h"
|
||||
#include "mozilla/PlatformMutex.h"
|
||||
#include "mozilla/UniquePtr.h"
|
||||
#include "mozilla/Vector.h"
|
||||
|
||||
#include <functional>
|
||||
#include <stdint.h>
|
||||
#include <string>
|
||||
|
||||
bool BaseProfilerLogTest(int aLevelToTest);
|
||||
|
||||
// These are for MOZ_BASE_PROFILER_LOGGING and above. It's the default logging
|
||||
// level for the profiler, and should be used sparingly.
|
||||
#define LOG_TEST BaseProfilerLogTest(3)
|
||||
#define LOG(arg, ...) \
|
||||
do { \
|
||||
if (LOG_TEST) { \
|
||||
printf("[I %d/%d] " arg "\n", profiler_current_process_id(), \
|
||||
profiler_current_thread_id(), ##__VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
// These are for MOZ_BASE_PROFILER_DEBUG_LOGGING. It should be used for logging
|
||||
// that is somewhat more verbose than LOG.
|
||||
#define DEBUG_LOG_TEST BaseProfilerLogTest(4)
|
||||
#define DEBUG_LOG(arg, ...) \
|
||||
do { \
|
||||
if (DEBUG_LOG_TEST) { \
|
||||
printf("[D %d/%d] " arg "\n", profiler_current_process_id(), \
|
||||
profiler_current_thread_id(), ##__VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
// These are for MOZ_BASE_PROFILER_VERBOSE_LOGGING. It should be used for
|
||||
// logging that is somewhat more verbose than DEBUG_LOG.
|
||||
#define VERBOSE_LOG_TEST BaseProfilerLogTest(5)
|
||||
#define VERBOSE_LOG(arg, ...) \
|
||||
do { \
|
||||
if (VERBOSE_LOG_TEST) { \
|
||||
printf("[V %d/%d] " arg "\n", profiler_current_process_id(), \
|
||||
profiler_current_thread_id(), ##__VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
namespace mozilla {
|
||||
|
||||
class JSONWriter;
|
||||
|
||||
namespace baseprofiler {
|
||||
|
||||
// Thin shell around mozglue PlatformMutex, for Base Profiler internal use.
|
||||
// Does not preserve behavior in JS record/replay.
|
||||
class PSMutex : private mozilla::detail::MutexImpl {
|
||||
public:
|
||||
PSMutex()
|
||||
: mozilla::detail::MutexImpl(
|
||||
mozilla::recordreplay::Behavior::DontPreserve) {}
|
||||
void Lock() { mozilla::detail::MutexImpl::lock(); }
|
||||
void Unlock() { mozilla::detail::MutexImpl::unlock(); }
|
||||
};
|
||||
|
||||
typedef uint8_t* Address;
|
||||
|
||||
class PlatformData;
|
||||
|
||||
// We can't new/delete the type safely without defining it
|
||||
// (-Wdelete-incomplete). Use these to hide the details from clients.
|
||||
struct PlatformDataDestructor {
|
||||
void operator()(PlatformData*);
|
||||
};
|
||||
|
||||
typedef UniquePtr<PlatformData, PlatformDataDestructor> UniquePlatformData;
|
||||
UniquePlatformData AllocPlatformData(int aThreadId);
|
||||
|
||||
// Convert the array of strings to a bitfield.
|
||||
uint32_t ParseFeaturesFromStringArray(const char** aFeatures,
|
||||
uint32_t aFeatureCount,
|
||||
bool aIsStartup = false);
|
||||
|
||||
void profiler_get_profile_json_into_lazily_allocated_buffer(
|
||||
const std::function<char*(size_t)>& aAllocator, double aSinceTime,
|
||||
bool aIsShuttingDown);
|
||||
|
||||
// Flags to conveniently track various JS features.
|
||||
enum class JSSamplingFlags {
|
||||
StackSampling = 0x1,
|
||||
TrackOptimizations = 0x2,
|
||||
TraceLogging = 0x4
|
||||
};
|
||||
|
||||
// Record an exit profile from a child process.
|
||||
void profiler_received_exit_profile(const std::string& aExitProfile);
|
||||
|
||||
// Extract all received exit profiles that have not yet expired (i.e., they
|
||||
// still intersect with this process' buffer range).
|
||||
Vector<std::string> profiler_move_exit_profiles();
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif /* ndef TOOLS_PLATFORM_H_ */
|
|
@ -1,849 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include "BaseProfilerSharedLibraries.h"
|
||||
|
||||
# define PATH_MAX_TOSTRING(x) # x
|
||||
# define PATH_MAX_STRING(x) PATH_MAX_TOSTRING(x)
|
||||
# include <stdlib.h>
|
||||
# include <stdio.h>
|
||||
# include <string.h>
|
||||
# include <limits.h>
|
||||
# include <unistd.h>
|
||||
# include <fstream>
|
||||
# include "platform.h"
|
||||
# include "mozilla/Sprintf.h"
|
||||
# include "mozilla/Unused.h"
|
||||
|
||||
# include <algorithm>
|
||||
# include <arpa/inet.h>
|
||||
# include <dlfcn.h>
|
||||
# include <elf.h>
|
||||
# include <fcntl.h>
|
||||
# include <features.h>
|
||||
# include <sys/mman.h>
|
||||
# include <sys/stat.h>
|
||||
# include <sys/types.h>
|
||||
# include <vector>
|
||||
|
||||
# if defined(GP_OS_linux)
|
||||
# include <link.h> // dl_phdr_info
|
||||
# elif defined(GP_OS_android)
|
||||
# include "AutoObjectMapper.h"
|
||||
# include "ElfLoader.h" // dl_phdr_info
|
||||
extern "C" MOZ_EXPORT __attribute__((weak)) int dl_iterate_phdr(
|
||||
int (*callback)(struct dl_phdr_info* info, size_t size, void* data),
|
||||
void* data);
|
||||
# else
|
||||
# error "Unexpected configuration"
|
||||
# endif
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Starting imports from toolkit/crashreporter/google-breakpad/, as needed by
|
||||
// this file when moved to mozglue.
|
||||
|
||||
// Imported from
|
||||
// toolkit/crashreporter/google-breakpad/src/common/memory_range.h.
|
||||
// A lightweight wrapper with a pointer and a length to encapsulate a contiguous
|
||||
// range of memory. It provides helper methods for checked access of a subrange
|
||||
// of the memory. Its implemementation does not allocate memory or call into
|
||||
// libc functions, and is thus safer to use in a crashed environment.
|
||||
class MemoryRange {
|
||||
public:
|
||||
MemoryRange() : data_(NULL), length_(0) {}
|
||||
|
||||
MemoryRange(const void* data, size_t length) { Set(data, length); }
|
||||
|
||||
// Returns true if this memory range contains no data.
|
||||
bool IsEmpty() const {
|
||||
// Set() guarantees that |length_| is zero if |data_| is NULL.
|
||||
return length_ == 0;
|
||||
}
|
||||
|
||||
// Resets to an empty range.
|
||||
void Reset() {
|
||||
data_ = NULL;
|
||||
length_ = 0;
|
||||
}
|
||||
|
||||
// Sets this memory range to point to |data| and its length to |length|.
|
||||
void Set(const void* data, size_t length) {
|
||||
data_ = reinterpret_cast<const uint8_t*>(data);
|
||||
// Always set |length_| to zero if |data_| is NULL.
|
||||
length_ = data ? length : 0;
|
||||
}
|
||||
|
||||
// Returns true if this range covers a subrange of |sub_length| bytes
|
||||
// at |sub_offset| bytes of this memory range, or false otherwise.
|
||||
bool Covers(size_t sub_offset, size_t sub_length) const {
|
||||
// The following checks verify that:
|
||||
// 1. sub_offset is within [ 0 .. length_ - 1 ]
|
||||
// 2. sub_offset + sub_length is within
|
||||
// [ sub_offset .. length_ ]
|
||||
return sub_offset < length_ && sub_offset + sub_length >= sub_offset &&
|
||||
sub_offset + sub_length <= length_;
|
||||
}
|
||||
|
||||
// Returns a raw data pointer to a subrange of |sub_length| bytes at
|
||||
// |sub_offset| bytes of this memory range, or NULL if the subrange
|
||||
// is out of bounds.
|
||||
const void* GetData(size_t sub_offset, size_t sub_length) const {
|
||||
return Covers(sub_offset, sub_length) ? (data_ + sub_offset) : NULL;
|
||||
}
|
||||
|
||||
// Same as the two-argument version of GetData() but uses sizeof(DataType)
|
||||
// as the subrange length and returns an |DataType| pointer for convenience.
|
||||
template <typename DataType>
|
||||
const DataType* GetData(size_t sub_offset) const {
|
||||
return reinterpret_cast<const DataType*>(
|
||||
GetData(sub_offset, sizeof(DataType)));
|
||||
}
|
||||
|
||||
// Returns a raw pointer to the |element_index|-th element of an array
|
||||
// of elements of length |element_size| starting at |sub_offset| bytes
|
||||
// of this memory range, or NULL if the element is out of bounds.
|
||||
const void* GetArrayElement(size_t element_offset, size_t element_size,
|
||||
unsigned element_index) const {
|
||||
size_t sub_offset = element_offset + element_index * element_size;
|
||||
return GetData(sub_offset, element_size);
|
||||
}
|
||||
|
||||
// Same as the three-argument version of GetArrayElement() but deduces
|
||||
// the element size using sizeof(ElementType) and returns an |ElementType|
|
||||
// pointer for convenience.
|
||||
template <typename ElementType>
|
||||
const ElementType* GetArrayElement(size_t element_offset,
|
||||
unsigned element_index) const {
|
||||
return reinterpret_cast<const ElementType*>(
|
||||
GetArrayElement(element_offset, sizeof(ElementType), element_index));
|
||||
}
|
||||
|
||||
// Returns a subrange of |sub_length| bytes at |sub_offset| bytes of
|
||||
// this memory range, or an empty range if the subrange is out of bounds.
|
||||
MemoryRange Subrange(size_t sub_offset, size_t sub_length) const {
|
||||
return Covers(sub_offset, sub_length)
|
||||
? MemoryRange(data_ + sub_offset, sub_length)
|
||||
: MemoryRange();
|
||||
}
|
||||
|
||||
// Returns a pointer to the beginning of this memory range.
|
||||
const uint8_t* data() const { return data_; }
|
||||
|
||||
// Returns the length, in bytes, of this memory range.
|
||||
size_t length() const { return length_; }
|
||||
|
||||
private:
|
||||
// Pointer to the beginning of this memory range.
|
||||
const uint8_t* data_;
|
||||
|
||||
// Length, in bytes, of this memory range.
|
||||
size_t length_;
|
||||
};
|
||||
|
||||
// Imported from
|
||||
// toolkit/crashreporter/google-breakpad/src/common/linux/memory_mapped_file.h
|
||||
// and inlined .cc.
|
||||
// A utility class for mapping a file into memory for read-only access of the
|
||||
// file content. Its implementation avoids calling into libc functions by
|
||||
// directly making system calls for open, close, mmap, and munmap.
|
||||
class MemoryMappedFile {
|
||||
public:
|
||||
MemoryMappedFile() {}
|
||||
|
||||
// Constructor that calls Map() to map a file at |path| into memory.
|
||||
// If Map() fails, the object behaves as if it is default constructed.
|
||||
MemoryMappedFile(const char* path, size_t offset) { Map(path, offset); }
|
||||
|
||||
MemoryMappedFile(const MemoryMappedFile&) = delete;
|
||||
MemoryMappedFile& operator=(const MemoryMappedFile&) = delete;
|
||||
|
||||
~MemoryMappedFile() {}
|
||||
|
||||
// Maps a file at |path| into memory, which can then be accessed via
|
||||
// content() as a MemoryRange object or via data(), and returns true on
|
||||
// success. Mapping an empty file will succeed but with data() and size()
|
||||
// returning NULL and 0, respectively. An existing mapping is unmapped
|
||||
// before a new mapping is created.
|
||||
bool Map(const char* path, size_t offset) {
|
||||
Unmap();
|
||||
|
||||
int fd = open(path, O_RDONLY, 0);
|
||||
if (fd == -1) {
|
||||
return false;
|
||||
}
|
||||
|
||||
# if defined(__x86_64__) || defined(__aarch64__) || \
|
||||
(defined(__mips__) && _MIPS_SIM == _ABI64)
|
||||
|
||||
struct stat st;
|
||||
if (fstat(fd, &st) == -1 || st.st_size < 0) {
|
||||
# else
|
||||
struct stat64 st;
|
||||
if (fstat64(fd, &st) == -1 || st.st_size < 0) {
|
||||
# endif
|
||||
close(fd);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Strangely file size can be negative, but we check above that it is not.
|
||||
size_t file_len = static_cast<size_t>(st.st_size);
|
||||
// If the file does not extend beyond the offset, simply use an empty
|
||||
// MemoryRange and return true. Don't bother to call mmap()
|
||||
// even though mmap() can handle an empty file on some platforms.
|
||||
if (offset >= file_len) {
|
||||
close(fd);
|
||||
return true;
|
||||
}
|
||||
|
||||
void* data = mmap(NULL, file_len, PROT_READ, MAP_PRIVATE, fd, offset);
|
||||
close(fd);
|
||||
if (data == MAP_FAILED) {
|
||||
return false;
|
||||
}
|
||||
|
||||
content_.Set(data, file_len - offset);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Unmaps the memory for the mapped file. It's a no-op if no file is
|
||||
// mapped.
|
||||
void Unmap() {
|
||||
if (content_.data()) {
|
||||
munmap(const_cast<uint8_t*>(content_.data()), content_.length());
|
||||
content_.Set(NULL, 0);
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a MemoryRange object that covers the memory for the mapped
|
||||
// file. The MemoryRange object is empty if no file is mapped.
|
||||
const MemoryRange& content() const { return content_; }
|
||||
|
||||
// Returns a pointer to the beginning of the memory for the mapped file.
|
||||
// or NULL if no file is mapped or the mapped file is empty.
|
||||
const void* data() const { return content_.data(); }
|
||||
|
||||
// Returns the size in bytes of the mapped file, or zero if no file
|
||||
// is mapped.
|
||||
size_t size() const { return content_.length(); }
|
||||
|
||||
private:
|
||||
// Mapped file content as a MemoryRange object.
|
||||
MemoryRange content_;
|
||||
};
|
||||
|
||||
// Imported from
|
||||
// toolkit/crashreporter/google-breakpad/src/common/linux/file_id.h and inlined
|
||||
// .cc.
|
||||
// GNU binutils' ld defaults to 'sha1', which is 160 bits == 20 bytes,
|
||||
// so this is enough to fit that, which most binaries will use.
|
||||
// This is just a sensible default for vectors so most callers can get away with
|
||||
// stack allocation.
|
||||
static const size_t kDefaultBuildIdSize = 20;
|
||||
|
||||
// Used in a few places for backwards-compatibility.
|
||||
typedef struct {
|
||||
uint32_t data1;
|
||||
uint16_t data2;
|
||||
uint16_t data3;
|
||||
uint8_t data4[8];
|
||||
} MDGUID; /* GUID */
|
||||
|
||||
const size_t kMDGUIDSize = sizeof(MDGUID);
|
||||
|
||||
class FileID {
|
||||
public:
|
||||
explicit FileID(const char* path) : path_(path) {}
|
||||
~FileID() {}
|
||||
|
||||
// Load the identifier for the elf file path specified in the constructor into
|
||||
// |identifier|.
|
||||
//
|
||||
// The current implementation will look for a .note.gnu.build-id
|
||||
// section and use that as the file id, otherwise it falls back to
|
||||
// XORing the first 4096 bytes of the .text section to generate an identifier.
|
||||
bool ElfFileIdentifier(std::vector<uint8_t>& identifier) {
|
||||
MemoryMappedFile mapped_file(path_.c_str(), 0);
|
||||
if (!mapped_file.data()) // Should probably check if size >= ElfW(Ehdr)?
|
||||
return false;
|
||||
|
||||
return ElfFileIdentifierFromMappedFile(mapped_file.data(), identifier);
|
||||
}
|
||||
|
||||
// Traits classes so consumers can write templatized code to deal
|
||||
// with specific ELF bits.
|
||||
struct ElfClass32 {
|
||||
typedef Elf32_Addr Addr;
|
||||
typedef Elf32_Ehdr Ehdr;
|
||||
typedef Elf32_Nhdr Nhdr;
|
||||
typedef Elf32_Phdr Phdr;
|
||||
typedef Elf32_Shdr Shdr;
|
||||
typedef Elf32_Half Half;
|
||||
typedef Elf32_Off Off;
|
||||
typedef Elf32_Sym Sym;
|
||||
typedef Elf32_Word Word;
|
||||
|
||||
static const int kClass = ELFCLASS32;
|
||||
static const uint16_t kMachine = EM_386;
|
||||
static const size_t kAddrSize = sizeof(Elf32_Addr);
|
||||
static constexpr const char* kMachineName = "x86";
|
||||
};
|
||||
|
||||
struct ElfClass64 {
|
||||
typedef Elf64_Addr Addr;
|
||||
typedef Elf64_Ehdr Ehdr;
|
||||
typedef Elf64_Nhdr Nhdr;
|
||||
typedef Elf64_Phdr Phdr;
|
||||
typedef Elf64_Shdr Shdr;
|
||||
typedef Elf64_Half Half;
|
||||
typedef Elf64_Off Off;
|
||||
typedef Elf64_Sym Sym;
|
||||
typedef Elf64_Word Word;
|
||||
|
||||
static const int kClass = ELFCLASS64;
|
||||
static const uint16_t kMachine = EM_X86_64;
|
||||
static const size_t kAddrSize = sizeof(Elf64_Addr);
|
||||
static constexpr const char* kMachineName = "x86_64";
|
||||
};
|
||||
|
||||
// Internal helper method, exposed for convenience for callers
|
||||
// that already have more info.
|
||||
template <typename ElfClass>
|
||||
static const typename ElfClass::Shdr* FindElfSectionByName(
|
||||
const char* name, typename ElfClass::Word section_type,
|
||||
const typename ElfClass::Shdr* sections, const char* section_names,
|
||||
const char* names_end, int nsection) {
|
||||
if (!name || !sections || nsection == 0) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int name_len = strlen(name);
|
||||
if (name_len == 0) return NULL;
|
||||
|
||||
for (int i = 0; i < nsection; ++i) {
|
||||
const char* section_name = section_names + sections[i].sh_name;
|
||||
if (sections[i].sh_type == section_type &&
|
||||
names_end - section_name >= name_len + 1 &&
|
||||
strcmp(name, section_name) == 0) {
|
||||
return sections + i;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct ElfSegment {
|
||||
const void* start;
|
||||
size_t size;
|
||||
};
|
||||
|
||||
// Convert an offset from an Elf header into a pointer to the mapped
|
||||
// address in the current process. Takes an extra template parameter
|
||||
// to specify the return type to avoid having to dynamic_cast the
|
||||
// result.
|
||||
template <typename ElfClass, typename T>
|
||||
static const T* GetOffset(const typename ElfClass::Ehdr* elf_header,
|
||||
typename ElfClass::Off offset) {
|
||||
return reinterpret_cast<const T*>(reinterpret_cast<uintptr_t>(elf_header) +
|
||||
offset);
|
||||
}
|
||||
|
||||
// ELF note name and desc are 32-bits word padded.
|
||||
# define NOTE_PADDING(a) ((a + 3) & ~3)
|
||||
|
||||
static bool ElfClassBuildIDNoteIdentifier(const void* section, size_t length,
|
||||
std::vector<uint8_t>& identifier) {
|
||||
static_assert(sizeof(ElfClass32::Nhdr) == sizeof(ElfClass64::Nhdr),
|
||||
"Elf32_Nhdr and Elf64_Nhdr should be the same");
|
||||
typedef typename ElfClass32::Nhdr Nhdr;
|
||||
|
||||
const void* section_end = reinterpret_cast<const char*>(section) + length;
|
||||
const Nhdr* note_header = reinterpret_cast<const Nhdr*>(section);
|
||||
while (reinterpret_cast<const void*>(note_header) < section_end) {
|
||||
if (note_header->n_type == NT_GNU_BUILD_ID) break;
|
||||
note_header = reinterpret_cast<const Nhdr*>(
|
||||
reinterpret_cast<const char*>(note_header) + sizeof(Nhdr) +
|
||||
NOTE_PADDING(note_header->n_namesz) +
|
||||
NOTE_PADDING(note_header->n_descsz));
|
||||
}
|
||||
if (reinterpret_cast<const void*>(note_header) >= section_end ||
|
||||
note_header->n_descsz == 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const uint8_t* build_id = reinterpret_cast<const uint8_t*>(note_header) +
|
||||
sizeof(Nhdr) +
|
||||
NOTE_PADDING(note_header->n_namesz);
|
||||
identifier.insert(identifier.end(), build_id,
|
||||
build_id + note_header->n_descsz);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename ElfClass>
|
||||
static bool FindElfClassSection(const char* elf_base,
|
||||
const char* section_name,
|
||||
typename ElfClass::Word section_type,
|
||||
const void** section_start,
|
||||
size_t* section_size) {
|
||||
typedef typename ElfClass::Ehdr Ehdr;
|
||||
typedef typename ElfClass::Shdr Shdr;
|
||||
|
||||
if (!elf_base || !section_start || !section_size) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (strncmp(elf_base, ELFMAG, SELFMAG) != 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const Ehdr* elf_header = reinterpret_cast<const Ehdr*>(elf_base);
|
||||
if (elf_header->e_ident[EI_CLASS] != ElfClass::kClass) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const Shdr* sections =
|
||||
GetOffset<ElfClass, Shdr>(elf_header, elf_header->e_shoff);
|
||||
const Shdr* section_names = sections + elf_header->e_shstrndx;
|
||||
const char* names =
|
||||
GetOffset<ElfClass, char>(elf_header, section_names->sh_offset);
|
||||
const char* names_end = names + section_names->sh_size;
|
||||
|
||||
const Shdr* section =
|
||||
FindElfSectionByName<ElfClass>(section_name, section_type, sections,
|
||||
names, names_end, elf_header->e_shnum);
|
||||
|
||||
if (section != NULL && section->sh_size > 0) {
|
||||
*section_start = elf_base + section->sh_offset;
|
||||
*section_size = section->sh_size;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename ElfClass>
|
||||
static bool FindElfClassSegment(const char* elf_base,
|
||||
typename ElfClass::Word segment_type,
|
||||
std::vector<ElfSegment>* segments) {
|
||||
typedef typename ElfClass::Ehdr Ehdr;
|
||||
typedef typename ElfClass::Phdr Phdr;
|
||||
|
||||
if (!elf_base || !segments) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (strncmp(elf_base, ELFMAG, SELFMAG) != 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const Ehdr* elf_header = reinterpret_cast<const Ehdr*>(elf_base);
|
||||
if (elf_header->e_ident[EI_CLASS] != ElfClass::kClass) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const Phdr* phdrs =
|
||||
GetOffset<ElfClass, Phdr>(elf_header, elf_header->e_phoff);
|
||||
|
||||
for (int i = 0; i < elf_header->e_phnum; ++i) {
|
||||
if (phdrs[i].p_type == segment_type) {
|
||||
ElfSegment seg = {};
|
||||
seg.start = elf_base + phdrs[i].p_offset;
|
||||
seg.size = phdrs[i].p_filesz;
|
||||
segments->push_back(seg);
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool IsValidElf(const void* elf_base) {
|
||||
return strncmp(reinterpret_cast<const char*>(elf_base), ELFMAG, SELFMAG) ==
|
||||
0;
|
||||
}
|
||||
|
||||
static int ElfClass(const void* elf_base) {
|
||||
const ElfW(Ehdr)* elf_header =
|
||||
reinterpret_cast<const ElfW(Ehdr)*>(elf_base);
|
||||
|
||||
return elf_header->e_ident[EI_CLASS];
|
||||
}
|
||||
|
||||
static bool FindElfSection(const void* elf_mapped_base,
|
||||
const char* section_name, uint32_t section_type,
|
||||
const void** section_start, size_t* section_size) {
|
||||
if (!elf_mapped_base || !section_start || !section_size) {
|
||||
return false;
|
||||
}
|
||||
|
||||
*section_start = NULL;
|
||||
*section_size = 0;
|
||||
|
||||
if (!IsValidElf(elf_mapped_base)) return false;
|
||||
|
||||
int cls = ElfClass(elf_mapped_base);
|
||||
const char* elf_base = static_cast<const char*>(elf_mapped_base);
|
||||
|
||||
if (cls == ELFCLASS32) {
|
||||
return FindElfClassSection<ElfClass32>(elf_base, section_name,
|
||||
section_type, section_start,
|
||||
section_size) &&
|
||||
*section_start != NULL;
|
||||
} else if (cls == ELFCLASS64) {
|
||||
return FindElfClassSection<ElfClass64>(elf_base, section_name,
|
||||
section_type, section_start,
|
||||
section_size) &&
|
||||
*section_start != NULL;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool FindElfSegments(const void* elf_mapped_base,
|
||||
uint32_t segment_type,
|
||||
std::vector<ElfSegment>* segments) {
|
||||
if (!elf_mapped_base || !segments) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!IsValidElf(elf_mapped_base)) return false;
|
||||
|
||||
int cls = ElfClass(elf_mapped_base);
|
||||
const char* elf_base = static_cast<const char*>(elf_mapped_base);
|
||||
|
||||
if (cls == ELFCLASS32) {
|
||||
return FindElfClassSegment<ElfClass32>(elf_base, segment_type, segments);
|
||||
} else if (cls == ELFCLASS64) {
|
||||
return FindElfClassSegment<ElfClass64>(elf_base, segment_type, segments);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// Attempt to locate a .note.gnu.build-id section in an ELF binary
|
||||
// and copy it into |identifier|.
|
||||
static bool FindElfBuildIDNote(const void* elf_mapped_base,
|
||||
std::vector<uint8_t>& identifier) {
|
||||
// lld normally creates 2 PT_NOTEs, gold normally creates 1.
|
||||
std::vector<ElfSegment> segs;
|
||||
if (FindElfSegments(elf_mapped_base, PT_NOTE, &segs)) {
|
||||
for (ElfSegment& seg : segs) {
|
||||
if (ElfClassBuildIDNoteIdentifier(seg.start, seg.size, identifier)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void* note_section;
|
||||
size_t note_size;
|
||||
if (FindElfSection(elf_mapped_base, ".note.gnu.build-id", SHT_NOTE,
|
||||
(const void**)¬e_section, ¬e_size)) {
|
||||
return ElfClassBuildIDNoteIdentifier(note_section, note_size, identifier);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// Attempt to locate the .text section of an ELF binary and generate
|
||||
// a simple hash by XORing the first page worth of bytes into |identifier|.
|
||||
static bool HashElfTextSection(const void* elf_mapped_base,
|
||||
std::vector<uint8_t>& identifier) {
|
||||
identifier.resize(kMDGUIDSize);
|
||||
|
||||
void* text_section;
|
||||
size_t text_size;
|
||||
if (!FindElfSection(elf_mapped_base, ".text", SHT_PROGBITS,
|
||||
(const void**)&text_section, &text_size) ||
|
||||
text_size == 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Only provide |kMDGUIDSize| bytes to keep identifiers produced by this
|
||||
// function backwards-compatible.
|
||||
memset(&identifier[0], 0, kMDGUIDSize);
|
||||
const uint8_t* ptr = reinterpret_cast<const uint8_t*>(text_section);
|
||||
const uint8_t* ptr_end =
|
||||
ptr + std::min(text_size, static_cast<size_t>(4096));
|
||||
while (ptr < ptr_end) {
|
||||
for (unsigned i = 0; i < kMDGUIDSize; i++) identifier[i] ^= ptr[i];
|
||||
ptr += kMDGUIDSize;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Load the identifier for the elf file mapped into memory at |base| into
|
||||
// |identifier|. Return false if the identifier could not be created for this
|
||||
// file.
|
||||
static bool ElfFileIdentifierFromMappedFile(
|
||||
const void* base, std::vector<uint8_t>& identifier) {
|
||||
// Look for a build id note first.
|
||||
if (FindElfBuildIDNote(base, identifier)) return true;
|
||||
|
||||
// Fall back on hashing the first page of the text section.
|
||||
return HashElfTextSection(base, identifier);
|
||||
}
|
||||
|
||||
// These three functions are not ever called in an unsafe context, so it's OK
|
||||
// to allocate memory and use libc.
|
||||
static std::string bytes_to_hex_string(const uint8_t* bytes, size_t count) {
|
||||
std::string result;
|
||||
for (unsigned int idx = 0; idx < count; ++idx) {
|
||||
char buf[3];
|
||||
SprintfLiteral(buf, "%02X", bytes[idx]);
|
||||
result.append(buf);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// Convert the |identifier| data to a string. The string will
|
||||
// be formatted as a UUID in all uppercase without dashes.
|
||||
// (e.g., 22F065BBFC9C49F780FE26A7CEBD7BCE).
|
||||
static std::string ConvertIdentifierToUUIDString(
|
||||
const std::vector<uint8_t>& identifier) {
|
||||
uint8_t identifier_swapped[kMDGUIDSize] = {0};
|
||||
|
||||
// Endian-ness swap to match dump processor expectation.
|
||||
memcpy(identifier_swapped, &identifier[0],
|
||||
std::min(kMDGUIDSize, identifier.size()));
|
||||
uint32_t* data1 = reinterpret_cast<uint32_t*>(identifier_swapped);
|
||||
*data1 = htonl(*data1);
|
||||
uint16_t* data2 = reinterpret_cast<uint16_t*>(identifier_swapped + 4);
|
||||
*data2 = htons(*data2);
|
||||
uint16_t* data3 = reinterpret_cast<uint16_t*>(identifier_swapped + 6);
|
||||
*data3 = htons(*data3);
|
||||
|
||||
return bytes_to_hex_string(identifier_swapped, kMDGUIDSize);
|
||||
}
|
||||
|
||||
// Convert the entire |identifier| data to a hex string.
|
||||
static std::string ConvertIdentifierToString(
|
||||
const std::vector<uint8_t>& identifier) {
|
||||
return bytes_to_hex_string(&identifier[0], identifier.size());
|
||||
}
|
||||
|
||||
private:
|
||||
// Storage for the path specified
|
||||
std::string path_;
|
||||
};
|
||||
|
||||
// End of imports from toolkit/crashreporter/google-breakpad/.
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
struct LoadedLibraryInfo {
|
||||
LoadedLibraryInfo(const char* aName, unsigned long aBaseAddress,
|
||||
unsigned long aFirstMappingStart,
|
||||
unsigned long aLastMappingEnd)
|
||||
: mName(aName),
|
||||
mBaseAddress(aBaseAddress),
|
||||
mFirstMappingStart(aFirstMappingStart),
|
||||
mLastMappingEnd(aLastMappingEnd) {}
|
||||
|
||||
std::string mName;
|
||||
unsigned long mBaseAddress;
|
||||
unsigned long mFirstMappingStart;
|
||||
unsigned long mLastMappingEnd;
|
||||
};
|
||||
|
||||
# if defined(GP_OS_android)
|
||||
static void outputMapperLog(const char* aBuf) { /* LOG("%s", aBuf); */
|
||||
}
|
||||
# endif
|
||||
|
||||
static std::string IDtoUUIDString(const std::vector<uint8_t>& aIdentifier) {
|
||||
std::string uuid = FileID::ConvertIdentifierToUUIDString(aIdentifier);
|
||||
// This is '0', not '\0', since it represents the breakpad id age.
|
||||
uuid += '0';
|
||||
return uuid;
|
||||
}
|
||||
|
||||
// Get the breakpad Id for the binary file pointed by bin_name
|
||||
static std::string getId(const char* bin_name) {
|
||||
std::vector<uint8_t> identifier;
|
||||
identifier.reserve(kDefaultBuildIdSize);
|
||||
|
||||
# if defined(GP_OS_android)
|
||||
if (nsDependentCString(bin_name).Find("!/") != kNotFound) {
|
||||
AutoObjectMapperFaultyLib mapper(outputMapperLog);
|
||||
void* image = nullptr;
|
||||
size_t size = 0;
|
||||
if (mapper.Map(&image, &size, bin_name) && image && size) {
|
||||
if (FileID::ElfFileIdentifierFromMappedFile(image, identifier)) {
|
||||
return IDtoUUIDString(identifier);
|
||||
}
|
||||
}
|
||||
}
|
||||
# endif
|
||||
|
||||
FileID file_id(bin_name);
|
||||
if (file_id.ElfFileIdentifier(identifier)) {
|
||||
return IDtoUUIDString(identifier);
|
||||
}
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
static SharedLibrary SharedLibraryAtPath(const char* path,
|
||||
unsigned long libStart,
|
||||
unsigned long libEnd,
|
||||
unsigned long offset = 0) {
|
||||
std::string pathStr = path;
|
||||
|
||||
size_t pos = pathStr.rfind('\\');
|
||||
std::string nameStr =
|
||||
(pos != std::string::npos) ? pathStr.substr(pos + 1) : pathStr;
|
||||
|
||||
return SharedLibrary(libStart, libEnd, offset, getId(path), nameStr, pathStr,
|
||||
nameStr, pathStr, std::string{}, "");
|
||||
}
|
||||
|
||||
static int dl_iterate_callback(struct dl_phdr_info* dl_info, size_t size,
|
||||
void* data) {
|
||||
auto libInfoList = reinterpret_cast<std::vector<LoadedLibraryInfo>*>(data);
|
||||
|
||||
if (dl_info->dlpi_phnum <= 0) return 0;
|
||||
|
||||
unsigned long baseAddress = dl_info->dlpi_addr;
|
||||
unsigned long firstMappingStart = -1;
|
||||
unsigned long lastMappingEnd = 0;
|
||||
|
||||
for (size_t i = 0; i < dl_info->dlpi_phnum; i++) {
|
||||
if (dl_info->dlpi_phdr[i].p_type != PT_LOAD) {
|
||||
continue;
|
||||
}
|
||||
unsigned long start = dl_info->dlpi_addr + dl_info->dlpi_phdr[i].p_vaddr;
|
||||
unsigned long end = start + dl_info->dlpi_phdr[i].p_memsz;
|
||||
if (start < firstMappingStart) {
|
||||
firstMappingStart = start;
|
||||
}
|
||||
if (end > lastMappingEnd) {
|
||||
lastMappingEnd = end;
|
||||
}
|
||||
}
|
||||
|
||||
libInfoList->push_back(LoadedLibraryInfo(dl_info->dlpi_name, baseAddress,
|
||||
firstMappingStart, lastMappingEnd));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() {
|
||||
SharedLibraryInfo info;
|
||||
|
||||
# if defined(GP_OS_linux)
|
||||
// We need to find the name of the executable (exeName, exeNameLen) and the
|
||||
// address of its executable section (exeExeAddr) in the running image.
|
||||
char exeName[PATH_MAX];
|
||||
memset(exeName, 0, sizeof(exeName));
|
||||
|
||||
ssize_t exeNameLen = readlink("/proc/self/exe", exeName, sizeof(exeName) - 1);
|
||||
if (exeNameLen == -1) {
|
||||
// readlink failed for whatever reason. Note this, but keep going.
|
||||
exeName[0] = '\0';
|
||||
exeNameLen = 0;
|
||||
// LOG("SharedLibraryInfo::GetInfoForSelf(): readlink failed");
|
||||
} else {
|
||||
// Assert no buffer overflow.
|
||||
MOZ_RELEASE_ASSERT(exeNameLen >= 0 &&
|
||||
exeNameLen < static_cast<ssize_t>(sizeof(exeName)));
|
||||
}
|
||||
|
||||
unsigned long exeExeAddr = 0;
|
||||
# endif
|
||||
|
||||
# if defined(GP_OS_android)
|
||||
// If dl_iterate_phdr doesn't exist, we give up immediately.
|
||||
if (!dl_iterate_phdr) {
|
||||
// On ARM Android, dl_iterate_phdr is provided by the custom linker.
|
||||
// So if libxul was loaded by the system linker (e.g. as part of
|
||||
// xpcshell when running tests), it won't be available and we should
|
||||
// not call it.
|
||||
return info;
|
||||
}
|
||||
# endif
|
||||
|
||||
// Read info from /proc/self/maps. We ignore most of it.
|
||||
pid_t pid = mozilla::baseprofiler::profiler_current_process_id();
|
||||
char path[PATH_MAX];
|
||||
SprintfLiteral(path, "/proc/%d/maps", pid);
|
||||
std::ifstream maps(path);
|
||||
std::string line;
|
||||
while (std::getline(maps, line)) {
|
||||
int ret;
|
||||
unsigned long start;
|
||||
unsigned long end;
|
||||
char perm[6 + 1] = "";
|
||||
unsigned long offset;
|
||||
char modulePath[PATH_MAX + 1] = "";
|
||||
ret = sscanf(line.c_str(),
|
||||
"%lx-%lx %6s %lx %*s %*x %" PATH_MAX_STRING(PATH_MAX) "s\n",
|
||||
&start, &end, perm, &offset, modulePath);
|
||||
if (!strchr(perm, 'x')) {
|
||||
// Ignore non executable entries
|
||||
continue;
|
||||
}
|
||||
if (ret != 5 && ret != 4) {
|
||||
// LOG("SharedLibraryInfo::GetInfoForSelf(): "
|
||||
// "reading /proc/self/maps failed");
|
||||
continue;
|
||||
}
|
||||
|
||||
# if defined(GP_OS_linux)
|
||||
// Try to establish the main executable's load address.
|
||||
if (exeNameLen > 0 && strcmp(modulePath, exeName) == 0) {
|
||||
exeExeAddr = start;
|
||||
}
|
||||
# elif defined(GP_OS_android)
|
||||
// Use /proc/pid/maps to get the dalvik-jit section since it has no
|
||||
// associated phdrs.
|
||||
if (0 == strcmp(modulePath, "/dev/ashmem/dalvik-jit-code-cache")) {
|
||||
info.AddSharedLibrary(
|
||||
SharedLibraryAtPath(modulePath, start, end, offset));
|
||||
if (info.GetSize() > 10000) {
|
||||
// LOG("SharedLibraryInfo::GetInfoForSelf(): "
|
||||
// "implausibly large number of mappings acquired");
|
||||
break;
|
||||
}
|
||||
}
|
||||
# endif
|
||||
}
|
||||
|
||||
std::vector<LoadedLibraryInfo> libInfoList;
|
||||
|
||||
// We collect the bulk of the library info using dl_iterate_phdr.
|
||||
dl_iterate_phdr(dl_iterate_callback, &libInfoList);
|
||||
|
||||
for (const auto& libInfo : libInfoList) {
|
||||
info.AddSharedLibrary(
|
||||
SharedLibraryAtPath(libInfo.mName.c_str(), libInfo.mFirstMappingStart,
|
||||
libInfo.mLastMappingEnd,
|
||||
libInfo.mFirstMappingStart - libInfo.mBaseAddress));
|
||||
}
|
||||
|
||||
# if defined(GP_OS_linux)
|
||||
// Make another pass over the information we just harvested from
|
||||
// dl_iterate_phdr. If we see a nameless object mapped at what we earlier
|
||||
// established to be the main executable's load address, attach the
|
||||
// executable's name to that entry.
|
||||
for (size_t i = 0; i < info.GetSize(); i++) {
|
||||
SharedLibrary& lib = info.GetMutableEntry(i);
|
||||
if (lib.GetStart() == exeExeAddr && lib.GetDebugPath().empty()) {
|
||||
lib = SharedLibraryAtPath(exeName, lib.GetStart(), lib.GetEnd(),
|
||||
lib.GetOffset());
|
||||
|
||||
// We only expect to see one such entry.
|
||||
break;
|
||||
}
|
||||
}
|
||||
# endif
|
||||
|
||||
return info;
|
||||
}
|
||||
|
||||
void SharedLibraryInfo::Initialize() { /* do nothing */
|
||||
}
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,187 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include "BaseProfilerSharedLibraries.h"
|
||||
|
||||
# include "platform.h"
|
||||
|
||||
# include "mozilla/Unused.h"
|
||||
# include <AvailabilityMacros.h>
|
||||
|
||||
# include <dlfcn.h>
|
||||
# include <mach-o/arch.h>
|
||||
# include <mach-o/dyld_images.h>
|
||||
# include <mach-o/dyld.h>
|
||||
# include <mach-o/loader.h>
|
||||
# include <mach/mach_init.h>
|
||||
# include <mach/mach_traps.h>
|
||||
# include <mach/task_info.h>
|
||||
# include <mach/task.h>
|
||||
# include <sstream>
|
||||
# include <stdlib.h>
|
||||
# include <string.h>
|
||||
# include <vector>
|
||||
|
||||
// Architecture specific abstraction.
|
||||
# if defined(GP_ARCH_x86)
|
||||
typedef mach_header platform_mach_header;
|
||||
typedef segment_command mach_segment_command_type;
|
||||
# define MACHO_MAGIC_NUMBER MH_MAGIC
|
||||
# define CMD_SEGMENT LC_SEGMENT
|
||||
# define seg_size uint32_t
|
||||
# else
|
||||
typedef mach_header_64 platform_mach_header;
|
||||
typedef segment_command_64 mach_segment_command_type;
|
||||
# define MACHO_MAGIC_NUMBER MH_MAGIC_64
|
||||
# define CMD_SEGMENT LC_SEGMENT_64
|
||||
# define seg_size uint64_t
|
||||
# endif
|
||||
|
||||
struct NativeSharedLibrary {
|
||||
const platform_mach_header* header;
|
||||
std::string path;
|
||||
};
|
||||
static std::vector<NativeSharedLibrary>* sSharedLibrariesList = nullptr;
|
||||
|
||||
class MOZ_RAII SharedLibrariesLock {
|
||||
public:
|
||||
SharedLibrariesLock() { sSharedLibrariesMutex.Lock(); }
|
||||
|
||||
~SharedLibrariesLock() { sSharedLibrariesMutex.Unlock(); }
|
||||
|
||||
SharedLibrariesLock(const SharedLibrariesLock&) = delete;
|
||||
void operator=(const SharedLibrariesLock&) = delete;
|
||||
|
||||
private:
|
||||
static mozilla::baseprofiler::PSMutex sSharedLibrariesMutex;
|
||||
};
|
||||
|
||||
mozilla::baseprofiler::PSMutex SharedLibrariesLock::sSharedLibrariesMutex;
|
||||
|
||||
static void SharedLibraryAddImage(const struct mach_header* mh,
|
||||
intptr_t vmaddr_slide) {
|
||||
// NOTE: Presumably for backwards-compatibility reasons, this function accepts
|
||||
// a mach_header even on 64-bit where it ought to be a mach_header_64. We cast
|
||||
// it to the right type here.
|
||||
auto header = reinterpret_cast<const platform_mach_header*>(mh);
|
||||
|
||||
Dl_info info;
|
||||
if (!dladdr(header, &info)) {
|
||||
return;
|
||||
}
|
||||
|
||||
SharedLibrariesLock lock;
|
||||
if (!sSharedLibrariesList) {
|
||||
return;
|
||||
}
|
||||
|
||||
NativeSharedLibrary lib = {header, info.dli_fname};
|
||||
sSharedLibrariesList->push_back(lib);
|
||||
}
|
||||
|
||||
static void SharedLibraryRemoveImage(const struct mach_header* mh,
|
||||
intptr_t vmaddr_slide) {
|
||||
// NOTE: Presumably for backwards-compatibility reasons, this function accepts
|
||||
// a mach_header even on 64-bit where it ought to be a mach_header_64. We cast
|
||||
// it to the right type here.
|
||||
auto header = reinterpret_cast<const platform_mach_header*>(mh);
|
||||
|
||||
SharedLibrariesLock lock;
|
||||
if (!sSharedLibrariesList) {
|
||||
return;
|
||||
}
|
||||
|
||||
uint32_t count = sSharedLibrariesList->size();
|
||||
for (uint32_t i = 0; i < count; ++i) {
|
||||
if ((*sSharedLibrariesList)[i].header == header) {
|
||||
sSharedLibrariesList->erase(sSharedLibrariesList->begin() + i);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void SharedLibraryInfo::Initialize() {
|
||||
// NOTE: We intentionally leak this memory here. We're allocating dynamically
|
||||
// in order to avoid static initializers.
|
||||
sSharedLibrariesList = new std::vector<NativeSharedLibrary>();
|
||||
|
||||
_dyld_register_func_for_add_image(SharedLibraryAddImage);
|
||||
_dyld_register_func_for_remove_image(SharedLibraryRemoveImage);
|
||||
}
|
||||
|
||||
static void addSharedLibrary(const platform_mach_header* header,
|
||||
const char* path, SharedLibraryInfo& info) {
|
||||
const struct load_command* cmd =
|
||||
reinterpret_cast<const struct load_command*>(header + 1);
|
||||
|
||||
seg_size size = 0;
|
||||
unsigned long long start = reinterpret_cast<unsigned long long>(header);
|
||||
// Find the cmd segment in the macho image. It will contain the offset we care
|
||||
// about.
|
||||
const uint8_t* uuid_bytes = nullptr;
|
||||
for (unsigned int i = 0;
|
||||
cmd && (i < header->ncmds) && (uuid_bytes == nullptr || size == 0);
|
||||
++i) {
|
||||
if (cmd->cmd == CMD_SEGMENT) {
|
||||
const mach_segment_command_type* seg =
|
||||
reinterpret_cast<const mach_segment_command_type*>(cmd);
|
||||
|
||||
if (!strcmp(seg->segname, "__TEXT")) {
|
||||
size = seg->vmsize;
|
||||
}
|
||||
} else if (cmd->cmd == LC_UUID) {
|
||||
const uuid_command* ucmd = reinterpret_cast<const uuid_command*>(cmd);
|
||||
uuid_bytes = ucmd->uuid;
|
||||
}
|
||||
|
||||
cmd = reinterpret_cast<const struct load_command*>(
|
||||
reinterpret_cast<const char*>(cmd) + cmd->cmdsize);
|
||||
}
|
||||
|
||||
std::string uuid;
|
||||
if (uuid_bytes != nullptr) {
|
||||
static constexpr char digits[16] = {'0', '1', '2', '3', '4', '5', '6', '7',
|
||||
'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'};
|
||||
for (int i = 0; i < 15; ++i) {
|
||||
uint8_t byte = uuid_bytes[i];
|
||||
uuid += digits[byte >> 4];
|
||||
uuid += digits[byte & 0xFu];
|
||||
}
|
||||
// breakpad id age.
|
||||
uuid += '0';
|
||||
}
|
||||
|
||||
std::string pathStr = path;
|
||||
|
||||
size_t pos = pathStr.rfind('\\');
|
||||
std::string nameStr =
|
||||
(pos != std::string::npos) ? pathStr.substr(pos + 1) : pathStr;
|
||||
|
||||
const NXArchInfo* archInfo =
|
||||
NXGetArchInfoFromCpuType(header->cputype, header->cpusubtype);
|
||||
|
||||
info.AddSharedLibrary(SharedLibrary(start, start + size, 0, uuid, nameStr,
|
||||
pathStr, nameStr, pathStr, std::string{},
|
||||
archInfo ? archInfo->name : ""));
|
||||
}
|
||||
|
||||
// Translate the statically stored sSharedLibrariesList information into a
|
||||
// SharedLibraryInfo object.
|
||||
SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() {
|
||||
SharedLibrariesLock lock;
|
||||
SharedLibraryInfo sharedLibraryInfo;
|
||||
|
||||
for (auto& info : *sSharedLibrariesList) {
|
||||
addSharedLibrary(info.header, info.path.c_str(), sharedLibraryInfo);
|
||||
}
|
||||
|
||||
return sharedLibraryInfo;
|
||||
}
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,241 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include <windows.h>
|
||||
# include <dbghelp.h>
|
||||
# include <sstream>
|
||||
# include <psapi.h>
|
||||
|
||||
# include "BaseProfilerSharedLibraries.h"
|
||||
|
||||
# include "mozilla/UniquePtr.h"
|
||||
# include "mozilla/Unused.h"
|
||||
|
||||
# include <string>
|
||||
|
||||
# define CV_SIGNATURE 0x53445352 // 'SDSR'
|
||||
|
||||
struct CodeViewRecord70 {
|
||||
uint32_t signature;
|
||||
GUID pdbSignature;
|
||||
uint32_t pdbAge;
|
||||
// A UTF-8 string, according to
|
||||
// https://github.com/Microsoft/microsoft-pdb/blob/082c5290e5aff028ae84e43affa8be717aa7af73/PDB/dbi/locator.cpp#L785
|
||||
char pdbFileName[1];
|
||||
};
|
||||
|
||||
static constexpr char digits[16] = {'0', '1', '2', '3', '4', '5', '6', '7',
|
||||
'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'};
|
||||
|
||||
static void AppendHex(const unsigned char* aBegin, const unsigned char* aEnd,
|
||||
std::string& aOut) {
|
||||
for (const unsigned char* p = aBegin; p < aEnd; ++p) {
|
||||
unsigned char c = *p;
|
||||
aOut += digits[c >> 4];
|
||||
aOut += digits[c & 0xFu];
|
||||
}
|
||||
}
|
||||
|
||||
static constexpr bool WITH_PADDING = true;
|
||||
static constexpr bool WITHOUT_PADDING = false;
|
||||
template <typename T>
|
||||
static void AppendHex(T aValue, std::string& aOut, bool aWithPadding) {
|
||||
for (int i = sizeof(T) * 2 - 1; i >= 0; --i) {
|
||||
unsigned nibble = (aValue >> (i * 4)) & 0xFu;
|
||||
// If no-padding requested, skip starting zeroes -- unless we're on the very
|
||||
// last nibble (so we don't output a blank).
|
||||
if (!aWithPadding && i != 0) {
|
||||
if (nibble == 0) {
|
||||
// Requested no padding, skip zeroes.
|
||||
continue;
|
||||
}
|
||||
// Requested no padding, got first non-zero, pretend we now want padding
|
||||
// so we don't skip zeroes anymore.
|
||||
aWithPadding = true;
|
||||
}
|
||||
aOut += digits[nibble];
|
||||
}
|
||||
}
|
||||
|
||||
static bool GetPdbInfo(uintptr_t aStart, std::string& aSignature,
|
||||
uint32_t& aAge, char** aPdbName) {
|
||||
if (!aStart) {
|
||||
return false;
|
||||
}
|
||||
|
||||
PIMAGE_DOS_HEADER dosHeader = reinterpret_cast<PIMAGE_DOS_HEADER>(aStart);
|
||||
if (dosHeader->e_magic != IMAGE_DOS_SIGNATURE) {
|
||||
return false;
|
||||
}
|
||||
|
||||
PIMAGE_NT_HEADERS ntHeaders =
|
||||
reinterpret_cast<PIMAGE_NT_HEADERS>(aStart + dosHeader->e_lfanew);
|
||||
if (ntHeaders->Signature != IMAGE_NT_SIGNATURE) {
|
||||
return false;
|
||||
}
|
||||
|
||||
uint32_t relativeVirtualAddress =
|
||||
ntHeaders->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_DEBUG]
|
||||
.VirtualAddress;
|
||||
if (!relativeVirtualAddress) {
|
||||
return false;
|
||||
}
|
||||
|
||||
PIMAGE_DEBUG_DIRECTORY debugDirectory =
|
||||
reinterpret_cast<PIMAGE_DEBUG_DIRECTORY>(aStart + relativeVirtualAddress);
|
||||
if (!debugDirectory || debugDirectory->Type != IMAGE_DEBUG_TYPE_CODEVIEW) {
|
||||
return false;
|
||||
}
|
||||
|
||||
CodeViewRecord70* debugInfo = reinterpret_cast<CodeViewRecord70*>(
|
||||
aStart + debugDirectory->AddressOfRawData);
|
||||
if (!debugInfo || debugInfo->signature != CV_SIGNATURE) {
|
||||
return false;
|
||||
}
|
||||
|
||||
aAge = debugInfo->pdbAge;
|
||||
GUID& pdbSignature = debugInfo->pdbSignature;
|
||||
AppendHex(pdbSignature.Data1, aSignature, WITH_PADDING);
|
||||
AppendHex(pdbSignature.Data2, aSignature, WITH_PADDING);
|
||||
AppendHex(pdbSignature.Data3, aSignature, WITH_PADDING);
|
||||
AppendHex(reinterpret_cast<const unsigned char*>(&pdbSignature.Data4),
|
||||
reinterpret_cast<const unsigned char*>(&pdbSignature.Data4) +
|
||||
sizeof(pdbSignature.Data4),
|
||||
aSignature);
|
||||
|
||||
// The PDB file name could be different from module filename, so report both
|
||||
// e.g. The PDB for C:\Windows\SysWOW64\ntdll.dll is wntdll.pdb
|
||||
*aPdbName = debugInfo->pdbFileName;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static std::string GetVersion(char* dllPath) {
|
||||
DWORD infoSize = GetFileVersionInfoSizeA(dllPath, nullptr);
|
||||
if (infoSize == 0) {
|
||||
return {};
|
||||
}
|
||||
|
||||
mozilla::UniquePtr<unsigned char[]> infoData =
|
||||
mozilla::MakeUnique<unsigned char[]>(infoSize);
|
||||
if (!GetFileVersionInfoA(dllPath, 0, infoSize, infoData.get())) {
|
||||
return {};
|
||||
}
|
||||
|
||||
VS_FIXEDFILEINFO* vInfo;
|
||||
UINT vInfoLen;
|
||||
if (!VerQueryValueW(infoData.get(), L"\\", (LPVOID*)&vInfo, &vInfoLen)) {
|
||||
return {};
|
||||
}
|
||||
if (!vInfo) {
|
||||
return {};
|
||||
}
|
||||
|
||||
return std::to_string(vInfo->dwFileVersionMS >> 16) + '.' +
|
||||
std::to_string(vInfo->dwFileVersionMS & 0xFFFF) + '.' +
|
||||
std::to_string(vInfo->dwFileVersionLS >> 16) + '.' +
|
||||
std::to_string(vInfo->dwFileVersionLS & 0xFFFF);
|
||||
}
|
||||
|
||||
SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() {
|
||||
SharedLibraryInfo sharedLibraryInfo;
|
||||
|
||||
HANDLE hProcess = GetCurrentProcess();
|
||||
mozilla::UniquePtr<HMODULE[]> hMods;
|
||||
size_t modulesNum = 0;
|
||||
if (hProcess != NULL) {
|
||||
DWORD modulesSize;
|
||||
if (!EnumProcessModules(hProcess, nullptr, 0, &modulesSize)) {
|
||||
return sharedLibraryInfo;
|
||||
}
|
||||
modulesNum = modulesSize / sizeof(HMODULE);
|
||||
hMods = mozilla::MakeUnique<HMODULE[]>(modulesNum);
|
||||
if (!EnumProcessModules(hProcess, hMods.get(), modulesNum * sizeof(HMODULE),
|
||||
&modulesSize)) {
|
||||
return sharedLibraryInfo;
|
||||
}
|
||||
// The list may have shrunk between calls
|
||||
if (modulesSize / sizeof(HMODULE) < modulesNum) {
|
||||
modulesNum = modulesSize / sizeof(HMODULE);
|
||||
}
|
||||
}
|
||||
|
||||
for (unsigned int i = 0; i < modulesNum; i++) {
|
||||
std::string pdbPathStr;
|
||||
std::string pdbNameStr;
|
||||
char* pdbName = NULL;
|
||||
char modulePath[MAX_PATH + 1];
|
||||
|
||||
if (!GetModuleFileNameEx(hProcess, hMods[i], modulePath,
|
||||
sizeof(modulePath) / sizeof(char))) {
|
||||
continue;
|
||||
}
|
||||
|
||||
MODULEINFO module = {0};
|
||||
if (!GetModuleInformation(hProcess, hMods[i], &module,
|
||||
sizeof(MODULEINFO))) {
|
||||
continue;
|
||||
}
|
||||
|
||||
std::string breakpadId;
|
||||
// Load the module again to make sure that its handle will remain
|
||||
// valid as we attempt to read the PDB information from it. We load the
|
||||
// DLL as a datafile so that if the module actually gets unloaded between
|
||||
// the call to EnumProcessModules and the following LoadLibraryEx, we
|
||||
// don't end up running the now newly loaded module's DllMain function. If
|
||||
// the module is already loaded, LoadLibraryEx just increments its
|
||||
// refcount.
|
||||
//
|
||||
// Note that because of the race condition above, merely loading the DLL
|
||||
// again is not safe enough, therefore we also need to make sure that we
|
||||
// can read the memory mapped at the base address before we can safely
|
||||
// proceed to actually access those pages.
|
||||
HMODULE handleLock =
|
||||
LoadLibraryEx(modulePath, NULL, LOAD_LIBRARY_AS_DATAFILE);
|
||||
MEMORY_BASIC_INFORMATION vmemInfo = {0};
|
||||
std::string pdbSig;
|
||||
uint32_t pdbAge;
|
||||
if (handleLock &&
|
||||
sizeof(vmemInfo) ==
|
||||
VirtualQuery(module.lpBaseOfDll, &vmemInfo, sizeof(vmemInfo)) &&
|
||||
vmemInfo.State == MEM_COMMIT &&
|
||||
GetPdbInfo((uintptr_t)module.lpBaseOfDll, pdbSig, pdbAge, &pdbName)) {
|
||||
MOZ_ASSERT(breakpadId.empty());
|
||||
breakpadId += pdbSig;
|
||||
AppendHex(pdbAge, breakpadId, WITHOUT_PADDING);
|
||||
|
||||
pdbPathStr = pdbName;
|
||||
size_t pos = pdbPathStr.rfind('\\');
|
||||
pdbNameStr =
|
||||
(pos != std::string::npos) ? pdbPathStr.substr(pos + 1) : pdbPathStr;
|
||||
}
|
||||
|
||||
std::string modulePathStr(modulePath);
|
||||
size_t pos = modulePathStr.rfind('\\');
|
||||
std::string moduleNameStr = (pos != std::string::npos)
|
||||
? modulePathStr.substr(pos + 1)
|
||||
: modulePathStr;
|
||||
|
||||
SharedLibrary shlib((uintptr_t)module.lpBaseOfDll,
|
||||
(uintptr_t)module.lpBaseOfDll + module.SizeOfImage,
|
||||
0, // DLLs are always mapped at offset 0 on Windows
|
||||
breakpadId, moduleNameStr, modulePathStr, pdbNameStr,
|
||||
pdbPathStr, GetVersion(modulePath), "");
|
||||
sharedLibraryInfo.AddSharedLibrary(shlib);
|
||||
|
||||
FreeLibrary(handleLock); // ok to free null handles
|
||||
}
|
||||
|
||||
return sharedLibraryInfo;
|
||||
}
|
||||
|
||||
void SharedLibraryInfo::Initialize() { /* do nothing */
|
||||
}
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -1,130 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include <sys/mman.h>
|
||||
# include <unistd.h>
|
||||
# include <sys/types.h>
|
||||
# include <sys/stat.h>
|
||||
# include <fcntl.h>
|
||||
|
||||
# include "mozilla/Assertions.h"
|
||||
# include "mozilla/Sprintf.h"
|
||||
|
||||
# include "PlatformMacros.h"
|
||||
# include "AutoObjectMapper.h"
|
||||
|
||||
# if defined(GP_OS_android)
|
||||
# include <dlfcn.h>
|
||||
# include "mozilla/Types.h"
|
||||
// FIXME move these out of mozglue/linker/ElfLoader.h into their
|
||||
// own header, so as to avoid conflicts arising from two definitions
|
||||
// of Array
|
||||
extern "C" {
|
||||
MFBT_API size_t __dl_get_mappable_length(void* handle);
|
||||
MFBT_API void* __dl_mmap(void* handle, void* addr, size_t length, off_t offset);
|
||||
MFBT_API void __dl_munmap(void* handle, void* addr, size_t length);
|
||||
}
|
||||
# endif
|
||||
|
||||
// A helper function for creating failure error messages in
|
||||
// AutoObjectMapper*::Map.
|
||||
static void failedToMessage(void (*aLog)(const char*), const char* aHowFailed,
|
||||
std::string aFileName) {
|
||||
char buf[300];
|
||||
SprintfLiteral(buf, "AutoObjectMapper::Map: Failed to %s \'%s\'", aHowFailed,
|
||||
aFileName.c_str());
|
||||
buf[sizeof(buf) - 1] = 0;
|
||||
aLog(buf);
|
||||
}
|
||||
|
||||
AutoObjectMapperPOSIX::AutoObjectMapperPOSIX(void (*aLog)(const char*))
|
||||
: mImage(nullptr), mSize(0), mLog(aLog), mIsMapped(false) {}
|
||||
|
||||
AutoObjectMapperPOSIX::~AutoObjectMapperPOSIX() {
|
||||
if (!mIsMapped) {
|
||||
// There's nothing to do.
|
||||
MOZ_ASSERT(!mImage);
|
||||
MOZ_ASSERT(mSize == 0);
|
||||
return;
|
||||
}
|
||||
MOZ_ASSERT(mSize > 0);
|
||||
// The following assertion doesn't necessarily have to be true,
|
||||
// but we assume (reasonably enough) that no mmap facility would
|
||||
// be crazy enough to map anything at page zero.
|
||||
MOZ_ASSERT(mImage);
|
||||
munmap(mImage, mSize);
|
||||
}
|
||||
|
||||
bool AutoObjectMapperPOSIX::Map(/*OUT*/ void** start, /*OUT*/ size_t* length,
|
||||
std::string fileName) {
|
||||
MOZ_ASSERT(!mIsMapped);
|
||||
|
||||
int fd = open(fileName.c_str(), O_RDONLY);
|
||||
if (fd == -1) {
|
||||
failedToMessage(mLog, "open", fileName);
|
||||
return false;
|
||||
}
|
||||
|
||||
struct stat st;
|
||||
int err = fstat(fd, &st);
|
||||
size_t sz = (err == 0) ? st.st_size : 0;
|
||||
if (err != 0 || sz == 0) {
|
||||
failedToMessage(mLog, "fstat", fileName);
|
||||
close(fd);
|
||||
return false;
|
||||
}
|
||||
|
||||
void* image = mmap(nullptr, sz, PROT_READ, MAP_SHARED, fd, 0);
|
||||
if (image == MAP_FAILED) {
|
||||
failedToMessage(mLog, "mmap", fileName);
|
||||
close(fd);
|
||||
return false;
|
||||
}
|
||||
|
||||
close(fd);
|
||||
mIsMapped = true;
|
||||
mImage = *start = image;
|
||||
mSize = *length = sz;
|
||||
return true;
|
||||
}
|
||||
|
||||
# if defined(GP_OS_android)
|
||||
AutoObjectMapperFaultyLib::AutoObjectMapperFaultyLib(void (*aLog)(const char*))
|
||||
: AutoObjectMapperPOSIX(aLog), mHdl(nullptr) {}
|
||||
|
||||
AutoObjectMapperFaultyLib::~AutoObjectMapperFaultyLib() {
|
||||
if (mHdl) {
|
||||
// We've got an object mapped by faulty.lib. Unmap it via faulty.lib.
|
||||
MOZ_ASSERT(mSize > 0);
|
||||
// Assert on the basis that no valid mapping would start at page zero.
|
||||
MOZ_ASSERT(mImage);
|
||||
__dl_munmap(mHdl, mImage, mSize);
|
||||
dlclose(mHdl);
|
||||
// Stop assertions in ~AutoObjectMapperPOSIX from failing.
|
||||
mImage = nullptr;
|
||||
mSize = 0;
|
||||
}
|
||||
// At this point the parent class destructor, ~AutoObjectMapperPOSIX,
|
||||
// gets called. If that has something mapped in the normal way, it
|
||||
// will unmap it in the normal way. Unfortunately there's no
|
||||
// obvious way to enforce the requirement that the object is mapped
|
||||
// either by faulty.lib or by the parent class, but not by both.
|
||||
}
|
||||
|
||||
bool AutoObjectMapperFaultyLib::Map(/*OUT*/ void** start,
|
||||
/*OUT*/ size_t* length,
|
||||
std::string fileName) {
|
||||
MOZ_ASSERT(!mHdl);
|
||||
return false;
|
||||
}
|
||||
|
||||
# endif // defined(GP_OS_android)
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,114 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef AutoObjectMapper_h
|
||||
#define AutoObjectMapper_h
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "mozilla/Attributes.h"
|
||||
#include "PlatformMacros.h"
|
||||
|
||||
// A (nearly-) RAII class that maps an object in and then unmaps it on
|
||||
// destruction. This base class version uses the "normal" POSIX
|
||||
// functions: open, fstat, close, mmap, munmap.
|
||||
|
||||
class MOZ_STACK_CLASS AutoObjectMapperPOSIX {
|
||||
public:
|
||||
// The constructor does not attempt to map the file, because that
|
||||
// might fail. Instead, once the object has been constructed,
|
||||
// call Map() to attempt the mapping. There is no corresponding
|
||||
// Unmap() since the unmapping is done in the destructor. Failure
|
||||
// messages are sent to |aLog|.
|
||||
explicit AutoObjectMapperPOSIX(void (*aLog)(const char*));
|
||||
|
||||
// Unmap the file on destruction of this object.
|
||||
~AutoObjectMapperPOSIX();
|
||||
|
||||
// Map |fileName| into the address space and return the mapping
|
||||
// extents. If the file is zero sized this will fail. The file is
|
||||
// mapped read-only and private. Returns true iff the mapping
|
||||
// succeeded, in which case *start and *length hold its extent.
|
||||
// Once a call to Map succeeds, all subsequent calls to it will
|
||||
// fail.
|
||||
bool Map(/*OUT*/ void** start, /*OUT*/ size_t* length, std::string fileName);
|
||||
|
||||
protected:
|
||||
// If we are currently holding a mapped object, these record the
|
||||
// mapped address range.
|
||||
void* mImage;
|
||||
size_t mSize;
|
||||
|
||||
// A logging sink, for complaining about mapping failures.
|
||||
void (*mLog)(const char*);
|
||||
|
||||
private:
|
||||
// Are we currently holding a mapped object? This is private to
|
||||
// the base class. Derived classes need to have their own way to
|
||||
// track whether they are holding a mapped object.
|
||||
bool mIsMapped;
|
||||
|
||||
// Disable copying and assignment.
|
||||
AutoObjectMapperPOSIX(const AutoObjectMapperPOSIX&);
|
||||
AutoObjectMapperPOSIX& operator=(const AutoObjectMapperPOSIX&);
|
||||
// Disable heap allocation of this class.
|
||||
void* operator new(size_t);
|
||||
void* operator new[](size_t);
|
||||
void operator delete(void*);
|
||||
void operator delete[](void*);
|
||||
};
|
||||
|
||||
#if defined(GP_OS_android)
|
||||
// This is a variant of AutoObjectMapperPOSIX suitable for use in
|
||||
// conjunction with faulty.lib on Android. How it behaves depends on
|
||||
// the name of the file to be mapped. There are three possible cases:
|
||||
//
|
||||
// (1) /foo/bar/xyzzy/blah.apk!/libwurble.so
|
||||
// We hand it as-is to faulty.lib and let it fish the relevant
|
||||
// bits out of the APK.
|
||||
//
|
||||
// (2) libmozglue.so
|
||||
// This is part of the Fennec installation, but is not in the
|
||||
// APK. Instead we have to figure out the installation path
|
||||
// and look for it there. Because of faulty.lib limitations,
|
||||
// we have to use regular open/mmap instead of faulty.lib.
|
||||
//
|
||||
// (3) libanythingelse.so
|
||||
// faulty.lib assumes this is a system library, and prepends
|
||||
// "/system/lib/" to the path. So as in (1), we can give it
|
||||
// as-is to faulty.lib.
|
||||
//
|
||||
// Hence (1) and (3) require special-casing here. Case (2) simply
|
||||
// hands the problem to the parent class.
|
||||
|
||||
class MOZ_STACK_CLASS AutoObjectMapperFaultyLib : public AutoObjectMapperPOSIX {
|
||||
public:
|
||||
explicit AutoObjectMapperFaultyLib(void (*aLog)(const char*));
|
||||
|
||||
~AutoObjectMapperFaultyLib();
|
||||
|
||||
bool Map(/*OUT*/ void** start, /*OUT*/ size_t* length, std::string fileName);
|
||||
|
||||
private:
|
||||
// faulty.lib requires us to maintain an abstract handle that can be
|
||||
// used later to unmap the area. If this is non-NULL, it is assumed
|
||||
// that unmapping is to be done by faulty.lib. Otherwise it goes
|
||||
// via the normal mechanism.
|
||||
void* mHdl;
|
||||
|
||||
// Disable copying and assignment.
|
||||
AutoObjectMapperFaultyLib(const AutoObjectMapperFaultyLib&);
|
||||
AutoObjectMapperFaultyLib& operator=(const AutoObjectMapperFaultyLib&);
|
||||
// Disable heap allocation of this class.
|
||||
void* operator new(size_t);
|
||||
void* operator new[](size_t);
|
||||
void operator delete(void*);
|
||||
void operator delete[](void*);
|
||||
};
|
||||
|
||||
#endif // defined(GP_OS_android)
|
||||
|
||||
#endif // AutoObjectMapper_h
|
|
@ -1,106 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
|
||||
// Copyright (c) 2011, 2013 Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Original author: Jim Blandy <jimb@mozilla.com> <jimb@red-bean.com>
|
||||
|
||||
// This file is derived from the following files in
|
||||
// toolkit/crashreporter/google-breakpad:
|
||||
// src/common/module.cc
|
||||
// src/common/unique_string.cc
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
// There's no internal-only interface for LulCommon. Hence include
|
||||
// the external interface directly.
|
||||
# include "LulCommonExt.h"
|
||||
|
||||
# include <stdlib.h>
|
||||
# include <string.h>
|
||||
|
||||
# include <string>
|
||||
# include <map>
|
||||
|
||||
namespace lul {
|
||||
|
||||
using std::string;
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// Module
|
||||
//
|
||||
Module::Module(const string& name, const string& os, const string& architecture,
|
||||
const string& id)
|
||||
: name_(name), os_(os), architecture_(architecture), id_(id) {}
|
||||
|
||||
Module::~Module() {}
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// UniqueString
|
||||
//
|
||||
class UniqueString {
|
||||
public:
|
||||
explicit UniqueString(string str) { str_ = strdup(str.c_str()); }
|
||||
~UniqueString() { free(reinterpret_cast<void*>(const_cast<char*>(str_))); }
|
||||
const char* str_;
|
||||
};
|
||||
|
||||
const char* FromUniqueString(const UniqueString* ustr) { return ustr->str_; }
|
||||
|
||||
bool IsEmptyUniqueString(const UniqueString* ustr) {
|
||||
return (ustr->str_)[0] == '\0';
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// UniqueStringUniverse
|
||||
//
|
||||
UniqueStringUniverse::~UniqueStringUniverse() {
|
||||
for (std::map<string, UniqueString*>::iterator it = map_.begin();
|
||||
it != map_.end(); it++) {
|
||||
delete it->second;
|
||||
}
|
||||
}
|
||||
|
||||
const UniqueString* UniqueStringUniverse::ToUniqueString(string str) {
|
||||
std::map<string, UniqueString*>::iterator it = map_.find(str);
|
||||
if (it == map_.end()) {
|
||||
UniqueString* ustr = new UniqueString(str);
|
||||
map_[str] = ustr;
|
||||
return ustr;
|
||||
} else {
|
||||
return it->second;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace lul
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,509 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
|
||||
// Copyright (c) 2006, 2010, 2012, 2013 Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Original author: Jim Blandy <jimb@mozilla.com> <jimb@red-bean.com>
|
||||
|
||||
// module.h: Define google_breakpad::Module. A Module holds debugging
|
||||
// information, and can write that information out as a Breakpad
|
||||
// symbol file.
|
||||
|
||||
// (C) Copyright Greg Colvin and Beman Dawes 1998, 1999.
|
||||
// Copyright (c) 2001, 2002 Peter Dimov
|
||||
//
|
||||
// Permission to copy, use, modify, sell and distribute this software
|
||||
// is granted provided this copyright notice appears in all copies.
|
||||
// This software is provided "as is" without express or implied
|
||||
// warranty, and with no claim as to its suitability for any purpose.
|
||||
//
|
||||
// See http://www.boost.org/libs/smart_ptr/scoped_ptr.htm for documentation.
|
||||
//
|
||||
|
||||
// This file is derived from the following files in
|
||||
// toolkit/crashreporter/google-breakpad:
|
||||
// src/common/unique_string.h
|
||||
// src/common/scoped_ptr.h
|
||||
// src/common/module.h
|
||||
|
||||
// External interface for the "Common" component of LUL.
|
||||
|
||||
#ifndef LulCommonExt_h
|
||||
#define LulCommonExt_h
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <vector>
|
||||
#include <cstddef> // for std::ptrdiff_t
|
||||
|
||||
#include "mozilla/Assertions.h"
|
||||
|
||||
namespace lul {
|
||||
|
||||
using std::map;
|
||||
using std::string;
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// UniqueString
|
||||
//
|
||||
|
||||
// Abstract type
|
||||
class UniqueString;
|
||||
|
||||
// Get the contained C string (debugging only)
|
||||
const char* FromUniqueString(const UniqueString*);
|
||||
|
||||
// Is the given string empty (that is, "") ?
|
||||
bool IsEmptyUniqueString(const UniqueString*);
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// UniqueStringUniverse
|
||||
//
|
||||
|
||||
// All UniqueStrings live in some specific UniqueStringUniverse.
|
||||
class UniqueStringUniverse {
|
||||
public:
|
||||
UniqueStringUniverse() {}
|
||||
~UniqueStringUniverse();
|
||||
// Convert a |string| to a UniqueString, that lives in this universe.
|
||||
const UniqueString* ToUniqueString(string str);
|
||||
|
||||
private:
|
||||
map<string, UniqueString*> map_;
|
||||
};
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// GUID
|
||||
//
|
||||
|
||||
typedef struct {
|
||||
uint32_t data1;
|
||||
uint16_t data2;
|
||||
uint16_t data3;
|
||||
uint8_t data4[8];
|
||||
} MDGUID; // GUID
|
||||
|
||||
typedef MDGUID GUID;
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// scoped_ptr
|
||||
//
|
||||
|
||||
// scoped_ptr mimics a built-in pointer except that it guarantees deletion
|
||||
// of the object pointed to, either on destruction of the scoped_ptr or via
|
||||
// an explicit reset(). scoped_ptr is a simple solution for simple needs;
|
||||
// use shared_ptr or std::auto_ptr if your needs are more complex.
|
||||
|
||||
// *** NOTE ***
|
||||
// If your scoped_ptr is a class member of class FOO pointing to a
|
||||
// forward declared type BAR (as shown below), then you MUST use a non-inlined
|
||||
// version of the destructor. The destructor of a scoped_ptr (called from
|
||||
// FOO's destructor) must have a complete definition of BAR in order to
|
||||
// destroy it. Example:
|
||||
//
|
||||
// -- foo.h --
|
||||
// class BAR;
|
||||
//
|
||||
// class FOO {
|
||||
// public:
|
||||
// FOO();
|
||||
// ~FOO(); // Required for sources that instantiate class FOO to compile!
|
||||
//
|
||||
// private:
|
||||
// scoped_ptr<BAR> bar_;
|
||||
// };
|
||||
//
|
||||
// -- foo.cc --
|
||||
// #include "foo.h"
|
||||
// FOO::~FOO() {} // Empty, but must be non-inlined to FOO's class definition.
|
||||
|
||||
// scoped_ptr_malloc added by Google
|
||||
// When one of these goes out of scope, instead of doing a delete or
|
||||
// delete[], it calls free(). scoped_ptr_malloc<char> is likely to see
|
||||
// much more use than any other specializations.
|
||||
|
||||
// release() added by Google
|
||||
// Use this to conditionally transfer ownership of a heap-allocated object
|
||||
// to the caller, usually on method success.
|
||||
|
||||
template <typename T>
|
||||
class scoped_ptr {
|
||||
private:
|
||||
T* ptr;
|
||||
|
||||
scoped_ptr(scoped_ptr const&);
|
||||
scoped_ptr& operator=(scoped_ptr const&);
|
||||
|
||||
public:
|
||||
typedef T element_type;
|
||||
|
||||
explicit scoped_ptr(T* p = 0) : ptr(p) {}
|
||||
|
||||
~scoped_ptr() { delete ptr; }
|
||||
|
||||
void reset(T* p = 0) {
|
||||
if (ptr != p) {
|
||||
delete ptr;
|
||||
ptr = p;
|
||||
}
|
||||
}
|
||||
|
||||
T& operator*() const {
|
||||
MOZ_ASSERT(ptr != 0);
|
||||
return *ptr;
|
||||
}
|
||||
|
||||
T* operator->() const {
|
||||
MOZ_ASSERT(ptr != 0);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
bool operator==(T* p) const { return ptr == p; }
|
||||
|
||||
bool operator!=(T* p) const { return ptr != p; }
|
||||
|
||||
T* get() const { return ptr; }
|
||||
|
||||
void swap(scoped_ptr& b) {
|
||||
T* tmp = b.ptr;
|
||||
b.ptr = ptr;
|
||||
ptr = tmp;
|
||||
}
|
||||
|
||||
T* release() {
|
||||
T* tmp = ptr;
|
||||
ptr = 0;
|
||||
return tmp;
|
||||
}
|
||||
|
||||
private:
|
||||
// no reason to use these: each scoped_ptr should have its own object
|
||||
template <typename U>
|
||||
bool operator==(scoped_ptr<U> const& p) const;
|
||||
template <typename U>
|
||||
bool operator!=(scoped_ptr<U> const& p) const;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
inline void swap(scoped_ptr<T>& a, scoped_ptr<T>& b) {
|
||||
a.swap(b);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline bool operator==(T* p, const scoped_ptr<T>& b) {
|
||||
return p == b.get();
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline bool operator!=(T* p, const scoped_ptr<T>& b) {
|
||||
return p != b.get();
|
||||
}
|
||||
|
||||
// scoped_array extends scoped_ptr to arrays. Deletion of the array pointed to
|
||||
// is guaranteed, either on destruction of the scoped_array or via an explicit
|
||||
// reset(). Use shared_array or std::vector if your needs are more complex.
|
||||
|
||||
template <typename T>
|
||||
class scoped_array {
|
||||
private:
|
||||
T* ptr;
|
||||
|
||||
scoped_array(scoped_array const&);
|
||||
scoped_array& operator=(scoped_array const&);
|
||||
|
||||
public:
|
||||
typedef T element_type;
|
||||
|
||||
explicit scoped_array(T* p = 0) : ptr(p) {}
|
||||
|
||||
~scoped_array() { delete[] ptr; }
|
||||
|
||||
void reset(T* p = 0) {
|
||||
if (ptr != p) {
|
||||
delete[] ptr;
|
||||
ptr = p;
|
||||
}
|
||||
}
|
||||
|
||||
T& operator[](std::ptrdiff_t i) const {
|
||||
MOZ_ASSERT(ptr != 0);
|
||||
MOZ_ASSERT(i >= 0);
|
||||
return ptr[i];
|
||||
}
|
||||
|
||||
bool operator==(T* p) const { return ptr == p; }
|
||||
|
||||
bool operator!=(T* p) const { return ptr != p; }
|
||||
|
||||
T* get() const { return ptr; }
|
||||
|
||||
void swap(scoped_array& b) {
|
||||
T* tmp = b.ptr;
|
||||
b.ptr = ptr;
|
||||
ptr = tmp;
|
||||
}
|
||||
|
||||
T* release() {
|
||||
T* tmp = ptr;
|
||||
ptr = 0;
|
||||
return tmp;
|
||||
}
|
||||
|
||||
private:
|
||||
// no reason to use these: each scoped_array should have its own object
|
||||
template <typename U>
|
||||
bool operator==(scoped_array<U> const& p) const;
|
||||
template <typename U>
|
||||
bool operator!=(scoped_array<U> const& p) const;
|
||||
};
|
||||
|
||||
template <class T>
|
||||
inline void swap(scoped_array<T>& a, scoped_array<T>& b) {
|
||||
a.swap(b);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline bool operator==(T* p, const scoped_array<T>& b) {
|
||||
return p == b.get();
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline bool operator!=(T* p, const scoped_array<T>& b) {
|
||||
return p != b.get();
|
||||
}
|
||||
|
||||
// This class wraps the c library function free() in a class that can be
|
||||
// passed as a template argument to scoped_ptr_malloc below.
|
||||
class ScopedPtrMallocFree {
|
||||
public:
|
||||
inline void operator()(void* x) const { free(x); }
|
||||
};
|
||||
|
||||
// scoped_ptr_malloc<> is similar to scoped_ptr<>, but it accepts a
|
||||
// second template argument, the functor used to free the object.
|
||||
|
||||
template <typename T, typename FreeProc = ScopedPtrMallocFree>
|
||||
class scoped_ptr_malloc {
|
||||
private:
|
||||
T* ptr;
|
||||
|
||||
scoped_ptr_malloc(scoped_ptr_malloc const&);
|
||||
scoped_ptr_malloc& operator=(scoped_ptr_malloc const&);
|
||||
|
||||
public:
|
||||
typedef T element_type;
|
||||
|
||||
explicit scoped_ptr_malloc(T* p = 0) : ptr(p) {}
|
||||
|
||||
~scoped_ptr_malloc() { free_((void*)ptr); }
|
||||
|
||||
void reset(T* p = 0) {
|
||||
if (ptr != p) {
|
||||
free_((void*)ptr);
|
||||
ptr = p;
|
||||
}
|
||||
}
|
||||
|
||||
T& operator*() const {
|
||||
MOZ_ASSERT(ptr != 0);
|
||||
return *ptr;
|
||||
}
|
||||
|
||||
T* operator->() const {
|
||||
MOZ_ASSERT(ptr != 0);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
bool operator==(T* p) const { return ptr == p; }
|
||||
|
||||
bool operator!=(T* p) const { return ptr != p; }
|
||||
|
||||
T* get() const { return ptr; }
|
||||
|
||||
void swap(scoped_ptr_malloc& b) {
|
||||
T* tmp = b.ptr;
|
||||
b.ptr = ptr;
|
||||
ptr = tmp;
|
||||
}
|
||||
|
||||
T* release() {
|
||||
T* tmp = ptr;
|
||||
ptr = 0;
|
||||
return tmp;
|
||||
}
|
||||
|
||||
private:
|
||||
// no reason to use these: each scoped_ptr_malloc should have its own object
|
||||
template <typename U, typename GP>
|
||||
bool operator==(scoped_ptr_malloc<U, GP> const& p) const;
|
||||
template <typename U, typename GP>
|
||||
bool operator!=(scoped_ptr_malloc<U, GP> const& p) const;
|
||||
|
||||
static FreeProc const free_;
|
||||
};
|
||||
|
||||
template <typename T, typename FP>
|
||||
FP const scoped_ptr_malloc<T, FP>::free_ = FP();
|
||||
|
||||
template <typename T, typename FP>
|
||||
inline void swap(scoped_ptr_malloc<T, FP>& a, scoped_ptr_malloc<T, FP>& b) {
|
||||
a.swap(b);
|
||||
}
|
||||
|
||||
template <typename T, typename FP>
|
||||
inline bool operator==(T* p, const scoped_ptr_malloc<T, FP>& b) {
|
||||
return p == b.get();
|
||||
}
|
||||
|
||||
template <typename T, typename FP>
|
||||
inline bool operator!=(T* p, const scoped_ptr_malloc<T, FP>& b) {
|
||||
return p != b.get();
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// Module
|
||||
//
|
||||
|
||||
// A Module represents the contents of a module, and supports methods
|
||||
// for adding information produced by parsing STABS or DWARF data
|
||||
// --- possibly both from the same file --- and then writing out the
|
||||
// unified contents as a Breakpad-format symbol file.
|
||||
class Module {
|
||||
public:
|
||||
// The type of addresses and sizes in a symbol table.
|
||||
typedef uint64_t Address;
|
||||
|
||||
// Representation of an expression. This can either be a postfix
|
||||
// expression, in which case it is stored as a string, or a simple
|
||||
// expression of the form (identifier + imm) or *(identifier + imm).
|
||||
// It can also be invalid (denoting "no value").
|
||||
enum ExprHow { kExprInvalid = 1, kExprPostfix, kExprSimple, kExprSimpleMem };
|
||||
|
||||
struct Expr {
|
||||
// Construct a simple-form expression
|
||||
Expr(const UniqueString* ident, long offset, bool deref) {
|
||||
if (IsEmptyUniqueString(ident)) {
|
||||
Expr();
|
||||
} else {
|
||||
postfix_ = "";
|
||||
ident_ = ident;
|
||||
offset_ = offset;
|
||||
how_ = deref ? kExprSimpleMem : kExprSimple;
|
||||
}
|
||||
}
|
||||
|
||||
// Construct an invalid expression
|
||||
Expr() {
|
||||
postfix_ = "";
|
||||
ident_ = nullptr;
|
||||
offset_ = 0;
|
||||
how_ = kExprInvalid;
|
||||
}
|
||||
|
||||
// Return the postfix expression string, either directly,
|
||||
// if this is a postfix expression, or by synthesising it
|
||||
// for a simple expression.
|
||||
std::string getExprPostfix() const {
|
||||
switch (how_) {
|
||||
case kExprPostfix:
|
||||
return postfix_;
|
||||
case kExprSimple:
|
||||
case kExprSimpleMem: {
|
||||
char buf[40];
|
||||
sprintf(buf, " %ld %c%s", labs(offset_), offset_ < 0 ? '-' : '+',
|
||||
how_ == kExprSimple ? "" : " ^");
|
||||
return std::string(FromUniqueString(ident_)) + std::string(buf);
|
||||
}
|
||||
case kExprInvalid:
|
||||
default:
|
||||
MOZ_ASSERT(0 && "getExprPostfix: invalid Module::Expr type");
|
||||
return "Expr::genExprPostfix: kExprInvalid";
|
||||
}
|
||||
}
|
||||
|
||||
// The identifier that gives the starting value for simple expressions.
|
||||
const UniqueString* ident_;
|
||||
// The offset to add for simple expressions.
|
||||
long offset_;
|
||||
// The Postfix expression string to evaluate for non-simple expressions.
|
||||
std::string postfix_;
|
||||
// The operation expressed by this expression.
|
||||
ExprHow how_;
|
||||
};
|
||||
|
||||
// A map from register names to expressions that recover
|
||||
// their values. This can represent a complete set of rules to
|
||||
// follow at some address, or a set of changes to be applied to an
|
||||
// extant set of rules.
|
||||
// NOTE! there are two completely different types called RuleMap. This
|
||||
// is one of them.
|
||||
typedef std::map<const UniqueString*, Expr> RuleMap;
|
||||
|
||||
// A map from addresses to RuleMaps, representing changes that take
|
||||
// effect at given addresses.
|
||||
typedef std::map<Address, RuleMap> RuleChangeMap;
|
||||
|
||||
// A range of 'STACK CFI' stack walking information. An instance of
|
||||
// this structure corresponds to a 'STACK CFI INIT' record and the
|
||||
// subsequent 'STACK CFI' records that fall within its range.
|
||||
struct StackFrameEntry {
|
||||
// The starting address and number of bytes of machine code this
|
||||
// entry covers.
|
||||
Address address, size;
|
||||
|
||||
// The initial register recovery rules, in force at the starting
|
||||
// address.
|
||||
RuleMap initial_rules;
|
||||
|
||||
// A map from addresses to rule changes. To find the rules in
|
||||
// force at a given address, start with initial_rules, and then
|
||||
// apply the changes given in this map for all addresses up to and
|
||||
// including the address you're interested in.
|
||||
RuleChangeMap rule_changes;
|
||||
};
|
||||
|
||||
// Create a new module with the given name, operating system,
|
||||
// architecture, and ID string.
|
||||
Module(const std::string& name, const std::string& os,
|
||||
const std::string& architecture, const std::string& id);
|
||||
~Module();
|
||||
|
||||
private:
|
||||
// Module header entries.
|
||||
std::string name_, os_, architecture_, id_;
|
||||
};
|
||||
|
||||
} // namespace lul
|
||||
|
||||
#endif // LulCommonExt_h
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -1,193 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
|
||||
// Copyright (c) 2008, 2010 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// CFI reader author: Jim Blandy <jimb@mozilla.com> <jimb@red-bean.com>
|
||||
|
||||
// This file is derived from the following file in
|
||||
// toolkit/crashreporter/google-breakpad:
|
||||
// src/common/dwarf/dwarf2enums.h
|
||||
|
||||
#ifndef LulDwarfInt_h
|
||||
#define LulDwarfInt_h
|
||||
|
||||
#include "LulCommonExt.h"
|
||||
#include "LulDwarfExt.h"
|
||||
|
||||
namespace lul {
|
||||
|
||||
// These enums do not follow the google3 style only because they are
|
||||
// known universally (specs, other implementations) by the names in
|
||||
// exactly this capitalization.
|
||||
// Tag names and codes.
|
||||
|
||||
// Call Frame Info instructions.
|
||||
enum DwarfCFI {
|
||||
DW_CFA_advance_loc = 0x40,
|
||||
DW_CFA_offset = 0x80,
|
||||
DW_CFA_restore = 0xc0,
|
||||
DW_CFA_nop = 0x00,
|
||||
DW_CFA_set_loc = 0x01,
|
||||
DW_CFA_advance_loc1 = 0x02,
|
||||
DW_CFA_advance_loc2 = 0x03,
|
||||
DW_CFA_advance_loc4 = 0x04,
|
||||
DW_CFA_offset_extended = 0x05,
|
||||
DW_CFA_restore_extended = 0x06,
|
||||
DW_CFA_undefined = 0x07,
|
||||
DW_CFA_same_value = 0x08,
|
||||
DW_CFA_register = 0x09,
|
||||
DW_CFA_remember_state = 0x0a,
|
||||
DW_CFA_restore_state = 0x0b,
|
||||
DW_CFA_def_cfa = 0x0c,
|
||||
DW_CFA_def_cfa_register = 0x0d,
|
||||
DW_CFA_def_cfa_offset = 0x0e,
|
||||
DW_CFA_def_cfa_expression = 0x0f,
|
||||
DW_CFA_expression = 0x10,
|
||||
DW_CFA_offset_extended_sf = 0x11,
|
||||
DW_CFA_def_cfa_sf = 0x12,
|
||||
DW_CFA_def_cfa_offset_sf = 0x13,
|
||||
DW_CFA_val_offset = 0x14,
|
||||
DW_CFA_val_offset_sf = 0x15,
|
||||
DW_CFA_val_expression = 0x16,
|
||||
|
||||
// Opcodes in this range are reserved for user extensions.
|
||||
DW_CFA_lo_user = 0x1c,
|
||||
DW_CFA_hi_user = 0x3f,
|
||||
|
||||
// SGI/MIPS specific.
|
||||
DW_CFA_MIPS_advance_loc8 = 0x1d,
|
||||
|
||||
// GNU extensions.
|
||||
DW_CFA_GNU_window_save = 0x2d,
|
||||
DW_CFA_GNU_args_size = 0x2e,
|
||||
DW_CFA_GNU_negative_offset_extended = 0x2f
|
||||
};
|
||||
|
||||
// Exception handling 'z' augmentation letters.
|
||||
enum DwarfZAugmentationCodes {
|
||||
// If the CFI augmentation string begins with 'z', then the CIE and FDE
|
||||
// have an augmentation data area just before the instructions, whose
|
||||
// contents are determined by the subsequent augmentation letters.
|
||||
DW_Z_augmentation_start = 'z',
|
||||
|
||||
// If this letter is present in a 'z' augmentation string, the CIE
|
||||
// augmentation data includes a pointer encoding, and the FDE
|
||||
// augmentation data includes a language-specific data area pointer,
|
||||
// represented using that encoding.
|
||||
DW_Z_has_LSDA = 'L',
|
||||
|
||||
// If this letter is present in a 'z' augmentation string, the CIE
|
||||
// augmentation data includes a pointer encoding, followed by a pointer
|
||||
// to a personality routine, represented using that encoding.
|
||||
DW_Z_has_personality_routine = 'P',
|
||||
|
||||
// If this letter is present in a 'z' augmentation string, the CIE
|
||||
// augmentation data includes a pointer encoding describing how the FDE's
|
||||
// initial location, address range, and DW_CFA_set_loc operands are
|
||||
// encoded.
|
||||
DW_Z_has_FDE_address_encoding = 'R',
|
||||
|
||||
// If this letter is present in a 'z' augmentation string, then code
|
||||
// addresses covered by FDEs that cite this CIE are signal delivery
|
||||
// trampolines. Return addresses of frames in trampolines should not be
|
||||
// adjusted as described in section 6.4.4 of the DWARF 3 spec.
|
||||
DW_Z_is_signal_trampoline = 'S'
|
||||
};
|
||||
|
||||
// Expression opcodes
|
||||
enum DwarfExpressionOpcodes {
|
||||
DW_OP_addr = 0x03,
|
||||
DW_OP_deref = 0x06,
|
||||
DW_OP_const1s = 0x09,
|
||||
DW_OP_const2u = 0x0a,
|
||||
DW_OP_const2s = 0x0b,
|
||||
DW_OP_const4u = 0x0c,
|
||||
DW_OP_const4s = 0x0d,
|
||||
DW_OP_const8u = 0x0e,
|
||||
DW_OP_const8s = 0x0f,
|
||||
DW_OP_constu = 0x10,
|
||||
DW_OP_consts = 0x11,
|
||||
DW_OP_dup = 0x12,
|
||||
DW_OP_drop = 0x13,
|
||||
DW_OP_over = 0x14,
|
||||
DW_OP_pick = 0x15,
|
||||
DW_OP_swap = 0x16,
|
||||
DW_OP_rot = 0x17,
|
||||
DW_OP_xderef = 0x18,
|
||||
DW_OP_abs = 0x19,
|
||||
DW_OP_and = 0x1a,
|
||||
DW_OP_div = 0x1b,
|
||||
DW_OP_minus = 0x1c,
|
||||
DW_OP_mod = 0x1d,
|
||||
DW_OP_mul = 0x1e,
|
||||
DW_OP_neg = 0x1f,
|
||||
DW_OP_not = 0x20,
|
||||
DW_OP_or = 0x21,
|
||||
DW_OP_plus = 0x22,
|
||||
DW_OP_plus_uconst = 0x23,
|
||||
DW_OP_shl = 0x24,
|
||||
DW_OP_shr = 0x25,
|
||||
DW_OP_shra = 0x26,
|
||||
DW_OP_xor = 0x27,
|
||||
DW_OP_skip = 0x2f,
|
||||
DW_OP_bra = 0x28,
|
||||
DW_OP_eq = 0x29,
|
||||
DW_OP_ge = 0x2a,
|
||||
DW_OP_gt = 0x2b,
|
||||
DW_OP_le = 0x2c,
|
||||
DW_OP_lt = 0x2d,
|
||||
DW_OP_ne = 0x2e,
|
||||
DW_OP_lit0 = 0x30,
|
||||
DW_OP_lit31 = 0x4f,
|
||||
DW_OP_reg0 = 0x50,
|
||||
DW_OP_reg31 = 0x6f,
|
||||
DW_OP_breg0 = 0x70,
|
||||
DW_OP_breg31 = 0x8f,
|
||||
DW_OP_regx = 0x90,
|
||||
DW_OP_fbreg = 0x91,
|
||||
DW_OP_bregx = 0x92,
|
||||
DW_OP_piece = 0x93,
|
||||
DW_OP_deref_size = 0x94,
|
||||
DW_OP_xderef_size = 0x95,
|
||||
DW_OP_nop = 0x96,
|
||||
DW_OP_push_object_address = 0x97,
|
||||
DW_OP_call2 = 0x98,
|
||||
DW_OP_call4 = 0x99,
|
||||
DW_OP_call_ref = 0x9a,
|
||||
DW_OP_form_tls_address = 0x9b,
|
||||
DW_OP_call_frame_cfa = 0x9c,
|
||||
DW_OP_bit_piece = 0x9d,
|
||||
DW_OP_lo_user = 0xe0,
|
||||
DW_OP_hi_user = 0xff
|
||||
};
|
||||
|
||||
} // namespace lul
|
||||
|
||||
#endif // LulDwarfInt_h
|
|
@ -1,559 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include "LulDwarfSummariser.h"
|
||||
|
||||
# include "LulDwarfExt.h"
|
||||
|
||||
# include "mozilla/Assertions.h"
|
||||
# include "mozilla/Sprintf.h"
|
||||
|
||||
// Set this to 1 for verbose logging
|
||||
# define DEBUG_SUMMARISER 0
|
||||
|
||||
namespace lul {
|
||||
|
||||
// Do |s64|'s lowest 32 bits sign extend back to |s64| itself?
|
||||
static inline bool fitsIn32Bits(int64 s64) {
|
||||
return s64 == ((s64 & 0xffffffff) ^ 0x80000000) - 0x80000000;
|
||||
}
|
||||
|
||||
// Check a LExpr prefix expression, starting at pfxInstrs[start] up to
|
||||
// the next PX_End instruction, to ensure that:
|
||||
// * It only mentions registers that are tracked on this target
|
||||
// * The start point is sane
|
||||
// If the expression is ok, return NULL. Else return a pointer
|
||||
// a const char* holding a bit of text describing the problem.
|
||||
static const char* checkPfxExpr(const vector<PfxInstr>* pfxInstrs,
|
||||
int64_t start) {
|
||||
size_t nInstrs = pfxInstrs->size();
|
||||
if (start < 0 || start >= (ssize_t)nInstrs) {
|
||||
return "bogus start point";
|
||||
}
|
||||
size_t i;
|
||||
for (i = start; i < nInstrs; i++) {
|
||||
PfxInstr pxi = (*pfxInstrs)[i];
|
||||
if (pxi.mOpcode == PX_End) break;
|
||||
if (pxi.mOpcode == PX_DwReg &&
|
||||
!registerIsTracked((DW_REG_NUMBER)pxi.mOperand)) {
|
||||
return "uses untracked reg";
|
||||
}
|
||||
}
|
||||
return nullptr; // success
|
||||
}
|
||||
|
||||
Summariser::Summariser(SecMap* aSecMap, uintptr_t aTextBias,
|
||||
void (*aLog)(const char*))
|
||||
: mSecMap(aSecMap), mTextBias(aTextBias), mLog(aLog) {
|
||||
mCurrAddr = 0;
|
||||
mMax1Addr = 0; // Gives an empty range.
|
||||
|
||||
// Initialise the running RuleSet to "haven't got a clue" status.
|
||||
new (&mCurrRules) RuleSet();
|
||||
}
|
||||
|
||||
void Summariser::Entry(uintptr_t aAddress, uintptr_t aLength) {
|
||||
aAddress += mTextBias;
|
||||
if (DEBUG_SUMMARISER) {
|
||||
char buf[100];
|
||||
SprintfLiteral(buf, "LUL Entry(%llx, %llu)\n",
|
||||
(unsigned long long int)aAddress,
|
||||
(unsigned long long int)aLength);
|
||||
mLog(buf);
|
||||
}
|
||||
// This throws away any previous summary, that is, assumes
|
||||
// that the previous summary, if any, has been properly finished
|
||||
// by a call to End().
|
||||
mCurrAddr = aAddress;
|
||||
mMax1Addr = aAddress + aLength;
|
||||
new (&mCurrRules) RuleSet();
|
||||
}
|
||||
|
||||
void Summariser::Rule(uintptr_t aAddress, int aNewReg, LExprHow how,
|
||||
int16_t oldReg, int64_t offset) {
|
||||
aAddress += mTextBias;
|
||||
if (DEBUG_SUMMARISER) {
|
||||
char buf[100];
|
||||
if (how == NODEREF || how == DEREF) {
|
||||
bool deref = how == DEREF;
|
||||
SprintfLiteral(buf, "LUL 0x%llx old-r%d = %sr%d + %lld%s\n",
|
||||
(unsigned long long int)aAddress, aNewReg,
|
||||
deref ? "*(" : "", (int)oldReg, (long long int)offset,
|
||||
deref ? ")" : "");
|
||||
} else if (how == PFXEXPR) {
|
||||
SprintfLiteral(buf, "LUL 0x%llx old-r%d = pfx-expr-at %lld\n",
|
||||
(unsigned long long int)aAddress, aNewReg,
|
||||
(long long int)offset);
|
||||
} else {
|
||||
SprintfLiteral(buf, "LUL 0x%llx old-r%d = (invalid LExpr!)\n",
|
||||
(unsigned long long int)aAddress, aNewReg);
|
||||
}
|
||||
mLog(buf);
|
||||
}
|
||||
|
||||
if (mCurrAddr < aAddress) {
|
||||
// Flush the existing summary first.
|
||||
mCurrRules.mAddr = mCurrAddr;
|
||||
mCurrRules.mLen = aAddress - mCurrAddr;
|
||||
mSecMap->AddRuleSet(&mCurrRules);
|
||||
if (DEBUG_SUMMARISER) {
|
||||
mLog("LUL ");
|
||||
mCurrRules.Print(mLog);
|
||||
mLog("\n");
|
||||
}
|
||||
mCurrAddr = aAddress;
|
||||
}
|
||||
|
||||
// If for some reason summarisation fails, either or both of these
|
||||
// become non-null and point at constant text describing the
|
||||
// problem. Using two rather than just one avoids complications of
|
||||
// having to concatenate two strings to produce a complete error message.
|
||||
const char* reason1 = nullptr;
|
||||
const char* reason2 = nullptr;
|
||||
|
||||
// |offset| needs to be a 32 bit value that sign extends to 64 bits
|
||||
// on a 64 bit target. We will need to incorporate |offset| into
|
||||
// any LExpr made here. So we may as well check it right now.
|
||||
if (!fitsIn32Bits(offset)) {
|
||||
reason1 = "offset not in signed 32-bit range";
|
||||
goto cant_summarise;
|
||||
}
|
||||
|
||||
// FIXME: factor out common parts of the arch-dependent summarisers.
|
||||
|
||||
# if defined(GP_ARCH_arm)
|
||||
|
||||
// ----------------- arm ----------------- //
|
||||
|
||||
// Now, can we add the rule to our summary? This depends on whether
|
||||
// the registers and the overall expression are representable. This
|
||||
// is the heart of the summarisation process.
|
||||
switch (aNewReg) {
|
||||
case DW_REG_CFA:
|
||||
// This is a rule that defines the CFA. The only forms we
|
||||
// choose to represent are: r7/11/12/13 + offset. The offset
|
||||
// must fit into 32 bits since 'uintptr_t' is 32 bit on ARM,
|
||||
// hence there is no need to check it for overflow.
|
||||
if (how != NODEREF) {
|
||||
reason1 = "rule for DW_REG_CFA: invalid |how|";
|
||||
goto cant_summarise;
|
||||
}
|
||||
switch (oldReg) {
|
||||
case DW_REG_ARM_R7:
|
||||
case DW_REG_ARM_R11:
|
||||
case DW_REG_ARM_R12:
|
||||
case DW_REG_ARM_R13:
|
||||
break;
|
||||
default:
|
||||
reason1 = "rule for DW_REG_CFA: invalid |oldReg|";
|
||||
goto cant_summarise;
|
||||
}
|
||||
mCurrRules.mCfaExpr = LExpr(how, oldReg, offset);
|
||||
break;
|
||||
|
||||
case DW_REG_ARM_R7:
|
||||
case DW_REG_ARM_R11:
|
||||
case DW_REG_ARM_R12:
|
||||
case DW_REG_ARM_R13:
|
||||
case DW_REG_ARM_R14:
|
||||
case DW_REG_ARM_R15: {
|
||||
// This is a new rule for R7, R11, R12, R13 (SP), R14 (LR) or
|
||||
// R15 (the return address).
|
||||
switch (how) {
|
||||
case NODEREF:
|
||||
case DEREF:
|
||||
// Check the old register is one we're tracking.
|
||||
if (!registerIsTracked((DW_REG_NUMBER)oldReg) &&
|
||||
oldReg != DW_REG_CFA) {
|
||||
reason1 = "rule for R7/11/12/13/14/15: uses untracked reg";
|
||||
goto cant_summarise;
|
||||
}
|
||||
break;
|
||||
case PFXEXPR: {
|
||||
// Check that the prefix expression only mentions tracked registers.
|
||||
const vector<PfxInstr>* pfxInstrs = mSecMap->GetPfxInstrs();
|
||||
reason2 = checkPfxExpr(pfxInstrs, offset);
|
||||
if (reason2) {
|
||||
reason1 = "rule for R7/11/12/13/14/15: ";
|
||||
goto cant_summarise;
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
goto cant_summarise;
|
||||
}
|
||||
LExpr expr = LExpr(how, oldReg, offset);
|
||||
switch (aNewReg) {
|
||||
case DW_REG_ARM_R7:
|
||||
mCurrRules.mR7expr = expr;
|
||||
break;
|
||||
case DW_REG_ARM_R11:
|
||||
mCurrRules.mR11expr = expr;
|
||||
break;
|
||||
case DW_REG_ARM_R12:
|
||||
mCurrRules.mR12expr = expr;
|
||||
break;
|
||||
case DW_REG_ARM_R13:
|
||||
mCurrRules.mR13expr = expr;
|
||||
break;
|
||||
case DW_REG_ARM_R14:
|
||||
mCurrRules.mR14expr = expr;
|
||||
break;
|
||||
case DW_REG_ARM_R15:
|
||||
mCurrRules.mR15expr = expr;
|
||||
break;
|
||||
default:
|
||||
MOZ_ASSERT(0);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
// Leave |reason1| and |reason2| unset here. This program point
|
||||
// is reached so often that it causes a flood of "Can't
|
||||
// summarise" messages. In any case, we don't really care about
|
||||
// the fact that this summary would produce a new value for a
|
||||
// register that we're not tracking. We do on the other hand
|
||||
// care if the summary's expression *uses* a register that we're
|
||||
// not tracking. But in that case one of the above failures
|
||||
// should tell us which.
|
||||
goto cant_summarise;
|
||||
}
|
||||
|
||||
// Mark callee-saved registers (r4 .. r11) as unchanged, if there is
|
||||
// no other information about them. FIXME: do this just once, at
|
||||
// the point where the ruleset is committed.
|
||||
if (mCurrRules.mR7expr.mHow == UNKNOWN) {
|
||||
mCurrRules.mR7expr = LExpr(NODEREF, DW_REG_ARM_R7, 0);
|
||||
}
|
||||
if (mCurrRules.mR11expr.mHow == UNKNOWN) {
|
||||
mCurrRules.mR11expr = LExpr(NODEREF, DW_REG_ARM_R11, 0);
|
||||
}
|
||||
if (mCurrRules.mR12expr.mHow == UNKNOWN) {
|
||||
mCurrRules.mR12expr = LExpr(NODEREF, DW_REG_ARM_R12, 0);
|
||||
}
|
||||
|
||||
// The old r13 (SP) value before the call is always the same as the
|
||||
// CFA.
|
||||
mCurrRules.mR13expr = LExpr(NODEREF, DW_REG_CFA, 0);
|
||||
|
||||
// If there's no information about R15 (the return address), say
|
||||
// it's a copy of R14 (the link register).
|
||||
if (mCurrRules.mR15expr.mHow == UNKNOWN) {
|
||||
mCurrRules.mR15expr = LExpr(NODEREF, DW_REG_ARM_R14, 0);
|
||||
}
|
||||
|
||||
# elif defined(GP_ARCH_arm64)
|
||||
|
||||
// ----------------- arm64 ----------------- //
|
||||
|
||||
switch (aNewReg) {
|
||||
case DW_REG_CFA:
|
||||
if (how != NODEREF) {
|
||||
reason1 = "rule for DW_REG_CFA: invalid |how|";
|
||||
goto cant_summarise;
|
||||
}
|
||||
switch (oldReg) {
|
||||
case DW_REG_AARCH64_X29:
|
||||
case DW_REG_AARCH64_SP:
|
||||
break;
|
||||
default:
|
||||
reason1 = "rule for DW_REG_CFA: invalid |oldReg|";
|
||||
goto cant_summarise;
|
||||
}
|
||||
mCurrRules.mCfaExpr = LExpr(how, oldReg, offset);
|
||||
break;
|
||||
|
||||
case DW_REG_AARCH64_X29:
|
||||
case DW_REG_AARCH64_X30:
|
||||
case DW_REG_AARCH64_SP: {
|
||||
switch (how) {
|
||||
case NODEREF:
|
||||
case DEREF:
|
||||
// Check the old register is one we're tracking.
|
||||
if (!registerIsTracked((DW_REG_NUMBER)oldReg) &&
|
||||
oldReg != DW_REG_CFA) {
|
||||
reason1 = "rule for X29/X30/SP: uses untracked reg";
|
||||
goto cant_summarise;
|
||||
}
|
||||
break;
|
||||
case PFXEXPR: {
|
||||
// Check that the prefix expression only mentions tracked registers.
|
||||
const vector<PfxInstr>* pfxInstrs = mSecMap->GetPfxInstrs();
|
||||
reason2 = checkPfxExpr(pfxInstrs, offset);
|
||||
if (reason2) {
|
||||
reason1 = "rule for X29/X30/SP: ";
|
||||
goto cant_summarise;
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
goto cant_summarise;
|
||||
}
|
||||
LExpr expr = LExpr(how, oldReg, offset);
|
||||
switch (aNewReg) {
|
||||
case DW_REG_AARCH64_X29:
|
||||
mCurrRules.mX29expr = expr;
|
||||
break;
|
||||
case DW_REG_AARCH64_X30:
|
||||
mCurrRules.mX30expr = expr;
|
||||
break;
|
||||
case DW_REG_AARCH64_SP:
|
||||
mCurrRules.mSPexpr = expr;
|
||||
break;
|
||||
default:
|
||||
MOZ_ASSERT(0);
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
// Leave |reason1| and |reason2| unset here, for the reasons explained
|
||||
// in the analogous point
|
||||
goto cant_summarise;
|
||||
}
|
||||
|
||||
if (mCurrRules.mX29expr.mHow == UNKNOWN) {
|
||||
mCurrRules.mX29expr = LExpr(NODEREF, DW_REG_AARCH64_X29, 0);
|
||||
}
|
||||
if (mCurrRules.mX30expr.mHow == UNKNOWN) {
|
||||
mCurrRules.mX30expr = LExpr(NODEREF, DW_REG_AARCH64_X30, 0);
|
||||
}
|
||||
// On aarch64, it seems the old SP value before the call is always the
|
||||
// same as the CFA. Therefore, in the absence of any other way to
|
||||
// recover the SP, specify that the CFA should be copied.
|
||||
if (mCurrRules.mSPexpr.mHow == UNKNOWN) {
|
||||
mCurrRules.mSPexpr = LExpr(NODEREF, DW_REG_CFA, 0);
|
||||
}
|
||||
# elif defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
|
||||
|
||||
// ---------------- x64/x86 ---------------- //
|
||||
|
||||
// Now, can we add the rule to our summary? This depends on whether
|
||||
// the registers and the overall expression are representable. This
|
||||
// is the heart of the summarisation process.
|
||||
switch (aNewReg) {
|
||||
case DW_REG_CFA: {
|
||||
// This is a rule that defines the CFA. The only forms we choose to
|
||||
// represent are: = SP+offset, = FP+offset, or =prefix-expr.
|
||||
switch (how) {
|
||||
case NODEREF:
|
||||
if (oldReg != DW_REG_INTEL_XSP && oldReg != DW_REG_INTEL_XBP) {
|
||||
reason1 = "rule for DW_REG_CFA: invalid |oldReg|";
|
||||
goto cant_summarise;
|
||||
}
|
||||
break;
|
||||
case DEREF:
|
||||
reason1 = "rule for DW_REG_CFA: invalid |how|";
|
||||
goto cant_summarise;
|
||||
case PFXEXPR: {
|
||||
// Check that the prefix expression only mentions tracked registers.
|
||||
const vector<PfxInstr>* pfxInstrs = mSecMap->GetPfxInstrs();
|
||||
reason2 = checkPfxExpr(pfxInstrs, offset);
|
||||
if (reason2) {
|
||||
reason1 = "rule for CFA: ";
|
||||
goto cant_summarise;
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
goto cant_summarise;
|
||||
}
|
||||
mCurrRules.mCfaExpr = LExpr(how, oldReg, offset);
|
||||
break;
|
||||
}
|
||||
|
||||
case DW_REG_INTEL_XSP:
|
||||
case DW_REG_INTEL_XBP:
|
||||
case DW_REG_INTEL_XIP: {
|
||||
// This is a new rule for XSP, XBP or XIP (the return address).
|
||||
switch (how) {
|
||||
case NODEREF:
|
||||
case DEREF:
|
||||
// Check the old register is one we're tracking.
|
||||
if (!registerIsTracked((DW_REG_NUMBER)oldReg) &&
|
||||
oldReg != DW_REG_CFA) {
|
||||
reason1 = "rule for XSP/XBP/XIP: uses untracked reg";
|
||||
goto cant_summarise;
|
||||
}
|
||||
break;
|
||||
case PFXEXPR: {
|
||||
// Check that the prefix expression only mentions tracked registers.
|
||||
const vector<PfxInstr>* pfxInstrs = mSecMap->GetPfxInstrs();
|
||||
reason2 = checkPfxExpr(pfxInstrs, offset);
|
||||
if (reason2) {
|
||||
reason1 = "rule for XSP/XBP/XIP: ";
|
||||
goto cant_summarise;
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
goto cant_summarise;
|
||||
}
|
||||
LExpr expr = LExpr(how, oldReg, offset);
|
||||
switch (aNewReg) {
|
||||
case DW_REG_INTEL_XBP:
|
||||
mCurrRules.mXbpExpr = expr;
|
||||
break;
|
||||
case DW_REG_INTEL_XSP:
|
||||
mCurrRules.mXspExpr = expr;
|
||||
break;
|
||||
case DW_REG_INTEL_XIP:
|
||||
mCurrRules.mXipExpr = expr;
|
||||
break;
|
||||
default:
|
||||
MOZ_CRASH("impossible value for aNewReg");
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
// Leave |reason1| and |reason2| unset here, for the reasons
|
||||
// explained in the analogous point in the ARM case just above.
|
||||
goto cant_summarise;
|
||||
}
|
||||
|
||||
// On Intel, it seems the old SP value before the call is always the
|
||||
// same as the CFA. Therefore, in the absence of any other way to
|
||||
// recover the SP, specify that the CFA should be copied.
|
||||
if (mCurrRules.mXspExpr.mHow == UNKNOWN) {
|
||||
mCurrRules.mXspExpr = LExpr(NODEREF, DW_REG_CFA, 0);
|
||||
}
|
||||
|
||||
// Also, gcc says "Undef" for BP when it is unchanged.
|
||||
if (mCurrRules.mXbpExpr.mHow == UNKNOWN) {
|
||||
mCurrRules.mXbpExpr = LExpr(NODEREF, DW_REG_INTEL_XBP, 0);
|
||||
}
|
||||
|
||||
# elif defined(GP_ARCH_mips64)
|
||||
// ---------------- mips ---------------- //
|
||||
//
|
||||
// Now, can we add the rule to our summary? This depends on whether
|
||||
// the registers and the overall expression are representable. This
|
||||
// is the heart of the summarisation process.
|
||||
switch (aNewReg) {
|
||||
case DW_REG_CFA:
|
||||
// This is a rule that defines the CFA. The only forms we can
|
||||
// represent are: = SP+offset or = FP+offset.
|
||||
if (how != NODEREF) {
|
||||
reason1 = "rule for DW_REG_CFA: invalid |how|";
|
||||
goto cant_summarise;
|
||||
}
|
||||
if (oldReg != DW_REG_MIPS_SP && oldReg != DW_REG_MIPS_FP) {
|
||||
reason1 = "rule for DW_REG_CFA: invalid |oldReg|";
|
||||
goto cant_summarise;
|
||||
}
|
||||
mCurrRules.mCfaExpr = LExpr(how, oldReg, offset);
|
||||
break;
|
||||
|
||||
case DW_REG_MIPS_SP:
|
||||
case DW_REG_MIPS_FP:
|
||||
case DW_REG_MIPS_PC: {
|
||||
// This is a new rule for SP, FP or PC (the return address).
|
||||
switch (how) {
|
||||
case NODEREF:
|
||||
case DEREF:
|
||||
// Check the old register is one we're tracking.
|
||||
if (!registerIsTracked((DW_REG_NUMBER)oldReg) &&
|
||||
oldReg != DW_REG_CFA) {
|
||||
reason1 = "rule for SP/FP/PC: uses untracked reg";
|
||||
goto cant_summarise;
|
||||
}
|
||||
break;
|
||||
case PFXEXPR: {
|
||||
// Check that the prefix expression only mentions tracked registers.
|
||||
const vector<PfxInstr>* pfxInstrs = mSecMap->GetPfxInstrs();
|
||||
reason2 = checkPfxExpr(pfxInstrs, offset);
|
||||
if (reason2) {
|
||||
reason1 = "rule for SP/FP/PC: ";
|
||||
goto cant_summarise;
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
goto cant_summarise;
|
||||
}
|
||||
LExpr expr = LExpr(how, oldReg, offset);
|
||||
switch (aNewReg) {
|
||||
case DW_REG_MIPS_FP:
|
||||
mCurrRules.mFPexpr = expr;
|
||||
break;
|
||||
case DW_REG_MIPS_SP:
|
||||
mCurrRules.mSPexpr = expr;
|
||||
break;
|
||||
case DW_REG_MIPS_PC:
|
||||
mCurrRules.mPCexpr = expr;
|
||||
break;
|
||||
default:
|
||||
MOZ_CRASH("impossible value for aNewReg");
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
// Leave |reason1| and |reason2| unset here, for the reasons
|
||||
// explained in the analogous point in the ARM case just above.
|
||||
goto cant_summarise;
|
||||
}
|
||||
|
||||
// On MIPS, it seems the old SP value before the call is always the
|
||||
// same as the CFA. Therefore, in the absence of any other way to
|
||||
// recover the SP, specify that the CFA should be copied.
|
||||
if (mCurrRules.mSPexpr.mHow == UNKNOWN) {
|
||||
mCurrRules.mSPexpr = LExpr(NODEREF, DW_REG_CFA, 0);
|
||||
}
|
||||
|
||||
// Also, gcc says "Undef" for FP when it is unchanged.
|
||||
if (mCurrRules.mFPexpr.mHow == UNKNOWN) {
|
||||
mCurrRules.mFPexpr = LExpr(NODEREF, DW_REG_MIPS_FP, 0);
|
||||
}
|
||||
|
||||
# else
|
||||
|
||||
# error "Unsupported arch"
|
||||
# endif
|
||||
|
||||
return;
|
||||
|
||||
cant_summarise:
|
||||
if (reason1 || reason2) {
|
||||
char buf[200];
|
||||
SprintfLiteral(buf,
|
||||
"LUL can't summarise: "
|
||||
"SVMA=0x%llx: %s%s, expr=LExpr(%s,%u,%lld)\n",
|
||||
(unsigned long long int)(aAddress - mTextBias),
|
||||
reason1 ? reason1 : "", reason2 ? reason2 : "",
|
||||
NameOf_LExprHow(how), (unsigned int)oldReg,
|
||||
(long long int)offset);
|
||||
mLog(buf);
|
||||
}
|
||||
}
|
||||
|
||||
// Append |pfxi| to the SecMap's prefix-instruction vector and return the
// index it was stored at, so callers can refer back to it later.
uint32_t Summariser::AddPfxInstr(PfxInstr pfxi) {
  return mSecMap->AddPfxInstr(pfxi);
}
|
||||
|
||||
// Finish the summarisation: if the RuleSet currently under construction
// covers a nonempty address range (mCurrAddr .. mMax1Addr - 1), commit it
// to the SecMap.  Otherwise there is nothing to emit.
void Summariser::End() {
  if (DEBUG_SUMMARISER) {
    mLog("LUL End\n");
  }
  // Only emit the pending RuleSet if its range is nonempty.
  if (mCurrAddr < mMax1Addr) {
    mCurrRules.mAddr = mCurrAddr;
    mCurrRules.mLen = mMax1Addr - mCurrAddr;
    mSecMap->AddRuleSet(&mCurrRules);
    if (DEBUG_SUMMARISER) {
      mLog("LUL ");
      mCurrRules.Print(mLog);
      mLog("\n");
    }
  }
}
|
||||
|
||||
} // namespace lul
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,64 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef LulDwarfSummariser_h
|
||||
#define LulDwarfSummariser_h
|
||||
|
||||
#include "LulMainInt.h"
|
||||
|
||||
namespace lul {
|
||||
|
||||
// A Summariser receives a stream of events from a DWARF CFI parser
// (Entry / Rule / End calls) and condenses them into RuleSets, which it
// deposits into the given SecMap.
class Summariser {
 public:
  Summariser(SecMap* aSecMap, uintptr_t aTextBias, void (*aLog)(const char*));

  // Notification that a new CFI entry covering |aAddress| for |aLength|
  // bytes is about to be described.
  virtual void Entry(uintptr_t aAddress, uintptr_t aLength);
  // Notification that the current entry is complete; flushes any
  // RuleSet still under construction into the SecMap.
  virtual void End();

  // Tell the summariser that the value for |aNewReg| at |aAddress| is
  // recovered using the LExpr that can be constructed using the
  // components |how|, |oldReg| and |offset|.  The summariser will
  // inspect the components and may reject them for various reasons,
  // but the hope is that it will find them acceptable and record this
  // rule permanently.
  virtual void Rule(uintptr_t aAddress, int aNewReg, LExprHow how,
                    int16_t oldReg, int64_t offset);

  // Add a prefix instruction to the SecMap's PfxInstr vector; returns
  // the index at which it was stored.
  virtual uint32_t AddPfxInstr(PfxInstr pfxi);

  // Send output to the logging sink, for debugging.
  virtual void Log(const char* str) { mLog(str); }

 private:
  // The SecMap in which we park the finished summaries (RuleSets) and
  // also any PfxInstrs derived from Dwarf expressions.
  SecMap* mSecMap;

  // Running state for the current summary (RuleSet) under construction.
  RuleSet mCurrRules;

  // The start of the address range to which the RuleSet under
  // construction applies.
  uintptr_t mCurrAddr;

  // The highest address, plus one, for which the RuleSet under
  // construction could possibly apply.  If there are no further
  // incoming events then mCurrRules will eventually be emitted
  // as-is, for the range mCurrAddr.. mMax1Addr - 1, if that is
  // nonempty.
  uintptr_t mMax1Addr;

  // The bias value (to add to the SVMAs, to get AVMAs) to be used
  // when adding entries into mSecMap.
  uintptr_t mTextBias;

  // A logging sink, for debugging.
  void (*mLog)(const char* aFmt);
};
|
||||
|
||||
} // namespace lul
|
||||
|
||||
#endif // LulDwarfSummariser_h
|
|
@ -1,878 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
|
||||
// Copyright (c) 2006, 2011, 2012 Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Restructured in 2009 by: Jim Blandy <jimb@mozilla.com> <jimb@red-bean.com>
|
||||
|
||||
// (derived from)
|
||||
// dump_symbols.cc: implement google_breakpad::WriteSymbolFile:
|
||||
// Find all the debugging info in a file and dump it as a Breakpad symbol file.
|
||||
//
|
||||
// dump_symbols.h: Read debugging information from an ELF file, and write
|
||||
// it out as a Breakpad symbol file.
|
||||
|
||||
// This file is derived from the following files in
|
||||
// toolkit/crashreporter/google-breakpad:
|
||||
// src/common/linux/dump_symbols.cc
|
||||
// src/common/linux/elfutils.cc
|
||||
// src/common/linux/file_id.cc
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include <errno.h>
|
||||
# include <fcntl.h>
|
||||
# include <libgen.h>
|
||||
# include <stdio.h>
|
||||
# include <string.h>
|
||||
# include <sys/mman.h>
|
||||
# include <sys/stat.h>
|
||||
# include <unistd.h>
|
||||
# include <arpa/inet.h>
|
||||
|
||||
# include <set>
|
||||
# include <string>
|
||||
# include <vector>
|
||||
|
||||
# include "mozilla/Assertions.h"
|
||||
# include "mozilla/Sprintf.h"
|
||||
|
||||
# include "PlatformMacros.h"
|
||||
# include "LulCommonExt.h"
|
||||
# include "LulDwarfExt.h"
|
||||
# include "LulElfInt.h"
|
||||
# include "LulMainInt.h"
|
||||
|
||||
# if defined(GP_PLAT_arm_android) && !defined(SHT_ARM_EXIDX)
|
||||
// bionic and older glibsc don't define it
|
||||
# define SHT_ARM_EXIDX (SHT_LOPROC + 1)
|
||||
# endif
|
||||
|
||||
// Old Linux header doesn't define EM_AARCH64
|
||||
# ifndef EM_AARCH64
|
||||
# define EM_AARCH64 183
|
||||
# endif
|
||||
|
||||
// This namespace contains helper functions.
|
||||
namespace {
|
||||
|
||||
using lul::DwarfCFIToModule;
|
||||
using lul::FindElfSectionByName;
|
||||
using lul::GetOffset;
|
||||
using lul::IsValidElf;
|
||||
using lul::Module;
|
||||
using lul::scoped_ptr;
|
||||
using lul::Summariser;
|
||||
using lul::UniqueStringUniverse;
|
||||
using std::set;
|
||||
using std::string;
|
||||
using std::vector;
|
||||
|
||||
//
|
||||
// FDWrapper
|
||||
//
|
||||
// Wrapper class to make sure opened file is closed.
|
||||
//
|
||||
class FDWrapper {
 public:
  // Takes ownership of |fd|; pass -1 for "no file descriptor".
  explicit FDWrapper(int fd) : fd_(fd) {}
  // Closes the wrapped descriptor unless it was released or is -1.
  ~FDWrapper() {
    if (fd_ != -1) close(fd_);
  }
  // Non-copyable: copying an owning wrapper would close the same
  // descriptor twice.
  FDWrapper(const FDWrapper&) = delete;
  FDWrapper& operator=(const FDWrapper&) = delete;
  // Returns the wrapped descriptor without giving up ownership.
  int get() { return fd_; }
  // Relinquishes ownership: returns the descriptor and marks this
  // wrapper empty so the destructor will not close it.
  int release() {
    int fd = fd_;
    fd_ = -1;
    return fd;
  }

 private:
  int fd_;  // The owned descriptor, or -1 if empty/released.
};
|
||||
|
||||
//
|
||||
// MmapWrapper
|
||||
//
|
||||
// Wrapper class to make sure mapped regions are unmapped.
|
||||
//
|
||||
class MmapWrapper {
|
||||
public:
|
||||
MmapWrapper() : is_set_(false), base_(NULL), size_(0) {}
|
||||
~MmapWrapper() {
|
||||
if (is_set_ && base_ != NULL) {
|
||||
MOZ_ASSERT(size_ > 0);
|
||||
munmap(base_, size_);
|
||||
}
|
||||
}
|
||||
void set(void* mapped_address, size_t mapped_size) {
|
||||
is_set_ = true;
|
||||
base_ = mapped_address;
|
||||
size_ = mapped_size;
|
||||
}
|
||||
void release() {
|
||||
MOZ_ASSERT(is_set_);
|
||||
is_set_ = false;
|
||||
base_ = NULL;
|
||||
size_ = 0;
|
||||
}
|
||||
|
||||
private:
|
||||
bool is_set_;
|
||||
void* base_;
|
||||
size_t size_;
|
||||
};
|
||||
|
||||
// Set NUM_DW_REGNAMES to be the number of Dwarf register names
|
||||
// appropriate to the machine architecture given in HEADER. Return
|
||||
// true on success, or false if HEADER's machine architecture is not
|
||||
// supported.
|
||||
// Set |*num_dw_regnames| to the number of DWARF register names for the
// machine architecture recorded in |elf_header|.  Returns true on success,
// false (after asserting in debug builds) for unsupported architectures.
template <typename ElfClass>
bool DwarfCFIRegisterNames(const typename ElfClass::Ehdr* elf_header,
                           unsigned int* num_dw_regnames) {
  switch (elf_header->e_machine) {
    case EM_386:
      *num_dw_regnames = DwarfCFIToModule::RegisterNames::I386();
      return true;
    case EM_ARM:
      *num_dw_regnames = DwarfCFIToModule::RegisterNames::ARM();
      return true;
    case EM_X86_64:
      *num_dw_regnames = DwarfCFIToModule::RegisterNames::X86_64();
      return true;
    case EM_MIPS:
      *num_dw_regnames = DwarfCFIToModule::RegisterNames::MIPS();
      return true;
    case EM_AARCH64:
      *num_dw_regnames = DwarfCFIToModule::RegisterNames::ARM64();
      return true;
    default:
      // Unsupported architecture: assert in debug builds, fail gracefully
      // otherwise.
      MOZ_ASSERT(0);
      return false;
  }
}
|
||||
|
||||
// Parse the DWARF call-frame-information section |section| (named
// |section_name|) of |dwarf_filename| and feed the resulting unwind rules,
// via a Summariser, into |smap|.  |eh_frame| selects .eh_frame vs
// .debug_frame semantics; |got_section|/|text_section| (may be null)
// provide base addresses for .eh_frame encoded pointers.  Returns false
// only if the file's architecture is unrecognized.
template <typename ElfClass>
bool LoadDwarfCFI(const string& dwarf_filename,
                  const typename ElfClass::Ehdr* elf_header,
                  const char* section_name,
                  const typename ElfClass::Shdr* section, const bool eh_frame,
                  const typename ElfClass::Shdr* got_section,
                  const typename ElfClass::Shdr* text_section,
                  const bool big_endian, SecMap* smap, uintptr_t text_bias,
                  UniqueStringUniverse* usu, void (*log)(const char*)) {
  // Find the appropriate set of register names for this file's
  // architecture.
  unsigned int num_dw_regs = 0;
  if (!DwarfCFIRegisterNames<ElfClass>(elf_header, &num_dw_regs)) {
    fprintf(stderr,
            "%s: unrecognized ELF machine architecture '%d';"
            " cannot convert DWARF call frame information\n",
            dwarf_filename.c_str(), elf_header->e_machine);
    return false;
  }

  const lul::Endianness endianness =
      big_endian ? lul::ENDIANNESS_BIG : lul::ENDIANNESS_LITTLE;

  // Find the call frame information and its size.
  const char* cfi = GetOffset<ElfClass, char>(elf_header, section->sh_offset);
  size_t cfi_size = section->sh_size;

  // Plug together the parser, handler, and their entourages.

  // Here's a summariser, which will receive the output of the
  // parser, create summaries, and add them to |smap|.
  Summariser summ(smap, text_bias, log);

  lul::ByteReader reader(endianness);
  reader.SetAddressSize(ElfClass::kAddrSize);

  DwarfCFIToModule::Reporter module_reporter(log, dwarf_filename, section_name);
  DwarfCFIToModule handler(num_dw_regs, &module_reporter, &reader, usu, &summ);

  // Provide the base addresses for .eh_frame encoded pointers, if
  // possible.
  reader.SetCFIDataBase(section->sh_addr, cfi);
  if (got_section) reader.SetDataBase(got_section->sh_addr);
  if (text_section) reader.SetTextBase(text_section->sh_addr);

  lul::CallFrameInfo::Reporter dwarf_reporter(log, dwarf_filename,
                                              section_name);
  lul::CallFrameInfo parser(cfi, cfi_size, &reader, &handler, &dwarf_reporter,
                            eh_frame);
  // Run the parser; per-entry failures are reported through
  // |dwarf_reporter| rather than through our return value.
  parser.Start();

  return true;
}
|
||||
|
||||
bool LoadELF(const string& obj_file, MmapWrapper* map_wrapper,
|
||||
void** elf_header) {
|
||||
int obj_fd = open(obj_file.c_str(), O_RDONLY);
|
||||
if (obj_fd < 0) {
|
||||
fprintf(stderr, "Failed to open ELF file '%s': %s\n", obj_file.c_str(),
|
||||
strerror(errno));
|
||||
return false;
|
||||
}
|
||||
FDWrapper obj_fd_wrapper(obj_fd);
|
||||
struct stat st;
|
||||
if (fstat(obj_fd, &st) != 0 && st.st_size <= 0) {
|
||||
fprintf(stderr, "Unable to fstat ELF file '%s': %s\n", obj_file.c_str(),
|
||||
strerror(errno));
|
||||
return false;
|
||||
}
|
||||
// Mapping it read-only is good enough. In any case, mapping it
|
||||
// read-write confuses Valgrind's debuginfo acquire/discard
|
||||
// heuristics, making it hard to profile the profiler.
|
||||
void* obj_base = mmap(nullptr, st.st_size, PROT_READ, MAP_PRIVATE, obj_fd, 0);
|
||||
if (obj_base == MAP_FAILED) {
|
||||
fprintf(stderr, "Failed to mmap ELF file '%s': %s\n", obj_file.c_str(),
|
||||
strerror(errno));
|
||||
return false;
|
||||
}
|
||||
map_wrapper->set(obj_base, st.st_size);
|
||||
*elf_header = obj_base;
|
||||
if (!IsValidElf(*elf_header)) {
|
||||
fprintf(stderr, "Not a valid ELF file: %s\n", obj_file.c_str());
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Get the endianness of ELF_HEADER. If it's invalid, return false.
|
||||
// Inspect the EI_DATA byte of |elf_header| and set |*big_endian|
// accordingly.  Returns false (leaving |*big_endian| untouched) if the
// encoding byte is neither ELFDATA2LSB nor ELFDATA2MSB.
template <typename ElfClass>
bool ElfEndianness(const typename ElfClass::Ehdr* elf_header,
                   bool* big_endian) {
  if (elf_header->e_ident[EI_DATA] == ELFDATA2LSB) {
    *big_endian = false;
    return true;
  }
  if (elf_header->e_ident[EI_DATA] == ELFDATA2MSB) {
    *big_endian = true;
    return true;
  }

  fprintf(stderr, "bad data encoding in ELF header: %d\n",
          elf_header->e_ident[EI_DATA]);
  return false;
}
|
||||
|
||||
//
|
||||
// LoadSymbolsInfo
|
||||
//
|
||||
// Holds the state between the two calls to LoadSymbols() in case it's necessary
|
||||
// to follow the .gnu_debuglink section and load debug information from a
|
||||
// different file.
|
||||
//
|
||||
// State shared between the two calls to LoadSymbols() when debug info must
// be followed to a separate .gnu_debuglink file.
template <typename ElfClass>
class LoadSymbolsInfo {
 public:
  typedef typename ElfClass::Addr Addr;

  explicit LoadSymbolsInfo(const vector<string>& dbg_dirs)
      : debug_dirs_(dbg_dirs), has_loading_addr_(false) {}

  // Keeps track of which sections have been loaded so sections don't
  // accidentally get loaded twice from two different files.
  void LoadedSection(const string& section) {
    if (loaded_sections_.count(section) == 0) {
      loaded_sections_.insert(section);
    } else {
      fprintf(stderr, "Section %s has already been loaded.\n", section.c_str());
    }
  }

  // NOTE(review): nothing in the visible code ever assigns
  // debuglink_file_, so this returns the empty string here; presumably
  // it is set by code elsewhere (or was, in the full version of this
  // file) — confirm against the complete source.
  string debuglink_file() const { return debuglink_file_; }

 private:
  const vector<string>& debug_dirs_;  // Directories in which to
                                      // search for the debug ELF file.

  string debuglink_file_;  // Full path to the debug ELF file.

  bool has_loading_addr_;  // Indicate if LOADING_ADDR_ is valid.
                           // (Never read or set in the visible code.)

  set<string> loaded_sections_;  // Tracks the Loaded ELF sections
                                 // between calls to LoadSymbols().
};
|
||||
|
||||
// Find the preferred loading address of the binary.
|
||||
// Return the preferred loading address: the p_vaddr of the first PT_LOAD
// program header, or 0 if none is present.
template <typename ElfClass>
typename ElfClass::Addr GetLoadingAddress(
    const typename ElfClass::Phdr* program_headers, int nheader) {
  typedef typename ElfClass::Phdr Phdr;

  // For non-PIC executables (e_type == ET_EXEC), the load address is
  // the start address of the first PT_LOAD segment.  (ELF requires
  // the segments to be sorted by load address.)  For PIC executables
  // and dynamic libraries (e_type == ET_DYN), this address will
  // normally be zero.
  for (int i = 0; i < nheader; ++i) {
    const Phdr& header = program_headers[i];
    if (header.p_type == PT_LOAD) return header.p_vaddr;
  }
  // No PT_LOAD segment found; treat the load address as zero.
  return 0;
}
|
||||
|
||||
// Extract unwind information from the in-memory ELF image |elf_header|
// (originating from |obj_file|, mapped executable at |rx_avma|) and add it
// to |smap|.  Both .debug_frame and .eh_frame are consulted; either alone
// is sufficient.  Returns true if any usable CFI was found.
// NOTE(review): |read_gnu_debug_link| and |rx_size| are accepted but never
// used in the visible code — presumably consumed by the parts of this
// function that handled .gnu_debuglink in the full source; confirm.
template <typename ElfClass>
bool LoadSymbols(const string& obj_file, const bool big_endian,
                 const typename ElfClass::Ehdr* elf_header,
                 const bool read_gnu_debug_link,
                 LoadSymbolsInfo<ElfClass>* info, SecMap* smap, void* rx_avma,
                 size_t rx_size, UniqueStringUniverse* usu,
                 void (*log)(const char*)) {
  typedef typename ElfClass::Phdr Phdr;
  typedef typename ElfClass::Shdr Shdr;

  char buf[500];
  SprintfLiteral(buf, "LoadSymbols: BEGIN %s\n", obj_file.c_str());
  buf[sizeof(buf) - 1] = 0;
  log(buf);

  // This is how the text bias is calculated.
  // BEGIN CALCULATE BIAS
  uintptr_t loading_addr = GetLoadingAddress<ElfClass>(
      GetOffset<ElfClass, Phdr>(elf_header, elf_header->e_phoff),
      elf_header->e_phnum);
  // The bias is the difference between where the text actually landed in
  // memory and where the file said it preferred to be loaded.
  uintptr_t text_bias = ((uintptr_t)rx_avma) - loading_addr;
  SprintfLiteral(buf, "LoadSymbols: rx_avma=%llx, text_bias=%llx",
                 (unsigned long long int)(uintptr_t)rx_avma,
                 (unsigned long long int)text_bias);
  buf[sizeof(buf) - 1] = 0;
  log(buf);
  // END CALCULATE BIAS

  // Locate the section-name string table so sections can be found by name.
  const Shdr* sections =
      GetOffset<ElfClass, Shdr>(elf_header, elf_header->e_shoff);
  const Shdr* section_names = sections + elf_header->e_shstrndx;
  const char* names =
      GetOffset<ElfClass, char>(elf_header, section_names->sh_offset);
  const char* names_end = names + section_names->sh_size;
  bool found_usable_info = false;

  // Dwarf Call Frame Information (CFI) is actually independent from
  // the other DWARF debugging information, and can be used alone.
  const Shdr* dwarf_cfi_section =
      FindElfSectionByName<ElfClass>(".debug_frame", SHT_PROGBITS, sections,
                                     names, names_end, elf_header->e_shnum);
  if (dwarf_cfi_section) {
    // Ignore the return value of this function; even without call frame
    // information, the other debugging information could be perfectly
    // useful.
    info->LoadedSection(".debug_frame");
    bool result = LoadDwarfCFI<ElfClass>(obj_file, elf_header, ".debug_frame",
                                         dwarf_cfi_section, false, 0, 0,
                                         big_endian, smap, text_bias, usu, log);
    found_usable_info = found_usable_info || result;
    if (result) log("LoadSymbols: read CFI from .debug_frame");
  }

  // Linux C++ exception handling information can also provide
  // unwinding data.
  const Shdr* eh_frame_section =
      FindElfSectionByName<ElfClass>(".eh_frame", SHT_PROGBITS, sections, names,
                                     names_end, elf_header->e_shnum);
  if (eh_frame_section) {
    // Pointers in .eh_frame data may be relative to the base addresses of
    // certain sections. Provide those sections if present.
    const Shdr* got_section = FindElfSectionByName<ElfClass>(
        ".got", SHT_PROGBITS, sections, names, names_end, elf_header->e_shnum);
    const Shdr* text_section = FindElfSectionByName<ElfClass>(
        ".text", SHT_PROGBITS, sections, names, names_end, elf_header->e_shnum);
    info->LoadedSection(".eh_frame");
    // As above, ignore the return value of this function.
    bool result = LoadDwarfCFI<ElfClass>(
        obj_file, elf_header, ".eh_frame", eh_frame_section, true, got_section,
        text_section, big_endian, smap, text_bias, usu, log);
    found_usable_info = found_usable_info || result;
    if (result) log("LoadSymbols: read CFI from .eh_frame");
  }

  SprintfLiteral(buf, "LoadSymbols: END %s\n", obj_file.c_str());
  buf[sizeof(buf) - 1] = 0;
  log(buf);

  return found_usable_info;
}
|
||||
|
||||
// Return the breakpad symbol file identifier for the architecture of
|
||||
// ELF_HEADER.
|
||||
// Return the breakpad symbol file identifier for the architecture of
// ELF_HEADER, or NULL if the e_machine value is not recognized.
template <typename ElfClass>
const char* ElfArchitecture(const typename ElfClass::Ehdr* elf_header) {
  typedef typename ElfClass::Half Half;
  const Half machine = elf_header->e_machine;
  // Table mapping ELF e_machine values to breakpad architecture names.
  struct MachineName {
    Half machine;
    const char* name;
  };
  static const MachineName kMachineNames[] = {
      {EM_386, "x86"},         {EM_ARM, "arm"},
      {EM_AARCH64, "arm64"},   {EM_MIPS, "mips"},
      {EM_PPC64, "ppc64"},     {EM_PPC, "ppc"},
      {EM_S390, "s390"},       {EM_SPARC, "sparc"},
      {EM_SPARCV9, "sparcv9"}, {EM_X86_64, "x86_64"},
  };
  for (const MachineName& entry : kMachineNames) {
    if (entry.machine == machine) {
      return entry.name;
    }
  }
  // Unknown architecture.
  return NULL;
}
|
||||
|
||||
// Format the Elf file identifier in IDENTIFIER as a UUID with the
|
||||
// dashes removed.
|
||||
// Format the Elf file identifier in IDENTIFIER as a UUID with the
// dashes removed.
string FormatIdentifier(unsigned char identifier[16]) {
  char uuid_text[40];
  lul::FileID::ConvertIdentifierToString(identifier, uuid_text,
                                         sizeof(uuid_text));
  // Copy the textual form across, dropping every '-'.
  string id_no_dash;
  for (const char* p = uuid_text; *p != '\0'; ++p) {
    if (*p != '-') {
      id_no_dash += *p;
    }
  }
  // Add an extra "0" by the end.  PDB files on Windows have an 'age'
  // number appended to the end of the file identifier; this isn't
  // really used or necessary on other platforms, but be consistent.
  id_no_dash += '0';
  return id_no_dash;
}
|
||||
|
||||
// Return the non-directory portion of FILENAME: the portion after the
|
||||
// last slash, or the whole filename if there are no slashes.
|
||||
// Return the non-directory portion of FILENAME: the portion after the
// last slash, or the whole filename if there are no slashes.
string BaseFileName(const string& filename) {
  // basename() may modify its argument, so hand it a writable,
  // NUL-terminated copy (owned by a vector instead of strdup/free).
  vector<char> writable(filename.c_str(),
                        filename.c_str() + filename.size() + 1);
  return string(basename(writable.data()));
}
|
||||
|
||||
// Read unwind data from the ELF image |elf_header| (class-specific: 32- or
// 64-bit) into |smap|.  If the primary file yields nothing usable, follow
// the .gnu_debuglink file named by |info| (searched via |debug_dirs|) and
// try again.  Returns true if usable symbols were loaded from either file.
template <typename ElfClass>
bool ReadSymbolDataElfClass(const typename ElfClass::Ehdr* elf_header,
                            const string& obj_filename,
                            const vector<string>& debug_dirs, SecMap* smap,
                            void* rx_avma, size_t rx_size,
                            UniqueStringUniverse* usu,
                            void (*log)(const char*)) {
  typedef typename ElfClass::Ehdr Ehdr;

  unsigned char identifier[16];
  if (!lul ::FileID::ElfFileIdentifierFromMappedFile(elf_header, identifier)) {
    fprintf(stderr, "%s: unable to generate file identifier\n",
            obj_filename.c_str());
    return false;
  }

  const char* architecture = ElfArchitecture<ElfClass>(elf_header);
  if (!architecture) {
    fprintf(stderr, "%s: unrecognized ELF machine architecture: %d\n",
            obj_filename.c_str(), elf_header->e_machine);
    return false;
  }

  // Figure out what endianness this file is.
  bool big_endian;
  if (!ElfEndianness<ElfClass>(elf_header, &big_endian)) return false;

  // NOTE(review): |name|, |os| and |id| are computed but not used in the
  // visible code — presumably consumed by symbol-file emission elsewhere
  // in the full source; confirm before removing.
  string name = BaseFileName(obj_filename);
  string os = "Linux";
  string id = FormatIdentifier(identifier);

  LoadSymbolsInfo<ElfClass> info(debug_dirs);
  if (!LoadSymbols<ElfClass>(obj_filename, big_endian, elf_header,
                             !debug_dirs.empty(), &info, smap, rx_avma, rx_size,
                             usu, log)) {
    const string debuglink_file = info.debuglink_file();
    if (debuglink_file.empty()) return false;

    // Load debuglink ELF file.
    fprintf(stderr, "Found debugging info in %s\n", debuglink_file.c_str());
    MmapWrapper debug_map_wrapper;
    Ehdr* debug_elf_header = NULL;
    if (!LoadELF(debuglink_file, &debug_map_wrapper,
                 reinterpret_cast<void**>(&debug_elf_header)))
      return false;
    // Sanity checks to make sure everything matches up.
    const char* debug_architecture =
        ElfArchitecture<ElfClass>(debug_elf_header);
    if (!debug_architecture) {
      fprintf(stderr, "%s: unrecognized ELF machine architecture: %d\n",
              debuglink_file.c_str(), debug_elf_header->e_machine);
      return false;
    }
    if (strcmp(architecture, debug_architecture)) {
      fprintf(stderr,
              "%s with ELF machine architecture %s does not match "
              "%s with ELF architecture %s\n",
              debuglink_file.c_str(), debug_architecture, obj_filename.c_str(),
              architecture);
      return false;
    }

    bool debug_big_endian;
    if (!ElfEndianness<ElfClass>(debug_elf_header, &debug_big_endian))
      return false;
    if (debug_big_endian != big_endian) {
      fprintf(stderr, "%s and %s does not match in endianness\n",
              obj_filename.c_str(), debuglink_file.c_str());
      return false;
    }

    // Second attempt: read symbols from the debuglink file instead.
    if (!LoadSymbols<ElfClass>(debuglink_file, debug_big_endian,
                               debug_elf_header, false, &info, smap, rx_avma,
                               rx_size, usu, log)) {
      return false;
    }
  }

  return true;
}
|
||||
|
||||
} // namespace
|
||||
|
||||
namespace lul {
|
||||
|
||||
bool ReadSymbolDataInternal(const uint8_t* obj_file, const string& obj_filename,
|
||||
const vector<string>& debug_dirs, SecMap* smap,
|
||||
void* rx_avma, size_t rx_size,
|
||||
UniqueStringUniverse* usu,
|
||||
void (*log)(const char*)) {
|
||||
if (!IsValidElf(obj_file)) {
|
||||
fprintf(stderr, "Not a valid ELF file: %s\n", obj_filename.c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
int elfclass = ElfClass(obj_file);
|
||||
if (elfclass == ELFCLASS32) {
|
||||
return ReadSymbolDataElfClass<ElfClass32>(
|
||||
reinterpret_cast<const Elf32_Ehdr*>(obj_file), obj_filename, debug_dirs,
|
||||
smap, rx_avma, rx_size, usu, log);
|
||||
}
|
||||
if (elfclass == ELFCLASS64) {
|
||||
return ReadSymbolDataElfClass<ElfClass64>(
|
||||
reinterpret_cast<const Elf64_Ehdr*>(obj_file), obj_filename, debug_dirs,
|
||||
smap, rx_avma, rx_size, usu, log);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool ReadSymbolData(const string& obj_file, const vector<string>& debug_dirs,
|
||||
SecMap* smap, void* rx_avma, size_t rx_size,
|
||||
UniqueStringUniverse* usu, void (*log)(const char*)) {
|
||||
MmapWrapper map_wrapper;
|
||||
void* elf_header = NULL;
|
||||
if (!LoadELF(obj_file, &map_wrapper, &elf_header)) return false;
|
||||
|
||||
return ReadSymbolDataInternal(reinterpret_cast<uint8_t*>(elf_header),
|
||||
obj_file, debug_dirs, smap, rx_avma, rx_size,
|
||||
usu, log);
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
template <typename ElfClass>
|
||||
void FindElfClassSection(const char* elf_base, const char* section_name,
|
||||
typename ElfClass::Word section_type,
|
||||
const void** section_start, int* section_size) {
|
||||
typedef typename ElfClass::Ehdr Ehdr;
|
||||
typedef typename ElfClass::Shdr Shdr;
|
||||
|
||||
MOZ_ASSERT(elf_base);
|
||||
MOZ_ASSERT(section_start);
|
||||
MOZ_ASSERT(section_size);
|
||||
|
||||
MOZ_ASSERT(strncmp(elf_base, ELFMAG, SELFMAG) == 0);
|
||||
|
||||
const Ehdr* elf_header = reinterpret_cast<const Ehdr*>(elf_base);
|
||||
MOZ_ASSERT(elf_header->e_ident[EI_CLASS] == ElfClass::kClass);
|
||||
|
||||
const Shdr* sections =
|
||||
GetOffset<ElfClass, Shdr>(elf_header, elf_header->e_shoff);
|
||||
const Shdr* section_names = sections + elf_header->e_shstrndx;
|
||||
const char* names =
|
||||
GetOffset<ElfClass, char>(elf_header, section_names->sh_offset);
|
||||
const char* names_end = names + section_names->sh_size;
|
||||
|
||||
const Shdr* section =
|
||||
FindElfSectionByName<ElfClass>(section_name, section_type, sections,
|
||||
names, names_end, elf_header->e_shnum);
|
||||
|
||||
if (section != NULL && section->sh_size > 0) {
|
||||
*section_start = elf_base + section->sh_offset;
|
||||
*section_size = section->sh_size;
|
||||
}
|
||||
}
|
||||
|
||||
template <typename ElfClass>
|
||||
void FindElfClassSegment(const char* elf_base,
|
||||
typename ElfClass::Word segment_type,
|
||||
const void** segment_start, int* segment_size) {
|
||||
typedef typename ElfClass::Ehdr Ehdr;
|
||||
typedef typename ElfClass::Phdr Phdr;
|
||||
|
||||
MOZ_ASSERT(elf_base);
|
||||
MOZ_ASSERT(segment_start);
|
||||
MOZ_ASSERT(segment_size);
|
||||
|
||||
MOZ_ASSERT(strncmp(elf_base, ELFMAG, SELFMAG) == 0);
|
||||
|
||||
const Ehdr* elf_header = reinterpret_cast<const Ehdr*>(elf_base);
|
||||
MOZ_ASSERT(elf_header->e_ident[EI_CLASS] == ElfClass::kClass);
|
||||
|
||||
const Phdr* phdrs =
|
||||
GetOffset<ElfClass, Phdr>(elf_header, elf_header->e_phoff);
|
||||
|
||||
for (int i = 0; i < elf_header->e_phnum; ++i) {
|
||||
if (phdrs[i].p_type == segment_type) {
|
||||
*segment_start = elf_base + phdrs[i].p_offset;
|
||||
*segment_size = phdrs[i].p_filesz;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
// Returns true iff the buffer at |elf_base| begins with the 4-byte ELF
// magic number ("\177ELF").  memcmp is equivalent to the historical
// strncmp here because ELFMAG contains no NUL in its first SELFMAG bytes.
bool IsValidElf(const void* elf_base) {
  const char* bytes = reinterpret_cast<const char*>(elf_base);
  return memcmp(bytes, ELFMAG, SELFMAG) == 0;
}
|
||||
|
||||
// Returns the ELF class byte (e.g. ELFCLASS32 or ELFCLASS64) from the
// e_ident field of the image at |elf_base|.  The caller is expected to
// have validated the image with IsValidElf first.
int ElfClass(const void* elf_base) {
  const ElfW(Ehdr)* header = reinterpret_cast<const ElfW(Ehdr)*>(elf_base);
  return header->e_ident[EI_CLASS];
}
|
||||
|
||||
bool FindElfSection(const void* elf_mapped_base, const char* section_name,
|
||||
uint32_t section_type, const void** section_start,
|
||||
int* section_size, int* elfclass) {
|
||||
MOZ_ASSERT(elf_mapped_base);
|
||||
MOZ_ASSERT(section_start);
|
||||
MOZ_ASSERT(section_size);
|
||||
|
||||
*section_start = NULL;
|
||||
*section_size = 0;
|
||||
|
||||
if (!IsValidElf(elf_mapped_base)) return false;
|
||||
|
||||
int cls = ElfClass(elf_mapped_base);
|
||||
if (elfclass) {
|
||||
*elfclass = cls;
|
||||
}
|
||||
|
||||
const char* elf_base = static_cast<const char*>(elf_mapped_base);
|
||||
|
||||
if (cls == ELFCLASS32) {
|
||||
FindElfClassSection<ElfClass32>(elf_base, section_name, section_type,
|
||||
section_start, section_size);
|
||||
return *section_start != NULL;
|
||||
} else if (cls == ELFCLASS64) {
|
||||
FindElfClassSection<ElfClass64>(elf_base, section_name, section_type,
|
||||
section_start, section_size);
|
||||
return *section_start != NULL;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool FindElfSegment(const void* elf_mapped_base, uint32_t segment_type,
|
||||
const void** segment_start, int* segment_size,
|
||||
int* elfclass) {
|
||||
MOZ_ASSERT(elf_mapped_base);
|
||||
MOZ_ASSERT(segment_start);
|
||||
MOZ_ASSERT(segment_size);
|
||||
|
||||
*segment_start = NULL;
|
||||
*segment_size = 0;
|
||||
|
||||
if (!IsValidElf(elf_mapped_base)) return false;
|
||||
|
||||
int cls = ElfClass(elf_mapped_base);
|
||||
if (elfclass) {
|
||||
*elfclass = cls;
|
||||
}
|
||||
|
||||
const char* elf_base = static_cast<const char*>(elf_mapped_base);
|
||||
|
||||
if (cls == ELFCLASS32) {
|
||||
FindElfClassSegment<ElfClass32>(elf_base, segment_type, segment_start,
|
||||
segment_size);
|
||||
return *segment_start != NULL;
|
||||
} else if (cls == ELFCLASS64) {
|
||||
FindElfClassSegment<ElfClass64>(elf_base, segment_type, segment_start,
|
||||
segment_size);
|
||||
return *segment_start != NULL;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// (derived from)
|
||||
// file_id.cc: Return a unique identifier for a file
|
||||
//
|
||||
// See file_id.h for documentation
|
||||
//
|
||||
|
||||
// ELF note name and desc are 32-bits word padded.
|
||||
# define NOTE_PADDING(a) ((a + 3) & ~3)
|
||||
|
||||
// These functions are also used inside the crashed process, so be safe
|
||||
// and use the syscall/libc wrappers instead of direct syscalls or libc.
|
||||
|
||||
template <typename ElfClass>
|
||||
static bool ElfClassBuildIDNoteIdentifier(const void* section, int length,
|
||||
uint8_t identifier[kMDGUIDSize]) {
|
||||
typedef typename ElfClass::Nhdr Nhdr;
|
||||
|
||||
const void* section_end = reinterpret_cast<const char*>(section) + length;
|
||||
const Nhdr* note_header = reinterpret_cast<const Nhdr*>(section);
|
||||
while (reinterpret_cast<const void*>(note_header) < section_end) {
|
||||
if (note_header->n_type == NT_GNU_BUILD_ID) break;
|
||||
note_header = reinterpret_cast<const Nhdr*>(
|
||||
reinterpret_cast<const char*>(note_header) + sizeof(Nhdr) +
|
||||
NOTE_PADDING(note_header->n_namesz) +
|
||||
NOTE_PADDING(note_header->n_descsz));
|
||||
}
|
||||
if (reinterpret_cast<const void*>(note_header) >= section_end ||
|
||||
note_header->n_descsz == 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const char* build_id = reinterpret_cast<const char*>(note_header) +
|
||||
sizeof(Nhdr) + NOTE_PADDING(note_header->n_namesz);
|
||||
// Copy as many bits of the build ID as will fit
|
||||
// into the GUID space.
|
||||
memset(identifier, 0, kMDGUIDSize);
|
||||
memcpy(identifier, build_id,
|
||||
std::min(kMDGUIDSize, (size_t)note_header->n_descsz));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Attempt to locate a .note.gnu.build-id section in an ELF binary
|
||||
// and copy as many bytes of it as will fit into |identifier|.
|
||||
static bool FindElfBuildIDNote(const void* elf_mapped_base,
|
||||
uint8_t identifier[kMDGUIDSize]) {
|
||||
void* note_section;
|
||||
int note_size, elfclass;
|
||||
if ((!FindElfSegment(elf_mapped_base, PT_NOTE, (const void**)¬e_section,
|
||||
¬e_size, &elfclass) ||
|
||||
note_size == 0) &&
|
||||
(!FindElfSection(elf_mapped_base, ".note.gnu.build-id", SHT_NOTE,
|
||||
(const void**)¬e_section, ¬e_size, &elfclass) ||
|
||||
note_size == 0)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (elfclass == ELFCLASS32) {
|
||||
return ElfClassBuildIDNoteIdentifier<ElfClass32>(note_section, note_size,
|
||||
identifier);
|
||||
} else if (elfclass == ELFCLASS64) {
|
||||
return ElfClassBuildIDNoteIdentifier<ElfClass64>(note_section, note_size,
|
||||
identifier);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// Attempt to locate the .text section of an ELF binary and generate
|
||||
// a simple hash by XORing the first page worth of bytes into |identifier|.
|
||||
static bool HashElfTextSection(const void* elf_mapped_base,
|
||||
uint8_t identifier[kMDGUIDSize]) {
|
||||
void* text_section;
|
||||
int text_size;
|
||||
if (!FindElfSection(elf_mapped_base, ".text", SHT_PROGBITS,
|
||||
(const void**)&text_section, &text_size, NULL) ||
|
||||
text_size == 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
memset(identifier, 0, kMDGUIDSize);
|
||||
const uint8_t* ptr = reinterpret_cast<const uint8_t*>(text_section);
|
||||
const uint8_t* ptr_end = ptr + std::min(text_size, 4096);
|
||||
while (ptr < ptr_end) {
|
||||
for (unsigned i = 0; i < kMDGUIDSize; i++) identifier[i] ^= ptr[i];
|
||||
ptr += kMDGUIDSize;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// static
|
||||
bool FileID::ElfFileIdentifierFromMappedFile(const void* base,
|
||||
uint8_t identifier[kMDGUIDSize]) {
|
||||
// Look for a build id note first.
|
||||
if (FindElfBuildIDNote(base, identifier)) return true;
|
||||
|
||||
// Fall back on hashing the first page of the text section.
|
||||
return HashElfTextSection(base, identifier);
|
||||
}
|
||||
|
||||
// static
|
||||
void FileID::ConvertIdentifierToString(const uint8_t identifier[kMDGUIDSize],
|
||||
char* buffer, int buffer_length) {
|
||||
uint8_t identifier_swapped[kMDGUIDSize];
|
||||
|
||||
// Endian-ness swap to match dump processor expectation.
|
||||
memcpy(identifier_swapped, identifier, kMDGUIDSize);
|
||||
uint32_t* data1 = reinterpret_cast<uint32_t*>(identifier_swapped);
|
||||
*data1 = htonl(*data1);
|
||||
uint16_t* data2 = reinterpret_cast<uint16_t*>(identifier_swapped + 4);
|
||||
*data2 = htons(*data2);
|
||||
uint16_t* data3 = reinterpret_cast<uint16_t*>(identifier_swapped + 6);
|
||||
*data3 = htons(*data3);
|
||||
|
||||
int buffer_idx = 0;
|
||||
for (unsigned int idx = 0;
|
||||
(buffer_idx < buffer_length) && (idx < kMDGUIDSize); ++idx) {
|
||||
int hi = (identifier_swapped[idx] >> 4) & 0x0F;
|
||||
int lo = (identifier_swapped[idx]) & 0x0F;
|
||||
|
||||
if (idx == 4 || idx == 6 || idx == 8 || idx == 10)
|
||||
buffer[buffer_idx++] = '-';
|
||||
|
||||
buffer[buffer_idx++] = (hi >= 10) ? 'A' + hi - 10 : '0' + hi;
|
||||
buffer[buffer_idx++] = (lo >= 10) ? 'A' + lo - 10 : '0' + lo;
|
||||
}
|
||||
|
||||
// NULL terminate
|
||||
buffer[(buffer_idx < buffer_length) ? buffer_idx : buffer_idx - 1] = 0;
|
||||
}
|
||||
|
||||
} // namespace lul
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,69 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
|
||||
// Copyright (c) 2006, 2011, 2012 Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// This file is derived from the following files in
|
||||
// toolkit/crashreporter/google-breakpad:
|
||||
// src/common/linux/dump_symbols.h
|
||||
|
||||
#ifndef LulElfExt_h
#define LulElfExt_h

// These two functions are the external interface to the
// ELF/Dwarf/EXIDX reader.

#include "LulMainInt.h"

using lul::SecMap;

namespace lul {

class UniqueStringUniverse;

// Find all the unwind information in OBJ_FILE, an ELF executable
// or shared library, and add it to SMAP.
bool ReadSymbolData(const std::string& obj_file,
                    const std::vector<std::string>& debug_dirs, SecMap* smap,
                    void* rx_avma, size_t rx_size, UniqueStringUniverse* usu,
                    void (*log)(const char*));

// The same as ReadSymbolData, except that OBJ_FILE is assumed to
// point to a mapped-in image of OBJ_FILENAME.
bool ReadSymbolDataInternal(const uint8_t* obj_file,
                            const std::string& obj_filename,
                            const std::vector<std::string>& debug_dirs,
                            SecMap* smap, void* rx_avma, size_t rx_size,
                            UniqueStringUniverse* usu,
                            void (*log)(const char*));

}  // namespace lul

#endif  // LulElfExt_h
|
|
@ -1,210 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
|
||||
// Copyright (c) 2006, 2012, Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// This file is derived from the following files in
|
||||
// toolkit/crashreporter/google-breakpad:
|
||||
// src/common/android/include/elf.h
|
||||
// src/common/linux/elfutils.h
|
||||
// src/common/linux/file_id.h
|
||||
// src/common/linux/elfutils-inl.h
|
||||
|
||||
#ifndef LulElfInt_h
|
||||
#define LulElfInt_h
|
||||
|
||||
// This header defines functions etc internal to the ELF reader. It
|
||||
// should not be included outside of LulElf.cpp.
|
||||
|
||||
#include <elf.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "mozilla/Assertions.h"
|
||||
|
||||
#include "PlatformMacros.h"
|
||||
|
||||
// (derived from)
|
||||
// elfutils.h: Utilities for dealing with ELF files.
|
||||
//
|
||||
#include <link.h>
|
||||
|
||||
#if defined(GP_OS_android)
|
||||
|
||||
// From toolkit/crashreporter/google-breakpad/src/common/android/include/elf.h
|
||||
// The Android headers don't always define this constant.
|
||||
# ifndef EM_X86_64
|
||||
# define EM_X86_64 62
|
||||
# endif
|
||||
|
||||
# ifndef EM_PPC64
|
||||
# define EM_PPC64 21
|
||||
# endif
|
||||
|
||||
# ifndef EM_S390
|
||||
# define EM_S390 22
|
||||
# endif
|
||||
|
||||
# ifndef NT_GNU_BUILD_ID
|
||||
# define NT_GNU_BUILD_ID 3
|
||||
# endif
|
||||
|
||||
# ifndef ElfW
|
||||
# define ElfW(type) _ElfW(Elf, ELFSIZE, type)
|
||||
# define _ElfW(e, w, t) _ElfW_1(e, w, _##t)
|
||||
# define _ElfW_1(e, w, t) e##w##t
|
||||
# endif
|
||||
|
||||
#endif
|
||||
|
||||
namespace lul {
|
||||
|
||||
// Traits classes so consumers can write templatized code to deal
|
||||
// with specific ELF bits.
|
||||
// Traits for 32-bit ELF objects: aliases for the Elf32_* types plus the
// class tag and address width, so templated readers can stay generic.
struct ElfClass32 {
  typedef Elf32_Addr Addr;
  typedef Elf32_Ehdr Ehdr;
  typedef Elf32_Nhdr Nhdr;
  typedef Elf32_Phdr Phdr;
  typedef Elf32_Shdr Shdr;
  typedef Elf32_Half Half;
  typedef Elf32_Off Off;
  typedef Elf32_Word Word;

  static const int kClass = ELFCLASS32;
  static const size_t kAddrSize = sizeof(Elf32_Addr);
};
|
||||
|
||||
// Traits for 64-bit ELF objects: aliases for the Elf64_* types plus the
// class tag and address width, mirroring ElfClass32.
struct ElfClass64 {
  typedef Elf64_Addr Addr;
  typedef Elf64_Ehdr Ehdr;
  typedef Elf64_Nhdr Nhdr;
  typedef Elf64_Phdr Phdr;
  typedef Elf64_Shdr Shdr;
  typedef Elf64_Half Half;
  typedef Elf64_Off Off;
  typedef Elf64_Word Word;

  static const int kClass = ELFCLASS64;
  static const size_t kAddrSize = sizeof(Elf64_Addr);
};
|
||||
|
||||
bool IsValidElf(const void* elf_header);
|
||||
int ElfClass(const void* elf_base);
|
||||
|
||||
// Attempt to find a section named |section_name| of type |section_type|
|
||||
// in the ELF binary data at |elf_mapped_base|. On success, returns true
|
||||
// and sets |*section_start| to point to the start of the section data,
|
||||
// and |*section_size| to the size of the section's data. If |elfclass|
|
||||
// is not NULL, set |*elfclass| to the ELF file class.
|
||||
bool FindElfSection(const void* elf_mapped_base, const char* section_name,
|
||||
uint32_t section_type, const void** section_start,
|
||||
int* section_size, int* elfclass);
|
||||
|
||||
// Internal helper method, exposed for convenience for callers
|
||||
// that already have more info.
|
||||
template <typename ElfClass>
|
||||
const typename ElfClass::Shdr* FindElfSectionByName(
|
||||
const char* name, typename ElfClass::Word section_type,
|
||||
const typename ElfClass::Shdr* sections, const char* section_names,
|
||||
const char* names_end, int nsection);
|
||||
|
||||
// Attempt to find the first segment of type |segment_type| in the ELF
|
||||
// binary data at |elf_mapped_base|. On success, returns true and sets
|
||||
// |*segment_start| to point to the start of the segment data, and
|
||||
// and |*segment_size| to the size of the segment's data. If |elfclass|
|
||||
// is not NULL, set |*elfclass| to the ELF file class.
|
||||
bool FindElfSegment(const void* elf_mapped_base, uint32_t segment_type,
|
||||
const void** segment_start, int* segment_size,
|
||||
int* elfclass);
|
||||
|
||||
// Convert an offset from an Elf header into a pointer to the mapped
|
||||
// address in the current process. Takes an extra template parameter
|
||||
// to specify the return type to avoid having to dynamic_cast the
|
||||
// result.
|
||||
template <typename ElfClass, typename T>
|
||||
const T* GetOffset(const typename ElfClass::Ehdr* elf_header,
|
||||
typename ElfClass::Off offset);
|
||||
|
||||
// (derived from)
|
||||
// file_id.h: Return a unique identifier for a file
|
||||
//
|
||||
|
||||
static const size_t kMDGUIDSize = sizeof(MDGUID);
|
||||
|
||||
class FileID {
|
||||
public:
|
||||
// Load the identifier for the elf file mapped into memory at |base| into
|
||||
// |identifier|. Return false if the identifier could not be created for the
|
||||
// file.
|
||||
static bool ElfFileIdentifierFromMappedFile(const void* base,
|
||||
uint8_t identifier[kMDGUIDSize]);
|
||||
|
||||
// Convert the |identifier| data to a NULL terminated string. The string will
|
||||
// be formatted as a UUID (e.g., 22F065BB-FC9C-49F7-80FE-26A7CEBD7BCE).
|
||||
// The |buffer| should be at least 37 bytes long to receive all of the data
|
||||
// and termination. Shorter buffers will contain truncated data.
|
||||
static void ConvertIdentifierToString(const uint8_t identifier[kMDGUIDSize],
|
||||
char* buffer, int buffer_length);
|
||||
};
|
||||
|
||||
template <typename ElfClass, typename T>
|
||||
const T* GetOffset(const typename ElfClass::Ehdr* elf_header,
|
||||
typename ElfClass::Off offset) {
|
||||
return reinterpret_cast<const T*>(reinterpret_cast<uintptr_t>(elf_header) +
|
||||
offset);
|
||||
}
|
||||
|
||||
template <typename ElfClass>
|
||||
const typename ElfClass::Shdr* FindElfSectionByName(
|
||||
const char* name, typename ElfClass::Word section_type,
|
||||
const typename ElfClass::Shdr* sections, const char* section_names,
|
||||
const char* names_end, int nsection) {
|
||||
MOZ_ASSERT(name != NULL);
|
||||
MOZ_ASSERT(sections != NULL);
|
||||
MOZ_ASSERT(nsection > 0);
|
||||
|
||||
int name_len = strlen(name);
|
||||
if (name_len == 0) return NULL;
|
||||
|
||||
for (int i = 0; i < nsection; ++i) {
|
||||
const char* section_name = section_names + sections[i].sh_name;
|
||||
if (sections[i].sh_type == section_type &&
|
||||
names_end - section_name >= name_len + 1 &&
|
||||
strcmp(name, section_name) == 0) {
|
||||
return sections + i;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
} // namespace lul
|
||||
|
||||
// And finally, the external interface, offered to LulMain.cpp
|
||||
#include "LulElfExt.h"
|
||||
|
||||
#endif // LulElfInt_h
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -1,377 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef LulMain_h
|
||||
#define LulMain_h
|
||||
|
||||
#include "PlatformMacros.h"
|
||||
#include "mozilla/Atomics.h"
|
||||
#include "mozilla/MemoryReporting.h"
|
||||
|
||||
// LUL: A Lightweight Unwind Library.
|
||||
// This file provides the end-user (external) interface for LUL.
|
||||
|
||||
// Some comments about naming in the implementation. These are safe
|
||||
// to ignore if you are merely using LUL, but are important if you
|
||||
// hack on its internals.
|
||||
//
|
||||
// Debuginfo readers in general have tended to use the word "address"
|
||||
// to mean several different things. This sometimes makes them
|
||||
// difficult to understand and maintain. LUL tries hard to avoid
|
||||
// using the word "address" and instead uses the following more
|
||||
// precise terms:
|
||||
//
|
||||
// * SVMA ("Stated Virtual Memory Address"): this is an address of a
|
||||
// symbol (etc) as it is stated in the symbol table, or other
|
||||
// metadata, of an object. Such values are typically small and
|
||||
// start from zero or thereabouts, unless the object has been
|
||||
// prelinked.
|
||||
//
|
||||
// * AVMA ("Actual Virtual Memory Address"): this is the address of a
|
||||
// symbol (etc) in a running process, that is, once the associated
|
||||
// object has been mapped into a process. Such values are typically
|
||||
// much larger than SVMAs, since objects can get mapped arbitrarily
|
||||
// far along the address space.
|
||||
//
|
||||
// * "Bias": the difference between AVMA and SVMA for a given symbol
|
||||
// (specifically, AVMA - SVMA). The bias is always an integral
|
||||
// number of pages. Once we know the bias for a given object's
|
||||
// text section (for example), we can compute the AVMAs of all of
|
||||
// its text symbols by adding the bias to their SVMAs.
|
||||
//
|
||||
// * "Image address": typically, to read debuginfo from an object we
|
||||
// will temporarily mmap in the file so as to read symbol tables
|
||||
// etc. Addresses in this temporary mapping are called "Image
|
||||
// addresses". Note that the temporary mapping is entirely
|
||||
// unrelated to the mappings of the file that the dynamic linker
|
||||
// must perform merely in order to get the program to run. Hence
|
||||
// image addresses are unrelated to either SVMAs or AVMAs.
|
||||
|
||||
namespace lul {
|
||||
|
||||
// A machine word plus validity tag.  Arithmetic on TaggedUWords
// propagates invalidity: any operation with an invalid operand yields
// an invalid result.
class TaggedUWord {
 public:
  // RUNS IN NO-MALLOC CONTEXT
  // Construct a valid one.
  explicit TaggedUWord(uintptr_t w) : mValue(w), mValid(true) {}

  // RUNS IN NO-MALLOC CONTEXT
  // Construct an invalid one.
  TaggedUWord() : mValue(0), mValid(false) {}

  // RUNS IN NO-MALLOC CONTEXT
  TaggedUWord operator+(TaggedUWord rhs) const {
    if (!Valid() || !rhs.Valid()) {
      return TaggedUWord();
    }
    return TaggedUWord(Value() + rhs.Value());
  }

  // RUNS IN NO-MALLOC CONTEXT
  TaggedUWord operator-(TaggedUWord rhs) const {
    if (!Valid() || !rhs.Valid()) {
      return TaggedUWord();
    }
    return TaggedUWord(Value() - rhs.Value());
  }

  // RUNS IN NO-MALLOC CONTEXT
  TaggedUWord operator&(TaggedUWord rhs) const {
    if (!Valid() || !rhs.Valid()) {
      return TaggedUWord();
    }
    return TaggedUWord(Value() & rhs.Value());
  }

  // RUNS IN NO-MALLOC CONTEXT
  TaggedUWord operator|(TaggedUWord rhs) const {
    if (!Valid() || !rhs.Valid()) {
      return TaggedUWord();
    }
    return TaggedUWord(Value() | rhs.Value());
  }

  // RUNS IN NO-MALLOC CONTEXT
  // Signed >= comparison; yields 1 or 0 as a TaggedUWord, invalid if
  // either operand is invalid.
  TaggedUWord CmpGEs(TaggedUWord rhs) const {
    if (!Valid() || !rhs.Valid()) {
      return TaggedUWord();
    }
    intptr_t lhs_signed = (intptr_t)Value();
    intptr_t rhs_signed = (intptr_t)rhs.Value();
    return TaggedUWord(lhs_signed >= rhs_signed ? 1 : 0);
  }

  // RUNS IN NO-MALLOC CONTEXT
  // Left shift; an over-wide shift amount (undefined behavior on plain
  // integers) produces an invalid result instead.
  TaggedUWord operator<<(TaggedUWord rhs) const {
    if (Valid() && rhs.Valid()) {
      uintptr_t shift = rhs.Value();
      if (shift < 8 * sizeof(uintptr_t)) {
        return TaggedUWord(Value() << shift);
      }
    }
    return TaggedUWord();
  }

  // RUNS IN NO-MALLOC CONTEXT
  // Is equal?  Note: non-validity on either side gives non-equality.
  bool operator==(TaggedUWord other) const {
    if (!mValid || !other.Valid()) {
      return false;
    }
    return mValue == other.Value();
  }

  // RUNS IN NO-MALLOC CONTEXT
  // Is it word-aligned?
  bool IsAligned() const {
    return mValid && (mValue & (sizeof(uintptr_t) - 1)) == 0;
  }

  // RUNS IN NO-MALLOC CONTEXT
  uintptr_t Value() const { return mValue; }

  // RUNS IN NO-MALLOC CONTEXT
  bool Valid() const { return mValid; }

 private:
  uintptr_t mValue;
  bool mValid;
};
|
||||
|
||||
// The registers, with validity tags, that will be unwound.  The exact
// register set depends on the target architecture.
struct UnwindRegs {
#if defined(GP_ARCH_arm)
  // ARM32: frame/stack/link/program-counter registers used by unwinding.
  TaggedUWord r7;
  TaggedUWord r11;
  TaggedUWord r12;
  TaggedUWord r13;
  TaggedUWord r14;
  TaggedUWord r15;
#elif defined(GP_ARCH_arm64)
  // AArch64: frame pointer, link register, stack pointer, pc.
  TaggedUWord x29;
  TaggedUWord x30;
  TaggedUWord sp;
  TaggedUWord pc;
#elif defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
  // x86/x86-64: base pointer, stack pointer, instruction pointer.
  TaggedUWord xbp;
  TaggedUWord xsp;
  TaggedUWord xip;
#elif defined(GP_ARCH_mips64)
  TaggedUWord sp;
  TaggedUWord fp;
  TaggedUWord pc;
#else
#  error "Unknown plat"
#endif
};
|
||||
|
||||
// The maximum number of bytes in a stack snapshot. This value can be increased
|
||||
// if necessary, but testing showed that 160k is enough to obtain good
|
||||
// backtraces on x86_64 Linux. Most backtraces fit comfortably into 4-8k of
|
||||
// stack space, but we do have some very deep stacks occasionally. Please see
|
||||
// the comments in DoNativeBacktrace as to why it's OK to have this value be so
|
||||
// large.
|
||||
static const size_t N_STACK_BYTES = 160 * 1024;

// The stack chunk image that will be unwound.
struct StackImage {
  // [start_avma, +len) specify the address range in the buffer.
  // Obviously we require 0 <= len <= N_STACK_BYTES.
  uintptr_t mStartAvma;  // AVMA of the first captured byte
  size_t mLen;           // number of valid bytes in mContents
  uint8_t mContents[N_STACK_BYTES];
};
|
||||
|
||||
// Statistics collection for the unwinder.  Counts frames recovered by
// each strategy; the template parameter allows the same shape to be
// instantiated with different counter types (e.g. plain vs atomic).
template <typename T>
class LULStats {
 public:
  LULStats() : mContext(0), mCFI(0), mFP(0) {}

  template <typename S>
  explicit LULStats(const LULStats<S>& aOther)
      : mContext(aOther.mContext), mCFI(aOther.mCFI), mFP(aOther.mFP) {}

  template <typename S>
  LULStats<T>& operator=(const LULStats<S>& aOther) {
    mContext = aOther.mContext;
    mCFI = aOther.mCFI;
    mFP = aOther.mFP;
    return *this;
  }

  // Summed difference across all three counters, used to detect whether
  // any frames were recovered between two snapshots.
  template <typename S>
  uint32_t operator-(const LULStats<S>& aOther) {
    return (mContext - aOther.mContext) + (mCFI - aOther.mCFI) +
           (mFP - aOther.mFP);
  }

  T mContext;  // Number of context frames
  T mCFI;      // Number of CFI/EXIDX frames
  T mFP;       // Number of frame-pointer recovered frames
};
|
||||
|
||||
// The core unwinder library class. Just one of these is needed, and
|
||||
// it can be shared by multiple unwinder threads.
|
||||
//
|
||||
// The library operates in one of two modes.
|
||||
//
|
||||
// * Admin mode. The library is this state after creation. In Admin
|
||||
// mode, no unwinding may be performed. It is however allowable to
|
||||
// perform administrative tasks -- primarily, loading of unwind info
|
||||
// -- in this mode. In particular, it is safe for the library to
|
||||
// perform dynamic memory allocation in this mode. Safe in the
|
||||
// sense that there is no risk of deadlock against unwinding threads
|
||||
// that might -- because of where they have been sampled -- hold the
|
||||
// system's malloc lock.
|
||||
//
|
||||
// * Unwind mode. In this mode, calls to ::Unwind may be made, but
|
||||
// nothing else. ::Unwind guarantees not to make any dynamic memory
|
||||
// requests, so as to guarantee that the calling thread won't
|
||||
// deadlock in the case where it already holds the system's malloc lock.
|
||||
//
|
||||
// The library is created in Admin mode. After debuginfo is loaded,
|
||||
// the caller must switch it into Unwind mode by calling
|
||||
// ::EnableUnwinding. There is no way to switch it back to Admin mode
|
||||
// after that. To safely switch back to Admin mode would require the
|
||||
// caller (or other external agent) to guarantee that there are no
|
||||
// pending ::Unwind calls.
|
||||
|
||||
class PriMap;
|
||||
class SegArray;
|
||||
class UniqueStringUniverse;
|
||||
|
||||
class LUL {
 public:
  // Create; supply a logging sink.  Sets the object in Admin mode.
  explicit LUL(void (*aLog)(const char*));

  // Destroy.  Caller is responsible for ensuring that no other
  // threads are in Unwind calls.  All resources are freed and all
  // registered unwinder threads are deregistered.  Can be called
  // either in Admin or Unwind mode.
  ~LUL();

  // Notify the library that unwinding is now allowed and so
  // admin-mode calls are no longer allowed.  The object is initially
  // created in admin mode.  The only possible transition is
  // admin->unwinding, therefore.
  void EnableUnwinding();

  // Notify of a new r-x mapping, and load the associated unwind info.
  // The filename is strdup'd and used for debug printing.  If
  // aMappedImage is NULL, this function will mmap/munmap the file
  // itself, so as to be able to read the unwind info.  If
  // aMappedImage is non-NULL then it is assumed to point to a
  // caller-supplied and caller-managed mapped image of the file.
  // May only be called in Admin mode.
  void NotifyAfterMap(uintptr_t aRXavma, size_t aSize, const char* aFileName,
                      const void* aMappedImage);

  // In rare cases we know an executable area exists but don't know
  // what the associated file is.  This call notifies LUL of such
  // areas.  This is important for correct functioning of stack
  // scanning and of the x86-{linux,android} special-case
  // __kernel_syscall function handling.
  // This must be called only after the code area in
  // question really has been mapped.
  // May only be called in Admin mode.
  void NotifyExecutableArea(uintptr_t aRXavma, size_t aSize);

  // Notify that a mapped area has been unmapped; discard any
  // associated unwind info.  Acquires mRWlock for writing.  Note that
  // to avoid segfaulting the stack-scan unwinder, which inspects code
  // areas, this must be called before the code area in question is
  // really unmapped.  Note that, unlike NotifyAfterMap(), this
  // function takes the start and end addresses of the range to be
  // unmapped, rather than a start and a length parameter.  This is so
  // as to make it possible to notify an unmap for the entire address
  // space using a single call.
  // May only be called in Admin mode.
  void NotifyBeforeUnmap(uintptr_t aAvmaMin, uintptr_t aAvmaMax);

  // Apply NotifyBeforeUnmap to the entire address space.  This causes
  // LUL to discard all unwind and executable-area information for the
  // entire address space.
  // May only be called in Admin mode.
  void NotifyBeforeUnmapAll() { NotifyBeforeUnmap(0, UINTPTR_MAX); }

  // Returns the number of mappings currently registered.
  // May only be called in Admin mode.
  size_t CountMappings();

  // Unwind |aStackImg| starting with the context in |aStartRegs|.
  // Write the number of frames recovered in *aFramesUsed.  Put
  // the PC values in aFramePCs[0 .. *aFramesUsed-1] and
  // the SP values in aFrameSPs[0 .. *aFramesUsed-1].
  // |aFramesAvail| is the size of the two output arrays and hence the
  // largest possible value of *aFramesUsed.  PC values are always
  // valid, and the unwind will stop when the PC becomes invalid, but
  // the SP values might be invalid, in which case the value zero will
  // be written in the relevant frameSPs[] slot.
  //
  // This function assumes that the SP values increase as it unwinds
  // away from the innermost frame -- that is, that the stack grows
  // down.  It monitors SP values as it unwinds to check they
  // decrease, so as to avoid looping on corrupted stacks.
  //
  // May only be called in Unwind mode.  Multiple threads may unwind
  // at once.  LUL user is responsible for ensuring that no thread makes
  // any Admin calls whilst in Unwind mode.
  // MOZ_CRASHes if the calling thread is not registered for unwinding.
  //
  // The calling thread must previously have been registered via a call to
  // RegisterSampledThread.
  void Unwind(/*OUT*/ uintptr_t* aFramePCs,
              /*OUT*/ uintptr_t* aFrameSPs,
              /*OUT*/ size_t* aFramesUsed,
              /*OUT*/ size_t* aFramePointerFramesAcquired, size_t aFramesAvail,
              UnwindRegs* aStartRegs, StackImage* aStackImg);

  // The logging sink.  Call to send debug strings to the caller-
  // specified destination.  Can only be called by the Admin thread.
  void (*mLog)(const char*);

  // Statistics relating to unwinding.  These have to be atomic since
  // unwinding can occur on different threads simultaneously.
  LULStats<mozilla::Atomic<uint32_t>> mStats;

  // Possibly show the statistics.  This may not be called from any
  // registered sampling thread, since it involves I/O.
  void MaybeShowStats();

  size_t SizeOfIncludingThis(mozilla::MallocSizeOf) const;

 private:
  // The statistics counters at the point where they were last printed.
  LULStats<uint32_t> mStatsPrevious;

  // Are we in admin mode?  Initially |true| but changes to |false|
  // once unwinding begins.
  bool mAdminMode;

  // The thread ID associated with admin mode.  This is the only thread
  // that is allowed to perform non-Unwind calls on this object.  Conversely,
  // no registered Unwinding thread may be the admin thread.  This is so
  // as to clearly partition the one thread that may do dynamic memory
  // allocation from the threads that are being sampled, since the latter
  // absolutely may not do dynamic memory allocation.
  int mAdminThreadId;

  // The top level mapping from code address ranges to postprocessed
  // unwind info.  Basically a sorted array of (addr, len, info)
  // records.  This field is updated by NotifyAfterMap and NotifyBeforeUnmap.
  PriMap* mPriMap;

  // An auxiliary structure that records which address ranges are
  // mapped r-x, for the benefit of the stack scanner.
  SegArray* mSegArray;

  // A UniqueStringUniverse that holds all the strdup'd strings created
  // whilst reading unwind information.  This is included so as to make
  // it possible to free them in ~LUL.
  UniqueStringUniverse* mUSU;
};
|
||||
|
||||
// Run unit tests on an initialised, loaded-up LUL instance, and print
|
||||
// summary results on |aLUL|'s logging sink. Also return the number
|
||||
// of tests run in *aNTests and the number that passed in
|
||||
// *aNTestsPassed.
|
||||
void RunLulUnitTests(/*OUT*/ int* aNTests, /*OUT*/ int* aNTestsPassed,
|
||||
LUL* aLUL);
|
||||
|
||||
} // namespace lul
|
||||
|
||||
#endif // LulMain_h
|
|
@ -1,419 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef LulMainInt_h
|
||||
#define LulMainInt_h
|
||||
|
||||
#include "PlatformMacros.h"
|
||||
#include "LulMain.h" // for TaggedUWord
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "mozilla/Assertions.h"
|
||||
|
||||
// This file is provides internal interface inside LUL. If you are an
|
||||
// end-user of LUL, do not include it in your code. The end-user
|
||||
// interface is in LulMain.h.
|
||||
|
||||
namespace lul {
|
||||
|
||||
using std::vector;
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// DW_REG_ constants //
|
||||
////////////////////////////////////////////////////////////////
|
||||
|
||||
// These are the Dwarf CFI register numbers, as (presumably) defined
|
||||
// in the ELF ABI supplements for each architecture.
|
||||
|
||||
enum DW_REG_NUMBER {
  // No real register has this number.  It's convenient to be able to
  // treat the CFA (Canonical Frame Address) as "just another
  // register", though.
  DW_REG_CFA = -1,
#if defined(GP_ARCH_arm)
  // ARM registers
  DW_REG_ARM_R7 = 7,
  DW_REG_ARM_R11 = 11,
  DW_REG_ARM_R12 = 12,
  DW_REG_ARM_R13 = 13,  // stack pointer
  DW_REG_ARM_R14 = 14,  // link register
  DW_REG_ARM_R15 = 15,  // program counter
#elif defined(GP_ARCH_arm64)
  // aarch64 registers
  DW_REG_AARCH64_X29 = 29,  // frame pointer
  DW_REG_AARCH64_X30 = 30,  // link register
  DW_REG_AARCH64_SP = 31,
#elif defined(GP_ARCH_amd64)
  // Because the X86 (32 bit) and AMD64 (64 bit) summarisers are
  // combined, a merged set of register constants is needed.
  DW_REG_INTEL_XBP = 6,
  DW_REG_INTEL_XSP = 7,
  DW_REG_INTEL_XIP = 16,
#elif defined(GP_ARCH_x86)
  DW_REG_INTEL_XBP = 5,
  DW_REG_INTEL_XSP = 4,
  DW_REG_INTEL_XIP = 8,
#elif defined(GP_ARCH_mips64)
  DW_REG_MIPS_SP = 29,
  DW_REG_MIPS_FP = 30,
  DW_REG_MIPS_PC = 34,
#else
#  error "Unknown arch"
#endif
};
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// PfxExpr //
|
||||
////////////////////////////////////////////////////////////////
|
||||
|
||||
// The opcodes of the tiny stack-machine language in which prefix
// expressions are encoded.
enum PfxExprOp {
  //             meaning of mOperand     effect on stack
  PX_Start,   // bool start-with-CFA?    start, with CFA on stack, or not
  PX_End,     // none                    stop; result is at top of stack
  PX_SImm32,  // int32                   push signed int32
  PX_DwReg,   // DW_REG_NUMBER           push value of the specified reg
  PX_Deref,   // none                    pop X ; push *X
  PX_Add,     // none                    pop X ; pop Y ; push Y + X
  PX_Sub,     // none                    pop X ; pop Y ; push Y - X
  PX_And,     // none                    pop X ; pop Y ; push Y & X
  PX_Or,      // none                    pop X ; pop Y ; push Y | X
  PX_CmpGES,  // none                    pop X ; pop Y ; push (Y >=s X) ? 1 : 0
  PX_Shl      // none                    pop X ; pop Y ; push Y << X
};

// One prefix-expression instruction: an opcode plus a 32-bit operand,
// the latter only meaningful for opcodes that take one (see the table
// above).
struct PfxInstr {
  PfxInstr(PfxExprOp opcode, int32_t operand)
      : mOpcode(opcode), mOperand(operand) {}
  // For operand-less opcodes; the operand field is zeroed.
  explicit PfxInstr(PfxExprOp opcode) : mOpcode(opcode), mOperand(0) {}
  // Two instructions are equal exactly when both fields match.
  bool operator==(const PfxInstr& other) const {
    if (mOpcode != other.mOpcode) {
      return false;
    }
    return mOperand == other.mOperand;
  }
  PfxExprOp mOpcode;
  int32_t mOperand;
};

static_assert(sizeof(PfxInstr) <= 8, "PfxInstr size changed unexpectedly");
|
||||
|
||||
// Evaluate the prefix expression whose PfxInstrs start at aPfxInstrs[start].
|
||||
// In the case of any mishap (stack over/underflow, running off the end of
|
||||
// the instruction vector, obviously malformed sequences),
|
||||
// return an invalid TaggedUWord.
|
||||
// RUNS IN NO-MALLOC CONTEXT
|
||||
TaggedUWord EvaluatePfxExpr(int32_t start, const UnwindRegs* aOldRegs,
|
||||
TaggedUWord aCFA, const StackImage* aStackImg,
|
||||
const vector<PfxInstr>& aPfxInstrs);
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// LExpr //
|
||||
////////////////////////////////////////////////////////////////
|
||||
|
||||
// An expression -- very primitive. Denotes either "register +
|
||||
// offset", a dereferenced version of the same, or a reference to a
|
||||
// prefix expression stored elsewhere. So as to allow convenient
|
||||
// handling of Dwarf-derived unwind info, the register may also denote
|
||||
// the CFA. A large number of these need to be stored, so we ensure
|
||||
// it fits into 8 bytes. See comment below on RuleSet to see how
|
||||
// expressions fit into the bigger picture.
|
||||
|
||||
// How an LExpr is to be interpreted.
enum LExprHow {
  UNKNOWN = 0,  // This LExpr denotes no value.
  NODEREF,      // Value is  (mReg + mOffset).
  DEREF,        // Value is *(mReg + mOffset).
  PFXEXPR       // Value is EvaluatePfxExpr(secMap->mPfxInstrs[mOffset])
};

// Render an LExprHow as a string, for debug printing.  Any value
// outside the defined enumerators maps to "LExpr-??".
inline static const char* NameOf_LExprHow(LExprHow how) {
  // The enumerators are contiguous from zero, so a bounds-checked table
  // lookup is equivalent to a switch over all cases plus a default.
  static const char* const kNames[] = {"UNKNOWN", "NODEREF", "DEREF",
                                       "PFXEXPR"};
  const unsigned ix = static_cast<unsigned>(how);
  if (ix < sizeof(kNames) / sizeof(kNames[0])) {
    return kNames[ix];
  }
  return "LExpr-??";
}
|
||||
|
||||
struct LExpr {
  // Denotes an expression with no value.
  LExpr() : mHow(UNKNOWN), mReg(0), mOffset(0) {}

  // Denotes any expressible expression.  Debug builds sanity-check that
  // |reg| and |offset| are consistent with the given |how|.
  LExpr(LExprHow how, int16_t reg, int32_t offset)
      : mHow(how), mReg(reg), mOffset(offset) {
    switch (how) {
      case UNKNOWN:
        MOZ_ASSERT(reg == 0 && offset == 0);
        break;
      case NODEREF:
        break;
      case DEREF:
        break;
      case PFXEXPR:
        // For PFXEXPR, |offset| indexes into a PfxInstr vector (see
        // LExprHow), so it must be nonnegative, and |reg| is unused.
        MOZ_ASSERT(reg == 0 && offset >= 0);
        break;
      default:
        MOZ_ASSERT(0, "LExpr::LExpr: invalid how");
    }
  }

  // Change the offset for an expression that references memory.
  LExpr add_delta(long delta) {
    MOZ_ASSERT(mHow == NODEREF);
    // If this is a non-debug build and the above assertion would have
    // failed, at least return LExpr() so that the machinery that uses
    // the resulting expression fails in a repeatable way.
    return (mHow == NODEREF) ? LExpr(mHow, mReg, mOffset + delta)
                             : LExpr();  // Gone bad
  }

  // Dereference an expression that denotes a memory address.
  LExpr deref() {
    MOZ_ASSERT(mHow == NODEREF);
    // Same rationale as for add_delta().
    return (mHow == NODEREF) ? LExpr(DEREF, mReg, mOffset)
                             : LExpr();  // Gone bad
  }

  // Print a rule for recovery of |aNewReg| whose recovered value
  // is this LExpr.
  std::string ShowRule(const char* aNewReg) const;

  // Evaluate this expression, producing a TaggedUWord.  |aOldRegs|
  // holds register values that may be referred to by the expression.
  // |aCFA| holds the CFA value, if any, that applies.  |aStackImg|
  // contains a chunk of stack that will be consulted if the expression
  // references memory.  |aPfxInstrs| holds the vector of PfxInstrs
  // that will be consulted if this is a PFXEXPR.
  // RUNS IN NO-MALLOC CONTEXT
  TaggedUWord EvaluateExpr(const UnwindRegs* aOldRegs, TaggedUWord aCFA,
                           const StackImage* aStackImg,
                           const vector<PfxInstr>* aPfxInstrs) const;

  // Representation of expressions.  If |mReg| is DW_REG_CFA (-1) then
  // it denotes the CFA.  All other allowed values for |mReg| are
  // nonnegative and are DW_REG_ values.
  LExprHow mHow : 8;
  int16_t mReg;     // A DW_REG_ value
  int32_t mOffset;  // 32-bit signed offset should be more than enough.
};

// A large number of LExprs are stored, so keep the struct small.
static_assert(sizeof(LExpr) <= 8, "LExpr size changed unexpectedly");
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// RuleSet //
|
||||
////////////////////////////////////////////////////////////////
|
||||
|
||||
// This is platform-dependent. For some address range, describes how
|
||||
// to recover the CFA and then how to recover the registers for the
|
||||
// previous frame.
|
||||
//
|
||||
// The set of LExprs contained in a given RuleSet describe a DAG which
|
||||
// says how to compute the caller's registers ("new registers") from
|
||||
// the callee's registers ("old registers"). The DAG can contain a
|
||||
// single internal node, which is the value of the CFA for the callee.
|
||||
// It would be possible to construct a DAG that omits the CFA, but
|
||||
// including it makes the summarisers simpler, and the Dwarf CFI spec
|
||||
// has the CFA as a central concept.
|
||||
//
|
||||
// For this to make sense, |mCfaExpr| can't have
|
||||
// |mReg| == DW_REG_CFA since we have no previous value for the CFA.
|
||||
// All of the other |Expr| fields can -- and usually do -- specify
|
||||
// |mReg| == DW_REG_CFA.
|
||||
//
|
||||
// With that in place, the unwind algorithm proceeds as follows.
|
||||
//
|
||||
// (0) Initially: we have values for the old registers, and a memory
|
||||
// image.
|
||||
//
|
||||
// (1) Compute the CFA by evaluating |mCfaExpr|. Add the computed
|
||||
// value to the set of "old registers".
|
||||
//
|
||||
// (2) Compute values for the registers by evaluating all of the other
|
||||
// |Expr| fields in the RuleSet. These can depend on both the old
|
||||
// register values and the just-computed CFA.
|
||||
//
|
||||
// If we are unwinding without computing a CFA, perhaps because the
|
||||
// RuleSets are derived from EXIDX instead of Dwarf, then
|
||||
// |mCfaExpr.mHow| will be LExpr::UNKNOWN, so the computed value will
|
||||
// be invalid -- that is, TaggedUWord() -- and so any attempt to use
|
||||
// that will result in the same value. But that's OK because the
|
||||
// RuleSet would make no sense if depended on the CFA but specified no
|
||||
// way to compute it.
|
||||
//
|
||||
// A RuleSet is not allowed to cover zero address range. Having zero
|
||||
// length would break binary searching in SecMaps and PriMaps.
|
||||
|
||||
class RuleSet {
 public:
  RuleSet();
  // Dump this RuleSet via the logging sink |aLog|, for debugging.
  void Print(void (*aLog)(const char*)) const;

  // Find the LExpr* for a given DW_REG_ value in this class.
  LExpr* ExprForRegno(DW_REG_NUMBER aRegno);

  // The code address range [mAddr, mAddr + mLen) that this RuleSet
  // covers.  Per the comments above, |mLen| must never be zero.
  uintptr_t mAddr;
  uintptr_t mLen;
  // How to compute the CFA.
  LExpr mCfaExpr;
  // How to compute caller register values.  These may reference the
  // value defined by |mCfaExpr|.
#if defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
  LExpr mXipExpr;  // return address
  LExpr mXspExpr;
  LExpr mXbpExpr;
#elif defined(GP_ARCH_arm)
  LExpr mR15expr;  // return address
  LExpr mR14expr;
  LExpr mR13expr;
  LExpr mR12expr;
  LExpr mR11expr;
  LExpr mR7expr;
#elif defined(GP_ARCH_arm64)
  LExpr mX29expr;  // frame pointer register
  LExpr mX30expr;  // link register
  LExpr mSPexpr;
#elif defined(GP_ARCH_mips64)
  LExpr mPCexpr;
  LExpr mFPexpr;
  LExpr mSPexpr;
#else
#  error "Unknown arch"
#endif
};
|
||||
|
||||
// Returns |true| for Dwarf register numbers which are members
|
||||
// of the set of registers that LUL unwinds on this target.
|
||||
// Returns |true| for Dwarf register numbers which are members of the
// set of registers that LUL unwinds on this target (the same set that
// RuleSet carries an LExpr for).
static inline bool registerIsTracked(DW_REG_NUMBER reg) {
  switch (reg) {
#if defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
    case DW_REG_INTEL_XBP:
    case DW_REG_INTEL_XSP:
    case DW_REG_INTEL_XIP:
      return true;
#elif defined(GP_ARCH_arm)
    case DW_REG_ARM_R7:
    case DW_REG_ARM_R11:
    case DW_REG_ARM_R12:
    case DW_REG_ARM_R13:
    case DW_REG_ARM_R14:
    case DW_REG_ARM_R15:
      return true;
#elif defined(GP_ARCH_arm64)
    case DW_REG_AARCH64_X29:
    case DW_REG_AARCH64_X30:
    case DW_REG_AARCH64_SP:
      return true;
#elif defined(GP_ARCH_mips64)
    case DW_REG_MIPS_FP:
    case DW_REG_MIPS_SP:
    case DW_REG_MIPS_PC:
      return true;
#else
#  error "Unknown arch"
#endif
    default:
      // Anything else -- including DW_REG_CFA -- is not tracked.
      return false;
  }
}
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// SecMap //
|
||||
////////////////////////////////////////////////////////////////
|
||||
|
||||
// A SecMap may have zero address range, temporarily, whilst RuleSets
|
||||
// are being added to it. But adding a zero-range SecMap to a PriMap
|
||||
// will make it impossible to maintain the total order of the PriMap
|
||||
// entries, and so that can't be allowed to happen.
|
||||
|
||||
class SecMap {
 public:
  // These summarise the contained mRuleSets, in that they give
  // exactly the lowest and highest addresses that any of the entries
  // in this SecMap cover.  Hence invariants:
  //
  // mRuleSets is nonempty
  //    <=>  mSummaryMinAddr <= mSummaryMaxAddr
  //         && mSummaryMinAddr == mRuleSets[0].mAddr
  //         && mSummaryMaxAddr == mRuleSets[#rulesets-1].mAddr
  //                               + mRuleSets[#rulesets-1].mLen - 1;
  //
  // This requires that no RuleSet has zero length.
  //
  // mRuleSets is empty
  //    <=>  mSummaryMinAddr > mSummaryMaxAddr
  //
  // This doesn't constrain mSummaryMinAddr and mSummaryMaxAddr uniquely,
  // so let's use mSummaryMinAddr == 1 and mSummaryMaxAddr == 0 to denote
  // this case.

  explicit SecMap(void (*aLog)(const char*));
  ~SecMap();

  // Binary search mRuleSets to find one that brackets |ia|, or nullptr
  // if none is found.  It's not allowable to do this until PrepareRuleSets
  // has been called first.
  RuleSet* FindRuleSet(uintptr_t ia);

  // Add a RuleSet to the collection.  The rule is copied in.  Calling
  // this makes the map non-searchable.
  void AddRuleSet(const RuleSet* rs);

  // Add a PfxInstr to the vector of such instrs, and return the index
  // in the vector.  Calling this makes the map non-searchable.
  uint32_t AddPfxInstr(PfxInstr pfxi);

  // Returns the entire vector of PfxInstrs.
  const vector<PfxInstr>* GetPfxInstrs() { return &mPfxInstrs; }

  // Prepare the map for searching.  Also, remove any rules for code
  // address ranges which don't fall inside [start, +len).  |len| may
  // not be zero.
  void PrepareRuleSets(uintptr_t start, size_t len);

  // Whether the map currently holds no RuleSets (see invariants above).
  bool IsEmpty();

  // Number of RuleSets currently held.
  size_t Size() { return mRuleSets.size(); }

  size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;

  // The min and max addresses of the addresses in the contained
  // RuleSets.  See comment above for invariants.
  uintptr_t mSummaryMinAddr;
  uintptr_t mSummaryMaxAddr;

 private:
  // False whilst adding entries; true once it is safe to call FindRuleSet.
  // Transition (false->true) is caused by calling PrepareRuleSets().
  bool mUsable;

  // A vector of RuleSets, sorted, nonoverlapping (post Prepare()).
  vector<RuleSet> mRuleSets;

  // A vector of PfxInstrs, which are referred to by the RuleSets.
  // These are provided as a representation of Dwarf expressions
  // (DW_CFA_val_expression, DW_CFA_expression, DW_CFA_def_cfa_expression),
  // are relatively expensive to evaluate, and are therefore
  // expected to be used only occasionally.
  //
  // The vector holds a bunch of separate PfxInstr programs, each one
  // starting with a PX_Start and terminated by a PX_End, all
  // concatenated together.  When a RuleSet can't recover a value
  // using a self-contained LExpr, it uses a PFXEXPR whose mOffset is
  // the index in this vector of start of the necessary PfxInstr program.
  vector<PfxInstr> mPfxInstrs;

  // A logging sink, for debugging.
  void (*mLog)(const char*);
};
|
||||
|
||||
} // namespace lul
|
||||
|
||||
#endif // ndef LulMainInt_h
|
|
@ -1,86 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include <stdio.h>
|
||||
# include <signal.h>
|
||||
# include <string.h>
|
||||
# include <stdlib.h>
|
||||
# include <time.h>
|
||||
|
||||
# include "platform.h"
|
||||
# include "PlatformMacros.h"
|
||||
# include "LulMain.h"
|
||||
# include "BaseProfilerSharedLibraries.h"
|
||||
# include "AutoObjectMapper.h"
|
||||
|
||||
// Contains miscellaneous helpers that are used to connect the Gecko Profiler
|
||||
// and LUL.
|
||||
|
||||
// Find out, in a platform-dependent way, where the code modules got
// mapped in the process' virtual address space, and get |aLUL| to
// load unwind info for them.
void read_procmaps(lul::LUL* aLUL) {
  // |aLUL| must not yet have any mappings registered; this routine is the
  // one that populates them.
  MOZ_ASSERT(aLUL->CountMappings() == 0);

#  if defined(GP_OS_linux) || defined(GP_OS_android)
  SharedLibraryInfo info = SharedLibraryInfo::GetInfoForSelf();

  for (size_t i = 0; i < info.GetSize(); i++) {
    const SharedLibrary& lib = info.GetEntry(i);

    std::string nativePath = lib.GetDebugPath();

#    if defined(GP_OS_android)
    // We're using faulty.lib.  Use a special-case object mapper.
    AutoObjectMapperFaultyLib mapper(aLUL->mLog);
#    else
    // We can use the standard POSIX-based mapper.
    AutoObjectMapperPOSIX mapper(aLUL->mLog);
#    endif

    // Ask |mapper| to map the object.  Then hand its mapped address
    // to NotifyAfterMap().
    void* image = nullptr;
    size_t size = 0;
    bool ok = mapper.Map(&image, &size, nativePath);
    if (ok && image && size > 0) {
      aLUL->NotifyAfterMap(lib.GetStart(), lib.GetEnd() - lib.GetStart(),
                           nativePath.c_str(), image);
    } else if (!ok && lib.GetDebugName().empty()) {
      // The object has no name and (as a consequence) the mapper failed to
      // map it.  This happens on Linux, where GetInfoForSelf() produces such
      // a mapping for the VDSO.  This is a problem on x86-{linux,android}
      // because lack of knowledge about the mapped area inhibits LUL's
      // special __kernel_syscall handling.  Hence notify |aLUL| at least of
      // the mapping, even though it can't read any unwind information for
      // the area.
      aLUL->NotifyExecutableArea(lib.GetStart(), lib.GetEnd() - lib.GetStart());
    }

    // |mapper| goes out of scope at this point and so its destructor
    // unmaps the object.
  }

#  else
#    error "Unknown platform"
#  endif
}
|
||||
|
||||
// LUL needs a callback for its logging sink.
// NOTE: the body is intentionally empty for now; the VERBOSE_LOG call below
// is disabled until the linking issue described in the FIXME is resolved,
// so |str| is currently discarded.
void logging_sink_for_LUL(const char* str) {
  // These are only printed when Verbose logging is enabled (e.g. with
  // MOZ_BASE_PROFILER_VERBOSE_LOGGING=1). This is because LUL's logging is
  // much more verbose than the rest of the profiler's logging, which occurs
  // at the Info (3) and Debug (4) levels.
  // FIXME: This causes a build failure in memory/replace/dmd/test/SmokeDMD
  // (!) and other places, because it doesn't link the implementation in
  // platform.cpp.
  // VERBOSE_LOG("[%d] %s", profiler_current_process_id(), str);
}
|
||||
|
||||
#endif // MOZ_BASE_PROFILER
|
|
@ -1,21 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef MOZ_PLATFORM_LINUX_LUL_H
|
||||
#define MOZ_PLATFORM_LINUX_LUL_H
|
||||
|
||||
#include "platform.h"
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
// Find out, in a platform-dependent way, where the code modules got
|
||||
// mapped in the process' virtual address space, and get |aLUL| to
|
||||
// load unwind info for them.
|
||||
void read_procmaps(lul::LUL* aLUL);
|
||||
|
||||
// LUL needs a callback for its logging sink.
|
||||
void logging_sink_for_LUL(const char* str);
|
||||
|
||||
#endif /* ndef MOZ_PLATFORM_LINUX_LUL_H */
|
|
@ -1,102 +0,0 @@
|
|||
# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
|
||||
# vim: set filetype=python:
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
# This is pretty much a copy from tools/profiler, cut down to exclude anything
|
||||
# that cannot work in mozglue (because they are totally dependent on libxul-
|
||||
# specific code).
|
||||
# All exported headers now prefixed with "Base" to avoid #include name clashes.
|
||||
|
||||
if CONFIG['MOZ_GECKO_PROFILER']:
    DEFINES['IMPL_MFBT'] = True
    EXPORTS += [
        'public/BaseProfileJSONWriter.h',
        'public/BaseProfilerMarkerPayload.h',
        'public/BaseProfilerSharedLibraries.h',
        'public/BaseProfilingCategory.h',
        'public/BaseProfilingStack.h',
    ]
    UNIFIED_SOURCES += [
        'core/PageInformation.cpp',
        'core/platform.cpp',
        'core/ProfileBuffer.cpp',
        'core/ProfileBufferEntry.cpp',
        'core/ProfiledThreadData.cpp',
        'core/ProfileJSONWriter.cpp',
        'core/ProfilerBacktrace.cpp',
        'core/ProfilerMarkerPayload.cpp',
        'core/ProfilingCategory.cpp',
        'core/ProfilingStack.cpp',
        'core/RegisteredThread.cpp',
    ]

    # Platform-specific sources: stack walking and shared-library
    # enumeration differ per OS / CPU architecture.
    if CONFIG['OS_TARGET'] in ('Android', 'Linux'):
        # LUL (the in-tree native unwinder) is only built for these
        # CPU architectures.
        if CONFIG['CPU_ARCH'] in ('arm', 'aarch64', 'x86', 'x86_64', 'mips64'):
            UNIFIED_SOURCES += [
                'lul/AutoObjectMapper.cpp',
                'lul/LulCommon.cpp',
                'lul/LulDwarf.cpp',
                'lul/LulDwarfSummariser.cpp',
                'lul/LulElf.cpp',
                'lul/LulMain.cpp',
                'lul/platform-linux-lul.cpp',
            ]
        # These files cannot be built in unified mode because of name clashes
        # with mozglue headers on Android.
        SOURCES += [
            'core/shared-libraries-linux.cc',
        ]
        if CONFIG['CPU_ARCH'] == 'arm':
            SOURCES += [
                'core/EHABIStackWalk.cpp',
            ]
    elif CONFIG['OS_TARGET'] == 'Darwin':
        UNIFIED_SOURCES += [
            'core/shared-libraries-macos.cc',
        ]
    elif CONFIG['OS_TARGET'] == 'WINNT':
        SOURCES += [
            'core/shared-libraries-win32.cc',
        ]

    LOCAL_INCLUDES += [
        '/mozglue/baseprofiler/core/',
        '/mozglue/linker',
    ]

    if CONFIG['OS_TARGET'] == 'Android':
        DEFINES['ANDROID_NDK_MAJOR_VERSION'] = CONFIG['ANDROID_NDK_MAJOR_VERSION']
        DEFINES['ANDROID_NDK_MINOR_VERSION'] = CONFIG['ANDROID_NDK_MINOR_VERSION']
        LOCAL_INCLUDES += [
            'lul',
        ]

    FINAL_LIBRARY = 'mozglue'

# BaseProfiler.h and BaseProfilerCounts.h are the only headers that are usable
# in non-MOZ_GECKO_PROFILER builds, and they only contain no-op macros in that
# case.
EXPORTS += [
    'public/BaseProfiler.h',
]

EXPORTS.mozilla += [
    'public/BaseProfilerCounts.h',
]

if CONFIG['MOZ_VTUNE']:
    DEFINES['MOZ_VTUNE_INSTRUMENTATION'] = True
    UNIFIED_SOURCES += [
        'core/VTuneProfiler.cpp',
    ]


if CONFIG['CC_TYPE'] in ('clang', 'gcc'):
    CXXFLAGS += [
        '-Wno-error=shadow',
        '-Wno-ignored-qualifiers', # due to use of breakpad headers
    ]

with Files('**'):
    BUG_COMPONENT = ('Core', 'Gecko Profiler')
|
|
@ -1,151 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef BASEPROFILEJSONWRITER_H
|
||||
#define BASEPROFILEJSONWRITER_H
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifndef MOZ_BASE_PROFILER
|
||||
# error Do not #include this header when MOZ_BASE_PROFILER is not #defined.
|
||||
#endif
|
||||
|
||||
#include "mozilla/JSONWriter.h"
|
||||
#include "mozilla/UniquePtr.h"
|
||||
|
||||
#include <functional>
|
||||
#include <ostream>
|
||||
#include <string>
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
class SpliceableJSONWriter;
|
||||
|
||||
// On average, profile JSONs are large enough such that we want to avoid
|
||||
// reallocating its buffer when expanding. Additionally, the contents of the
|
||||
// profile are not accessed until the profile is entirely written. For these
|
||||
// reasons we use a chunked writer that keeps an array of chunks, which is
|
||||
// concatenated together after writing is finished.
|
||||
class ChunkedJSONWriteFunc : public JSONWriteFunc {
|
||||
public:
|
||||
friend class SpliceableJSONWriter;
|
||||
|
||||
ChunkedJSONWriteFunc() : mChunkPtr{nullptr}, mChunkEnd{nullptr} {
|
||||
AllocChunk(kChunkSize);
|
||||
}
|
||||
|
||||
bool IsEmpty() const {
|
||||
MOZ_ASSERT_IF(!mChunkPtr, !mChunkEnd && mChunkList.length() == 0 &&
|
||||
mChunkLengths.length() == 0);
|
||||
return !mChunkPtr;
|
||||
}
|
||||
|
||||
void Write(const char* aStr) override;
|
||||
void CopyDataIntoLazilyAllocatedBuffer(
|
||||
const std::function<char*(size_t)>& aAllocator) const;
|
||||
UniquePtr<char[]> CopyData() const;
|
||||
void Take(ChunkedJSONWriteFunc&& aOther);
|
||||
// Returns the byte length of the complete combined string, including the
|
||||
// null terminator byte.
|
||||
size_t GetTotalLength() const;
|
||||
|
||||
private:
|
||||
void AllocChunk(size_t aChunkSize);
|
||||
|
||||
static const size_t kChunkSize = 4096 * 512;
|
||||
|
||||
// Pointer for writing inside the current chunk.
|
||||
//
|
||||
// The current chunk is always at the back of mChunkList, i.e.,
|
||||
// mChunkList.back() <= mChunkPtr <= mChunkEnd.
|
||||
char* mChunkPtr;
|
||||
|
||||
// Pointer to the end of the current chunk.
|
||||
//
|
||||
// The current chunk is always at the back of mChunkList, i.e.,
|
||||
// mChunkEnd >= mChunkList.back() + mChunkLengths.back().
|
||||
char* mChunkEnd;
|
||||
|
||||
// List of chunks and their lengths.
|
||||
//
|
||||
// For all i, the length of the string in mChunkList[i] is
|
||||
// mChunkLengths[i].
|
||||
Vector<UniquePtr<char[]>> mChunkList;
|
||||
Vector<size_t> mChunkLengths;
|
||||
};
|
||||
|
||||
struct OStreamJSONWriteFunc : public JSONWriteFunc {
|
||||
explicit OStreamJSONWriteFunc(std::ostream& aStream) : mStream(aStream) {}
|
||||
|
||||
void Write(const char* aStr) override { mStream << aStr; }
|
||||
|
||||
std::ostream& mStream;
|
||||
};
|
||||
|
||||
class SpliceableJSONWriter : public JSONWriter {
|
||||
public:
|
||||
explicit SpliceableJSONWriter(UniquePtr<JSONWriteFunc> aWriter)
|
||||
: JSONWriter(std::move(aWriter)) {}
|
||||
|
||||
void StartBareList(CollectionStyle aStyle = MultiLineStyle) {
|
||||
StartCollection(nullptr, "", aStyle);
|
||||
}
|
||||
|
||||
void EndBareList() { EndCollection(""); }
|
||||
|
||||
void NullElements(uint32_t aCount) {
|
||||
for (uint32_t i = 0; i < aCount; i++) {
|
||||
NullElement();
|
||||
}
|
||||
}
|
||||
|
||||
void Splice(const ChunkedJSONWriteFunc* aFunc);
|
||||
void Splice(const char* aStr);
|
||||
|
||||
// Splice the given JSON directly in, without quoting.
|
||||
void SplicedJSONProperty(const char* aMaybePropertyName,
|
||||
const char* aJsonValue) {
|
||||
Scalar(aMaybePropertyName, aJsonValue);
|
||||
}
|
||||
|
||||
// Takes the chunks from aFunc and write them. If move is not possible
|
||||
// (e.g., using OStreamJSONWriteFunc), aFunc's chunks are copied and its
|
||||
// storage cleared.
|
||||
virtual void TakeAndSplice(ChunkedJSONWriteFunc* aFunc);
|
||||
};
|
||||
|
||||
class SpliceableChunkedJSONWriter : public SpliceableJSONWriter {
|
||||
public:
|
||||
explicit SpliceableChunkedJSONWriter()
|
||||
: SpliceableJSONWriter(MakeUnique<ChunkedJSONWriteFunc>()) {}
|
||||
|
||||
ChunkedJSONWriteFunc* WriteFunc() const {
|
||||
return static_cast<ChunkedJSONWriteFunc*>(JSONWriter::WriteFunc());
|
||||
}
|
||||
|
||||
// Adopts the chunks from aFunc without copying.
|
||||
virtual void TakeAndSplice(ChunkedJSONWriteFunc* aFunc) override;
|
||||
};
|
||||
|
||||
class JSONSchemaWriter {
|
||||
JSONWriter& mWriter;
|
||||
uint32_t mIndex;
|
||||
|
||||
public:
|
||||
explicit JSONSchemaWriter(JSONWriter& aWriter) : mWriter(aWriter), mIndex(0) {
|
||||
aWriter.StartObjectProperty("schema",
|
||||
SpliceableJSONWriter::SingleLineStyle);
|
||||
}
|
||||
|
||||
void WriteField(const char* aName) { mWriter.IntProperty(aName, mIndex++); }
|
||||
|
||||
~JSONSchemaWriter() { mWriter.EndObject(); }
|
||||
};
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // BASEPROFILEJSONWRITER_H
|
|
@ -1,964 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
// The Gecko Profiler is an always-on profiler that takes fast and low overhead
|
||||
// samples of the program execution using only userspace functionality for
|
||||
// portability. The goal of this module is to provide performance data in a
|
||||
// generic cross-platform way without requiring custom tools or kernel support.
|
||||
//
|
||||
// Samples are collected to form a timeline with optional timeline event
|
||||
// (markers) used for filtering. The samples include both native stacks and
|
||||
// platform-independent "label stack" frames.
|
||||
|
||||
#ifndef BaseProfiler_h
|
||||
#define BaseProfiler_h
|
||||
|
||||
// Everything in here is also safe to include unconditionally, and only defines
|
||||
// empty macros if MOZ_GECKO_PROFILER or MOZ_BASE_PROFILER is unset.
|
||||
|
||||
// MOZ_BASE_PROFILER is #defined (or not) in this header, so it should be
|
||||
// #included wherever Base Profiler may be used.
|
||||
|
||||
#ifdef MOZ_GECKO_PROFILER
|
||||
// Enable Base Profiler on Mac and Non-Android Linux, which are supported.
|
||||
// (Android not implemented yet. Windows not working yet when packaged.)
|
||||
# if defined(XP_MACOSX) || (defined(XP_LINUX) && !defined(ANDROID))
|
||||
# define MOZ_BASE_PROFILER
|
||||
# else
|
||||
// Other platforms are currently not supported. But you may uncomment the
|
||||
// following line to enable Base Profiler in your build.
|
||||
//# define MOZ_BASE_PROFILER
|
||||
# endif
|
||||
#endif // MOZ_GECKO_PROFILER
|
||||
|
||||
// BaseProfilerCounts.h is also safe to include unconditionally, with empty
|
||||
// macros if MOZ_BASE_PROFILER is unset.
|
||||
#include "mozilla/BaseProfilerCounts.h"
|
||||
|
||||
#ifndef MOZ_BASE_PROFILER
|
||||
|
||||
// This file can be #included unconditionally. However, everything within this
|
||||
// file must be guarded by a #ifdef MOZ_BASE_PROFILER, *except* for the
|
||||
// following macros, which encapsulate the most common operations and thus
|
||||
// avoid the need for many #ifdefs.
|
||||
|
||||
# define AUTO_BASE_PROFILER_INIT
|
||||
|
||||
# define BASE_PROFILER_REGISTER_THREAD(name)
|
||||
# define BASE_PROFILER_UNREGISTER_THREAD()
|
||||
# define AUTO_BASE_PROFILER_REGISTER_THREAD(name)
|
||||
|
||||
# define AUTO_BASE_PROFILER_THREAD_SLEEP
|
||||
# define AUTO_BASE_PROFILER_THREAD_WAKE
|
||||
|
||||
# define AUTO_BASE_PROFILER_LABEL(label, categoryPair)
|
||||
# define AUTO_BASE_PROFILER_LABEL_CATEGORY_PAIR(categoryPair)
|
||||
# define AUTO_BASE_PROFILER_LABEL_DYNAMIC_CSTR(label, categoryPair, cStr)
|
||||
# define AUTO_BASE_PROFILER_LABEL_DYNAMIC_STRING(label, categoryPair, str)
|
||||
# define AUTO_BASE_PROFILER_LABEL_FAST(label, categoryPair, ctx)
|
||||
# define AUTO_BASE_PROFILER_LABEL_DYNAMIC_FAST(label, dynamicString, \
|
||||
categoryPair, ctx, flags)
|
||||
|
||||
# define BASE_PROFILER_ADD_MARKER(markerName, categoryPair)
|
||||
|
||||
# define MOZDECLARE_DOCSHELL_AND_HISTORY_ID(docShell)
|
||||
# define BASE_PROFILER_TRACING(categoryString, markerName, categoryPair, kind)
|
||||
# define BASE_PROFILER_TRACING_DOCSHELL(categoryString, markerName, \
|
||||
categoryPair, kind, docshell)
|
||||
# define AUTO_BASE_PROFILER_TRACING(categoryString, markerName, categoryPair)
|
||||
# define AUTO_BASE_PROFILER_TRACING_DOCSHELL(categoryString, markerName, \
|
||||
categoryPair, docShell)
|
||||
# define AUTO_BASE_PROFILER_TEXT_MARKER_CAUSE(markerName, text, categoryPair, \
|
||||
cause)
|
||||
# define AUTO_BASE_PROFILER_TEXT_MARKER_DOCSHELL(markerName, text, \
|
||||
categoryPair, docShell)
|
||||
# define AUTO_BASE_PROFILER_TEXT_MARKER_DOCSHELL_CAUSE( \
|
||||
markerName, text, categoryPair, docShell, cause)
|
||||
|
||||
#else // !MOZ_BASE_PROFILER
|
||||
|
||||
# include "BaseProfilingStack.h"
|
||||
|
||||
# include "mozilla/Assertions.h"
|
||||
# include "mozilla/Atomics.h"
|
||||
# include "mozilla/Attributes.h"
|
||||
# include "mozilla/GuardObjects.h"
|
||||
# include "mozilla/Maybe.h"
|
||||
# include "mozilla/Sprintf.h"
|
||||
# include "mozilla/ThreadLocal.h"
|
||||
# include "mozilla/TimeStamp.h"
|
||||
# include "mozilla/UniquePtr.h"
|
||||
|
||||
# include <stdint.h>
|
||||
# include <string>
|
||||
|
||||
namespace mozilla {
|
||||
|
||||
class MallocAllocPolicy;
|
||||
template <class T, size_t MinInlineCapacity, class AllocPolicy>
|
||||
class Vector;
|
||||
|
||||
namespace baseprofiler {
|
||||
|
||||
class ProfilerBacktrace;
|
||||
class ProfilerMarkerPayload;
|
||||
class SpliceableJSONWriter;
|
||||
|
||||
// Macros used by the AUTO_PROFILER_* macros below.
|
||||
# define BASE_PROFILER_RAII_PASTE(id, line) id##line
|
||||
# define BASE_PROFILER_RAII_EXPAND(id, line) BASE_PROFILER_RAII_PASTE(id, line)
|
||||
# define BASE_PROFILER_RAII BASE_PROFILER_RAII_EXPAND(raiiObject, __LINE__)
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
// Profiler features
|
||||
//---------------------------------------------------------------------------
|
||||
|
||||
// Higher-order macro containing all the feature info in one place. Define
|
||||
// |MACRO| appropriately to extract the relevant parts. Note that the number
|
||||
// values are used internally only and so can be changed without consequence.
|
||||
// Any changes to this list should also be applied to the feature list in
|
||||
// toolkit/components/extensions/schemas/geckoProfiler.json.
|
||||
# define BASE_PROFILER_FOR_EACH_FEATURE(MACRO) \
|
||||
MACRO(0, "java", Java, "Profile Java code, Android only") \
|
||||
\
|
||||
MACRO(1, "js", JS, \
|
||||
"Get the JS engine to expose the JS stack to the profiler") \
|
||||
\
|
||||
/* The DevTools profiler doesn't want the native addresses. */ \
|
||||
MACRO(2, "leaf", Leaf, "Include the C++ leaf node if not stackwalking") \
|
||||
\
|
||||
MACRO(3, "mainthreadio", MainThreadIO, \
|
||||
"Add main thread I/O to the profile") \
|
||||
\
|
||||
MACRO(4, "memory", Memory, "Add memory measurements") \
|
||||
\
|
||||
MACRO(5, "privacy", Privacy, \
|
||||
"Do not include user-identifiable information") \
|
||||
\
|
||||
MACRO(6, "responsiveness", Responsiveness, \
|
||||
"Collect thread responsiveness information") \
|
||||
\
|
||||
MACRO(7, "screenshots", Screenshots, \
|
||||
"Take a snapshot of the window on every composition") \
|
||||
\
|
||||
MACRO(8, "seqstyle", SequentialStyle, \
|
||||
"Disable parallel traversal in styling") \
|
||||
\
|
||||
MACRO(9, "stackwalk", StackWalk, \
|
||||
"Walk the C++ stack, not available on all platforms") \
|
||||
\
|
||||
MACRO(10, "tasktracer", TaskTracer, \
|
||||
"Start profiling with feature TaskTracer") \
|
||||
\
|
||||
MACRO(11, "threads", Threads, "Profile the registered secondary threads") \
|
||||
\
|
||||
MACRO(12, "trackopts", TrackOptimizations, \
|
||||
"Have the JavaScript engine track JIT optimizations") \
|
||||
\
|
||||
MACRO(13, "jstracer", JSTracer, "Enable tracing of the JavaScript engine")
|
||||
|
||||
struct ProfilerFeature {
|
||||
# define DECLARE(n_, str_, Name_, desc_) \
|
||||
static constexpr uint32_t Name_ = (1u << n_); \
|
||||
static constexpr bool Has##Name_(uint32_t aFeatures) { \
|
||||
return aFeatures & Name_; \
|
||||
} \
|
||||
static constexpr void Set##Name_(uint32_t& aFeatures) { \
|
||||
aFeatures |= Name_; \
|
||||
} \
|
||||
static constexpr void Clear##Name_(uint32_t& aFeatures) { \
|
||||
aFeatures &= ~Name_; \
|
||||
}
|
||||
|
||||
// Define a bitfield constant, a getter, and two setters for each feature.
|
||||
BASE_PROFILER_FOR_EACH_FEATURE(DECLARE)
|
||||
|
||||
# undef DECLARE
|
||||
};
|
||||
|
||||
namespace detail {
|
||||
|
||||
// RacyFeatures is only defined in this header file so that its methods can
|
||||
// be inlined into profiler_is_active(). Please do not use anything from the
|
||||
// detail namespace outside the profiler.
|
||||
|
||||
// Within the profiler's code, the preferred way to check profiler activeness
|
||||
// and features is via ActivePS(). However, that requires locking gPSMutex.
|
||||
// There are some hot operations where absolute precision isn't required, so we
|
||||
// duplicate the activeness/feature state in a lock-free manner in this class.
|
||||
class RacyFeatures {
|
||||
public:
|
||||
MFBT_API static void SetActive(uint32_t aFeatures);
|
||||
|
||||
MFBT_API static void SetInactive();
|
||||
|
||||
MFBT_API static bool IsActive();
|
||||
|
||||
MFBT_API static bool IsActiveWithFeature(uint32_t aFeature);
|
||||
|
||||
MFBT_API static bool IsActiveWithoutPrivacy();
|
||||
|
||||
private:
|
||||
static const uint32_t Active = 1u << 31;
|
||||
|
||||
// Ensure Active doesn't overlap with any of the feature bits.
|
||||
# define NO_OVERLAP(n_, str_, Name_, desc_) \
|
||||
static_assert(ProfilerFeature::Name_ != Active, "bad Active value");
|
||||
|
||||
BASE_PROFILER_FOR_EACH_FEATURE(NO_OVERLAP);
|
||||
|
||||
# undef NO_OVERLAP
|
||||
|
||||
// We combine the active bit with the feature bits so they can be read or
|
||||
// written in a single atomic operation. Accesses to this atomic are not
|
||||
// recorded by web replay as they may occur at non-deterministic points.
|
||||
// TODO: Could this be MFBT_DATA for better inlining optimization?
|
||||
static Atomic<uint32_t, MemoryOrdering::Relaxed,
|
||||
recordreplay::Behavior::DontPreserve>
|
||||
sActiveAndFeatures;
|
||||
};
|
||||
|
||||
MFBT_API bool IsThreadBeingProfiled();
|
||||
|
||||
} // namespace detail
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
// Start and stop the profiler
|
||||
//---------------------------------------------------------------------------
|
||||
|
||||
static constexpr uint32_t BASE_PROFILER_DEFAULT_ENTRIES =
|
||||
# if !defined(ARCH_ARMV6)
|
||||
1u << 20; // 1'048'576
|
||||
# else
|
||||
1u << 17; // 131'072
|
||||
# endif
|
||||
|
||||
// Startup profiling usually need to capture more data, especially on slow
|
||||
// systems.
|
||||
static constexpr uint32_t BASE_PROFILER_DEFAULT_STARTUP_ENTRIES =
|
||||
# if !defined(ARCH_ARMV6)
|
||||
1u << 22; // 4'194'304
|
||||
# else
|
||||
1u << 17; // 131'072
|
||||
# endif
|
||||
|
||||
# define BASE_PROFILER_DEFAULT_DURATION 20
|
||||
# define BASE_PROFILER_DEFAULT_INTERVAL 1
|
||||
|
||||
// Initialize the profiler. If MOZ_BASE_PROFILER_STARTUP is set the profiler
|
||||
// will also be started. This call must happen before any other profiler calls
|
||||
// (except profiler_start(), which will call profiler_init() if it hasn't
|
||||
// already run).
|
||||
MFBT_API void profiler_init(void* stackTop);
|
||||
|
||||
# define AUTO_BASE_PROFILER_INIT \
|
||||
::mozilla::baseprofiler::AutoProfilerInit BASE_PROFILER_RAII
|
||||
|
||||
// Clean up the profiler module, stopping it if required. This function may
|
||||
// also save a shutdown profile if requested. No profiler calls should happen
|
||||
// after this point and all profiling stack labels should have been popped.
|
||||
MFBT_API void profiler_shutdown();
|
||||
|
||||
// Start the profiler -- initializing it first if necessary -- with the
|
||||
// selected options. Stops and restarts the profiler if it is already active.
|
||||
// After starting the profiler is "active". The samples will be recorded in a
|
||||
// circular buffer.
|
||||
// "aCapacity" is the maximum number of entries in the profiler's circular
|
||||
// buffer.
|
||||
// "aInterval" the sampling interval, measured in millseconds.
|
||||
// "aFeatures" is the feature set. Features unsupported by this
|
||||
// platform/configuration are ignored.
|
||||
// "aFilters" is the list of thread filters. Threads that do not match any
|
||||
// of the filters are not profiled. A filter matches a thread if
|
||||
// (a) the thread name contains the filter as a case-insensitive
|
||||
// substring, or
|
||||
// (b) the filter is of the form "pid:<n>" where n is the process
|
||||
// id of the process that the thread is running in.
|
||||
// "aDuration" is the duration of entries in the profiler's circular buffer.
|
||||
MFBT_API void profiler_start(uint32_t aCapacity, double aInterval,
|
||||
uint32_t aFeatures, const char** aFilters,
|
||||
uint32_t aFilterCount,
|
||||
const Maybe<double>& aDuration = Nothing());
|
||||
|
||||
// Stop the profiler and discard the profile without saving it. A no-op if the
|
||||
// profiler is inactive. After stopping the profiler is "inactive".
|
||||
MFBT_API void profiler_stop();
|
||||
|
||||
// If the profiler is inactive, start it. If it's already active, restart it if
|
||||
// the requested settings differ from the current settings. Both the check and
|
||||
// the state change are performed while the profiler state is locked.
|
||||
// The only difference to profiler_start is that the current buffer contents are
|
||||
// not discarded if the profiler is already running with the requested settings.
|
||||
MFBT_API void profiler_ensure_started(
|
||||
uint32_t aCapacity, double aInterval, uint32_t aFeatures,
|
||||
const char** aFilters, uint32_t aFilterCount,
|
||||
const Maybe<double>& aDuration = Nothing());
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
// Control the profiler
|
||||
//---------------------------------------------------------------------------
|
||||
|
||||
// Register/unregister threads with the profiler. Both functions operate the
|
||||
// same whether the profiler is active or inactive.
|
||||
# define BASE_PROFILER_REGISTER_THREAD(name) \
|
||||
do { \
|
||||
char stackTop; \
|
||||
::mozilla::baseprofiler::profiler_register_thread(name, &stackTop); \
|
||||
} while (0)
|
||||
# define BASE_PROFILER_UNREGISTER_THREAD() \
|
||||
::mozilla::baseprofiler::profiler_unregister_thread()
|
||||
MFBT_API ProfilingStack* profiler_register_thread(const char* name,
|
||||
void* guessStackTop);
|
||||
MFBT_API void profiler_unregister_thread();
|
||||
|
||||
// Register pages with the profiler.
|
||||
//
|
||||
// The `page` means every new history entry for docShells.
|
||||
// DocShellId + HistoryID is a unique pair to identify these pages.
|
||||
// We also keep these pairs inside markers to associate with the pages.
|
||||
// That allows us to see which markers belong to a specific page and filter the
|
||||
// markers by a page.
|
||||
// We register pages in these cases:
|
||||
// - If there is a navigation through a link or URL bar.
|
||||
// - If there is a navigation through `location.replace` or `history.pushState`.
|
||||
// We do not register pages in these cases:
|
||||
// - If there is a history navigation through the back and forward buttons.
|
||||
// - If there is a navigation through `history.replaceState` or anchor scrolls.
|
||||
//
|
||||
// "aDocShellId" is the ID of the docShell that page belongs to.
|
||||
// "aHistoryId" is the ID of the history entry on the given docShell.
|
||||
// "aUrl" is the URL of the page.
|
||||
// "aIsSubFrame" is true if the page is a sub frame.
|
||||
MFBT_API void profiler_register_page(const std::string& aDocShellId,
|
||||
uint32_t aHistoryId,
|
||||
const std::string& aUrl, bool aIsSubFrame);
|
||||
// Unregister pages with the profiler.
|
||||
//
|
||||
// Take a docShellId and unregister all the page entries that have the given ID.
|
||||
MFBT_API void profiler_unregister_pages(
|
||||
const std::string& aRegisteredDocShellId);
|
||||
|
||||
// Remove all registered and unregistered pages in the profiler.
|
||||
void profiler_clear_all_pages();
|
||||
|
||||
class BaseProfilerCount;
|
||||
MFBT_API void profiler_add_sampled_counter(BaseProfilerCount* aCounter);
|
||||
MFBT_API void profiler_remove_sampled_counter(BaseProfilerCount* aCounter);
|
||||
|
||||
// Register and unregister a thread within a scope.
|
||||
# define AUTO_BASE_PROFILER_REGISTER_THREAD(name) \
|
||||
::mozilla::baseprofiler::AutoProfilerRegisterThread BASE_PROFILER_RAII(name)
|
||||
|
||||
// Pause and resume the profiler. No-ops if the profiler is inactive. While
|
||||
// paused the profile will not take any samples and will not record any data
|
||||
// into its buffers. The profiler remains fully initialized in this state.
|
||||
// Timeline markers will still be stored. This feature will keep JavaScript
|
||||
// profiling enabled, thus allowing toggling the profiler without invalidating
|
||||
// the JIT.
|
||||
MFBT_API void profiler_pause();
|
||||
MFBT_API void profiler_resume();
|
||||
|
||||
// These functions tell the profiler that a thread went to sleep so that we can
|
||||
// avoid sampling it while it's sleeping. Calling profiler_thread_sleep()
|
||||
// twice without an intervening profiler_thread_wake() is an error. All three
|
||||
// functions operate the same whether the profiler is active or inactive.
|
||||
MFBT_API void profiler_thread_sleep();
|
||||
MFBT_API void profiler_thread_wake();
|
||||
|
||||
// Mark a thread as asleep/awake within a scope.
|
||||
# define AUTO_BASE_PROFILER_THREAD_SLEEP \
|
||||
::mozilla::baseprofiler::AutoProfilerThreadSleep BASE_PROFILER_RAII
|
||||
# define AUTO_BASE_PROFILER_THREAD_WAKE \
|
||||
::mozilla::baseprofiler::AutoProfilerThreadWake BASE_PROFILER_RAII
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
// Get information from the profiler
|
||||
//---------------------------------------------------------------------------
|
||||
|
||||
// Is the profiler active? Note: the return value of this function can become
|
||||
// immediately out-of-date. E.g. the profile might be active but then
|
||||
// profiler_stop() is called immediately afterward. One common and reasonable
|
||||
// pattern of usage is the following:
|
||||
//
|
||||
// if (profiler_is_active()) {
|
||||
// ExpensiveData expensiveData = CreateExpensiveData();
|
||||
// PROFILER_OPERATION(expensiveData);
|
||||
// }
|
||||
//
|
||||
// where PROFILER_OPERATION is a no-op if the profiler is inactive. In this
|
||||
// case the profiler_is_active() check is just an optimization -- it prevents
|
||||
// us calling CreateExpensiveData() unnecessarily in most cases, but the
|
||||
// expensive data will end up being created but not used if another thread
|
||||
// stops the profiler between the CreateExpensiveData() and PROFILER_OPERATION
|
||||
// calls.
|
||||
inline bool profiler_is_active() {
|
||||
return baseprofiler::detail::RacyFeatures::IsActive();
|
||||
}
|
||||
|
||||
// Is the profiler active, and is the current thread being profiled?
|
||||
// (Same caveats and recommented usage as profiler_is_active().)
|
||||
inline bool profiler_thread_is_being_profiled() {
|
||||
return profiler_is_active() && baseprofiler::detail::IsThreadBeingProfiled();
|
||||
}
|
||||
|
||||
// Is the profiler active and paused? Returns false if the profiler is inactive.
|
||||
MFBT_API bool profiler_is_paused();
|
||||
|
||||
// Is the current thread sleeping?
|
||||
MFBT_API bool profiler_thread_is_sleeping();
|
||||
|
||||
// Get all the features supported by the profiler that are accepted by
|
||||
// profiler_start(). The result is the same whether the profiler is active or
|
||||
// not.
|
||||
MFBT_API uint32_t profiler_get_available_features();
|
||||
|
||||
// Check if a profiler feature (specified via the ProfilerFeature type) is
|
||||
// active. Returns false if the profiler is inactive. Note: the return value
|
||||
// can become immediately out-of-date, much like the return value of
|
||||
// profiler_is_active().
|
||||
MFBT_API bool profiler_feature_active(uint32_t aFeature);
|
||||
|
||||
// Get the params used to start the profiler. Returns 0 and an empty vector
|
||||
// (via outparams) if the profile is inactive. It's possible that the features
|
||||
// returned may be slightly different to those requested due to required
|
||||
// adjustments.
|
||||
MFBT_API void profiler_get_start_params(
|
||||
int* aEntrySize, Maybe<double>* aDuration, double* aInterval,
|
||||
uint32_t* aFeatures, Vector<const char*, 0, MallocAllocPolicy>* aFilters);
|
||||
|
||||
// The number of milliseconds since the process started. Operates the same
|
||||
// whether the profiler is active or inactive.
|
||||
MFBT_API double profiler_time();
|
||||
|
||||
// Get the current process's ID.
|
||||
MFBT_API int profiler_current_process_id();
|
||||
|
||||
// Get the current thread's ID.
|
||||
MFBT_API int profiler_current_thread_id();
|
||||
|
||||
// An object of this class is passed to profiler_suspend_and_sample_thread().
|
||||
// For each stack frame, one of the Collect methods will be called.
|
||||
class ProfilerStackCollector {
|
||||
public:
|
||||
// Some collectors need to worry about possibly overwriting previous
|
||||
// generations of data. If that's not an issue, this can return Nothing,
|
||||
// which is the default behaviour.
|
||||
virtual Maybe<uint64_t> SamplePositionInBuffer() { return Nothing(); }
|
||||
virtual Maybe<uint64_t> BufferRangeStart() { return Nothing(); }
|
||||
|
||||
// This method will be called once if the thread being suspended is the main
|
||||
// thread. Default behaviour is to do nothing.
|
||||
virtual void SetIsMainThread() {}
|
||||
|
||||
// WARNING: The target thread is suspended when the Collect methods are
|
||||
// called. Do not try to allocate or acquire any locks, or you could
|
||||
// deadlock. The target thread will have resumed by the time this function
|
||||
// returns.
|
||||
|
||||
virtual void CollectNativeLeafAddr(void* aAddr) = 0;
|
||||
|
||||
virtual void CollectProfilingStackFrame(
|
||||
const ProfilingStackFrame& aFrame) = 0;
|
||||
};
|
||||
|
||||
// This method suspends the thread identified by aThreadId, samples its
|
||||
// profiling stack, JS stack, and (optionally) native stack, passing the
|
||||
// collected frames into aCollector. aFeatures dictates which compiler features
|
||||
// are used. |Privacy| and |Leaf| are the only relevant ones.
|
||||
MFBT_API void profiler_suspend_and_sample_thread(
|
||||
int aThreadId, uint32_t aFeatures, ProfilerStackCollector& aCollector,
|
||||
bool aSampleNative = true);
|
||||
|
||||
struct ProfilerBacktraceDestructor {
|
||||
MFBT_API void operator()(ProfilerBacktrace*);
|
||||
};
|
||||
|
||||
using UniqueProfilerBacktrace =
|
||||
UniquePtr<ProfilerBacktrace, ProfilerBacktraceDestructor>;
|
||||
|
||||
// Immediately capture the current thread's call stack and return it. A no-op
|
||||
// if the profiler is inactive or in privacy mode.
|
||||
MFBT_API UniqueProfilerBacktrace profiler_get_backtrace();
|
||||
|
||||
struct ProfilerBufferInfo {
|
||||
uint64_t mRangeStart;
|
||||
uint64_t mRangeEnd;
|
||||
uint32_t mEntryCount;
|
||||
};
|
||||
|
||||
// Get information about the current buffer status.
|
||||
// Returns Nothing() if the profiler is inactive.
|
||||
//
|
||||
// This information may be useful to a user-interface displaying the current
|
||||
// status of the profiler, allowing the user to get a sense for how fast the
|
||||
// buffer is being written to, and how much data is visible.
|
||||
MFBT_API Maybe<ProfilerBufferInfo> profiler_get_buffer_info();
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
// Put profiling data into the profiler (labels and markers)
|
||||
//---------------------------------------------------------------------------
|
||||
|
||||
// Insert an RAII object in this scope to enter a label stack frame. Any
|
||||
// samples collected in this scope will contain this label in their stack.
|
||||
// The label argument must be a static C string. It is usually of the
|
||||
// form "ClassName::FunctionName". (Ideally we'd use the compiler to provide
|
||||
// that for us, but __func__ gives us the function name without the class
|
||||
// name.) If the label applies to only part of a function, you can qualify it
|
||||
// like this: "ClassName::FunctionName:PartName".
|
||||
//
|
||||
// Use AUTO_BASE_PROFILER_LABEL_DYNAMIC_* if you want to add additional /
|
||||
// dynamic information to the label stack frame.
|
||||
# define AUTO_BASE_PROFILER_LABEL(label, categoryPair) \
|
||||
::mozilla::baseprofiler::AutoProfilerLabel BASE_PROFILER_RAII( \
|
||||
label, nullptr, \
|
||||
::mozilla::baseprofiler::ProfilingCategoryPair::categoryPair)
|
||||
|
||||
// Similar to AUTO_BASE_PROFILER_LABEL, but with only one argument: the category
|
||||
// pair. The label string is taken from the category pair. This is convenient
|
||||
// for labels like
|
||||
// AUTO_BASE_PROFILER_LABEL_CATEGORY_PAIR(GRAPHICS_LayerBuilding) which would
|
||||
// otherwise just repeat the string.
|
||||
# define AUTO_BASE_PROFILER_LABEL_CATEGORY_PAIR(categoryPair) \
|
||||
::mozilla::baseprofiler::AutoProfilerLabel BASE_PROFILER_RAII( \
|
||||
"", nullptr, \
|
||||
::mozilla::baseprofiler::ProfilingCategoryPair::categoryPair, \
|
||||
uint32_t(::mozilla::baseprofiler::ProfilingStackFrame::Flags:: \
|
||||
LABEL_DETERMINED_BY_CATEGORY_PAIR))
|
||||
|
||||
// Similar to AUTO_BASE_PROFILER_LABEL, but with an additional string. The
|
||||
// inserted RAII object stores the cStr pointer in a field; it does not copy the
|
||||
// string.
|
||||
//
|
||||
// WARNING: This means that the string you pass to this macro needs to live at
|
||||
// least until the end of the current scope. Be careful using this macro with
|
||||
// ns[C]String; the other AUTO_BASE_PROFILER_LABEL_DYNAMIC_* macros below are
|
||||
// preferred because they avoid this problem.
|
||||
//
|
||||
// If the profiler samples the current thread and walks the label stack while
|
||||
// this RAII object is on the stack, it will copy the supplied string into the
|
||||
// profile buffer. So there's one string copy operation, and it happens at
|
||||
// sample time.
|
||||
//
|
||||
// Compare this to the plain AUTO_BASE_PROFILER_LABEL macro, which only accepts
|
||||
// literal strings: When the label stack frames generated by
|
||||
// AUTO_BASE_PROFILER_LABEL are sampled, no string copy needs to be made because
|
||||
// the profile buffer can just store the raw pointers to the literal strings.
|
||||
// Consequently, AUTO_BASE_PROFILER_LABEL frames take up considerably less space
|
||||
// in the profile buffer than AUTO_BASE_PROFILER_LABEL_DYNAMIC_* frames.
|
||||
# define AUTO_BASE_PROFILER_LABEL_DYNAMIC_CSTR(label, categoryPair, cStr) \
|
||||
::mozilla::baseprofiler::AutoProfilerLabel BASE_PROFILER_RAII( \
|
||||
label, cStr, \
|
||||
::mozilla::baseprofiler::ProfilingCategoryPair::categoryPair)
|
||||
|
||||
// Similar to AUTO_BASE_PROFILER_LABEL_DYNAMIC_CSTR, but takes an std::string.
|
||||
//
|
||||
// Note: The use of the Maybe<>s ensures the scopes for the dynamic string and
|
||||
// the AutoProfilerLabel are appropriate, while also not incurring the runtime
|
||||
// cost of the string assignment unless the profiler is active. Therefore,
|
||||
// unlike AUTO_BASE_PROFILER_LABEL and AUTO_BASE_PROFILER_LABEL_DYNAMIC_CSTR,
|
||||
// this macro doesn't push/pop a label when the profiler is inactive.
|
||||
# define AUTO_BASE_PROFILER_LABEL_DYNAMIC_STRING(label, categoryPair, str) \
|
||||
Maybe<std::string> autoStr; \
|
||||
Maybe<::mozilla::baseprofiler::AutoProfilerLabel> raiiObjectString; \
|
||||
if (::mozilla::baseprofiler::profiler_is_active()) { \
|
||||
autoStr.emplace(str); \
|
||||
raiiObjectString.emplace( \
|
||||
label, autoStr->c_str(), \
|
||||
::mozilla::baseprofiler::ProfilingCategoryPair::categoryPair); \
|
||||
}
|
||||
|
||||
// Similar to AUTO_BASE_PROFILER_LABEL, but accepting a JSContext* parameter,
|
||||
// and a no-op if the profiler is disabled. Used to annotate functions for which
|
||||
// overhead in the range of nanoseconds is noticeable. It avoids overhead from
|
||||
// the TLS lookup because it can get the ProfilingStack from the JS context, and
|
||||
// avoids almost all overhead in the case where the profiler is disabled.
|
||||
# define AUTO_BASE_PROFILER_LABEL_FAST(label, categoryPair, ctx) \
|
||||
::mozilla::baseprofiler::AutoProfilerLabel BASE_PROFILER_RAII( \
|
||||
ctx, label, nullptr, \
|
||||
::mozilla::baseprofiler::ProfilingCategoryPair::categoryPair)
|
||||
|
||||
// Similar to AUTO_BASE_PROFILER_LABEL_FAST, but also takes an extra string and
|
||||
// an additional set of flags. The flags parameter should carry values from the
|
||||
// ProfilingStackFrame::Flags enum.
|
||||
# define AUTO_BASE_PROFILER_LABEL_DYNAMIC_FAST(label, dynamicString, \
|
||||
categoryPair, ctx, flags) \
|
||||
::mozilla::baseprofiler::AutoProfilerLabel BASE_PROFILER_RAII( \
|
||||
ctx, label, dynamicString, \
|
||||
::mozilla::baseprofiler::ProfilingCategoryPair::categoryPair, flags)
|
||||
|
||||
// Insert a marker in the profile timeline. This is useful to delimit something
|
||||
// important happening such as the first paint. Unlike labels, which are only
|
||||
// recorded in the profile buffer if a sample is collected while the label is
|
||||
// on the label stack, markers will always be recorded in the profile buffer.
|
||||
// aMarkerName is copied, so the caller does not need to ensure it lives for a
|
||||
// certain length of time. A no-op if the profiler is inactive or in privacy
|
||||
// mode.
|
||||
|
||||
# define BASE_PROFILER_ADD_MARKER(markerName, categoryPair) \
|
||||
::mozilla::baseprofiler::profiler_add_marker( \
|
||||
markerName, \
|
||||
::mozilla::baseprofiler::ProfilingCategoryPair::categoryPair)
|
||||
|
||||
MFBT_API void profiler_add_marker(const char* aMarkerName,
|
||||
ProfilingCategoryPair aCategoryPair);
|
||||
MFBT_API void profiler_add_marker(const char* aMarkerName,
|
||||
ProfilingCategoryPair aCategoryPair,
|
||||
UniquePtr<ProfilerMarkerPayload> aPayload);
|
||||
MFBT_API void profiler_add_js_marker(const char* aMarkerName);
|
||||
|
||||
// Insert a marker in the profile timeline for a specified thread.
|
||||
MFBT_API void profiler_add_marker_for_thread(
|
||||
int aThreadId, ProfilingCategoryPair aCategoryPair, const char* aMarkerName,
|
||||
UniquePtr<ProfilerMarkerPayload> aPayload);
|
||||
|
||||
enum TracingKind {
|
||||
TRACING_EVENT,
|
||||
TRACING_INTERVAL_START,
|
||||
TRACING_INTERVAL_END,
|
||||
};
|
||||
|
||||
// Helper macro to retrieve DocShellId and DocShellHistoryId from docShell
|
||||
# define MOZDECLARE_DOCSHELL_AND_HISTORY_ID(docShell) \
|
||||
Maybe<std::string> docShellId; \
|
||||
Maybe<uint32_t> docShellHistoryId; \
|
||||
if (docShell) { \
|
||||
docShellId = mozilla::Some(docShell->HistoryID()); \
|
||||
uint32_t id; \
|
||||
nsresult rv = docShell->GetOSHEId(&id); \
|
||||
if (NS_SUCCEEDED(rv)) { \
|
||||
docShellHistoryId = mozilla::Some(id); \
|
||||
} else { \
|
||||
docShellHistoryId = mozilla::Nothing(); \
|
||||
} \
|
||||
} else { \
|
||||
docShellId = mozilla::Nothing(); \
|
||||
docShellHistoryId = mozilla::Nothing(); \
|
||||
}
|
||||
|
||||
// Adds a tracing marker to the profile. A no-op if the profiler is inactive or
|
||||
// in privacy mode.
|
||||
|
||||
# define BASE_PROFILER_TRACING(categoryString, markerName, categoryPair, \
|
||||
kind) \
|
||||
::mozilla::baseprofiler::profiler_tracing( \
|
||||
categoryString, markerName, \
|
||||
::mozilla::baseprofiler::ProfilingCategoryPair::categoryPair, kind)
|
||||
# define BASE_PROFILER_TRACING_DOCSHELL(categoryString, markerName, \
|
||||
categoryPair, kind, docShell) \
|
||||
MOZDECLARE_DOCSHELL_AND_HISTORY_ID(docShell); \
|
||||
::mozilla::baseprofiler::profiler_tracing( \
|
||||
categoryString, markerName, \
|
||||
::mozilla::baseprofiler::ProfilingCategoryPair::categoryPair, kind, \
|
||||
docShellId, docShellHistoryId)
|
||||
|
||||
MFBT_API void profiler_tracing(
|
||||
const char* aCategoryString, const char* aMarkerName,
|
||||
ProfilingCategoryPair aCategoryPair, TracingKind aKind,
|
||||
const Maybe<std::string>& aDocShellId = Nothing(),
|
||||
const Maybe<uint32_t>& aDocShellHistoryId = Nothing());
|
||||
MFBT_API void profiler_tracing(
|
||||
const char* aCategoryString, const char* aMarkerName,
|
||||
ProfilingCategoryPair aCategoryPair, TracingKind aKind,
|
||||
UniqueProfilerBacktrace aCause,
|
||||
const Maybe<std::string>& aDocShellId = Nothing(),
|
||||
const Maybe<uint32_t>& aDocShellHistoryId = Nothing());
|
||||
|
||||
// Adds a START/END pair of tracing markers.
|
||||
# define AUTO_BASE_PROFILER_TRACING(categoryString, markerName, categoryPair) \
|
||||
::mozilla::baseprofiler::AutoProfilerTracing BASE_PROFILER_RAII( \
|
||||
categoryString, markerName, \
|
||||
::mozilla::baseprofiler::ProfilingCategoryPair::categoryPair, \
|
||||
Nothing(), Nothing())
|
||||
# define AUTO_BASE_PROFILER_TRACING_DOCSHELL(categoryString, markerName, \
|
||||
categoryPair, docShell) \
|
||||
MOZDECLARE_DOCSHELL_AND_HISTORY_ID(docShell); \
|
||||
::mozilla::baseprofiler::AutoProfilerTracing BASE_PROFILER_RAII( \
|
||||
categoryString, markerName, \
|
||||
::mozilla::baseprofiler::ProfilingCategoryPair::categoryPair, \
|
||||
docShellId, docShellHistoryId)
|
||||
|
||||
// Add a text marker. Text markers are similar to tracing markers, with the
|
||||
// difference that text markers have their "text" separate from the marker name;
|
||||
// multiple text markers with the same name can have different text, and these
|
||||
// markers will still be displayed in the same "row" in the UI.
|
||||
// Another difference is that text markers combine the start and end markers
|
||||
// into one marker.
|
||||
MFBT_API void profiler_add_text_marker(
|
||||
const char* aMarkerName, const std::string& aText,
|
||||
ProfilingCategoryPair aCategoryPair, const TimeStamp& aStartTime,
|
||||
const TimeStamp& aEndTime,
|
||||
const Maybe<std::string>& aDocShellId = Nothing(),
|
||||
const Maybe<uint32_t>& aDocShellHistoryId = Nothing(),
|
||||
UniqueProfilerBacktrace aCause = nullptr);
|
||||
|
||||
class MOZ_RAII AutoProfilerTextMarker {
|
||||
public:
|
||||
AutoProfilerTextMarker(const char* aMarkerName, const std::string& aText,
|
||||
ProfilingCategoryPair aCategoryPair,
|
||||
const Maybe<std::string>& aDocShellId,
|
||||
const Maybe<uint32_t>& aDocShellHistoryId,
|
||||
UniqueProfilerBacktrace&& aCause =
|
||||
nullptr MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
|
||||
: mMarkerName(aMarkerName),
|
||||
mText(aText),
|
||||
mCategoryPair(aCategoryPair),
|
||||
mStartTime(TimeStamp::Now()),
|
||||
mCause(std::move(aCause)),
|
||||
mDocShellId(aDocShellId),
|
||||
mDocShellHistoryId(aDocShellHistoryId) {
|
||||
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
|
||||
}
|
||||
|
||||
~AutoProfilerTextMarker() {
|
||||
profiler_add_text_marker(mMarkerName, mText, mCategoryPair, mStartTime,
|
||||
TimeStamp::Now(), mDocShellId, mDocShellHistoryId,
|
||||
std::move(mCause));
|
||||
}
|
||||
|
||||
protected:
|
||||
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
|
||||
const char* mMarkerName;
|
||||
std::string mText;
|
||||
const ProfilingCategoryPair mCategoryPair;
|
||||
TimeStamp mStartTime;
|
||||
UniqueProfilerBacktrace mCause;
|
||||
const Maybe<std::string> mDocShellId;
|
||||
const Maybe<uint32_t> mDocShellHistoryId;
|
||||
};
|
||||
|
||||
# define AUTO_BASE_PROFILER_TEXT_MARKER_CAUSE(markerName, text, categoryPair, \
|
||||
cause) \
|
||||
::mozilla::baseprofiler::AutoProfilerTextMarker BASE_PROFILER_RAII( \
|
||||
markerName, text, \
|
||||
::mozilla::baseprofiler::ProfilingCategoryPair::categoryPair, \
|
||||
mozilla::Nothing(), mozilla::Nothing(), cause)
|
||||
|
||||
# define AUTO_BASE_PROFILER_TEXT_MARKER_DOCSHELL(markerName, text, \
|
||||
categoryPair, docShell) \
|
||||
MOZDECLARE_DOCSHELL_AND_HISTORY_ID(docShell); \
|
||||
::mozilla::baseprofiler::AutoProfilerTextMarker BASE_PROFILER_RAII( \
|
||||
markerName, text, \
|
||||
::mozilla::baseprofiler::ProfilingCategoryPair::categoryPair, \
|
||||
docShellId, docShellHistoryId)
|
||||
|
||||
# define AUTO_BASE_PROFILER_TEXT_MARKER_DOCSHELL_CAUSE( \
|
||||
markerName, text, categoryPair, docShell, cause) \
|
||||
MOZDECLARE_DOCSHELL_AND_HISTORY_ID(docShell); \
|
||||
::mozilla::baseprofiler::AutoProfilerTextMarker BASE_PROFILER_RAII( \
|
||||
markerName, text, \
|
||||
::mozilla::baseprofiler::ProfilingCategoryPair::categoryPair, \
|
||||
docShellId, docShellHistoryId, cause)
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
// Output profiles
|
||||
//---------------------------------------------------------------------------
|
||||
|
||||
// Set a user-friendly process name, used in JSON stream.
|
||||
MFBT_API void profiler_set_process_name(const std::string& aProcessName);
|
||||
|
||||
// Get the profile encoded as a JSON string. A no-op (returning nullptr) if the
|
||||
// profiler is inactive.
|
||||
// If aIsShuttingDown is true, the current time is included as the process
|
||||
// shutdown time in the JSON's "meta" object.
|
||||
MFBT_API UniquePtr<char[]> profiler_get_profile(double aSinceTime = 0,
|
||||
bool aIsShuttingDown = false,
|
||||
bool aOnlyThreads = false);
|
||||
|
||||
// Write the profile for this process (excluding subprocesses) into aWriter.
|
||||
// Returns false if the profiler is inactive.
|
||||
MFBT_API bool profiler_stream_json_for_this_process(
|
||||
SpliceableJSONWriter& aWriter, double aSinceTime = 0,
|
||||
bool aIsShuttingDown = false, bool aOnlyThreads = false);
|
||||
|
||||
// Get the profile and write it into a file. A no-op if the profile is
|
||||
// inactive.
|
||||
MFBT_API void profiler_save_profile_to_file(const char* aFilename);
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
// RAII classes
|
||||
//---------------------------------------------------------------------------
|
||||
|
||||
class MOZ_RAII AutoProfilerInit {
|
||||
public:
|
||||
explicit AutoProfilerInit(MOZ_GUARD_OBJECT_NOTIFIER_ONLY_PARAM) {
|
||||
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
|
||||
profiler_init(this);
|
||||
}
|
||||
|
||||
~AutoProfilerInit() { profiler_shutdown(); }
|
||||
|
||||
private:
|
||||
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
|
||||
};
|
||||
|
||||
// Convenience class to register and unregister a thread with the profiler.
|
||||
// Needs to be the first object on the stack of the thread.
|
||||
class MOZ_RAII AutoProfilerRegisterThread final {
|
||||
public:
|
||||
explicit AutoProfilerRegisterThread(
|
||||
const char* aName MOZ_GUARD_OBJECT_NOTIFIER_PARAM) {
|
||||
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
|
||||
profiler_register_thread(aName, this);
|
||||
}
|
||||
|
||||
~AutoProfilerRegisterThread() { profiler_unregister_thread(); }
|
||||
|
||||
private:
|
||||
AutoProfilerRegisterThread(const AutoProfilerRegisterThread&) = delete;
|
||||
AutoProfilerRegisterThread& operator=(const AutoProfilerRegisterThread&) =
|
||||
delete;
|
||||
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
|
||||
};
|
||||
|
||||
class MOZ_RAII AutoProfilerThreadSleep {
|
||||
public:
|
||||
explicit AutoProfilerThreadSleep(MOZ_GUARD_OBJECT_NOTIFIER_ONLY_PARAM) {
|
||||
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
|
||||
profiler_thread_sleep();
|
||||
}
|
||||
|
||||
~AutoProfilerThreadSleep() { profiler_thread_wake(); }
|
||||
|
||||
private:
|
||||
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
|
||||
};
|
||||
|
||||
// Temporarily wake up the profiling of a thread while servicing events such as
|
||||
// Asynchronous Procedure Calls (APCs).
|
||||
class MOZ_RAII AutoProfilerThreadWake {
|
||||
public:
|
||||
explicit AutoProfilerThreadWake(MOZ_GUARD_OBJECT_NOTIFIER_ONLY_PARAM)
|
||||
: mIssuedWake(profiler_thread_is_sleeping()) {
|
||||
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
|
||||
if (mIssuedWake) {
|
||||
profiler_thread_wake();
|
||||
}
|
||||
}
|
||||
|
||||
~AutoProfilerThreadWake() {
|
||||
if (mIssuedWake) {
|
||||
MOZ_ASSERT(!profiler_thread_is_sleeping());
|
||||
profiler_thread_sleep();
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
|
||||
bool mIssuedWake;
|
||||
};
|
||||
|
||||
// This class creates a non-owning ProfilingStack reference. Objects of this
|
||||
// class are stack-allocated, and so exist within a thread, and are thus bounded
|
||||
// by the lifetime of the thread, which ensures that the references held can't
|
||||
// be used after the ProfilingStack is destroyed.
|
||||
class MOZ_RAII AutoProfilerLabel {
|
||||
public:
|
||||
// This is the AUTO_BASE_PROFILER_LABEL and AUTO_BASE_PROFILER_LABEL_DYNAMIC
|
||||
// variant.
|
||||
AutoProfilerLabel(const char* aLabel, const char* aDynamicString,
|
||||
ProfilingCategoryPair aCategoryPair,
|
||||
uint32_t aFlags = 0 MOZ_GUARD_OBJECT_NOTIFIER_PARAM) {
|
||||
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
|
||||
|
||||
// Get the ProfilingStack from TLS.
|
||||
Push(GetProfilingStack(), aLabel, aDynamicString, aCategoryPair, aFlags);
|
||||
}
|
||||
|
||||
void Push(ProfilingStack* aProfilingStack, const char* aLabel,
|
||||
const char* aDynamicString, ProfilingCategoryPair aCategoryPair,
|
||||
uint32_t aFlags = 0) {
|
||||
// This function runs both on and off the main thread.
|
||||
|
||||
mProfilingStack = aProfilingStack;
|
||||
if (mProfilingStack) {
|
||||
mProfilingStack->pushLabelFrame(aLabel, aDynamicString, this,
|
||||
aCategoryPair, aFlags);
|
||||
}
|
||||
}
|
||||
|
||||
~AutoProfilerLabel() {
|
||||
// This function runs both on and off the main thread.
|
||||
|
||||
if (mProfilingStack) {
|
||||
mProfilingStack->pop();
|
||||
}
|
||||
}
|
||||
|
||||
MFBT_API static ProfilingStack* GetProfilingStack();
|
||||
|
||||
private:
|
||||
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
|
||||
|
||||
// We save a ProfilingStack pointer in the ctor so we don't have to redo the
|
||||
// TLS lookup in the dtor.
|
||||
ProfilingStack* mProfilingStack;
|
||||
|
||||
public:
|
||||
// See the comment on the definition in platform.cpp for details about this.
|
||||
static MOZ_THREAD_LOCAL(ProfilingStack*) sProfilingStack;
|
||||
};
|
||||
|
||||
class MOZ_RAII AutoProfilerTracing {
|
||||
public:
|
||||
AutoProfilerTracing(const char* aCategoryString, const char* aMarkerName,
|
||||
ProfilingCategoryPair aCategoryPair,
|
||||
const Maybe<std::string>& aDocShellId,
|
||||
const Maybe<uint32_t>& aDocShellHistoryId
|
||||
MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
|
||||
: mCategoryString(aCategoryString),
|
||||
mMarkerName(aMarkerName),
|
||||
mCategoryPair(aCategoryPair),
|
||||
mDocShellId(aDocShellId),
|
||||
mDocShellHistoryId(aDocShellHistoryId) {
|
||||
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
|
||||
profiler_tracing(mCategoryString, mMarkerName, aCategoryPair,
|
||||
TRACING_INTERVAL_START, mDocShellId, mDocShellHistoryId);
|
||||
}
|
||||
|
||||
AutoProfilerTracing(
|
||||
const char* aCategoryString, const char* aMarkerName,
|
||||
ProfilingCategoryPair aCategoryPair, UniqueProfilerBacktrace aBacktrace,
|
||||
const Maybe<std::string>& aDocShellId,
|
||||
const Maybe<uint32_t>& aDocShellHistoryId MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
|
||||
: mCategoryString(aCategoryString),
|
||||
mMarkerName(aMarkerName),
|
||||
mCategoryPair(aCategoryPair),
|
||||
mDocShellId(aDocShellId),
|
||||
mDocShellHistoryId(aDocShellHistoryId) {
|
||||
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
|
||||
profiler_tracing(mCategoryString, mMarkerName, aCategoryPair,
|
||||
TRACING_INTERVAL_START, std::move(aBacktrace), mDocShellId,
|
||||
mDocShellHistoryId);
|
||||
}
|
||||
|
||||
~AutoProfilerTracing() {
|
||||
profiler_tracing(mCategoryString, mMarkerName, mCategoryPair,
|
||||
TRACING_INTERVAL_END, mDocShellId, mDocShellHistoryId);
|
||||
}
|
||||
|
||||
protected:
|
||||
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
|
||||
const char* mCategoryString;
|
||||
const char* mMarkerName;
|
||||
const ProfilingCategoryPair mCategoryPair;
|
||||
const Maybe<std::string> mDocShellId;
|
||||
const Maybe<uint32_t> mDocShellHistoryId;
|
||||
};
|
||||
|
||||
// Get the MOZ_BASE_PROFILER_STARTUP* environment variables that should be
|
||||
// supplied to a child process that is about to be launched, in order
|
||||
// to make that child process start with the same profiler settings as
|
||||
// in the current process. The given function is invoked once for
|
||||
// each variable to be set.
|
||||
MFBT_API void GetProfilerEnvVarsForChildProcess(
|
||||
std::function<void(const char* key, const char* value)>&& aSetEnv);
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // !MOZ_BASE_PROFILER
|
||||
|
||||
#endif // BaseProfiler_h
|
|
@ -1,280 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef BaseProfilerCounts_h
|
||||
#define BaseProfilerCounts_h
|
||||
|
||||
#ifndef MOZ_BASE_PROFILER
|
||||
|
||||
# define BASE_PROFILER_DEFINE_COUNT_TOTAL(label, category, description)
|
||||
# define BASE_PROFILER_DEFINE_COUNT(label, category, description)
|
||||
# define BASE_PROFILER_DEFINE_STATIC_COUNT_TOTAL(label, category, description)
|
||||
# define AUTO_BASE_PROFILER_COUNT_TOTAL(label, count)
|
||||
# define AUTO_BASE_PROFILER_COUNT(label)
|
||||
# define AUTO_BASE_PROFILER_STATIC_COUNT(label, count)
|
||||
# define AUTO_BASE_PROFILER_FORCE_ALLOCATION(label)
|
||||
|
||||
#else
|
||||
|
||||
# include "mozilla/Atomics.h"
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
class BaseProfilerCount;
|
||||
MFBT_API void profiler_add_sampled_counter(BaseProfilerCount* aCounter);
|
||||
MFBT_API void profiler_remove_sampled_counter(BaseProfilerCount* aCounter);
|
||||
|
||||
typedef Atomic<int64_t, MemoryOrdering::Relaxed> ProfilerAtomicSigned;
|
||||
typedef Atomic<uint64_t, MemoryOrdering::Relaxed> ProfilerAtomicUnsigned;
|
||||
|
||||
// Counter support
|
||||
// There are two types of counters:
|
||||
// 1) a simple counter which can be added to or subtracted from. This could
|
||||
// track the number of objects of a type, the number of calls to something
|
||||
// (reflow, JIT, etc).
|
||||
// 2) a combined counter which has the above, plus a number-of-calls counter
|
||||
// that is incremented by 1 for each call to modify the count. This provides
|
||||
// an optional source for a 'heatmap' of access. This can be used (for
|
||||
// example) to track the amount of memory allocated, and provide a heatmap of
|
||||
// memory operations (allocs/frees).
|
||||
//
|
||||
// Counters are sampled by the profiler once per sample-period. At this time,
|
||||
// all counters are global to the process. In the future, there might be more
|
||||
// versions with per-thread or other discriminators.
|
||||
//
|
||||
// Typical usage:
|
||||
// There are two ways to use counters: With heap-created counter objects,
|
||||
// or using macros. Note: the macros use statics, and will be slightly
|
||||
// faster/smaller, and you need to care about creating them before using
|
||||
// them. They're similar to the use-pattern for the other AUTO_PROFILER*
|
||||
// macros, but they do need the PROFILER_DEFINE* to be use to instantiate
|
||||
// the statics.
|
||||
//
|
||||
// PROFILER_DEFINE_COUNT(mything, "JIT", "Some JIT byte count")
|
||||
// ...
|
||||
// void foo() { ... AUTO_PROFILER_COUNT(mything, number_of_bytes_used); ... }
|
||||
//
|
||||
// or (to also get a heatmap)
|
||||
//
|
||||
// PROFILER_DEFINE_COUNT_TOTAL(mything, "JIT", "Some JIT byte count")
|
||||
// ...
|
||||
// void foo() {
|
||||
// ...
|
||||
// AUTO_PROFILER_COUNT_TOTAL(mything, number_of_bytes_generated);
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// To use without statics/macros:
|
||||
//
|
||||
// UniquePtr<ProfilerCounter> myCounter;
|
||||
// ...
|
||||
// myCounter =
|
||||
// MakeUnique<ProfilerCounter>("mything", "JIT", "Some JIT byte count"));
|
||||
// ...
|
||||
// void foo() { ... myCounter->Add(number_of_bytes_generated0; ... }
|
||||
|
||||
class BaseProfilerCount {
|
||||
public:
|
||||
BaseProfilerCount(const char* aLabel, ProfilerAtomicSigned* aCounter,
|
||||
ProfilerAtomicUnsigned* aNumber, const char* aCategory,
|
||||
const char* aDescription)
|
||||
: mLabel(aLabel),
|
||||
mCategory(aCategory),
|
||||
mDescription(aDescription),
|
||||
mCounter(aCounter),
|
||||
mNumber(aNumber) {
|
||||
# define COUNTER_CANARY 0xDEADBEEF
|
||||
# ifdef DEBUG
|
||||
mCanary = COUNTER_CANARY;
|
||||
mPrevNumber = 0;
|
||||
# endif
|
||||
// Can't call profiler_* here since this may be non-xul-library
|
||||
}
|
||||
# ifdef DEBUG
|
||||
~BaseProfilerCount() { mCanary = 0; }
|
||||
# endif
|
||||
|
||||
void Sample(int64_t& aCounter, uint64_t& aNumber) {
|
||||
MOZ_ASSERT(mCanary == COUNTER_CANARY);
|
||||
|
||||
aCounter = *mCounter;
|
||||
aNumber = mNumber ? *mNumber : 0;
|
||||
# ifdef DEBUG
|
||||
MOZ_ASSERT(aNumber >= mPrevNumber);
|
||||
mPrevNumber = aNumber;
|
||||
# endif
|
||||
}
|
||||
|
||||
// We don't define ++ and Add() here, since the static defines directly
|
||||
// increment the atomic counters, and the subclasses implement ++ and
|
||||
// Add() directly.
|
||||
|
||||
// These typically are static strings (for example if you use the macros
|
||||
// below)
|
||||
const char* mLabel;
|
||||
const char* mCategory;
|
||||
const char* mDescription;
|
||||
// We're ok with these being un-ordered in race conditions. These are
|
||||
// pointers because we want to be able to use statics and increment them
|
||||
// directly. Otherwise we could just have them inline, and not need the
|
||||
// constructor args.
|
||||
// These can be static globals (using the macros below), though they
|
||||
// don't have to be - their lifetime must be longer than the use of them
|
||||
// by the profiler (see profiler_add/remove_sampled_counter()). If you're
|
||||
// using a lot of these, they probably should be allocated at runtime (see
|
||||
// class ProfilerCountOnly below).
|
||||
ProfilerAtomicSigned* mCounter;
|
||||
ProfilerAtomicUnsigned* mNumber; // may be null
|
||||
|
||||
# ifdef DEBUG
|
||||
uint32_t mCanary;
|
||||
uint64_t mPrevNumber; // value of number from the last Sample()
|
||||
# endif
|
||||
};
|
||||
|
||||
// Designed to be allocated dynamically, and simply incremented with obj++
|
||||
// or obj->Add(n)
|
||||
class ProfilerCounter final : public BaseProfilerCount {
|
||||
public:
|
||||
ProfilerCounter(const char* aLabel, const char* aCategory,
|
||||
const char* aDescription)
|
||||
: BaseProfilerCount(aLabel, &mCounter, nullptr, aCategory, aDescription) {
|
||||
// Assume we're in libxul
|
||||
profiler_add_sampled_counter(this);
|
||||
}
|
||||
|
||||
virtual ~ProfilerCounter() { profiler_remove_sampled_counter(this); }
|
||||
|
||||
BaseProfilerCount& operator++() {
|
||||
Add(1);
|
||||
return *this;
|
||||
}
|
||||
|
||||
void Add(int64_t aNumber) { mCounter += aNumber; }
|
||||
|
||||
ProfilerAtomicSigned mCounter;
|
||||
};
|
||||
|
||||
// Also keeps a heatmap (number of calls to ++/Add())
|
||||
class ProfilerCounterTotal final : public BaseProfilerCount {
|
||||
public:
|
||||
ProfilerCounterTotal(const char* aLabel, const char* aCategory,
|
||||
const char* aDescription)
|
||||
: BaseProfilerCount(aLabel, &mCounter, &mNumber, aCategory,
|
||||
aDescription) {
|
||||
// Assume we're in libxul
|
||||
profiler_add_sampled_counter(this);
|
||||
}
|
||||
|
||||
virtual ~ProfilerCounterTotal() { profiler_remove_sampled_counter(this); }
|
||||
|
||||
BaseProfilerCount& operator++() {
|
||||
Add(1);
|
||||
return *this;
|
||||
}
|
||||
|
||||
void Add(int64_t aNumber) {
|
||||
mCounter += aNumber;
|
||||
mNumber++;
|
||||
}
|
||||
|
||||
ProfilerAtomicSigned mCounter;
|
||||
ProfilerAtomicUnsigned mNumber;
|
||||
};
|
||||
|
||||
// Defines a counter that is sampled on each profiler tick, with a running
|
||||
// count (signed), and number-of-instances. Note that because these are two
|
||||
// independent Atomics, there is a possiblity that count will not include
|
||||
// the last call, but number of uses will. I think this is not worth
|
||||
// worrying about
|
||||
# define BASE_PROFILER_DEFINE_COUNT_TOTAL(label, category, description) \
|
||||
ProfilerAtomicSigned profiler_count_##label(0); \
|
||||
ProfilerAtomicUnsigned profiler_number_##label(0); \
|
||||
const char profiler_category_##label[] = category; \
|
||||
const char profiler_description_##label[] = description; \
|
||||
UniquePtr<::mozilla::baseprofiler::BaseProfilerCount> AutoCount_##label;
|
||||
|
||||
// This counts, but doesn't keep track of the number of calls to
|
||||
// AUTO_PROFILER_COUNT()
|
||||
# define BASE_PROFILER_DEFINE_COUNT(label, category, description) \
|
||||
ProfilerAtomicSigned profiler_count_##label(0); \
|
||||
const char profiler_category_##label[] = category; \
|
||||
const char profiler_description_##label[] = description; \
|
||||
UniquePtr<::mozilla::baseprofiler::BaseProfilerCount> AutoCount_##label;
|
||||
|
||||
// This will create a static initializer if used, but avoids a possible
|
||||
// allocation.
|
||||
# define BASE_PROFILER_DEFINE_STATIC_COUNT_TOTAL(label, category, \
|
||||
description) \
|
||||
ProfilerAtomicSigned profiler_count_##label(0); \
|
||||
ProfilerAtomicUnsigned profiler_number_##label(0); \
|
||||
::mozilla::baseprofiler::BaseProfilerCount AutoCount_##label( \
|
||||
#label, &profiler_count_##label, &profiler_number_##label, category, \
|
||||
description);
|
||||
|
||||
// If we didn't care about static initializers, we could avoid the need for
|
||||
// a ptr to the BaseProfilerCount object.
|
||||
|
||||
// XXX It would be better to do this without the if() and without the
|
||||
// theoretical race to set the UniquePtr (i.e. possible leak).
|
||||
# define AUTO_BASE_PROFILER_COUNT_TOTAL(label, count) \
|
||||
do { \
|
||||
profiler_number_##label++; /* do this first*/ \
|
||||
profiler_count_##label += count; \
|
||||
if (!AutoCount_##label) { \
|
||||
/* Ignore that we could call this twice in theory, and that we leak \
|
||||
* them \
|
||||
*/ \
|
||||
AutoCount_##label.reset(new BaseProfilerCount( \
|
||||
#label, &profiler_count_##label, &profiler_number_##label, \
|
||||
profiler_category_##label, profiler_description_##label)); \
|
||||
::mozilla::baseprofiler::profiler_add_sampled_counter( \
|
||||
AutoCount_##label.get()); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
# define AUTO_BASE_PROFILER_COUNT(label, count) \
|
||||
do { \
|
||||
profiler_count_##label += count; /* do this first*/ \
|
||||
if (!AutoCount_##label) { \
|
||||
/* Ignore that we could call this twice in theory, and that we leak \
|
||||
* them \
|
||||
*/ \
|
||||
AutoCount_##label.reset(new BaseProfilerCount( \
|
||||
#label, nullptr, &profiler_number_##label, \
|
||||
profiler_category_##label, profiler_description_##label)); \
|
||||
::mozilla::baseprofiler::profiler_add_sampled_counter( \
|
||||
AutoCount_##label.get()); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
# define AUTO_BASE_PROFILER_STATIC_COUNT(label, count) \
|
||||
do { \
|
||||
profiler_number_##label++; /* do this first*/ \
|
||||
profiler_count_##label += count; \
|
||||
} while (0)
|
||||
|
||||
// if we need to force the allocation
|
||||
# define AUTO_BASE_PROFILER_FORCE_ALLOCATION(label) \
|
||||
do { \
|
||||
if (!AutoCount_##label) { \
|
||||
/* Ignore that we could call this twice in theory, and that we leak \
|
||||
* them \
|
||||
*/ \
|
||||
AutoCount_##label.reset( \
|
||||
new ::mozilla::baseprofiler::BaseProfilerCount( \
|
||||
#label, &profiler_count_##label, &profiler_number_##label, \
|
||||
profiler_category_##label, profiler_description_##label)); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // !MOZ_BASE_PROFILER
|
||||
|
||||
#endif // BaseProfilerCounts_h
|
|
@ -1,238 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef BaseProfilerMarkerPayload_h
|
||||
#define BaseProfilerMarkerPayload_h
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifndef MOZ_BASE_PROFILER
|
||||
# error Do not #include this header when MOZ_BASE_PROFILER is not #defined.
|
||||
#endif
|
||||
|
||||
#include "mozilla/Attributes.h"
|
||||
#include "mozilla/Maybe.h"
|
||||
#include "mozilla/RefPtr.h"
|
||||
#include "mozilla/TimeStamp.h"
|
||||
#include "mozilla/UniquePtr.h"
|
||||
#include "mozilla/UniquePtrExtensions.h"
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
class SpliceableJSONWriter;
|
||||
class UniqueStacks;
|
||||
|
||||
// This is an abstract class that can be implemented to supply data to be
|
||||
// attached with a profiler marker.
|
||||
//
|
||||
// When subclassing this, note that the destructor can be called on any thread,
|
||||
// i.e. not necessarily on the thread that created the object.
|
||||
class ProfilerMarkerPayload {
|
||||
public:
|
||||
explicit ProfilerMarkerPayload(
|
||||
const Maybe<std::string>& aDocShellId = Nothing(),
|
||||
const Maybe<uint32_t>& aDocShellHistoryId = Nothing(),
|
||||
UniqueProfilerBacktrace aStack = nullptr)
|
||||
: mStack(std::move(aStack)),
|
||||
mDocShellId(aDocShellId),
|
||||
mDocShellHistoryId(aDocShellHistoryId) {}
|
||||
|
||||
ProfilerMarkerPayload(const TimeStamp& aStartTime, const TimeStamp& aEndTime,
|
||||
const Maybe<std::string>& aDocShellId = Nothing(),
|
||||
const Maybe<uint32_t>& aDocShellHistoryId = Nothing(),
|
||||
UniqueProfilerBacktrace aStack = nullptr)
|
||||
: mStartTime(aStartTime),
|
||||
mEndTime(aEndTime),
|
||||
mStack(std::move(aStack)),
|
||||
mDocShellId(aDocShellId),
|
||||
mDocShellHistoryId(aDocShellHistoryId) {}
|
||||
|
||||
virtual ~ProfilerMarkerPayload() {}
|
||||
|
||||
virtual void StreamPayload(SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
UniqueStacks& aUniqueStacks) = 0;
|
||||
|
||||
TimeStamp GetStartTime() const { return mStartTime; }
|
||||
|
||||
protected:
|
||||
MFBT_API void StreamType(const char* aMarkerType,
|
||||
SpliceableJSONWriter& aWriter);
|
||||
MFBT_API void StreamCommonProps(const char* aMarkerType,
|
||||
SpliceableJSONWriter& aWriter,
|
||||
const TimeStamp& aProcessStartTime,
|
||||
UniqueStacks& aUniqueStacks);
|
||||
|
||||
void SetStack(UniqueProfilerBacktrace aStack) { mStack = std::move(aStack); }
|
||||
|
||||
void SetDocShellHistoryId(const Maybe<uint32_t>& aDocShellHistoryId) {
|
||||
mDocShellHistoryId = aDocShellHistoryId;
|
||||
}
|
||||
|
||||
void SetDocShellId(const Maybe<std::string>& aDocShellId) {
|
||||
mDocShellId = aDocShellId;
|
||||
}
|
||||
|
||||
private:
|
||||
TimeStamp mStartTime;
|
||||
TimeStamp mEndTime;
|
||||
UniqueProfilerBacktrace mStack;
|
||||
Maybe<std::string> mDocShellId;
|
||||
Maybe<uint32_t> mDocShellHistoryId;
|
||||
};
|
||||
|
||||
#define DECL_BASE_STREAM_PAYLOAD \
|
||||
virtual void StreamPayload( \
|
||||
::mozilla::baseprofiler::SpliceableJSONWriter& aWriter, \
|
||||
const ::mozilla::TimeStamp& aProcessStartTime, \
|
||||
::mozilla::baseprofiler::UniqueStacks& aUniqueStacks) override;
|
||||
|
||||
// TODO: Increase the coverage of tracing markers that include DocShell
|
||||
// information
|
||||
class TracingMarkerPayload : public ProfilerMarkerPayload {
|
||||
public:
|
||||
TracingMarkerPayload(const char* aCategory, TracingKind aKind,
|
||||
const Maybe<std::string>& aDocShellId = Nothing(),
|
||||
const Maybe<uint32_t>& aDocShellHistoryId = Nothing(),
|
||||
UniqueProfilerBacktrace aCause = nullptr)
|
||||
: mCategory(aCategory), mKind(aKind) {
|
||||
if (aCause) {
|
||||
SetStack(std::move(aCause));
|
||||
}
|
||||
SetDocShellId(aDocShellId);
|
||||
SetDocShellHistoryId(aDocShellHistoryId);
|
||||
}
|
||||
|
||||
DECL_BASE_STREAM_PAYLOAD
|
||||
|
||||
private:
|
||||
const char* mCategory;
|
||||
TracingKind mKind;
|
||||
};
|
||||
|
||||
class FileIOMarkerPayload : public ProfilerMarkerPayload {
|
||||
public:
|
||||
FileIOMarkerPayload(const char* aOperation, const char* aSource,
|
||||
const char* aFilename, const TimeStamp& aStartTime,
|
||||
const TimeStamp& aEndTime, UniqueProfilerBacktrace aStack)
|
||||
: ProfilerMarkerPayload(aStartTime, aEndTime, Nothing(), Nothing(),
|
||||
std::move(aStack)),
|
||||
mSource(aSource),
|
||||
mOperation(aOperation ? strdup(aOperation) : nullptr),
|
||||
mFilename(aFilename ? strdup(aFilename) : nullptr) {
|
||||
MOZ_ASSERT(aSource);
|
||||
}
|
||||
|
||||
DECL_BASE_STREAM_PAYLOAD
|
||||
|
||||
private:
|
||||
const char* mSource;
|
||||
UniqueFreePtr<char> mOperation;
|
||||
UniqueFreePtr<char> mFilename;
|
||||
};
|
||||
|
||||
class UserTimingMarkerPayload : public ProfilerMarkerPayload {
|
||||
public:
|
||||
UserTimingMarkerPayload(const std::string& aName, const TimeStamp& aStartTime,
|
||||
const Maybe<std::string>& aDocShellId,
|
||||
const Maybe<uint32_t>& aDocShellHistoryId)
|
||||
: ProfilerMarkerPayload(aStartTime, aStartTime, aDocShellId,
|
||||
aDocShellHistoryId),
|
||||
mEntryType("mark"),
|
||||
mName(aName) {}
|
||||
|
||||
UserTimingMarkerPayload(const std::string& aName,
|
||||
const Maybe<std::string>& aStartMark,
|
||||
const Maybe<std::string>& aEndMark,
|
||||
const TimeStamp& aStartTime,
|
||||
const TimeStamp& aEndTime,
|
||||
const Maybe<std::string>& aDocShellId,
|
||||
const Maybe<uint32_t>& aDocShellHistoryId)
|
||||
: ProfilerMarkerPayload(aStartTime, aEndTime, aDocShellId,
|
||||
aDocShellHistoryId),
|
||||
mEntryType("measure"),
|
||||
mName(aName),
|
||||
mStartMark(aStartMark),
|
||||
mEndMark(aEndMark) {}
|
||||
|
||||
DECL_BASE_STREAM_PAYLOAD
|
||||
|
||||
private:
|
||||
// Either "mark" or "measure".
|
||||
const char* mEntryType;
|
||||
std::string mName;
|
||||
Maybe<std::string> mStartMark;
|
||||
Maybe<std::string> mEndMark;
|
||||
};
|
||||
|
||||
class HangMarkerPayload : public ProfilerMarkerPayload {
|
||||
public:
|
||||
HangMarkerPayload(const TimeStamp& aStartTime, const TimeStamp& aEndTime)
|
||||
: ProfilerMarkerPayload(aStartTime, aEndTime) {}
|
||||
|
||||
DECL_BASE_STREAM_PAYLOAD
|
||||
private:
|
||||
};
|
||||
|
||||
class LongTaskMarkerPayload : public ProfilerMarkerPayload {
|
||||
public:
|
||||
LongTaskMarkerPayload(const TimeStamp& aStartTime, const TimeStamp& aEndTime)
|
||||
: ProfilerMarkerPayload(aStartTime, aEndTime) {}
|
||||
|
||||
DECL_BASE_STREAM_PAYLOAD
|
||||
};
|
||||
|
||||
class TextMarkerPayload : public ProfilerMarkerPayload {
|
||||
public:
|
||||
TextMarkerPayload(const std::string& aText, const TimeStamp& aStartTime)
|
||||
: ProfilerMarkerPayload(aStartTime, aStartTime), mText(aText) {}
|
||||
|
||||
TextMarkerPayload(const std::string& aText, const TimeStamp& aStartTime,
|
||||
const TimeStamp& aEndTime)
|
||||
: ProfilerMarkerPayload(aStartTime, aEndTime), mText(aText) {}
|
||||
|
||||
TextMarkerPayload(const std::string& aText, const TimeStamp& aStartTime,
|
||||
const Maybe<std::string>& aDocShellId,
|
||||
const Maybe<uint32_t>& aDocShellHistoryId)
|
||||
: ProfilerMarkerPayload(aStartTime, aStartTime, aDocShellId,
|
||||
aDocShellHistoryId),
|
||||
mText(aText) {}
|
||||
|
||||
TextMarkerPayload(const std::string& aText, const TimeStamp& aStartTime,
|
||||
const TimeStamp& aEndTime,
|
||||
const Maybe<std::string>& aDocShellId,
|
||||
const Maybe<uint32_t>& aDocShellHistoryId,
|
||||
UniqueProfilerBacktrace aCause = nullptr)
|
||||
: ProfilerMarkerPayload(aStartTime, aEndTime, aDocShellId,
|
||||
aDocShellHistoryId, std::move(aCause)),
|
||||
mText(aText) {}
|
||||
|
||||
DECL_BASE_STREAM_PAYLOAD
|
||||
|
||||
private:
|
||||
std::string mText;
|
||||
};
|
||||
|
||||
class LogMarkerPayload : public ProfilerMarkerPayload {
|
||||
public:
|
||||
LogMarkerPayload(const char* aModule, const char* aText,
|
||||
const TimeStamp& aStartTime)
|
||||
: ProfilerMarkerPayload(aStartTime, aStartTime),
|
||||
mModule(aModule),
|
||||
mText(aText) {}
|
||||
|
||||
DECL_BASE_STREAM_PAYLOAD
|
||||
|
||||
private:
|
||||
std::string mModule; // longest known LazyLogModule name is ~24
|
||||
std::string mText;
|
||||
};
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // BaseProfilerMarkerPayload_h
|
|
@ -1,146 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim:set ts=2 sw=2 sts=2 et cindent: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef BASE_PROFILER_SHARED_LIBRARIES_H_
|
||||
#define BASE_PROFILER_SHARED_LIBRARIES_H_
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifndef MOZ_BASE_PROFILER
|
||||
# error Do not #include this header when MOZ_BASE_PROFILER is not #defined.
|
||||
#endif
|
||||
|
||||
#include <algorithm>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
class SharedLibrary {
|
||||
public:
|
||||
SharedLibrary(uintptr_t aStart, uintptr_t aEnd, uintptr_t aOffset,
|
||||
const std::string& aBreakpadId, const std::string& aModuleName,
|
||||
const std::string& aModulePath, const std::string& aDebugName,
|
||||
const std::string& aDebugPath, const std::string& aVersion,
|
||||
const char* aArch)
|
||||
: mStart(aStart),
|
||||
mEnd(aEnd),
|
||||
mOffset(aOffset),
|
||||
mBreakpadId(aBreakpadId),
|
||||
mModuleName(aModuleName),
|
||||
mModulePath(aModulePath),
|
||||
mDebugName(aDebugName),
|
||||
mDebugPath(aDebugPath),
|
||||
mVersion(aVersion),
|
||||
mArch(aArch) {}
|
||||
|
||||
SharedLibrary(const SharedLibrary& aEntry)
|
||||
: mStart(aEntry.mStart),
|
||||
mEnd(aEntry.mEnd),
|
||||
mOffset(aEntry.mOffset),
|
||||
mBreakpadId(aEntry.mBreakpadId),
|
||||
mModuleName(aEntry.mModuleName),
|
||||
mModulePath(aEntry.mModulePath),
|
||||
mDebugName(aEntry.mDebugName),
|
||||
mDebugPath(aEntry.mDebugPath),
|
||||
mVersion(aEntry.mVersion),
|
||||
mArch(aEntry.mArch) {}
|
||||
|
||||
SharedLibrary& operator=(const SharedLibrary& aEntry) {
|
||||
// Gracefully handle self assignment
|
||||
if (this == &aEntry) return *this;
|
||||
|
||||
mStart = aEntry.mStart;
|
||||
mEnd = aEntry.mEnd;
|
||||
mOffset = aEntry.mOffset;
|
||||
mBreakpadId = aEntry.mBreakpadId;
|
||||
mModuleName = aEntry.mModuleName;
|
||||
mModulePath = aEntry.mModulePath;
|
||||
mDebugName = aEntry.mDebugName;
|
||||
mDebugPath = aEntry.mDebugPath;
|
||||
mVersion = aEntry.mVersion;
|
||||
mArch = aEntry.mArch;
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool operator==(const SharedLibrary& other) const {
|
||||
return (mStart == other.mStart) && (mEnd == other.mEnd) &&
|
||||
(mOffset == other.mOffset) && (mModuleName == other.mModuleName) &&
|
||||
(mModulePath == other.mModulePath) &&
|
||||
(mDebugName == other.mDebugName) &&
|
||||
(mDebugPath == other.mDebugPath) &&
|
||||
(mBreakpadId == other.mBreakpadId) && (mVersion == other.mVersion) &&
|
||||
(mArch == other.mArch);
|
||||
}
|
||||
|
||||
uintptr_t GetStart() const { return mStart; }
|
||||
uintptr_t GetEnd() const { return mEnd; }
|
||||
uintptr_t GetOffset() const { return mOffset; }
|
||||
const std::string& GetBreakpadId() const { return mBreakpadId; }
|
||||
const std::string& GetModuleName() const { return mModuleName; }
|
||||
const std::string& GetModulePath() const { return mModulePath; }
|
||||
const std::string& GetDebugName() const { return mDebugName; }
|
||||
const std::string& GetDebugPath() const { return mDebugPath; }
|
||||
const std::string& GetVersion() const { return mVersion; }
|
||||
const std::string& GetArch() const { return mArch; }
|
||||
|
||||
private:
|
||||
SharedLibrary() : mStart{0}, mEnd{0}, mOffset{0} {}
|
||||
|
||||
uintptr_t mStart;
|
||||
uintptr_t mEnd;
|
||||
uintptr_t mOffset;
|
||||
std::string mBreakpadId;
|
||||
std::string mModuleName;
|
||||
std::string mModulePath;
|
||||
std::string mDebugName;
|
||||
std::string mDebugPath;
|
||||
std::string mVersion;
|
||||
std::string mArch;
|
||||
};
|
||||
|
||||
static bool CompareAddresses(const SharedLibrary& first,
|
||||
const SharedLibrary& second) {
|
||||
return first.GetStart() < second.GetStart();
|
||||
}
|
||||
|
||||
class SharedLibraryInfo {
|
||||
public:
|
||||
static SharedLibraryInfo GetInfoForSelf();
|
||||
static void Initialize();
|
||||
|
||||
SharedLibraryInfo() {}
|
||||
|
||||
void AddSharedLibrary(SharedLibrary entry) { mEntries.push_back(entry); }
|
||||
|
||||
const SharedLibrary& GetEntry(size_t i) const { return mEntries[i]; }
|
||||
|
||||
SharedLibrary& GetMutableEntry(size_t i) { return mEntries[i]; }
|
||||
|
||||
// Removes items in the range [first, last)
|
||||
// i.e. element at the "last" index is not removed
|
||||
void RemoveEntries(size_t first, size_t last) {
|
||||
mEntries.erase(mEntries.begin() + first, mEntries.begin() + last);
|
||||
}
|
||||
|
||||
bool Contains(const SharedLibrary& searchItem) const {
|
||||
return (mEntries.end() !=
|
||||
std::find(mEntries.begin(), mEntries.end(), searchItem));
|
||||
}
|
||||
|
||||
size_t GetSize() const { return mEntries.size(); }
|
||||
|
||||
void SortByAddress() {
|
||||
std::sort(mEntries.begin(), mEntries.end(), CompareAddresses);
|
||||
}
|
||||
|
||||
void Clear() { mEntries.clear(); }
|
||||
|
||||
private:
|
||||
std::vector<SharedLibrary> mEntries;
|
||||
};
|
||||
|
||||
#endif // BASE_PROFILER_SHARED_LIBRARIES_H_
|
|
@ -1,134 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
|
||||
* vim: set ts=8 sts=4 et sw=4 tw=99:
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef BaseProfilingCategory_h
|
||||
#define BaseProfilingCategory_h
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifndef MOZ_BASE_PROFILER
|
||||
# error Do not #include this header when MOZ_BASE_PROFILER is not #defined.
|
||||
#endif
|
||||
|
||||
#include "mozilla/Types.h"
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
// clang-format off
|
||||
|
||||
// This higher-order macro lists all categories with their subcategories.
|
||||
//
|
||||
// BASE_PROFILING_CATEGORY_LIST(BEGIN_CATEGORY, SUBCATEGORY, END_CATEGORY)
|
||||
// BEGIN_CATEGORY(name, labelAsString, colorAsString)
|
||||
// SUBCATEGORY(category, name, labelAsString)
|
||||
// END_CATEGORY
|
||||
//
|
||||
// The list of available color names for categories is:
|
||||
// transparent, grey, purple, yellow, orange, lightblue, green, blue, magenta
|
||||
//
|
||||
// Categories and subcategories are used for stack-based instrumentation. They
|
||||
// are specified in label frames in the profiling stack, see ProfilingStack.h.
|
||||
// At any point, the category pair of the topmost profiler label frame in the
|
||||
// label stack determines the category pair of that stack.
|
||||
// Each category describes a type of workload that the CPU can be busy with.
|
||||
// Categories should be non-overlapping: the list of categories should be
|
||||
// chosen in such a way that every possible stack can be mapped to a single
|
||||
// category unambiguously.
|
||||
|
||||
#define BASE_PROFILING_CATEGORY_LIST(BEGIN_CATEGORY, SUBCATEGORY, \
|
||||
END_CATEGORY) \
|
||||
BEGIN_CATEGORY(IDLE, "Idle", "transparent") \
|
||||
SUBCATEGORY(IDLE, IDLE, "Other") \
|
||||
END_CATEGORY \
|
||||
BEGIN_CATEGORY(OTHER, "Other", "grey") \
|
||||
SUBCATEGORY(OTHER, OTHER, "Other") \
|
||||
END_CATEGORY \
|
||||
BEGIN_CATEGORY(LAYOUT, "Layout", "purple") \
|
||||
SUBCATEGORY(LAYOUT, LAYOUT, "Other") \
|
||||
SUBCATEGORY(LAYOUT, LAYOUT_FrameConstruction, "Frame construction") \
|
||||
SUBCATEGORY(LAYOUT, LAYOUT_Reflow, "Reflow") \
|
||||
SUBCATEGORY(LAYOUT, LAYOUT_CSSParsing, "CSS parsing") \
|
||||
SUBCATEGORY(LAYOUT, LAYOUT_SelectorQuery, "Selector query") \
|
||||
SUBCATEGORY(LAYOUT, LAYOUT_StyleComputation, "Style computation") \
|
||||
END_CATEGORY \
|
||||
BEGIN_CATEGORY(JS, "JavaScript", "yellow") \
|
||||
SUBCATEGORY(JS, JS, "Other") \
|
||||
SUBCATEGORY(JS, JS_Parsing, "JS Parsing") \
|
||||
SUBCATEGORY(JS, JS_IonCompilation, "Ion JIT Compilation") \
|
||||
SUBCATEGORY(JS, JS_BaselineCompilation, "Baseline JIT Compilation") \
|
||||
END_CATEGORY \
|
||||
BEGIN_CATEGORY(GCCC, "GC / CC", "orange") \
|
||||
SUBCATEGORY(GCCC, GCCC, "Other") \
|
||||
END_CATEGORY \
|
||||
BEGIN_CATEGORY(NETWORK, "Network", "lightblue") \
|
||||
SUBCATEGORY(NETWORK, NETWORK, "Other") \
|
||||
END_CATEGORY \
|
||||
BEGIN_CATEGORY(GRAPHICS, "Graphics", "green") \
|
||||
SUBCATEGORY(GRAPHICS, GRAPHICS, "Other") \
|
||||
SUBCATEGORY(GRAPHICS, GRAPHICS_DisplayListBuilding, "DisplayList building") \
|
||||
SUBCATEGORY(GRAPHICS, GRAPHICS_DisplayListMerging, "DisplayList merging") \
|
||||
SUBCATEGORY(GRAPHICS, GRAPHICS_LayerBuilding, "Layer building") \
|
||||
SUBCATEGORY(GRAPHICS, GRAPHICS_TileAllocation, "Tile allocation") \
|
||||
SUBCATEGORY(GRAPHICS, GRAPHICS_WRDisplayList, "WebRender display list") \
|
||||
SUBCATEGORY(GRAPHICS, GRAPHICS_Rasterization, "Rasterization") \
|
||||
SUBCATEGORY(GRAPHICS, GRAPHICS_FlushingAsyncPaints, "Flushing async paints") \
|
||||
SUBCATEGORY(GRAPHICS, GRAPHICS_ImageDecoding, "Image decoding") \
|
||||
END_CATEGORY \
|
||||
BEGIN_CATEGORY(DOM, "DOM", "blue") \
|
||||
SUBCATEGORY(DOM, DOM, "Other") \
|
||||
END_CATEGORY
|
||||
|
||||
// An enum that lists all possible category pairs in one list.
|
||||
// This is the enum that is used in profiler stack labels. Having one list that
|
||||
// includes subcategories from all categories in one list allows assigning the
|
||||
// category pair to a stack label with just one number.
|
||||
#define CATEGORY_ENUM_BEGIN_CATEGORY(name, labelAsString, color)
|
||||
#define CATEGORY_ENUM_SUBCATEGORY(supercategory, name, labelAsString) name,
|
||||
#define CATEGORY_ENUM_END_CATEGORY
|
||||
enum class ProfilingCategoryPair : uint32_t {
|
||||
BASE_PROFILING_CATEGORY_LIST(CATEGORY_ENUM_BEGIN_CATEGORY,
|
||||
CATEGORY_ENUM_SUBCATEGORY,
|
||||
CATEGORY_ENUM_END_CATEGORY)
|
||||
COUNT,
|
||||
LAST = COUNT - 1,
|
||||
};
|
||||
#undef CATEGORY_ENUM_BEGIN_CATEGORY
|
||||
#undef CATEGORY_ENUM_SUBCATEGORY
|
||||
#undef CATEGORY_ENUM_END_CATEGORY
|
||||
|
||||
// An enum that lists just the categories without their subcategories.
|
||||
#define SUPERCATEGORY_ENUM_BEGIN_CATEGORY(name, labelAsString, color) name,
|
||||
#define SUPERCATEGORY_ENUM_SUBCATEGORY(supercategory, name, labelAsString)
|
||||
#define SUPERCATEGORY_ENUM_END_CATEGORY
|
||||
enum class ProfilingCategory : uint32_t {
|
||||
BASE_PROFILING_CATEGORY_LIST(SUPERCATEGORY_ENUM_BEGIN_CATEGORY,
|
||||
SUPERCATEGORY_ENUM_SUBCATEGORY,
|
||||
SUPERCATEGORY_ENUM_END_CATEGORY)
|
||||
COUNT,
|
||||
LAST = COUNT - 1,
|
||||
};
|
||||
#undef SUPERCATEGORY_ENUM_BEGIN_CATEGORY
|
||||
#undef SUPERCATEGORY_ENUM_SUBCATEGORY
|
||||
#undef SUPERCATEGORY_ENUM_END_CATEGORY
|
||||
|
||||
// clang-format on
|
||||
|
||||
struct ProfilingCategoryPairInfo {
|
||||
ProfilingCategory mCategory;
|
||||
uint32_t mSubcategoryIndex;
|
||||
const char* mLabel;
|
||||
};
|
||||
|
||||
MFBT_API const ProfilingCategoryPairInfo& GetProfilingCategoryPairInfo(
|
||||
ProfilingCategoryPair aCategoryPair);
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif /* BaseProfilingCategory_h */
|
|
@ -1,510 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
|
||||
* vim: set ts=8 sts=2 et sw=2 tw=80:
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef BaseProfilingStack_h
|
||||
#define BaseProfilingStack_h
|
||||
|
||||
#include "BaseProfilingCategory.h"
|
||||
|
||||
#include "mozilla/Atomics.h"
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifndef MOZ_BASE_PROFILER
|
||||
# error Do not #include this header when MOZ_BASE_PROFILER is not #defined.
|
||||
#endif
|
||||
|
||||
#include <algorithm>
|
||||
#include <stdint.h>
|
||||
|
||||
// This file defines the classes ProfilingStack and ProfilingStackFrame.
|
||||
// The ProfilingStack manages an array of ProfilingStackFrames.
|
||||
// It keeps track of the "label stack" and the JS interpreter stack.
|
||||
// The two stack types are interleaved.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// ProfilingStack* profilingStack = ...;
|
||||
//
|
||||
// // For label frames:
|
||||
// profilingStack->pushLabelFrame(...);
|
||||
// // Execute some code. When finished, pop the frame:
|
||||
// profilingStack->pop();
|
||||
//
|
||||
// // For JS stack frames:
|
||||
// profilingStack->pushJSFrame(...);
|
||||
// // Execute some code. When finished, pop the frame:
|
||||
// profilingStack->pop();
|
||||
//
|
||||
//
|
||||
// Concurrency considerations
|
||||
//
|
||||
// A thread's profiling stack (and the frames inside it) is only modified by
|
||||
// that thread. However, the profiling stack can be *read* by a different
|
||||
// thread, the sampler thread: Whenever the profiler wants to sample a given
|
||||
// thread A, the following happens:
|
||||
// (1) Thread A is suspended.
|
||||
// (2) The sampler thread (thread S) reads the ProfilingStack of thread A,
|
||||
// including all ProfilingStackFrames that are currently in that stack
|
||||
// (profilingStack->frames[0..profilingStack->stackSize()]).
|
||||
// (3) Thread A is resumed.
|
||||
//
|
||||
// Thread suspension is achieved using platform-specific APIs; refer to each
|
||||
// platform's Sampler::SuspendAndSampleAndResumeThread implementation in
|
||||
// platform-*.cpp for details.
|
||||
//
|
||||
// When the thread is suspended, the values in profilingStack->stackPointer and
|
||||
// in the stack frame range
|
||||
// profilingStack->frames[0..profilingStack->stackPointer] need to be in a
|
||||
// consistent state, so that thread S does not read partially- constructed stack
|
||||
// frames. More specifically, we have two requirements:
|
||||
// (1) When adding a new frame at the top of the stack, its ProfilingStackFrame
|
||||
// data needs to be put in place *before* the stackPointer is incremented,
|
||||
// and the compiler + CPU need to know that this order matters.
|
||||
// (2) When popping an frame from the stack and then preparing the
|
||||
// ProfilingStackFrame data for the next frame that is about to be pushed,
|
||||
// the decrement of the stackPointer in pop() needs to happen *before* the
|
||||
// ProfilingStackFrame for the new frame is being popuplated, and the
|
||||
// compiler + CPU need to know that this order matters.
|
||||
//
|
||||
// We can express the relevance of these orderings in multiple ways.
|
||||
// Option A is to make stackPointer an atomic with SequentiallyConsistent
|
||||
// memory ordering. This would ensure that no writes in thread A would be
|
||||
// reordered across any writes to stackPointer, which satisfies requirements
|
||||
// (1) and (2) at the same time. Option A is the simplest.
|
||||
// Option B is to use ReleaseAcquire memory ordering both for writes to
|
||||
// stackPointer *and* for writes to ProfilingStackFrame fields. Release-stores
|
||||
// ensure that all writes that happened *before this write in program order* are
|
||||
// not reordered to happen after this write. ReleaseAcquire ordering places no
|
||||
// requirements on the ordering of writes that happen *after* this write in
|
||||
// program order.
|
||||
// Using release-stores for writes to stackPointer expresses requirement (1),
|
||||
// and using release-stores for writes to the ProfilingStackFrame fields
|
||||
// expresses requirement (2).
|
||||
//
|
||||
// Option B is more complicated than option A, but has much better performance
|
||||
// on x86/64: In a microbenchmark run on a Macbook Pro from 2017, switching
|
||||
// from option A to option B reduced the overhead of pushing+popping a
|
||||
// ProfilingStackFrame by 10 nanoseconds.
|
||||
// On x86/64, release-stores require no explicit hardware barriers or lock
|
||||
// instructions.
|
||||
// On ARM/64, option B may be slower than option A, because the compiler will
|
||||
// generate hardware barriers for every single release-store instead of just
|
||||
// for the writes to stackPointer. However, the actual performance impact of
|
||||
// this has not yet been measured on ARM, so we're currently using option B
|
||||
// everywhere. This is something that we may want to change in the future once
|
||||
// we've done measurements.
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
|
||||
// A call stack can be specified to the JS engine such that all JS entry/exits
|
||||
// to functions push/pop a stack frame to/from the specified stack.
|
||||
//
|
||||
// For more detailed information, see vm/GeckoProfiler.h.
|
||||
//
|
||||
class ProfilingStackFrame {
|
||||
// A ProfilingStackFrame represents either a label frame or a JS frame.
|
||||
|
||||
// WARNING WARNING WARNING
|
||||
//
|
||||
// All the fields below are Atomic<...,ReleaseAcquire>. This is needed so
|
||||
// that writes to these fields are release-writes, which ensures that
|
||||
// earlier writes in this thread don't get reordered after the writes to
|
||||
// these fields. In particular, the decrement of the stack pointer in
|
||||
// ProfilingStack::pop() is a write that *must* happen before the values in
|
||||
// this ProfilingStackFrame are changed. Otherwise, the sampler thread might
|
||||
// see an inconsistent state where the stack pointer still points to a
|
||||
// ProfilingStackFrame which has already been popped off the stack and whose
|
||||
// fields have now been partially repopulated with new values.
|
||||
// See the "Concurrency considerations" paragraph at the top of this file
|
||||
// for more details.
|
||||
|
||||
// Descriptive label for this stack frame. Must be a static string! Can be
|
||||
// an empty string, but not a null pointer.
|
||||
Atomic<const char*, ReleaseAcquire, recordreplay::Behavior::DontPreserve>
|
||||
label_;
|
||||
|
||||
// An additional descriptive string of this frame which is combined with
|
||||
// |label_| in profiler output. Need not be (and usually isn't) static. Can
|
||||
// be null.
|
||||
Atomic<const char*, ReleaseAcquire, recordreplay::Behavior::DontPreserve>
|
||||
dynamicString_;
|
||||
|
||||
// Stack pointer for non-JS stack frames, the script pointer otherwise.
|
||||
Atomic<void*, ReleaseAcquire, recordreplay::Behavior::DontPreserve>
|
||||
spOrScript;
|
||||
|
||||
// The bytecode offset for JS stack frames.
|
||||
// Must not be used on non-JS frames; it'll contain either the default 0,
|
||||
// or a leftover value from a previous JS stack frame that was using this
|
||||
// ProfilingStackFrame object.
|
||||
Atomic<int32_t, ReleaseAcquire, recordreplay::Behavior::DontPreserve>
|
||||
pcOffsetIfJS_;
|
||||
|
||||
// Bits 0...8 hold the Flags. Bits 9...31 hold the category pair.
|
||||
Atomic<uint32_t, ReleaseAcquire, recordreplay::Behavior::DontPreserve>
|
||||
flagsAndCategoryPair_;
|
||||
|
||||
public:
|
||||
ProfilingStackFrame() = default;
|
||||
ProfilingStackFrame& operator=(const ProfilingStackFrame& other) {
|
||||
label_ = other.label();
|
||||
dynamicString_ = other.dynamicString();
|
||||
void* spScript = other.spOrScript;
|
||||
spOrScript = spScript;
|
||||
int32_t offsetIfJS = other.pcOffsetIfJS_;
|
||||
pcOffsetIfJS_ = offsetIfJS;
|
||||
uint32_t flagsAndCategory = other.flagsAndCategoryPair_;
|
||||
flagsAndCategoryPair_ = flagsAndCategory;
|
||||
return *this;
|
||||
}
|
||||
|
||||
// 9 bits for the flags.
|
||||
// That leaves 32 - 9 = 23 bits for the category pair.
|
||||
enum class Flags : uint32_t {
|
||||
// The first three flags describe the kind of the frame and are
|
||||
// mutually exclusive. (We still give them individual bits for
|
||||
// simplicity.)
|
||||
|
||||
// A regular label frame. These usually come from AutoProfilerLabel.
|
||||
IS_LABEL_FRAME = 1 << 0,
|
||||
|
||||
// A special frame indicating the start of a run of JS profiling stack
|
||||
// frames. IS_SP_MARKER_FRAME frames are ignored, except for the sp
|
||||
// field. These frames are needed to get correct ordering between JS
|
||||
// and LABEL frames because JS frames don't carry sp information.
|
||||
// SP is short for "stack pointer".
|
||||
IS_SP_MARKER_FRAME = 1 << 1,
|
||||
|
||||
// A JS frame.
|
||||
IS_JS_FRAME = 1 << 2,
|
||||
|
||||
// An interpreter JS frame that has OSR-ed into baseline. IS_JS_FRAME
|
||||
// frames can have this flag set and unset during their lifetime.
|
||||
// JS_OSR frames are ignored.
|
||||
JS_OSR = 1 << 3,
|
||||
|
||||
// The next three are mutually exclusive.
|
||||
// By default, for profiling stack frames that have both a label and a
|
||||
// dynamic string, the two strings are combined into one string of the
|
||||
// form "<label> <dynamicString>" during JSON serialization. The
|
||||
// following flags can be used to change this preset.
|
||||
STRING_TEMPLATE_METHOD = 1 << 4, // "<label>.<dynamicString>"
|
||||
STRING_TEMPLATE_GETTER = 1 << 5, // "get <label>.<dynamicString>"
|
||||
STRING_TEMPLATE_SETTER = 1 << 6, // "set <label>.<dynamicString>"
|
||||
|
||||
// If set, causes this stack frame to be marked as "relevantForJS" in
|
||||
// the profile JSON, which will make it show up in the "JS only" call
|
||||
// tree view.
|
||||
RELEVANT_FOR_JS = 1 << 7,
|
||||
|
||||
// If set, causes the label on this ProfilingStackFrame to be ignored
|
||||
// and to be replaced by the subcategory's label.
|
||||
LABEL_DETERMINED_BY_CATEGORY_PAIR = 1 << 8,
|
||||
|
||||
FLAGS_BITCOUNT = 9,
|
||||
FLAGS_MASK = (1 << FLAGS_BITCOUNT) - 1
|
||||
};
|
||||
|
||||
static_assert(
|
||||
uint32_t(ProfilingCategoryPair::LAST) <=
|
||||
(UINT32_MAX >> uint32_t(Flags::FLAGS_BITCOUNT)),
|
||||
"Too many category pairs to fit into u32 with together with the "
|
||||
"reserved bits for the flags");
|
||||
|
||||
bool isLabelFrame() const {
|
||||
return uint32_t(flagsAndCategoryPair_) & uint32_t(Flags::IS_LABEL_FRAME);
|
||||
}
|
||||
|
||||
bool isSpMarkerFrame() const {
|
||||
return uint32_t(flagsAndCategoryPair_) &
|
||||
uint32_t(Flags::IS_SP_MARKER_FRAME);
|
||||
}
|
||||
|
||||
bool isJsFrame() const {
|
||||
return uint32_t(flagsAndCategoryPair_) & uint32_t(Flags::IS_JS_FRAME);
|
||||
}
|
||||
|
||||
bool isOSRFrame() const {
|
||||
return uint32_t(flagsAndCategoryPair_) & uint32_t(Flags::JS_OSR);
|
||||
}
|
||||
|
||||
void setIsOSRFrame(bool isOSR) {
|
||||
if (isOSR) {
|
||||
flagsAndCategoryPair_ =
|
||||
uint32_t(flagsAndCategoryPair_) | uint32_t(Flags::JS_OSR);
|
||||
} else {
|
||||
flagsAndCategoryPair_ =
|
||||
uint32_t(flagsAndCategoryPair_) & ~uint32_t(Flags::JS_OSR);
|
||||
}
|
||||
}
|
||||
|
||||
const char* label() const {
|
||||
uint32_t flagsAndCategoryPair = flagsAndCategoryPair_;
|
||||
if (flagsAndCategoryPair &
|
||||
uint32_t(Flags::LABEL_DETERMINED_BY_CATEGORY_PAIR)) {
|
||||
auto categoryPair = ProfilingCategoryPair(
|
||||
flagsAndCategoryPair >> uint32_t(Flags::FLAGS_BITCOUNT));
|
||||
return GetProfilingCategoryPairInfo(categoryPair).mLabel;
|
||||
}
|
||||
return label_;
|
||||
}
|
||||
|
||||
const char* dynamicString() const { return dynamicString_; }
|
||||
|
||||
void initLabelFrame(const char* aLabel, const char* aDynamicString, void* sp,
|
||||
ProfilingCategoryPair aCategoryPair, uint32_t aFlags) {
|
||||
label_ = aLabel;
|
||||
dynamicString_ = aDynamicString;
|
||||
spOrScript = sp;
|
||||
// pcOffsetIfJS_ is not set and must not be used on label frames.
|
||||
flagsAndCategoryPair_ =
|
||||
uint32_t(Flags::IS_LABEL_FRAME) |
|
||||
(uint32_t(aCategoryPair) << uint32_t(Flags::FLAGS_BITCOUNT)) | aFlags;
|
||||
MOZ_ASSERT(isLabelFrame());
|
||||
}
|
||||
|
||||
void initSpMarkerFrame(void* sp) {
|
||||
label_ = "";
|
||||
dynamicString_ = nullptr;
|
||||
spOrScript = sp;
|
||||
// pcOffsetIfJS_ is not set and must not be used on sp marker frames.
|
||||
flagsAndCategoryPair_ = uint32_t(Flags::IS_SP_MARKER_FRAME) |
|
||||
(uint32_t(ProfilingCategoryPair::OTHER)
|
||||
<< uint32_t(Flags::FLAGS_BITCOUNT));
|
||||
MOZ_ASSERT(isSpMarkerFrame());
|
||||
}
|
||||
|
||||
void initJsFrame(const char* aLabel, const char* aDynamicString,
|
||||
void* /* JSScript* */ aScript, int32_t aOffset) {
|
||||
label_ = aLabel;
|
||||
dynamicString_ = aDynamicString;
|
||||
spOrScript = aScript;
|
||||
pcOffsetIfJS_ = aOffset;
|
||||
flagsAndCategoryPair_ =
|
||||
uint32_t(Flags::IS_JS_FRAME) | (uint32_t(ProfilingCategoryPair::JS)
|
||||
<< uint32_t(Flags::FLAGS_BITCOUNT));
|
||||
MOZ_ASSERT(isJsFrame());
|
||||
}
|
||||
|
||||
uint32_t flags() const {
|
||||
return uint32_t(flagsAndCategoryPair_) & uint32_t(Flags::FLAGS_MASK);
|
||||
}
|
||||
|
||||
ProfilingCategoryPair categoryPair() const {
|
||||
return ProfilingCategoryPair(flagsAndCategoryPair_ >>
|
||||
uint32_t(Flags::FLAGS_BITCOUNT));
|
||||
}
|
||||
|
||||
void* stackAddress() const {
|
||||
MOZ_ASSERT(!isJsFrame());
|
||||
return spOrScript;
|
||||
}
|
||||
|
||||
// Note that the pointer returned might be invalid.
|
||||
void* rawScript() const {
|
||||
MOZ_ASSERT(isJsFrame());
|
||||
return spOrScript;
|
||||
}
|
||||
void setRawScript(void* aScript) {
|
||||
MOZ_ASSERT(isJsFrame());
|
||||
spOrScript = aScript;
|
||||
}
|
||||
|
||||
int32_t pcOffset() const {
|
||||
MOZ_ASSERT(isJsFrame());
|
||||
return pcOffsetIfJS_;
|
||||
}
|
||||
|
||||
void setPCOffset(int32_t aOffset) {
|
||||
MOZ_ASSERT(isJsFrame());
|
||||
pcOffsetIfJS_ = aOffset;
|
||||
}
|
||||
|
||||
// The offset of a pc into a script's code can actually be 0, so to
|
||||
// signify a nullptr pc, use a -1 index. This is checked against in
|
||||
// pc() and setPC() to set/get the right pc.
|
||||
static const int32_t NullPCOffset = -1;
|
||||
};
|
||||
|
||||
// Each thread has its own ProfilingStack. That thread modifies the
|
||||
// ProfilingStack, pushing and popping elements as necessary.
|
||||
//
|
||||
// The ProfilingStack is also read periodically by the profiler's sampler
|
||||
// thread. This happens only when the thread that owns the ProfilingStack is
|
||||
// suspended. So there are no genuine parallel accesses.
|
||||
//
|
||||
// However, it is possible for pushing/popping to be interrupted by a periodic
|
||||
// sample. Because of this, we need pushing/popping to be effectively atomic.
|
||||
//
|
||||
// - When pushing a new frame, we increment the stack pointer -- making the new
|
||||
// frame visible to the sampler thread -- only after the new frame has been
|
||||
// fully written. The stack pointer is Atomic<uint32_t,ReleaseAcquire>, so
|
||||
// the increment is a release-store, which ensures that this store is not
|
||||
// reordered before the writes of the frame.
|
||||
//
|
||||
// - When popping an old frame, the only operation is the decrementing of the
|
||||
// stack pointer, which is obviously atomic.
|
||||
//
|
||||
class ProfilingStack final {
|
||||
public:
|
||||
ProfilingStack() : stackPointer(0) {}
|
||||
|
||||
MFBT_API ~ProfilingStack();
|
||||
|
||||
void pushLabelFrame(const char* label, const char* dynamicString, void* sp,
|
||||
ProfilingCategoryPair categoryPair, uint32_t flags = 0) {
|
||||
// This thread is the only one that ever changes the value of
|
||||
// stackPointer.
|
||||
// Store the value of the atomic in a non-atomic local variable so that
|
||||
// the compiler won't generate two separate loads from the atomic for
|
||||
// the size check and the frames[] array indexing operation.
|
||||
uint32_t stackPointerVal = stackPointer;
|
||||
|
||||
if (MOZ_UNLIKELY(stackPointerVal >= capacity)) {
|
||||
ensureCapacitySlow();
|
||||
}
|
||||
frames[stackPointerVal].initLabelFrame(label, dynamicString, sp,
|
||||
categoryPair, flags);
|
||||
|
||||
// This must happen at the end! The compiler will not reorder this
|
||||
// update because stackPointer is Atomic<..., ReleaseAcquire>, so any
|
||||
// the writes above will not be reordered below the stackPointer store.
|
||||
// Do the read and the write as two separate statements, in order to
|
||||
// make it clear that we don't need an atomic increment, which would be
|
||||
// more expensive on x86 than the separate operations done here.
|
||||
// However, don't use stackPointerVal here; instead, allow the compiler
|
||||
// to turn this store into a non-atomic increment instruction which
|
||||
// takes up less code size.
|
||||
stackPointer = stackPointer + 1;
|
||||
}
|
||||
|
||||
void pushSpMarkerFrame(void* sp) {
|
||||
uint32_t oldStackPointer = stackPointer;
|
||||
|
||||
if (MOZ_UNLIKELY(oldStackPointer >= capacity)) {
|
||||
ensureCapacitySlow();
|
||||
}
|
||||
frames[oldStackPointer].initSpMarkerFrame(sp);
|
||||
|
||||
// This must happen at the end, see the comment in pushLabelFrame.
|
||||
stackPointer = oldStackPointer + 1;
|
||||
}
|
||||
|
||||
void pushJsOffsetFrame(const char* label, const char* dynamicString,
|
||||
void* script, int32_t offset) {
|
||||
// This thread is the only one that ever changes the value of
|
||||
// stackPointer. Only load the atomic once.
|
||||
uint32_t oldStackPointer = stackPointer;
|
||||
|
||||
if (MOZ_UNLIKELY(oldStackPointer >= capacity)) {
|
||||
ensureCapacitySlow();
|
||||
}
|
||||
frames[oldStackPointer].initJsFrame(label, dynamicString, script, offset);
|
||||
|
||||
// This must happen at the end, see the comment in pushLabelFrame.
|
||||
stackPointer = stackPointer + 1;
|
||||
}
|
||||
|
||||
void pop() {
|
||||
MOZ_ASSERT(stackPointer > 0);
|
||||
// Do the read and the write as two separate statements, in order to
|
||||
// make it clear that we don't need an atomic decrement, which would be
|
||||
// more expensive on x86 than the separate operations done here.
|
||||
// This thread is the only one that ever changes the value of
|
||||
// stackPointer.
|
||||
uint32_t oldStackPointer = stackPointer;
|
||||
stackPointer = oldStackPointer - 1;
|
||||
}
|
||||
|
||||
uint32_t stackSize() const { return stackPointer; }
|
||||
uint32_t stackCapacity() const { return capacity; }
|
||||
|
||||
private:
|
||||
// Out of line path for expanding the buffer, since otherwise this would get
|
||||
// inlined in every DOM WebIDL call.
|
||||
MFBT_API MOZ_COLD void ensureCapacitySlow();
|
||||
|
||||
// No copying.
|
||||
ProfilingStack(const ProfilingStack&) = delete;
|
||||
void operator=(const ProfilingStack&) = delete;
|
||||
|
||||
// No moving either.
|
||||
ProfilingStack(ProfilingStack&&) = delete;
|
||||
void operator=(ProfilingStack&&) = delete;
|
||||
|
||||
uint32_t capacity = 0;
|
||||
|
||||
public:
|
||||
// The pointer to the stack frames, this is read from the profiler thread and
|
||||
// written from the current thread.
|
||||
//
|
||||
// This is effectively a unique pointer.
|
||||
Atomic<ProfilingStackFrame*, SequentiallyConsistent,
|
||||
recordreplay::Behavior::DontPreserve>
|
||||
frames{nullptr};
|
||||
|
||||
// This may exceed the capacity, so instead use the stackSize() method to
|
||||
// determine the number of valid frames in stackFrames. When this is less
|
||||
// than stackCapacity(), it refers to the first free stackframe past the top
|
||||
// of the in-use stack (i.e. frames[stackPointer - 1] is the top stack
|
||||
// frame).
|
||||
//
|
||||
// WARNING WARNING WARNING
|
||||
//
|
||||
// This is an atomic variable that uses ReleaseAcquire memory ordering.
|
||||
// See the "Concurrency considerations" paragraph at the top of this file
|
||||
// for more details.
|
||||
Atomic<uint32_t, ReleaseAcquire, recordreplay::Behavior::DontPreserve>
|
||||
stackPointer;
|
||||
};
|
||||
|
||||
class AutoGeckoProfilerEntry;
|
||||
class GeckoProfilerEntryMarker;
|
||||
class GeckoProfilerBaselineOSRMarker;
|
||||
|
||||
class GeckoProfilerThread {
|
||||
friend class AutoGeckoProfilerEntry;
|
||||
friend class GeckoProfilerEntryMarker;
|
||||
friend class GeckoProfilerBaselineOSRMarker;
|
||||
|
||||
ProfilingStack* profilingStack_;
|
||||
|
||||
// Same as profilingStack_ if the profiler is currently active, otherwise
|
||||
// null.
|
||||
ProfilingStack* profilingStackIfEnabled_;
|
||||
|
||||
public:
|
||||
MFBT_API GeckoProfilerThread();
|
||||
|
||||
uint32_t stackPointer() {
|
||||
MOZ_ASSERT(infraInstalled());
|
||||
return profilingStack_->stackPointer;
|
||||
}
|
||||
ProfilingStackFrame* stack() { return profilingStack_->frames; }
|
||||
ProfilingStack* getProfilingStack() { return profilingStack_; }
|
||||
ProfilingStack* getProfilingStackIfEnabled() {
|
||||
return profilingStackIfEnabled_;
|
||||
}
|
||||
|
||||
/*
|
||||
* True if the profiler infrastructure is setup. Should be true in builds
|
||||
* that include profiler support except during early startup or late
|
||||
* shutdown. Unrelated to the presence of the Gecko Profiler addon.
|
||||
*/
|
||||
bool infraInstalled() { return profilingStack_ != nullptr; }
|
||||
|
||||
MFBT_API void setProfilingStack(ProfilingStack* profilingStack, bool enabled);
|
||||
void enable(bool enable) {
|
||||
profilingStackIfEnabled_ = enable ? profilingStack_ : nullptr;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
#endif /* BaseProfilingStack_h */
|
|
@ -14,7 +14,6 @@ if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'android':
|
|||
DIRS += ['android']
|
||||
|
||||
DIRS += [
|
||||
'baseprofiler',
|
||||
'build',
|
||||
'misc',
|
||||
]
|
||||
|
|
|
@ -1,158 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
* You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
|
||||
# include "mozilla/Attributes.h"
|
||||
# include "mozilla/Vector.h"
|
||||
|
||||
# if defined(_MSC_VER)
|
||||
# include <windows.h>
|
||||
# include <mmsystem.h>
|
||||
# include <process.h>
|
||||
# elif defined(__linux__) || (defined(__APPLE__) && defined(__x86_64__))
|
||||
# include <time.h>
|
||||
# include <unistd.h>
|
||||
# else
|
||||
# error
|
||||
# endif
|
||||
|
||||
using namespace mozilla;
|
||||
|
||||
// Increase the depth, to a maximum (to avoid too-deep recursion).
|
||||
static constexpr size_t NextDepth(size_t aDepth) {
|
||||
constexpr size_t MAX_DEPTH = 128;
|
||||
return (aDepth < MAX_DEPTH) ? (aDepth + 1) : aDepth;
|
||||
}
|
||||
|
||||
// Compute fibonacci the hard way (recursively: `f(n)=f(n-1)+f(n-2)`), and
|
||||
// prevent inlining.
|
||||
// The template parameter makes each depth be a separate function, to better
|
||||
// distinguish them in the profiler output.
|
||||
template <size_t DEPTH = 0>
|
||||
MOZ_NEVER_INLINE unsigned long long Fibonacci(unsigned long long n) {
|
||||
if (n == 0) {
|
||||
return 0;
|
||||
}
|
||||
if (n == 1) {
|
||||
return 1;
|
||||
}
|
||||
unsigned long long f2 = Fibonacci<NextDepth(DEPTH)>(n - 2);
|
||||
if (DEPTH == 0) {
|
||||
BASE_PROFILER_ADD_MARKER("Half-way through Fibonacci", OTHER);
|
||||
}
|
||||
unsigned long long f1 = Fibonacci<NextDepth(DEPTH)>(n - 1);
|
||||
return f2 + f1;
|
||||
}
|
||||
|
||||
static void SleepMilli(unsigned aMilliseconds) {
|
||||
# if defined(_MSC_VER)
|
||||
Sleep(aMilliseconds);
|
||||
# else
|
||||
struct timespec ts;
|
||||
ts.tv_sec = aMilliseconds / 1000;
|
||||
ts.tv_nsec = long(aMilliseconds % 1000) * 1000000;
|
||||
struct timespec tr;
|
||||
while (nanosleep(&ts, &tr)) {
|
||||
if (errno == EINTR) {
|
||||
ts = tr;
|
||||
} else {
|
||||
printf("nanosleep() -> %s\n", strerror(errno));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
# endif
|
||||
}
|
||||
|
||||
void TestProfiler() {
|
||||
printf("TestProfiler starting -- pid: %d, tid: %d\n",
|
||||
baseprofiler::profiler_current_process_id(),
|
||||
baseprofiler::profiler_current_thread_id());
|
||||
// ::Sleep(10000);
|
||||
|
||||
{
|
||||
printf("profiler_init()...\n");
|
||||
AUTO_BASE_PROFILER_INIT;
|
||||
|
||||
MOZ_RELEASE_ASSERT(!baseprofiler::profiler_is_active());
|
||||
MOZ_RELEASE_ASSERT(!baseprofiler::profiler_thread_is_being_profiled());
|
||||
MOZ_RELEASE_ASSERT(!baseprofiler::profiler_thread_is_sleeping());
|
||||
|
||||
printf("profiler_start()...\n");
|
||||
mozilla::Vector<const char*> filters;
|
||||
// Profile all registered threads.
|
||||
MOZ_RELEASE_ASSERT(filters.append(""));
|
||||
const uint32_t features = baseprofiler::ProfilerFeature::Leaf |
|
||||
baseprofiler::ProfilerFeature::StackWalk |
|
||||
baseprofiler::ProfilerFeature::Threads;
|
||||
baseprofiler::profiler_start(baseprofiler::BASE_PROFILER_DEFAULT_ENTRIES,
|
||||
BASE_PROFILER_DEFAULT_INTERVAL, features,
|
||||
filters.begin(), filters.length());
|
||||
|
||||
MOZ_RELEASE_ASSERT(baseprofiler::profiler_is_active());
|
||||
MOZ_RELEASE_ASSERT(baseprofiler::profiler_thread_is_being_profiled());
|
||||
MOZ_RELEASE_ASSERT(!baseprofiler::profiler_thread_is_sleeping());
|
||||
|
||||
{
|
||||
AUTO_BASE_PROFILER_TEXT_MARKER_CAUSE("fibonacci", "First leaf call",
|
||||
OTHER, nullptr);
|
||||
static const unsigned long long fibStart = 40;
|
||||
printf("Fibonacci(%llu)...\n", fibStart);
|
||||
AUTO_BASE_PROFILER_LABEL("Label around Fibonacci", OTHER);
|
||||
unsigned long long f = Fibonacci(fibStart);
|
||||
printf("Fibonacci(%llu) = %llu\n", fibStart, f);
|
||||
}
|
||||
|
||||
printf("Sleep 1s...\n");
|
||||
{
|
||||
AUTO_BASE_PROFILER_THREAD_SLEEP;
|
||||
SleepMilli(1000);
|
||||
}
|
||||
|
||||
printf("baseprofiler_save_profile_to_file()...\n");
|
||||
baseprofiler::profiler_save_profile_to_file("TestProfiler_profile.json");
|
||||
|
||||
printf("profiler_stop()...\n");
|
||||
baseprofiler::profiler_stop();
|
||||
|
||||
MOZ_RELEASE_ASSERT(!baseprofiler::profiler_is_active());
|
||||
MOZ_RELEASE_ASSERT(!baseprofiler::profiler_thread_is_being_profiled());
|
||||
MOZ_RELEASE_ASSERT(!baseprofiler::profiler_thread_is_sleeping());
|
||||
|
||||
printf("profiler_shutdown()...\n");
|
||||
}
|
||||
|
||||
printf("TestProfiler done\n");
|
||||
}
|
||||
|
||||
#else // MOZ_BASE_PROFILER
|
||||
|
||||
// Testing that macros are still #defined (but do nothing) when
|
||||
// MOZ_BASE_PROFILER is disabled.
|
||||
void TestProfiler() {
|
||||
// These don't need to make sense, we just want to know that they're defined
|
||||
// and don't do anything.
|
||||
AUTO_BASE_PROFILER_INIT;
|
||||
|
||||
// This wouldn't build if the macro did output its arguments.
|
||||
AUTO_BASE_PROFILER_TEXT_MARKER_CAUSE(catch, catch, catch, catch);
|
||||
|
||||
AUTO_BASE_PROFILER_LABEL(catch, catch);
|
||||
|
||||
AUTO_BASE_PROFILER_THREAD_SLEEP;
|
||||
}
|
||||
|
||||
#endif // MOZ_BASE_PROFILER else
|
||||
|
||||
int main() {
|
||||
// Note that there are two `TestProfiler` functions above, depending on
|
||||
// whether MOZ_BASE_PROFILER is #defined.
|
||||
TestProfiler();
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -6,21 +6,14 @@
|
|||
|
||||
DisableStlWrapping()
|
||||
|
||||
# Important: for CppUnitTests to be run, they also need to be added
|
||||
# to testing/cppunittest.ini.
|
||||
|
||||
GeckoCppUnitTests([
|
||||
'ShowSSEConfig',
|
||||
], linkage=None)
|
||||
|
||||
CppUnitTests([
|
||||
'TestBaseProfiler',
|
||||
'TestPrintf',
|
||||
])
|
||||
|
||||
with Files('TestBaseProfiler.cpp'):
|
||||
BUG_COMPONENT = ('Core', 'Gecko Profiler')
|
||||
|
||||
if CONFIG['OS_ARCH'] == 'WINNT':
|
||||
GeckoCppUnitTests([
|
||||
'TestNativeNt',
|
||||
|
|
|
@ -32,7 +32,6 @@ skip-if = os != 'win'
|
|||
[TestMacroForEach]
|
||||
[TestMathAlgorithms]
|
||||
[TestMaybe]
|
||||
[TestBaseProfiler]
|
||||
[TestNonDereferenceable]
|
||||
[TestNotNull]
|
||||
[TestParseFTPList]
|
||||
|
|
|
@ -27,7 +27,6 @@
|
|||
#include "mozilla/intl/LocaleService.h"
|
||||
#include "mozilla/recordreplay/ParentIPC.h"
|
||||
#include "mozilla/JSONWriter.h"
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#include "nsAppRunner.h"
|
||||
#include "mozilla/XREAppData.h"
|
||||
|
@ -4668,7 +4667,6 @@ int XREMain::XRE_main(int argc, char* argv[], const BootstrapConfig& aConfig) {
|
|||
CodeCoverageHandler::Init();
|
||||
#endif
|
||||
|
||||
AUTO_BASE_PROFILER_LABEL("XREMain::XRE_main (around Gecko Profiler)", OTHER);
|
||||
AUTO_PROFILER_INIT;
|
||||
AUTO_PROFILER_LABEL("XREMain::XRE_main", OTHER);
|
||||
|
||||
|
|
|
@ -90,7 +90,6 @@
|
|||
#include "mozilla/net/SocketProcessImpl.h"
|
||||
|
||||
#include "GeckoProfiler.h"
|
||||
#include "BaseProfiler.h"
|
||||
|
||||
#if defined(MOZ_SANDBOX) && defined(XP_WIN)
|
||||
# include "mozilla/sandboxTarget.h"
|
||||
|
@ -392,8 +391,6 @@ nsresult XRE_InitChildProcess(int aArgc, char* aArgv[],
|
|||
|
||||
mozilla::LogModule::Init(aArgc, aArgv);
|
||||
|
||||
AUTO_BASE_PROFILER_LABEL("XRE_InitChildProcess (around Gecko Profiler)",
|
||||
OTHER);
|
||||
AUTO_PROFILER_INIT;
|
||||
AUTO_PROFILER_LABEL("XRE_InitChildProcess", OTHER);
|
||||
|
||||
|
@ -811,10 +808,7 @@ nsresult XRE_InitParentProcess(int aArgc, char* aArgv[],
|
|||
|
||||
mozilla::LogModule::Init(aArgc, aArgv);
|
||||
|
||||
AUTO_BASE_PROFILER_LABEL("XRE_InitParentProcess (around Gecko Profiler)",
|
||||
OTHER);
|
||||
AUTO_PROFILER_INIT;
|
||||
AUTO_PROFILER_LABEL("XRE_InitParentProcess", OTHER);
|
||||
|
||||
ScopedXREEmbed embed;
|
||||
|
||||
|
|
|
@ -269,10 +269,6 @@ void Registers::SyncPopulate() {
|
|||
#endif
|
||||
|
||||
#if defined(GP_PLAT_amd64_windows)
|
||||
|
||||
# ifndef MOZ_BASE_PROFILER
|
||||
// If MOZ_BASE_PROFILER is *not* #defined, we need to implement this here, as
|
||||
// the one in mozglue/baseprofiler will not even be built.
|
||||
static WindowsDllInterceptor NtDllIntercept;
|
||||
|
||||
typedef NTSTATUS(NTAPI* LdrUnloadDll_func)(HMODULE module);
|
||||
|
@ -312,18 +308,4 @@ void InitializeWin64ProfilerHooks() {
|
|||
&patched_LdrResolveDelayLoadedAPI);
|
||||
}
|
||||
}
|
||||
|
||||
# else // ndef MOZ_BASE_PROFILER
|
||||
// If MOZ_BASE_PROFILER is #defined, we just use InitializeWin64ProfilerHooks
|
||||
// that it implements.
|
||||
|
||||
namespace mozilla {
|
||||
namespace baseprofiler {
|
||||
MFBT_API void InitializeWin64ProfilerHooks();
|
||||
} // namespace baseprofiler
|
||||
} // namespace mozilla
|
||||
|
||||
using mozilla::baseprofiler::InitializeWin64ProfilerHooks;
|
||||
|
||||
# endif // ndef MOZ_BASE_PROFILER else
|
||||
#endif // defined(GP_PLAT_amd64_windows)
|
||||
#endif // defined(GP_PLAT_amd64_windows)
|
||||
|
|
|
@ -59,7 +59,6 @@
|
|||
#include "mozilla/Tuple.h"
|
||||
#include "mozilla/UniquePtr.h"
|
||||
#include "mozilla/Vector.h"
|
||||
#include "BaseProfiler.h"
|
||||
#include "nsDirectoryServiceDefs.h"
|
||||
#include "nsDirectoryServiceUtils.h"
|
||||
#include "nsIHttpProtocolHandler.h"
|
||||
|
@ -855,30 +854,12 @@ class ActivePS {
|
|||
static void ClearExpiredExitProfiles(PSLockRef) {
|
||||
uint64_t bufferRangeStart = sInstance->mBuffer->mRangeStart;
|
||||
// Discard exit profiles that were gathered before our buffer RangeStart.
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
if (bufferRangeStart != 0 && sInstance->mBaseProfileThreads) {
|
||||
sInstance->mBaseProfileThreads.reset();
|
||||
}
|
||||
#endif
|
||||
sInstance->mExitProfiles.eraseIf(
|
||||
[bufferRangeStart](const ExitProfile& aExitProfile) {
|
||||
return aExitProfile.mBufferPositionAtGatherTime < bufferRangeStart;
|
||||
});
|
||||
}
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
static void AddBaseProfileThreads(PSLockRef aLock,
|
||||
UniquePtr<char[]> aBaseProfileThreads) {
|
||||
sInstance->mBaseProfileThreads = std::move(aBaseProfileThreads);
|
||||
}
|
||||
|
||||
static UniquePtr<char[]> MoveBaseProfileThreads(PSLockRef aLock) {
|
||||
ClearExpiredExitProfiles(aLock);
|
||||
|
||||
return std::move(sInstance->mBaseProfileThreads);
|
||||
}
|
||||
#endif
|
||||
|
||||
static void AddExitProfile(PSLockRef aLock, const nsCString& aExitProfile) {
|
||||
ClearExpiredExitProfiles(aLock);
|
||||
|
||||
|
@ -973,11 +954,6 @@ class ActivePS {
|
|||
bool mWasPaused;
|
||||
#endif
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
// Optional startup profile thread array from BaseProfiler.
|
||||
UniquePtr<char[]> mBaseProfileThreads;
|
||||
#endif
|
||||
|
||||
struct ExitProfile {
|
||||
nsCString mJSON;
|
||||
uint64_t mBufferPositionAtGatherTime;
|
||||
|
@ -2141,14 +2117,6 @@ static void locked_profiler_stream_json_for_this_process(
|
|||
java::GeckoJavaSampler::Unpause();
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
UniquePtr<char[]> baseProfileThreads =
|
||||
ActivePS::MoveBaseProfileThreads(aLock);
|
||||
if (baseProfileThreads) {
|
||||
aWriter.Splice(baseProfileThreads.get());
|
||||
}
|
||||
#endif
|
||||
}
|
||||
aWriter.EndArray();
|
||||
|
||||
|
@ -2239,9 +2207,6 @@ static void PrintUsageThenExit(int aExitCode) {
|
|||
"\n"
|
||||
" MOZ_PROFILER_HELP\n"
|
||||
" If set to any value, prints this message.\n"
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
" Use MOZ_BASE_PROFILER_HELP for BaseProfiler help.\n"
|
||||
#endif
|
||||
"\n"
|
||||
" MOZ_LOG\n"
|
||||
" Enables logging. The levels of logging available are\n"
|
||||
|
@ -3195,24 +3160,6 @@ void GetProfilerEnvVarsForChildProcess(
|
|||
}
|
||||
}
|
||||
aSetEnv("MOZ_PROFILER_STARTUP_FILTERS", filtersString.c_str());
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
// Blindly copy MOZ_BASE_PROFILER_STARTUP* env-vars.
|
||||
auto copyEnv = [&](const char* aName) {
|
||||
const char* env = getenv(aName);
|
||||
if (!env) {
|
||||
return;
|
||||
}
|
||||
aSetEnv(aName, env);
|
||||
};
|
||||
copyEnv("MOZ_BASE_PROFILER_STARTUP");
|
||||
copyEnv("MOZ_BASE_PROFILER_STARTUP_ENTRIES");
|
||||
copyEnv("MOZ_BASE_PROFILER_STARTUP_DURATION");
|
||||
copyEnv("MOZ_BASE_PROFILER_STARTUP_INTERVAL");
|
||||
copyEnv("MOZ_BASE_PROFILER_STARTUP_FEATURES_BITFIELD");
|
||||
copyEnv("MOZ_BASE_PROFILER_STARTUP_FEATURES");
|
||||
copyEnv("MOZ_BASE_PROFILER_STARTUP_FILTERS");
|
||||
#endif
|
||||
}
|
||||
|
||||
} // namespace mozilla
|
||||
|
@ -3378,36 +3325,6 @@ static void locked_profiler_start(PSLockRef aLock, uint32_t aCapacity,
|
|||
ActivePS::Create(aLock, capacity, interval, aFeatures, aFilters, aFilterCount,
|
||||
duration);
|
||||
|
||||
// ActivePS::Create can only succeed or crash.
|
||||
MOZ_ASSERT(ActivePS::Exists(aLock));
|
||||
|
||||
#ifdef MOZ_BASE_PROFILER
|
||||
if (baseprofiler::profiler_is_active()) {
|
||||
// Note that we still hold the lock, so the sampler cannot run yet and
|
||||
// interact negatively with the still-active BaseProfiler sampler.
|
||||
// Assume that BaseProfiler is active because of MOZ_BASE_PROFILER_STARTUP.
|
||||
// Capture the BaseProfiler startup profile threads (if any).
|
||||
UniquePtr<char[]> baseprofile = baseprofiler::profiler_get_profile(
|
||||
/* aSinceTime */ 0, /* aIsShuttingDown */ false,
|
||||
/* aOnlyThreads */ true);
|
||||
|
||||
// Now stop BaseProfiler, as further recording will be ignored anyway, and
|
||||
// so that it won't clash with Gecko Profiler sampling starting after the
|
||||
// lock is dropped.
|
||||
// TODO: Allow non-sampling profiling to continue.
|
||||
// TODO: Re-start BaseProfiler after Gecko Profiler shutdown, to capture
|
||||
// post-XPCOM shutdown.
|
||||
baseprofiler::profiler_stop();
|
||||
|
||||
if (baseprofile && baseprofile.get()[0] != '\0') {
|
||||
// The BaseProfiler startup profile will be stored as a separate process
|
||||
// in the Gecko Profiler profile, and shown as a new track under the
|
||||
// corresponding Gecko Profiler thread.
|
||||
ActivePS::AddBaseProfileThreads(aLock, std::move(baseprofile));
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
// Set up profiling for each registered thread, if appropriate.
|
||||
int tid = profiler_current_thread_id();
|
||||
const Vector<UniquePtr<RegisteredThread>>& registeredThreads =
|
||||
|
|
Загрузка…
Ссылка в новой задаче