Mirror of https://github.com/mozilla/gecko-dev.git

Bug 784739 - Switch from NULL to nullptr in tools/profiler/; r=ehsan

Parent: c01f63b478
Commit: 1e45690b3d
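
Note: for background on the change itself -- NULL in C++ is just an integral
zero, so it can silently select an integer overload, while nullptr has its own
type (std::nullptr_t) and only converts to pointer types. A minimal standalone
sketch (not part of the patch) illustrating the difference:

    #include <cstdio>

    static void report(int n)   { std::printf("int overload (%d)\n", n); }
    static void report(char* p) { std::printf("pointer overload (%p)\n", (void*)p); }

    int main() {
        report(0);        // NULL is typically plain 0 here: the int overload wins
        report(nullptr);  // nullptr only converts to pointers: char* overload wins
        return 0;
    }
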
@@ -103,18 +103,18 @@ void genProfileEntry(/*MODIFIED*/UnwinderThreadBuffer* utb,
   }
   if (entry.js()) {
     if (!entry.pc()) {
-      // The JIT only allows the top-most entry to have a NULL pc
+      // The JIT only allows the top-most entry to have a nullptr pc
       MOZ_ASSERT(&entry == &stack->mStack[stack->stackSize() - 1]);
       // If stack-walking was disabled, then that's just unfortunate
       if (lastpc) {
         jsbytecode *jspc = js::ProfilingGetPC(stack->mRuntime, entry.script(),
                                               lastpc);
         if (jspc) {
-          lineno = JS_PCToLineNumber(NULL, entry.script(), jspc);
+          lineno = JS_PCToLineNumber(nullptr, entry.script(), jspc);
         }
       }
     } else {
-      lineno = JS_PCToLineNumber(NULL, entry.script(), entry.pc());
+      lineno = JS_PCToLineNumber(nullptr, entry.script(), entry.pc());
     }
   } else {
     lineno = entry.line();
@@ -294,13 +294,13 @@ void populateBuffer(UnwinderThreadBuffer* utb, TickSample* sample,
 # elif defined(SPS_OS_windows)
     /* Totally fake this up so it at least builds. No idea if we can
        even ever get here on Windows. */
-    void* ucV = NULL;
+    void* ucV = nullptr;
 # else
 # error "Unsupported platform"
 # endif
     releaseFunction(&sampledThreadProfile, utb, ucV);
   } else {
-    releaseFunction(&sampledThreadProfile, utb, NULL);
+    releaseFunction(&sampledThreadProfile, utb, nullptr);
   }
 }
 
@@ -480,13 +480,13 @@ bool operator<(const EHEntryHandle &lhs, const EHEntryHandle &rhs) {
 const EHEntry *EHTable::lookup(uint32_t aPC) const {
   MOZ_ASSERT(aPC >= mStartPC);
   if (aPC >= mEndPC)
-    return NULL;
+    return nullptr;
 
   std::vector<EHEntryHandle>::const_iterator begin = mEntries.begin();
   std::vector<EHEntryHandle>::const_iterator end = mEntries.end();
   MOZ_ASSERT(begin < end);
   if (aPC < reinterpret_cast<uint32_t>(begin->value()->startPC.compute()))
-    return NULL;
+    return nullptr;
 
   while (end - begin > 1) {
     std::vector<EHEntryHandle>::const_iterator mid = begin + (end - begin) / 2;
@@ -23,10 +23,11 @@ class ProfilerMarkerPayload;
 
 // Returns a handle to pass on exit. This can check that we are popping the
 // correct callstack.
-inline void* mozilla_sampler_call_enter(const char *aInfo, void *aFrameAddress = NULL,
+inline void* mozilla_sampler_call_enter(const char *aInfo, void *aFrameAddress = nullptr,
                                         bool aCopy = false, uint32_t line = 0);
 inline void mozilla_sampler_call_exit(void* handle);
-inline void mozilla_sampler_add_marker(const char *aInfo, ProfilerMarkerPayload *aPayload = nullptr);
+inline void mozilla_sampler_add_marker(const char *aInfo,
+                                       ProfilerMarkerPayload *aPayload = nullptr);
 
 void mozilla_sampler_start(int aEntries, double aInterval,
                            const char** aFeatures, uint32_t aFeatureCount,
@@ -312,7 +312,7 @@ public:
       mHandle = mozilla_sampler_call_enter(mDest, this, true, line);
       va_end(args);
     } else {
-      mHandle = mozilla_sampler_call_enter(aDefault, NULL, false, line);
+      mHandle = mozilla_sampler_call_enter(aDefault, nullptr, false, line);
     }
   }
   ~SamplerStackFramePrintfRAII() {
@@ -328,7 +328,7 @@ private:
 inline PseudoStack* mozilla_get_pseudo_stack(void)
 {
   if (!stack_key_initialized)
-    return NULL;
+    return nullptr;
   return tlsPseudoStack.get();
 }
 
@@ -338,7 +338,7 @@ inline void* mozilla_sampler_call_enter(const char *aInfo, void *aFrameAddress,
   // check if we've been initialized to avoid calling pthread_getspecific
   // with a null tlsStack which will return undefined results.
   if (!stack_key_initialized)
-    return NULL;
+    return nullptr;
 
   PseudoStack *stack = tlsPseudoStack.get();
   // we can't infer whether 'stack' has been initialized
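
Note: the comment in the hunk above warns about calling pthread_getspecific
before the TLS key exists. A minimal sketch of that underlying pattern with
raw POSIX keys (tls_init/tls_get are invented names; the tree's
tlsPseudoStack wrapper is not shown in this diff):

    #include <pthread.h>

    static pthread_key_t sKey;
    static bool sKeyInitialized = false;   // plays the role of stack_key_initialized

    void tls_init() {
        // Create the key once, before any thread dereferences it.
        if (pthread_key_create(&sKey, nullptr) == 0)
            sKeyInitialized = true;
    }

    void* tls_get() {
        // Reading a key that was never created gives undefined results,
        // hence the guard, mirroring the check in the hunk above.
        if (!sKeyInitialized)
            return nullptr;
        return pthread_getspecific(sKey);
    }
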
@@ -25,11 +25,11 @@ struct ObserverLists {
   }
 
   // mObserverListsLock guards access to lists of observers
-  // Note, we can use mozilla::Mutex here as the ObserverLists may be leaked, as
-  // we want to monitor IO during shutdown. Furthermore, as we may have to
+  // Note, we can use mozilla::Mutex here as the ObserverLists may be leaked,
+  // as we want to monitor IO during shutdown. Furthermore, as we may have to
   // unregister observers during shutdown an OffTheBooksMutex is not an option
-  // either, as it base calls into sDeadlockDetector which may be NULL during
-  // shutdown.
+  // either, as it base calls into sDeadlockDetector which may be nullptr
+  // during shutdown.
   PRLock* mObserverListsLock;
 
   ~ObserverLists()
@@ -194,7 +194,7 @@ IOInterposeObserver::Operation IOInterposer::sObservedOperations =
 {
   // IOInterposer::Init most be called before this method
   MOZ_ASSERT(sObserverLists);
-  // We should never register NULL as observer
+  // We should never register nullptr as observer
   MOZ_ASSERT(aObserver);
   if (!sObserverLists || !aObserver) {
     return;
@@ -23,7 +23,7 @@
 // BEGIN ProfileEntry
 
 ProfileEntry::ProfileEntry()
-  : mTagData(NULL)
+  : mTagData(nullptr)
   , mTagName(0)
 { }
 
@@ -63,7 +63,7 @@ SaveProfileTask::Run() {
     JS_PropertyStub, JS_DeletePropertyStub, JS_PropertyStub, JS_StrictPropertyStub,
     JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub
   };
-  JSObject *obj = JS_NewGlobalObject(cx, &c, NULL, JS::FireOnNewGlobalHook);
+  JSObject *obj = JS_NewGlobalObject(cx, &c, nullptr, JS::FireOnNewGlobalHook);
 
   std::ofstream stream;
   stream.open(tmpPath.get());
@@ -342,18 +342,18 @@ void addProfileEntry(volatile StackEntry &entry, ThreadProfile &aProfile,
   addDynamicTag(aProfile, 'c', sampleLabel);
   if (entry.js()) {
     if (!entry.pc()) {
-      // The JIT only allows the top-most entry to have a NULL pc
+      // The JIT only allows the top-most entry to have a nullptr pc
       MOZ_ASSERT(&entry == &stack->mStack[stack->stackSize() - 1]);
       // If stack-walking was disabled, then that's just unfortunate
       if (lastpc) {
         jsbytecode *jspc = js::ProfilingGetPC(stack->mRuntime, entry.script(),
                                               lastpc);
         if (jspc) {
-          lineno = JS_PCToLineNumber(NULL, entry.script(), jspc);
+          lineno = JS_PCToLineNumber(nullptr, entry.script(), jspc);
         }
       }
     } else {
-      lineno = JS_PCToLineNumber(NULL, entry.script(), entry.pc());
+      lineno = JS_PCToLineNumber(nullptr, entry.script(), entry.pc());
     }
   } else {
     lineno = entry.line();
@@ -385,13 +385,13 @@ static void mergeNativeBacktrace(ThreadProfile &aProfile, const PCArray &array)
  * and the pseudostack we managed during execution. We want to consolidate
  * the two in order. We do so by merging using the approximate stack address
  * when each entry was push. When pushing JS entry we may not now the stack
- * address in which case we have a NULL stack address in which case we assume
+ * address in which case we have a nullptr stack address in which case we assume
  * that it follows immediatly the previous element.
 *
 * C Stack   | Address    --   Pseudo Stack   | Address
 * main()    | 0x100           run_js()       | 0x40
-* start()   | 0x80            jsCanvas()     | NULL
-* timer()   | 0x50            drawLine()     | NULL
+* start()   | 0x80            jsCanvas()     | nullptr
+* timer()   | 0x50            drawLine()     | nullptr
 * azure()   | 0x10
 *
 * Merged: main(), start(), timer(), run_js(), jsCanvas(), drawLine(), azure()
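
Note: the comment block above describes the merge of the native (C) stack and
the pseudo stack by stack address. A simplified, hypothetical illustration of
that merge (Frame, mergeStacks, and the sp == 0 convention are invented for
the sketch; addresses run high-to-low, oldest frame first):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct Frame { const char* name; uintptr_t sp; };  // sp == 0: no recorded address

    void mergeStacks(const Frame* c, size_t nC, const Frame* p, size_t nP) {
        size_t i = 0, j = 0;
        bool lastWasPseudo = false;
        while (i < nC || j < nP) {
            bool takePseudo;
            if (j >= nP)           takePseudo = false;
            else if (i >= nC)      takePseudo = true;
            else if (p[j].sp == 0) takePseudo = lastWasPseudo; // glue to previous pseudo frame
            else                   takePseudo = p[j].sp >= c[i].sp;
            const Frame& f = takePseudo ? p[j++] : c[i++];
            lastWasPseudo = takePseudo;
            std::printf("%s\n", f.name);
        }
    }

Fed the two stacks from the comment's table, this prints main(), start(),
timer(), run_js(), jsCanvas(), drawLine(), azure(), matching the "Merged:"
line above.
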
@@ -673,7 +673,7 @@ static void print_callback(const ProfileEntry& entry, const char* tagStringData)
 void mozilla_sampler_print_location1()
 {
   if (!stack_key_initialized)
-    profiler_init(NULL);
+    profiler_init(nullptr);
 
   SyncProfile* syncProfile = NewSyncProfile();
   if (!syncProfile) {
@@ -681,7 +681,7 @@ void mozilla_sampler_print_location1()
   }
 
   syncProfile->BeginUnwind();
-  doSampleStackTrace(syncProfile->GetPseudoStack(), *syncProfile, NULL);
+  doSampleStackTrace(syncProfile->GetPseudoStack(), *syncProfile, nullptr);
   syncProfile->EndUnwind();
 
   printf_stderr("Backtrace:\n");
@@ -96,7 +96,7 @@ LinkedUWTBuffer* utb__acquire_sync_buffer(void* stackTop)
 // RUNS IN SIGHANDLER CONTEXT
 UnwinderThreadBuffer* uwt__acquire_empty_buffer()
 {
-  return NULL;
+  return nullptr;
 }
 
 void
@@ -170,8 +170,8 @@ static void release_sync_buffer(LinkedUWTBuffer* utb);
 // chunk and register fields if a native unwind is requested.
 // APROFILE is where the profile data should be added to. UTB
 // is the partially-filled-in buffer, containing ProfileEntries.
-// UCV is the ucontext_t* from the signal handler. If non-NULL, is
-// taken as a cue to request native unwind.
+// UCV is the ucontext_t* from the signal handler. If non-nullptr,
+// is taken as a cue to request native unwind.
 static void release_full_buffer(ThreadProfile* aProfile,
                                 UnwinderThreadBuffer* utb,
                                 void* /* ucontext_t*, really */ ucV );
@@ -187,9 +187,9 @@ void uwt__init()
 {
   // Create the unwinder thread.
   MOZ_ASSERT(unwind_thr_exit_now == 0);
-  int r = pthread_create( &unwind_thr, NULL,
+  int r = pthread_create( &unwind_thr, nullptr,
                           unwind_thr_fn, (void*)&unwind_thr_exit_now );
-  MOZ_ALWAYS_TRUE(r==0);
+  MOZ_ALWAYS_TRUE(r == 0);
 }
 
 void uwt__stop()
@@ -198,7 +198,8 @@ void uwt__stop()
   MOZ_ASSERT(unwind_thr_exit_now == 0);
   unwind_thr_exit_now = 1;
   do_MBAR();
-  int r = pthread_join(unwind_thr, NULL); MOZ_ALWAYS_TRUE(r==0);
+  int r = pthread_join(unwind_thr, nullptr);
+  MOZ_ALWAYS_TRUE(r == 0);
 }
 
 void uwt__deinit()
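
Note: for reference, the bare create/join lifecycle that uwt__init and
uwt__stop wrap, as a standalone sketch (worker is an invented stand-in for
unwind_thr_fn):

    #include <pthread.h>
    #include <cstdio>

    static void* worker(void*) {
        std::puts("unwinder-style worker running");
        return nullptr;   // retrievable through pthread_join's second argument
    }

    int main() {
        pthread_t thr;
        // nullptr attributes select the defaults, exactly as in uwt__init.
        int r = pthread_create(&thr, nullptr, worker, nullptr);
        if (r != 0) return 1;
        r = pthread_join(thr, nullptr);  // nullptr: discard the thread's result
        return r == 0 ? 0 : 1;
    }
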
@@ -401,7 +402,7 @@ typedef
 
 /* Globals -- the buffer array */
 #define N_UNW_THR_BUFFERS 10
-/*SL*/ static UnwinderThreadBuffer** g_buffers = NULL;
+/*SL*/ static UnwinderThreadBuffer** g_buffers = nullptr;
 /*SL*/ static uint64_t g_seqNo = 0;
 /*SL*/ static SpinLock g_spinLock = { 0 };
 
@@ -412,7 +413,7 @@ typedef
    allocate or expand the array, as that would risk deadlock against a
    sampling thread that holds the malloc lock and is trying to acquire
    the spinlock. */
-/*SL*/ static StackLimit* g_stackLimits = NULL;
+/*SL*/ static StackLimit* g_stackLimits = nullptr;
 /*SL*/ static size_t g_stackLimitsUsed = 0;
 /*SL*/ static size_t g_stackLimitsSize = 0;
 
@@ -527,7 +528,7 @@ static void sleep_ms(unsigned int ms)
   struct timespec req;
   req.tv_sec = ((time_t)ms) / 1000;
   req.tv_nsec = 1000 * 1000 * (((unsigned long)ms) % 1000);
-  nanosleep(&req, NULL);
+  nanosleep(&req, nullptr);
 }
 
 /* Use CAS to implement standalone atomic increment. */
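
Note: the "Use CAS" comment closing the hunk above refers to building an
atomic increment from compare-and-swap. A minimal sketch of that construction
with C++11 atomics (the tree's actual atomic_INC primitive may differ):

    #include <atomic>

    // Retry until no other thread changed the value between load and swap.
    void atomic_inc_via_cas(std::atomic<unsigned long>& v) {
        unsigned long old = v.load();
        while (!v.compare_exchange_weak(old, old + 1)) {
            // on failure, compare_exchange_weak refreshes 'old' with the
            // current value, so the loop simply retries
        }
    }
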
@@ -553,7 +554,7 @@ static void thread_register_for_profiling(void* stackTop)
   int n_used;
 
   // Ignore spurious calls which aren't really registering anything.
-  if (stackTop == NULL) {
+  if (stackTop == nullptr) {
     n_used = g_stackLimitsUsed;
     spinLock_release(&g_spinLock);
     LOGF("BPUnw: [%d total] thread_register_for_profiling"
@@ -720,7 +721,7 @@ static void show_registered_threads()
 static void init_empty_buffer(UnwinderThreadBuffer* buff, void* stackTop)
 {
   /* Now we own the buffer, initialise it. */
-  buff->aProfile = NULL;
+  buff->aProfile = nullptr;
   buff->entsUsed = 0;
   buff->haveNativeInfo = false;
   buff->stackImgUsed = 0;
@@ -756,7 +757,7 @@ static LinkedUWTBuffer* acquire_sync_buffer(void* stackTop)
 static UnwinderThreadBuffer* acquire_empty_buffer()
 {
   /* acq lock
-     if buffers == NULL { rel lock; exit }
+     if buffers == nullptr { rel lock; exit }
      scan to find a free buff; if none { rel lock; exit }
      set buff state to S_FILLING
      fillseqno++; and remember it
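
Note: the acq/rel pseudocode above leans on a simple spinlock, and the
comment at -412 explains why nothing may allocate while holding it. A minimal
sketch of such a lock (SpinLockSketch and the function names are invented;
the tree's SpinLock/spinLock_acquire are not shown in this diff):

    #include <atomic>

    struct SpinLockSketch { std::atomic_flag locked = ATOMIC_FLAG_INIT; };

    void spin_acquire(SpinLockSketch& l) {
        // Busy-wait rather than block: callers include signal-handler
        // paths where taking the malloc lock could deadlock.
        while (l.locked.test_and_set(std::memory_order_acquire)) { }
    }

    void spin_release(SpinLockSketch& l) {
        l.locked.clear(std::memory_order_release);
    }
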
@@ -789,11 +790,11 @@ static UnwinderThreadBuffer* acquire_empty_buffer()
   }
 
   /* If the thread isn't registered for profiling, just ignore the call
-     and return NULL. */
+     and return nullptr. */
   if (i == g_stackLimitsUsed) {
     spinLock_release(&g_spinLock);
     atomic_INC( &g_stats_thrUnregd );
-    return NULL;
+    return nullptr;
   }
 
   /* "this thread is registered for profiling" */
@@ -804,12 +805,12 @@ static UnwinderThreadBuffer* acquire_empty_buffer()
   g_stackLimits[i].nSamples++;
 
   /* Try to find a free buffer to use. */
-  if (g_buffers == NULL) {
+  if (g_buffers == nullptr) {
     /* The unwinder thread hasn't allocated any buffers yet.
        Nothing we can do. */
     spinLock_release(&g_spinLock);
     atomic_INC( &g_stats_noBuffAvail );
-    return NULL;
+    return nullptr;
   }
 
   for (i = 0; i < N_UNW_THR_BUFFERS; i++) {
@@ -824,7 +825,7 @@ static UnwinderThreadBuffer* acquire_empty_buffer()
     atomic_INC( &g_stats_noBuffAvail );
     if (LOGLEVEL >= 3)
       LOG("BPUnw: handler: no free buffers");
-    return NULL;
+    return nullptr;
   }
 
   /* So we can use this one safely. Whilst still holding the lock,
@@ -863,7 +864,7 @@ static void fill_buffer(ThreadProfile* aProfile,
   buff->aProfile = aProfile;
 
   /* And, if we have register state, that and the stack top */
-  buff->haveNativeInfo = ucV != NULL;
+  buff->haveNativeInfo = ucV != nullptr;
   if (buff->haveNativeInfo) {
 # if defined(SPS_PLAT_amd64_linux)
     ucontext_t* uc = (ucontext_t*)ucV;
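
Note: for a sense of what the SPS_PLAT_amd64_linux branch reads out of ucV, a
sketch of extracting just the program counter from a ucontext_t on x86-64
Linux/glibc (pc_from_ucontext is invented; the real branch captures the full
register set):

    #define _GNU_SOURCE 1      // exposes REG_RIP via <ucontext.h> on glibc
    #include <ucontext.h>
    #include <stdint.h>

    uintptr_t pc_from_ucontext(void* ucV) {
        ucontext_t* uc = (ucontext_t*)ucV;
        // The interrupted instruction pointer, as saved by the kernel
        // when the profiling signal was delivered.
        return (uintptr_t)uc->uc_mcontext.gregs[REG_RIP];
    }
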
@@ -965,11 +966,11 @@ static void release_full_buffer(ThreadProfile* aProfile,
 static ProfEntsPage* mmap_anon_ProfEntsPage()
 {
 # if defined(SPS_OS_darwin)
-  void* v = ::mmap(NULL, sizeof(ProfEntsPage), PROT_READ|PROT_WRITE,
-                   MAP_PRIVATE|MAP_ANON, -1, 0);
+  void* v = ::mmap(nullptr, sizeof(ProfEntsPage), PROT_READ | PROT_WRITE,
+                   MAP_PRIVATE | MAP_ANON, -1, 0);
 # else
-  void* v = ::mmap(NULL, sizeof(ProfEntsPage), PROT_READ|PROT_WRITE,
-                   MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+  void* v = ::mmap(nullptr, sizeof(ProfEntsPage), PROT_READ | PROT_WRITE,
+                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 # endif
   if (v == MAP_FAILED) {
     return ProfEntsPage_INVALID;
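
Note: a standalone sketch of the anonymous-mapping idiom used above; the
MAP_ANON vs MAP_ANONYMOUS spelling is the only Darwin/Linux difference the
hunk handles (this sketch uses the Linux spelling):

    #include <sys/mman.h>
    #include <cstdio>

    int main() {
        const long len = 4096;
        // nullptr start address lets the kernel choose the placement.
        void* v = ::mmap(nullptr, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (v == MAP_FAILED) {   // mmap reports failure as MAP_FAILED, not nullptr
            std::perror("mmap");
            return 1;
        }
        munmap(v, len);
        return 0;
    }
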
@@ -1139,7 +1140,7 @@ static void process_buffer(UnwinderThreadBuffer* buff, int oldest_ix)
     // action a native-unwind-now hint
     if (ent.is_ent_hint('N')) {
       MOZ_ASSERT(buff->haveNativeInfo);
-      PCandSP* pairs = NULL;
+      PCandSP* pairs = nullptr;
       unsigned int nPairs = 0;
       do_breakpad_unwind_Buffer(&pairs, &nPairs, buff, oldest_ix);
       buff->aProfile->addTag( ProfileEntry('s', "(root)") );
@@ -1209,7 +1210,7 @@ static void process_buffer(UnwinderThreadBuffer* buff, int oldest_ix)
     MOZ_ASSERT(buff->haveNativeInfo);
 
     // Get native unwind info
-    PCandSP* pairs = NULL;
+    PCandSP* pairs = nullptr;
     unsigned int n_pairs = 0;
     do_breakpad_unwind_Buffer(&pairs, &n_pairs, buff, oldest_ix);
 
@@ -1362,7 +1363,7 @@ static void process_buffer(UnwinderThreadBuffer* buff, int oldest_ix)
     else if (ent.is_ent_hint('N')) {
       /* This is a do-a-native-unwind-right-now hint */
       MOZ_ASSERT(buff->haveNativeInfo);
-      PCandSP* pairs = NULL;
+      PCandSP* pairs = nullptr;
       unsigned int nPairs = 0;
       do_breakpad_unwind_Buffer(&pairs, &nPairs, buff, oldest_ix);
       buff->aProfile->addTag( ProfileEntry('s', "(root)") );
@@ -1388,7 +1389,7 @@ static void* unwind_thr_fn(void* exit_nowV)
   /* If we're the first thread in, we'll need to allocate the buffer
      array g_buffers plus the Buffer structs that it points at. */
   spinLock_acquire(&g_spinLock);
-  if (g_buffers == NULL) {
+  if (g_buffers == nullptr) {
     /* Drop the lock, make a complete copy in memory, reacquire the
        lock, and try to install it -- which might fail, if someone
        else beat us to it. */
@@ -1409,7 +1410,7 @@ static void* unwind_thr_fn(void* exit_nowV)
     }
     /* Try to install it */
     spinLock_acquire(&g_spinLock);
-    if (g_buffers == NULL) {
+    if (g_buffers == nullptr) {
       g_buffers = buffers;
       spinLock_release(&g_spinLock);
     } else {
@@ -1526,7 +1527,7 @@ static void* unwind_thr_fn(void* exit_nowV)
       ms_to_sleep_if_empty = 1;
       show_sleep_message = true;
     }
-  return NULL;
+  return nullptr;
 }
 
 static void finish_sync_buffer(ThreadProfile* profile,
@@ -1582,7 +1583,7 @@ static void release_sync_buffer(LinkedUWTBuffer* buff)
 #include "google_breakpad/processor/memory_region.h"
 #include "google_breakpad/processor/code_modules.h"
 
-google_breakpad::MemoryRegion* foo = NULL;
+google_breakpad::MemoryRegion* foo = nullptr;
 
 using std::string;
 
@@ -1692,7 +1693,7 @@ public:
   // ownership of. The new CodeModule may be of a different concrete class
   // than the CodeModule being copied, but will behave identically to the
   // copied CodeModule as far as the CodeModule interface is concerned.
-  const CodeModule* Copy() const { MOZ_CRASH(); return NULL; }
+  const CodeModule* Copy() const { MOZ_CRASH(); return nullptr; }
 
   friend void read_procmaps(std::vector<MyCodeModule*>& mods_);
 
@@ -1827,7 +1828,7 @@ class MyCodeModules : public google_breakpad::CodeModules
     // comparisons against {min_,max_}addr_ are only valid in the case
     // where nMods > 0, hence the ordering of tests.
     if (nMods == 0 || address < min_addr_ || address > max_addr_) {
-      return NULL;
+      return nullptr;
     }
 
     // Binary search in |mods_|. lo and hi need to be signed, else
@@ -1838,7 +1839,7 @@ class MyCodeModules : public google_breakpad::CodeModules
       // current unsearched space is from lo to hi, inclusive.
       if (lo > hi) {
         // not found
-        return NULL;
+        return nullptr;
       }
       long int mid = (lo + hi) / 2;
       MyCodeModule* mid_mod = mods_[mid];
@@ -1852,20 +1853,20 @@ class MyCodeModules : public google_breakpad::CodeModules
   }
 
   const google_breakpad::CodeModule* GetMainModule() const {
-    MOZ_CRASH(); return NULL; return NULL;
+    MOZ_CRASH(); return nullptr; return nullptr;
   }
 
   const google_breakpad::CodeModule* GetModuleAtSequence(
                                        unsigned int sequence) const {
-    MOZ_CRASH(); return NULL;
+    MOZ_CRASH(); return nullptr;
   }
 
   const google_breakpad::CodeModule* GetModuleAtIndex(unsigned int index) const {
-    MOZ_CRASH(); return NULL;
+    MOZ_CRASH(); return nullptr;
   }
 
   const CodeModules* Copy() const {
-    MOZ_CRASH(); return NULL;
+    MOZ_CRASH(); return nullptr;
   }
 };
 
@@ -1888,8 +1889,8 @@ class MyCodeModules : public google_breakpad::CodeModules
    reason. Users of this function need to be aware of that.
 */
 
-MyCodeModules* sModules = NULL;
-google_breakpad::LocalDebugInfoSymbolizer* sSymbolizer = NULL;
+MyCodeModules* sModules = nullptr;
+google_breakpad::LocalDebugInfoSymbolizer* sSymbolizer = nullptr;
 
 // Free up the above two singletons when the unwinder thread is shut
 // down.
@@ -1898,17 +1899,17 @@ void do_breakpad_unwind_Buffer_free_singletons()
 {
   if (sSymbolizer) {
     delete sSymbolizer;
-    sSymbolizer = NULL;
+    sSymbolizer = nullptr;
   }
   if (sModules) {
     delete sModules;
-    sModules = NULL;
+    sModules = nullptr;
   }
 
   g_stackLimitsUsed = 0;
   g_seqNo = 0;
   free(g_buffers);
-  g_buffers = NULL;
+  g_buffers = nullptr;
 }
 
 static void stats_notify_frame(google_breakpad::StackFrame::FrameTrust tr)
@@ -2030,18 +2031,18 @@ void do_breakpad_unwind_Buffer(/*OUT*/PCandSP** pairs,
 
 # if defined(SPS_ARCH_amd64)
   google_breakpad::StackwalkerAMD64* sw
-    = new google_breakpad::StackwalkerAMD64(NULL, context,
+    = new google_breakpad::StackwalkerAMD64(nullptr, context,
                                             memory, sModules,
                                             sSymbolizer);
 # elif defined(SPS_ARCH_arm)
   google_breakpad::StackwalkerARM* sw
-    = new google_breakpad::StackwalkerARM(NULL, context,
+    = new google_breakpad::StackwalkerARM(nullptr, context,
                                           -1/*FP reg*/,
                                           memory, sModules,
                                           sSymbolizer);
 # elif defined(SPS_ARCH_x86)
   google_breakpad::StackwalkerX86* sw
-    = new google_breakpad::StackwalkerX86(NULL, context,
+    = new google_breakpad::StackwalkerX86(nullptr, context,
                                           memory, sModules,
                                           sSymbolizer);
 # else
@@ -2072,7 +2073,7 @@ void do_breakpad_unwind_Buffer(/*OUT*/PCandSP** pairs,
 
   *pairs = (PCandSP*)calloc(n_frames, sizeof(PCandSP));
   *nPairs = n_frames;
-  if (*pairs == NULL) {
+  if (*pairs == nullptr) {
     *nPairs = 0;
     return;
   }
@@ -40,7 +40,7 @@ void uwt__deinit();
 // Registers a sampler thread for profiling. Threads must be
 // registered before calls to call utb__acquire_empty_buffer or
 // utb__release_full_buffer have any effect. If stackTop is
-// NULL, the call is ignored.
+// nullptr, the call is ignored.
 void uwt__register_thread_for_profiling(void* stackTop);
 
 // Deregister a sampler thread for profiling.
@@ -48,7 +48,7 @@ void uwt__unregister_thread_for_profiling();
 
 // RUNS IN SIGHANDLER CONTEXT
 // Called in the sampled thread (signal) context. Get an empty buffer
-// into which ProfileEntries can be put. It may return NULL if no
+// into which ProfileEntries can be put. It may return nullptr if no
 // empty buffers can be found, which will be the case if the unwinder
 // thread(s) have fallen behind for some reason. In this case the
 // sampled thread must simply give up and return from the signal
@@ -56,7 +56,7 @@ void uwt__unregister_thread_for_profiling();
 //
 // If the calling thread has not previously registered itself for
 // profiling via uwt__register_thread_for_profiling, this routine
-// returns NULL.
+// returns nullptr.
 UnwinderThreadBuffer* uwt__acquire_empty_buffer();
 
 // RUNS IN SIGHANDLER CONTEXT
@@ -64,7 +64,7 @@ UnwinderThreadBuffer* uwt__acquire_empty_buffer();
 // that the sampled thread has acquired, handing the contents to
 // the unwinder thread, and, if necessary, passing sufficient
 // information (stack top chunk, + registers) to also do a native
-// unwind. If 'ucV' is NULL, no native unwind is done. If non-NULL,
+// unwind. If 'ucV' is nullptr, no native unwind is done. If non-nullptr,
 // it is assumed to point to a ucontext_t* that holds the initial
 // register state for the unwind. The results of all of this are
 // dumped into |aProfile| (by the unwinder thread, not the calling thread).
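
Note: putting the header comments above together, a hypothetical sampler-side
sequence (control flow and stub bodies invented for illustration; the real
declarations live in the header this hunk annotates):

    #include <cstdio>

    struct UnwinderThreadBuffer;  // opaque, as in the header
    struct ThreadProfile;

    // Stand-ins mirroring the documented contract.
    UnwinderThreadBuffer* acquire_empty_buffer_stub() { return nullptr; }
    void release_full_buffer_stub(ThreadProfile*, UnwinderThreadBuffer*, void* ucV) {
        std::printf("native unwind requested: %s\n", ucV ? "yes" : "no");
    }

    // Per the comments: acquire, fill, release -- all in signal context.
    void on_sample(ThreadProfile* profile, void* ucV) {
        UnwinderThreadBuffer* buff = acquire_empty_buffer_stub();
        if (!buff)
            return;  // no free buffer or unregistered thread: drop this sample
        // ... append ProfileEntries to buff here ...
        release_full_buffer_stub(profile, buff, ucV);  // nullptr ucV: skip native unwind
    }
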
@@ -13,13 +13,13 @@ NS_GENERIC_FACTORY_CONSTRUCTOR_INIT(nsProfiler, Init)
 NS_DEFINE_NAMED_CID(NS_PROFILER_CID);
 
 static const mozilla::Module::CIDEntry kProfilerCIDs[] = {
-  { &kNS_PROFILER_CID, false, NULL, nsProfilerConstructor },
-  { NULL }
+  { &kNS_PROFILER_CID, false, nullptr, nsProfilerConstructor },
+  { nullptr }
 };
 
 static const mozilla::Module::ContractIDEntry kProfilerContracts[] = {
   { "@mozilla.org/tools/profiler;1", &kNS_PROFILER_CID },
-  { NULL }
+  { nullptr }
 };
 
 static const mozilla::Module kProfilerModule = {
@@ -204,7 +204,7 @@ bool sps_version2()
 # error "Unknown platform"
 # endif
 
-  bool req2 = PR_GetEnv("MOZ_PROFILER_NEW") != NULL; // Has v2 been requested?
+  bool req2 = PR_GetEnv("MOZ_PROFILER_NEW") != nullptr; // Has v2 been requested?
 
   bool elfhackd = false;
 # if defined(USE_ELF_HACK)
@@ -238,7 +238,7 @@ bool moz_profiler_verbose()
   static int status = 0; // Raced on, potentially
 
   if (status == 0) {
-    if (PR_GetEnv("MOZ_PROFILER_VERBOSE") != NULL)
+    if (PR_GetEnv("MOZ_PROFILER_VERBOSE") != nullptr)
       status = 2;
     else
       status = 1;
@@ -283,7 +283,7 @@ bool set_profiler_mode(const char* mode) {
 bool set_profiler_interval(const char* interval) {
   if (interval) {
     errno = 0;
-    long int n = strtol(interval, (char**)NULL, 10);
+    long int n = strtol(interval, (char**)nullptr, 10);
     if (errno == 0 && n >= 1 && n <= 1000) {
       sUnwindInterval = n;
       return true;
@@ -297,7 +297,7 @@ bool set_profiler_interval(const char* interval) {
 bool set_profiler_entries(const char* entries) {
   if (entries) {
     errno = 0;
-    long int n = strtol(entries, (char**)NULL, 10);
+    long int n = strtol(entries, (char**)nullptr, 10);
     if (errno == 0 && n > 0) {
       sProfileEntries = n;
       return true;
@@ -311,7 +311,7 @@ bool set_profiler_entries(const char* entries) {
 bool set_profiler_scan(const char* scanCount) {
   if (scanCount) {
     errno = 0;
-    long int n = strtol(scanCount, (char**)NULL, 10);
+    long int n = strtol(scanCount, (char**)nullptr, 10);
     if (errno == 0 && n >= 0 && n <= 100) {
       sUnwindStackScan = n;
       return true;
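
Note: the three setters above share one strict-parsing pattern; as a
standalone sketch (parse_bounded is an invented helper):

    #include <cstdlib>
    #include <cerrno>
    #include <cstdio>

    // Clear errno first, pass nullptr for the end pointer (the tail is not
    // needed), and only accept the value if errno stayed clear and the
    // result is in range -- exactly the shape of the setters above.
    bool parse_bounded(const char* s, long lo, long hi, long* out) {
        if (!s) return false;
        errno = 0;
        long n = std::strtol(s, (char**)nullptr, 10);
        if (errno == 0 && n >= lo && n <= hi) {
            *out = n;
            return true;
        }
        return false;
    }

    int main() {
        long v;
        if (parse_bounded("42", 1, 1000, &v))
            std::printf("interval = %ld\n", v);
        return 0;
    }
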
@@ -497,7 +497,7 @@ void mozilla_sampler_init(void* stackTop)
   profiler_start(PROFILE_DEFAULT_ENTRY, PROFILE_DEFAULT_INTERVAL,
                  features, sizeof(features)/sizeof(const char*),
                  // TODO Add env variable to select threads
-                 NULL, 0);
+                 nullptr, 0);
   LOG("END mozilla_sampler_init");
 }
 
@@ -558,7 +558,7 @@ char* mozilla_sampler_get_profile()
 {
   TableTicker *t = tlsTicker.get();
   if (!t) {
-    return NULL;
+    return nullptr;
   }
 
   std::stringstream stream;
@@ -571,7 +571,7 @@ JSObject *mozilla_sampler_get_profile_data(JSContext *aCx)
 {
   TableTicker *t = tlsTicker.get();
   if (!t) {
-    return NULL;
+    return nullptr;
   }
 
   return t->ToJSObject(aCx);
@@ -610,7 +610,7 @@ const char** mozilla_sampler_get_features()
     // Add power collection
     "power",
 #endif
-    NULL
+    nullptr
   };
 
   return features;
@@ -623,7 +623,7 @@ void mozilla_sampler_start(int aProfileEntries, double aInterval,
 
 {
   if (!stack_key_initialized)
-    profiler_init(NULL);
+    profiler_init(nullptr);
 
   /* If the sampling interval was set using env vars, use that
      in preference to anything else. */
@@ -698,7 +698,7 @@ void mozilla_sampler_start(int aProfileEntries, double aInterval,
 void mozilla_sampler_stop()
 {
   if (!stack_key_initialized)
-    profiler_init(NULL);
+    profiler_init(nullptr);
 
   TableTicker *t = tlsTicker.get();
   if (!t) {
@@ -718,11 +718,11 @@ void mozilla_sampler_stop()
 
   t->Stop();
   delete t;
-  tlsTicker.set(NULL);
+  tlsTicker.set(nullptr);
 
   if (disableJS) {
     PseudoStack *stack = tlsPseudoStack.get();
-    ASSERT(stack != NULL);
+    ASSERT(stack != nullptr);
     stack->disableJSSampling();
   }
 