Backed out 5 changesets (bug 1475899) for build bustages on /workspace/build/src/xpcom/base/MemoryMapping.cpp. CLOSED TREE

Backed out changeset fbf0e4b12c8e (bug 1475899)
Backed out changeset b4394660fde2 (bug 1475899)
Backed out changeset e89ebe1f22f2 (bug 1475899)
Backed out changeset 391b97f0e5c0 (bug 1475899)
Backed out changeset 2916b5e05b6d (bug 1475899)
Brindusan Cristian 2018-07-19 02:02:40 +03:00
Parent 432fb44cee
Commit 9cde10a0a1
8 changed files with 39 additions and 878 deletions


@@ -1,105 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/MemoryInfo.h"
#include "mozilla/DebugOnly.h"
#include <WinBase.h>
namespace mozilla {
/* static */ MemoryInfo
MemoryInfo::Get(const void* aPtr, size_t aSize)
{
MemoryInfo result;
result.mStart = uintptr_t(aPtr);
const char* ptr = reinterpret_cast<const char*>(aPtr);
const char* end = ptr + aSize;
DebugOnly<void*> base = nullptr;
while (ptr < end) {
MEMORY_BASIC_INFORMATION basicInfo;
if (!VirtualQuery(ptr, &basicInfo, sizeof(basicInfo))) {
break;
}
MOZ_ASSERT_IF(base, base == basicInfo.AllocationBase);
base = basicInfo.AllocationBase;
size_t regionSize = std::min(size_t(basicInfo.RegionSize),
size_t(end - ptr));
if (basicInfo.State == MEM_COMMIT) {
result.mCommitted += regionSize;
} else if (basicInfo.State == MEM_RESERVE) {
result.mReserved += regionSize;
} else if (basicInfo.State == MEM_FREE) {
result.mFree += regionSize;
} else {
MOZ_ASSERT_UNREACHABLE("Unexpected region state");
}
result.mSize += regionSize;
ptr += regionSize;
if (result.mType.isEmpty()) {
if (basicInfo.Type & MEM_IMAGE) {
result.mType += PageType::Image;
}
if (basicInfo.Type & MEM_MAPPED) {
result.mType += PageType::Mapped;
}
if (basicInfo.Type & MEM_PRIVATE) {
result.mType += PageType::Private;
}
// The first 8 bits of AllocationProtect are an enum. The remaining bits
// are flags.
switch (basicInfo.AllocationProtect & 0xff) {
case PAGE_EXECUTE_WRITECOPY:
result.mPerms += Perm::CopyOnWrite;
MOZ_FALLTHROUGH;
case PAGE_EXECUTE_READWRITE:
result.mPerms += Perm::Write;
MOZ_FALLTHROUGH;
case PAGE_EXECUTE_READ:
result.mPerms += Perm::Read;
MOZ_FALLTHROUGH;
case PAGE_EXECUTE:
result.mPerms += Perm::Execute;
break;
case PAGE_WRITECOPY:
result.mPerms += Perm::CopyOnWrite;
MOZ_FALLTHROUGH;
case PAGE_READWRITE:
result.mPerms += Perm::Write;
MOZ_FALLTHROUGH;
case PAGE_READONLY:
result.mPerms += Perm::Read;
break;
default:
break;
}
if (basicInfo.AllocationProtect & PAGE_GUARD) {
result.mPerms += Perm::Guard;
}
if (basicInfo.AllocationProtect & PAGE_NOCACHE) {
result.mPerms += Perm::NoCache;
}
if (basicInfo.AllocationProtect & PAGE_WRITECOMBINE) {
result.mPerms += Perm::WriteCombine;
}
}
}
result.mEnd = uintptr_t(ptr);
return result;
}
} // namespace mozilla


@@ -1,83 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_MemoryInfo_h
#define mozilla_MemoryInfo_h
#include "mozilla/EnumSet.h"
#include "nsTArray.h"
/**
* MemoryInfo is a helper class which describes the attributes and sizes of a
* particular region of VM memory on Windows. It roughly corresponds to the
* values in a MEMORY_BASIC_INFORMATION struct, summed over an entire region
* of memory.
*/
namespace mozilla {
class MemoryInfo final
{
public:
enum class Perm : uint8_t
{
Read,
Write,
Execute,
CopyOnWrite,
Guard,
NoCache,
WriteCombine,
};
enum class PageType : uint8_t
{
Image,
Mapped,
Private,
};
using PermSet = EnumSet<Perm>;
using PageTypeSet = EnumSet<PageType>;
MemoryInfo() = default;
MOZ_IMPLICIT MemoryInfo(const MemoryInfo&) = default;
uintptr_t Start() const { return mStart; }
uintptr_t End() const { return mEnd; }
PageTypeSet Type() const { return mType; }
PermSet Perms() const { return mPerms; }
size_t Reserved() const { return mReserved; }
size_t Committed() const { return mCommitted; }
size_t Free() const { return mFree; }
size_t Size() const { return mSize; }
// Returns a MemoryInfo object containing the sums of all region sizes,
// divided into Reserved, Committed, and Free, depending on their State
// properties.
//
// The entire range of aSize bytes starting at aPtr must correspond to a
// single allocation. This restriction is enforced in debug builds.
static MemoryInfo Get(const void* aPtr, size_t aSize);
private:
uintptr_t mStart = 0;
uintptr_t mEnd = 0;
size_t mReserved = 0;
size_t mCommitted = 0;
size_t mFree = 0;
size_t mSize = 0;
PageTypeSet mType{};
PermSet mPerms{};
};
} // namespace mozilla
#endif // mozilla_MemoryInfo_h
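For reference, here is a minimal usage sketch of the interface being removed above. It is not part of the patch: the function name DumpRegionInfo is hypothetical, and it assumes a Windows build, the header above, and a region obtained from a single VirtualAlloc call.

#include "mozilla/MemoryInfo.h"
#include <windows.h>
#include <cstdio>

void DumpRegionInfo()
{
  // Reserve (but do not commit) one megabyte, then summarize the region.
  void* block = VirtualAlloc(nullptr, 1 << 20, MEM_RESERVE, PAGE_READWRITE);
  if (!block) {
    return;
  }
  auto info = mozilla::MemoryInfo::Get(block, 1 << 20);
  // Nothing has been committed yet, so the whole range should be reported
  // as reserved.
  printf("reserved=%zu committed=%zu free=%zu\n",
         info.Reserved(), info.Committed(), info.Free());
  VirtualFree(block, 0, MEM_RELEASE);
}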


@@ -1,214 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/MemoryMapping.h"
#include "mozilla/BinarySearch.h"
#include "mozilla/FileUtils.h"
#include "mozilla/Scoped.h"
#include <stdio.h>
#include <string.h>
namespace mozilla {
MOZ_TYPE_SPECIFIC_SCOPED_POINTER_TEMPLATE(ScopedCharArray, char, free);
namespace {
struct VMFlagString
{
const char* mName;
const char* mPrettyName;
VMFlag mFlag;
};
static const VMFlagString sVMFlagStrings[] = {
{"ac", "Accountable", VMFlag::Accountable},
{"ar", "ArchSpecific", VMFlag::ArchSpecific},
{"dc", "NoFork", VMFlag::NoFork},
{"dd", "NoCore", VMFlag::NoCore},
{"de", "NoExpand", VMFlag::NoExpand},
{"dw", "DisabledWrite", VMFlag::DisabledWrite},
{"ex", "Executable", VMFlag::Executable},
{"gd", "GrowsDown", VMFlag::GrowsDown},
{"hg", "HugePage", VMFlag::HugePage},
{"ht", "HugeTLB", VMFlag::HugeTLB},
{"io", "IO", VMFlag::IO},
{"lo", "Locked", VMFlag::Locked},
{"me", "MayExecute", VMFlag::MayExecute},
{"mg", "Mergeable", VMFlag::Mergeable},
{"mm", "MixedMap", VMFlag::MixedMap},
{"mr", "MayRead", VMFlag::MayRead},
{"ms", "MayShare", VMFlag::MayShare},
{"mw", "MayWrite", VMFlag::MayWrite},
{"nh", "NoHugePage", VMFlag::NoHugePage},
{"nl", "NonLinear", VMFlag::NonLinear},
{"nr", "NotReserved", VMFlag::NotReserved},
{"pf", "PurePFN", VMFlag::PurePFN},
{"rd", "Readable", VMFlag::Readable},
{"rr", "Random", VMFlag::Random},
{"sd", "SoftDirty", VMFlag::SoftDirty},
{"sh", "Shared", VMFlag::Shared},
{"sr", "Sequential", VMFlag::Sequential},
{"wr", "Writable", VMFlag::Writable},
};
} // anonymous namespace
constexpr size_t kVMFlags = size_t(-1);
// An array of known field names which may be present in an smaps file, and the
// offsets of the corresponding fields in a MemoryMapping class.
const MemoryMapping::Field MemoryMapping::sFields[] = {
{"AnonHugePages", offsetof(MemoryMapping, mAnonHugePages)},
{"Anonymous", offsetof(MemoryMapping, mAnonymous)},
{"KernelPageSize", offsetof(MemoryMapping, mKernelPageSize)},
{"LazyFree", offsetof(MemoryMapping, mLazyFree)},
{"Locked", offsetof(MemoryMapping, mLocked)},
{"MMUPageSize", offsetof(MemoryMapping, mMMUPageSize)},
{"Private_Clean", offsetof(MemoryMapping, mPrivate_Clean)},
{"Private_Dirty", offsetof(MemoryMapping, mPrivate_Dirty)},
{"Private_Hugetlb", offsetof(MemoryMapping, mPrivate_Hugetlb)},
{"Pss", offsetof(MemoryMapping, mPss)},
{"Referenced", offsetof(MemoryMapping, mReferenced)},
{"Rss", offsetof(MemoryMapping, mRss)},
{"Shared_Clean", offsetof(MemoryMapping, mShared_Clean)},
{"Shared_Dirty", offsetof(MemoryMapping, mShared_Dirty)},
{"Shared_Hugetlb", offsetof(MemoryMapping, mShared_Hugetlb)},
{"ShmemPmdMapped", offsetof(MemoryMapping, mShmemPmdMapped)},
{"Size", offsetof(MemoryMapping, mSize)},
{"Swap", offsetof(MemoryMapping, mSwap)},
{"SwapPss", offsetof(MemoryMapping, mSwapPss)},
// VmFlags is a special case. It contains an array of flag strings, which
// describe attributes of the mapping, rather than a mapping size. We include
// it in this array to aid in parsing, but give it a separate sentinel value,
// and treat it specially.
{"VmFlags", kVMFlags},
};
template <typename T, int n>
const T*
FindEntry(const char* aName, const T (&aEntries)[n])
{
size_t index;
if (BinarySearchIf(aEntries, 0, n,
[&] (const T& aEntry) {
return strcmp(aName, aEntry.mName);
},
&index)) {
return &aEntries[index];
}
return nullptr;
}
using Perm = MemoryMapping::Perm;
using PermSet = MemoryMapping::PermSet;
nsresult
GetMemoryMappings(nsTArray<MemoryMapping>& aMappings)
{
ScopedCloseFile file(fopen("/proc/self/smaps", "r"));
if (NS_WARN_IF(!file)) {
return NS_ERROR_UNEXPECTED;
}
// Note: We unfortunately can't just use UniquePtr<char[]> for this, because
// getline() will reallocate the buffer if it isn't large enough, and
// replace the value in our pointer. In the end, we need to free whatever its
// final value is. UniquePtr does not give us a way to handle this. Scoped
// does.
size_t bufferSize = 1024;
ScopedCharArray buffer((char*)moz_xmalloc(bufferSize));
MemoryMapping* current = nullptr;
while (getline(&buffer.rwget(), &bufferSize, file) > 0) {
size_t start, end, offset;
char flags[4] = "---";
char name[512];
name[0] = 0;
// Match the start of an entry. A typical line looks something like:
//
// 1487118a7000-148711a5a000 r-xp 00000000 103:03 54004561 /usr/lib/libc-2.27.so
if (sscanf(buffer, "%zx-%zx %4c %zx %*u:%*u %*u %511s\n",
&start, &end, flags, &offset, name) >= 4) {
PermSet perms;
if (flags[0] == 'r') {
perms += Perm::Read;
}
if (flags[1] == 'w') {
perms += Perm::Write;
}
if (flags[2] == 'x') {
perms += Perm::Execute;
}
if (flags[3] == 'p') {
perms += Perm::Private;
} else if (flags[3] == 's') {
perms += Perm::Shared;
}
current = aMappings.AppendElement(MemoryMapping{start, end, perms, offset, name});
continue;
}
if (!current) {
continue;
}
char* line = buffer;
char* savePtr;
char* fieldName = strtok_r(line, ":", &savePtr);
if (!fieldName) {
continue;
}
auto* field = FindEntry(fieldName, MemoryMapping::sFields);
if (!field) {
continue;
}
if (field->mOffset == kVMFlags) {
while (char* flagName = strtok_r(nullptr, " \n", &savePtr)) {
if (auto* flag = FindEntry(flagName, sVMFlagStrings)) {
current->mFlags += flag->mFlag;
}
}
continue;
}
line = strtok_r(nullptr, "\n", &savePtr);
size_t value;
if (sscanf(line, "%zd kB", &value) > 0) {
current->ValueForField(*field) = value * 1024;
}
}
return NS_OK;
}
void
MemoryMapping::Dump(nsACString& aOut) const
{
aOut.AppendPrintf("%zx-%zx Size: %zu Offset: %zx %s\n",
mStart, mEnd,
mEnd - mStart,
mOffset, mName.get());
for (auto& field : MemoryMapping::sFields) {
if (field.mOffset < sizeof(*this)) {
aOut.AppendPrintf(" %s: %zd\n", field.mName, ValueForField(field));
}
}
aOut.AppendPrintf(" Flags: %x\n", mFlags.serialize());
for (auto& flag : sVMFlagStrings) {
if (mFlags.contains(flag.mFlag)) {
aOut.AppendPrintf(" : %s %s\n", flag.mName, flag.mPrettyName);
}
}
}
} // namespace mozilla


@@ -1,190 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_MemoryMapping_h
#define mozilla_MemoryMapping_h
#include "mozilla/EnumSet.h"
#include "nsString.h"
#include "nsTArray.h"
/**
* MemoryMapping is a helper class which describes an entry in the Linux
* /proc/<pid>/smaps file. See procfs(5) for details on the entry format.
*
* The GetMemoryMappings() function returns an array of such entries, sorted by
* start address, one for each entry in the current process's address space.
*/
namespace mozilla {
enum class VMFlag : uint8_t
{
Readable, // rd - readable
Writable, // wr - writable
Executable, // ex - executable
Shared, // sh - shared
MayRead, // mr - may read
MayWrite, // mw - may write
MayExecute, // me - may execute
MayShare, // ms - may share
GrowsDown, // gd - stack segment grows down
PurePFN, // pf - pure PFN range
DisabledWrite, // dw - disabled write to the mapped file
Locked, // lo - pages are locked in memory
IO, // io - memory mapped I/O area
Sequential, // sr - sequential read advise provided
Random, // rr - random read advise provided
NoFork, // dc - do not copy area on fork
NoExpand, // de - do not expand area on remapping
Accountable, // ac - area is accountable
NotReserved, // nr - swap space is not reserved for the area
HugeTLB, // ht - area uses huge tlb pages
NonLinear, // nl - non-linear mapping
ArchSpecific, // ar - architecture specific flag
NoCore, // dd - do not include area into core dump
SoftDirty, // sd - soft-dirty flag
MixedMap, // mm - mixed map area
HugePage, // hg - huge page advise flag
NoHugePage, // nh - no-huge page advise flag
Mergeable, // mg - mergeable advise flag
};
using VMFlagSet = EnumSet<VMFlag>;
class MemoryMapping final
{
public:
enum class Perm : uint8_t
{
Read,
Write,
Execute,
Shared,
Private,
};
using PermSet = EnumSet<Perm>;
MemoryMapping(uintptr_t aStart, uintptr_t aEnd,
PermSet aPerms, size_t aOffset,
const char* aName)
: mStart(aStart)
, mEnd(aEnd)
, mOffset(aOffset)
, mName(aName)
, mPerms(aPerms)
{}
const nsCString& Name() const { return mName; }
uintptr_t Start() const { return mStart; }
uintptr_t End() const { return mEnd; }
bool Includes(const void* aPtr) const
{
auto ptr = uintptr_t(aPtr);
return ptr >= mStart && ptr < mEnd;
}
PermSet Perms() const { return mPerms; }
VMFlagSet VMFlags() const { return mFlags; }
// For file mappings, the offset in the mapped file which corresponds to the
// start of the mapped region.
size_t Offset() const { return mOffset; }
size_t AnonHugePages() const { return mAnonHugePages; }
size_t Anonymous() const { return mAnonymous; }
size_t KernelPageSize() const { return mKernelPageSize; }
size_t LazyFree() const { return mLazyFree; }
size_t Locked() const { return mLocked; }
size_t MMUPageSize() const { return mMMUPageSize; }
size_t Private_Clean() const { return mPrivate_Clean; }
size_t Private_Dirty() const { return mPrivate_Dirty; }
size_t Private_Hugetlb() const { return mPrivate_Hugetlb; }
size_t Pss() const { return mPss; }
size_t Referenced() const { return mReferenced; }
size_t Rss() const { return mRss; }
size_t Shared_Clean() const { return mShared_Clean; }
size_t Shared_Dirty() const { return mShared_Dirty; }
size_t Shared_Hugetlb() const { return mShared_Hugetlb; }
size_t ShmemPmdMapped() const { return mShmemPmdMapped; }
size_t Size() const { return mSize; }
size_t Swap() const { return mSwap; }
size_t SwapPss() const { return mSwapPss; }
// Dumps a string representation of the entry, similar to its format in the
// smaps file, to the given string. Mainly useful for debugging.
void Dump(nsACString& aOut) const;
// These comparison operators are used for binary searching sorted arrays of
// MemoryMapping entries to find the one which contains a given pointer.
bool operator==(const void* aPtr) const { return Includes(aPtr); }
bool operator<(const void* aPtr) const { return mStart < uintptr_t(aPtr); }
private:
friend nsresult GetMemoryMappings(nsTArray<MemoryMapping>& aMappings);
uintptr_t mStart = 0;
uintptr_t mEnd = 0;
size_t mOffset = 0;
nsCString mName;
// Members for size fields in the smaps file. Please keep these in sync with
// the sFields array.
size_t mAnonHugePages = 0;
size_t mAnonymous = 0;
size_t mKernelPageSize = 0;
size_t mLazyFree = 0;
size_t mLocked = 0;
size_t mMMUPageSize = 0;
size_t mPrivate_Clean = 0;
size_t mPrivate_Dirty = 0;
size_t mPrivate_Hugetlb = 0;
size_t mPss = 0;
size_t mReferenced = 0;
size_t mRss = 0;
size_t mShared_Clean = 0;
size_t mShared_Dirty = 0;
size_t mShared_Hugetlb = 0;
size_t mShmemPmdMapped = 0;
size_t mSize = 0;
size_t mSwap = 0;
size_t mSwapPss = 0;
PermSet mPerms{};
VMFlagSet mFlags{};
// Contains the name and offset of one of the above size_t fields, for use in
// parsing and dumping. The below helpers contain a list of the fields, and map
// Field entries to the appropriate member in a class instance.
struct Field
{
const char* mName;
size_t mOffset;
};
static const Field sFields[20];
size_t& ValueForField(const Field& aField)
{
char* fieldPtr = reinterpret_cast<char*>(this) + aField.mOffset;
return reinterpret_cast<size_t*>(fieldPtr)[0];
}
size_t ValueForField(const Field& aField) const
{
return const_cast<MemoryMapping*>(this)->ValueForField(aField);
}
};
nsresult GetMemoryMappings(nsTArray<MemoryMapping>& aMappings);
} // namespace mozilla
#endif // mozilla_MemoryMapping_h
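Likewise, a minimal usage sketch for the /proc/self/smaps helper removed above (built only on Linux/Android). It is not part of the patch: the function name DumpSelfMappings is hypothetical, and it assumes a Gecko build environment providing nsTArray and nsAutoCString.

#include "mozilla/MemoryMapping.h"
#include <cstdio>

void DumpSelfMappings()
{
  nsTArray<mozilla::MemoryMapping> mappings(1024);
  if (NS_FAILED(mozilla::GetMemoryMappings(mappings))) {
    return;
  }
  nsAutoCString out;
  for (const auto& mapping : mappings) {
    // Appends an smaps-style description of each entry to |out|.
    mapping.Dump(out);
  }
  printf("%s", out.get());
}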


@@ -112,8 +112,6 @@ EXPORTS.mozilla += [
'IntentionalCrash.h',
'JSObjectHolder.h',
'Logging.h',
'MemoryInfo.h',
'MemoryMapping.h',
'MemoryReportingProcess.h',
'nsMemoryInfoDumper.h',
'NSPRLogModulesParser.h',
@@ -175,16 +173,6 @@ UNIFIED_SOURCES += [
'nsWeakReference.cpp',
]
if CONFIG['OS_TARGET'] in ('Linux', 'Android'):
UNIFIED_SOURCES += [
'MemoryMapping.cpp',
]
if CONFIG['OS_TARGET'] == 'WINNT':
UNIFIED_SOURCES += [
'MemoryInfo.cpp',
]
GENERATED_FILES += [
"error_list.rs",
"ErrorList.h",


@@ -25,12 +25,10 @@
#include "nsMemoryInfoDumper.h"
#endif
#include "nsNetCID.h"
#include "nsThread.h"
#include "mozilla/Attributes.h"
#include "mozilla/MemoryReportingProcess.h"
#include "mozilla/PodOperations.h"
#include "mozilla/Preferences.h"
#include "mozilla/ResultExtensions.h"
#include "mozilla/Services.h"
#include "mozilla/Telemetry.h"
#include "mozilla/UniquePtrExtensions.h"
@@ -40,8 +38,6 @@
#include "mozilla/ipc/FileDescriptorUtils.h"
#ifdef XP_WIN
#include "mozilla/MemoryInfo.h"
#include <process.h>
#ifndef getpid
#define getpid _getpid
@@ -60,8 +56,6 @@ using namespace dom;
#if defined(XP_LINUX)
#include "mozilla/MemoryMapping.h"
#include <malloc.h>
#include <string.h>
#include <stdlib.h>
@@ -95,15 +89,48 @@ GetProcSelfSmapsPrivate(int64_t* aN)
// little to do with whether the pages are actually shared. /proc/self/smaps
// on the other hand appears to give us the correct information.
nsTArray<MemoryMapping> mappings(1024);
MOZ_TRY(GetMemoryMappings(mappings));
FILE* f = fopen("/proc/self/smaps", "r");
if (NS_WARN_IF(!f)) {
return NS_ERROR_UNEXPECTED;
}
// We carry over the end of the buffer to the beginning to make sure we only
// interpret complete lines.
static const uint32_t carryOver = 32;
static const uint32_t readSize = 4096;
int64_t amount = 0;
for (auto& mapping : mappings) {
amount += mapping.Private_Clean();
amount += mapping.Private_Dirty();
char buffer[carryOver + readSize + 1];
// Fill the beginning of the buffer with spaces, as a sentinel for the first
// iteration.
memset(buffer, ' ', carryOver);
for (;;) {
size_t bytes = fread(buffer + carryOver, sizeof(*buffer), readSize, f);
char* end = buffer + bytes;
char* ptr = buffer;
end[carryOver] = '\0';
// We are looking for lines like "Private_{Clean,Dirty}: 4 kB".
while ((ptr = strstr(ptr, "Private"))) {
if (ptr >= end) {
break;
}
ptr += sizeof("Private_Xxxxx:");
amount += strtol(ptr, nullptr, 10);
}
if (bytes < readSize) {
// We do not expect any match within the end of the buffer.
MOZ_ASSERT(!strstr(end, "Private"));
break;
}
// Carry the end of the buffer over to the beginning.
memcpy(buffer, end, carryOver);
}
*aN = amount;
fclose(f);
// Convert from kB to bytes.
*aN = amount * 1024;
return NS_OK;
}
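As a quick worked example of what the restored parser above reports (hypothetical values, not taken from a real smaps file): if the file contains the lines

Private_Clean:         4 kB
Private_Dirty:         8 kB
Private_Dirty:        12 kB

then the strstr/strtol loop accumulates amount = 4 + 8 + 12 = 24, and the function returns *aN = 24 * 1024 = 24576 bytes of private memory.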
@@ -1404,115 +1431,6 @@ public:
};
NS_IMPL_ISUPPORTS(AtomTablesReporter, nsIMemoryReporter)
#if defined(XP_LINUX) || defined(XP_WIN)
class ThreadStacksReporter final : public nsIMemoryReporter
{
~ThreadStacksReporter() = default;
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override
{
#ifdef XP_LINUX
nsTArray<MemoryMapping> mappings(1024);
MOZ_TRY(GetMemoryMappings(mappings));
#endif
// Enumerating over active threads requires holding a lock, so we collect
// info on all threads, and then call our reporter callbacks after releasing
// the lock.
struct ThreadData
{
nsCString mName;
uint32_t mThreadId;
size_t mPrivateSize;
};
AutoTArray<ThreadData, 32> threads;
for (auto* thread : nsThread::Enumerate()) {
if (!thread->StackBase()) {
continue;
}
#ifdef XP_LINUX
int idx = mappings.BinaryIndexOf(thread->StackBase());
if (idx < 0) {
continue;
}
// Referenced() is the combined size of all pages in the region which have
// ever been touched, and are therefore consuming memory. For stack
// regions, these pages are guaranteed to be un-shared unless we fork
// after creating threads (which we don't).
size_t privateSize = mappings[idx].Referenced();
// On Linux, we have to be very careful matching memory regions to thread
// stacks.
//
// To begin with, the kernel only reports VM stats for regions of all
// adjacent pages with the same flags, protection, and backing file.
// There's no way to get finer-grained usage information for a subset of
// those pages.
//
// Stack segments always have a guard page at the bottom of the stack
// (assuming we only support stacks that grow down), so there's no danger
// of them being merged with other stack regions. At the top, there's no
// protection page, and no way to allocate one without using pthreads
// directly and allocating our own stacks. So we get around the problem by
// adding an extra VM flag (NOHUGEPAGE) to our stack region, which we
// don't expect to be set on any heap regions. But this is not fool-proof.
//
// A second kink is that different C libraries (and different versions
// thereof) report stack base locations and sizes differently with regard
// to the guard page. For the libraries that include the guard page in the
// stack size and/or base pointer, we need to adjust those values to
// compensate. But it's possible that our logic will get out of sync with
// library changes, or someone will compile with an unexpected library.
//
// The upshot of all of this is that there may be configurations that our
// special cases don't cover. And if there are, we want to know about it.
// So assert that the total size of the memory region we're reporting actually
// matches the allocated size of the thread stack.
MOZ_ASSERT(mappings[idx].Size() == thread->StackSize(),
"Mapping region size doesn't match stack allocation size");
#else
auto memInfo = MemoryInfo::Get(thread->StackBase(), thread->StackSize());
size_t privateSize = memInfo.Committed();
#endif
threads.AppendElement(ThreadData{
nsCString(PR_GetThreadName(thread->GetPRThread())),
thread->ThreadId(),
// On Linux, it's possible (but unlikely) that our stack region will
// have been merged with adjacent heap regions, in which case we'll get
// combined size information for both. So we take the minimum of the
// reported private size and the requested stack size to avoid the
// possibility of majorly over-reporting in that case.
std::min(privateSize, thread->StackSize()),
});
}
for (auto& thread : threads) {
nsPrintfCString path("explicit/thread-stacks/%s (tid=%u)",
thread.mName.get(), thread.mThreadId);
aHandleReport->Callback(
EmptyCString(), path,
KIND_NONHEAP, UNITS_BYTES,
thread.mPrivateSize,
NS_LITERAL_CSTRING("The sizes of thread stacks which have been "
"committed to memory."),
aData);
}
return NS_OK;
}
};
NS_IMPL_ISUPPORTS(ThreadStacksReporter, nsIMemoryReporter)
#endif
#ifdef DEBUG
// Ideally, this would be implemented in BlockingResourceBase.cpp.
@@ -1674,10 +1592,6 @@ nsMemoryReporterManager::Init()
RegisterStrongReporter(new AtomTablesReporter());
#if defined(XP_LINUX) || defined(XP_WIN)
RegisterStrongReporter(new ThreadStacksReporter());
#endif
#ifdef DEBUG
RegisterStrongReporter(new DeadlockDetectorReporter());
#endif


@@ -7,7 +7,6 @@
#include "nsThread.h"
#include "base/message_loop.h"
#include "base/platform_thread.h"
// Chromium's logging can sometimes leak through...
#ifdef LOG
@@ -51,23 +50,9 @@
#include "mozilla/dom/ContentChild.h"
#ifdef XP_LINUX
#ifdef __GLIBC__
#include <gnu/libc-version.h>
#endif
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <stdio.h>
#endif
#ifdef XP_WIN
#include "mozilla/DynamicallyLinkedFunctionPtr.h"
#include <Winbase.h>
using GetCurrentThreadStackLimitsFn = void (WINAPI*)(
PULONG_PTR LowLimit, PULONG_PTR HighLimit);
#endif
#define HAVE_UALARM _BSD_SOURCE || (_XOPEN_SOURCE >= 500 || \
@@ -391,26 +376,6 @@ struct ThreadInitData {
}
/* static */ mozilla::OffTheBooksMutex&
nsThread::ThreadListMutex()
{
static OffTheBooksMutex sMutex("nsThread::ThreadListMutex");
return sMutex;
}
/* static */ LinkedList<nsThread>&
nsThread::ThreadList()
{
static LinkedList<nsThread> sList;
return sList;
}
/* static */ nsThreadEnumerator
nsThread::Enumerate()
{
return {};
}
/*static*/ void
nsThread::ThreadFunc(void* aArg)
{
@@ -420,7 +385,6 @@ nsThread::ThreadFunc(void* aArg)
nsThread* self = initData->thread; // strong reference
self->mThread = PR_GetCurrentThread();
self->mThreadId = uint32_t(PlatformThread::CurrentId());
self->mVirtualThread = GetCurrentVirtualThread();
self->mEventTarget->SetCurrentThread();
SetupCurrentThreadForChaosMode();
@@ -429,72 +393,6 @@ nsThread::ThreadFunc(void* aArg)
NS_SetCurrentThreadName(initData->name.BeginReading());
}
{
#if defined(XP_LINUX)
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_getattr_np(pthread_self(), &attr);
size_t stackSize;
pthread_attr_getstack(&attr, &self->mStackBase, &stackSize);
// Glibc prior to 2.27 reports the stack size and base including the guard
// region, so we need to compensate for it to get accurate accounting.
// Also, this behavior difference isn't guarded by a versioned symbol, so we
// actually need to check the runtime glibc version, not the version we were
// compiled against.
static bool sAdjustForGuardSize = ({
#ifdef __GLIBC__
unsigned major, minor;
sscanf(gnu_get_libc_version(), "%u.%u", &major, &minor) < 2 ||
major < 2 || (major == 2 && minor < 27);
#else
false;
#endif
});
if (sAdjustForGuardSize) {
size_t guardSize;
pthread_attr_getguardsize(&attr, &guardSize);
// Note: This assumes that the stack grows down, as is the case on all of
// our tier 1 platforms. On platforms where the stack grows up, the
// mStackBase adjustment is unnecessary, but doesn't cause any harm other
// than under-counting stack memory usage by one page.
self->mStackBase = reinterpret_cast<char*>(self->mStackBase) + guardSize;
stackSize -= guardSize;
}
self->mStackSize = stackSize;
// This is a bit of a hack.
//
// We really do want the NOHUGEPAGE flag on our thread stacks, since we
// don't expect any of them to need anywhere near 2MB of space. But setting
// it here is too late to have an effect, since the first stack page has
// already been faulted into existence, and NSPR doesn't give us a way to set
// it beforehand.
//
// What this does get us, however, is a different set of VM flags on our
// thread stacks compared to normal heap memory. Which makes the Linux
// kernel report them as separate regions, even when they are adjacent to
// heap memory. This allows us to accurately track the actual memory
// consumption of our allocated stacks.
madvise(self->mStackBase, stackSize, MADV_NOHUGEPAGE);
pthread_attr_destroy(&attr);
#elif defined(XP_WIN)
static const DynamicallyLinkedFunctionPtr<GetCurrentThreadStackLimitsFn>
sGetStackLimits(L"kernel32.dll", "GetCurrentThreadStackLimits");
if (sGetStackLimits) {
ULONG_PTR stackBottom, stackTop;
sGetStackLimits(&stackBottom, &stackTop);
self->mStackBase = reinterpret_cast<void*>(stackBottom);
self->mStackSize = stackTop - stackBottom;
}
#endif
}
// Inform the ThreadManager
nsThreadManager::get().RegisterCurrentThread(*self);
@@ -670,7 +568,6 @@ nsThread::~nsThread()
{
NS_ASSERTION(mRequestedShutdownContexts.IsEmpty(),
"shouldn't be waiting on other threads to shutdown");
MOZ_ASSERT(!isInList());
#ifdef DEBUG
// We deliberately leak these so they can be tracked by the leak checker.
// If you're having nsThreadShutdownContext leaks, you can set:
@@ -704,11 +601,6 @@ nsThread::Init(const nsACString& aName)
return NS_ERROR_OUT_OF_MEMORY;
}
{
OffTheBooksMutexAutoLock mal(ThreadListMutex());
ThreadList().insertBack(this);
}
// ThreadFunc will wait for this event to be run before it tries to access
// mThread. By delaying insertion of this event into the queue, we ensure
// that mThread is set properly.
@@ -823,13 +715,6 @@ nsThread::ShutdownInternal(bool aSync)
return nullptr;
}
{
OffTheBooksMutexAutoLock mal(ThreadListMutex());
if (isInList()) {
removeFrom(ThreadList());
}
}
NotNull<nsThread*> currentThread =
WrapNotNull(nsThreadManager::get().GetCurrentThread());


@@ -15,7 +15,6 @@
#include "nsString.h"
#include "nsTObserverArray.h"
#include "mozilla/Attributes.h"
#include "mozilla/LinkedList.h"
#include "mozilla/SynchronizedEventQueue.h"
#include "mozilla/NotNull.h"
#include "mozilla/TimeStamp.h"
@@ -32,16 +31,11 @@ class ThreadEventTarget;
using mozilla::NotNull;
class nsThreadEnumerator;
// A native thread
class nsThread
: public nsIThreadInternal
, public nsISupportsPriority
, private mozilla::LinkedListElement<nsThread>
{
friend mozilla::LinkedList<nsThread>;
friend mozilla::LinkedListElement<nsThread>;
public:
NS_DECL_THREADSAFE_ISUPPORTS
NS_DECL_NSIEVENTTARGET_FULL
@@ -71,11 +65,6 @@ public:
return mThread;
}
const void* StackBase() const { return mStackBase; }
size_t StackSize() const { return mStackSize; }
uint32_t ThreadId() const { return mThreadId; }
// If this flag is true, then the nsThread was created using
// nsIThreadManager::NewThread.
bool ShutdownRequired()
@@ -143,16 +132,12 @@ public:
virtual mozilla::PerformanceCounter* GetPerformanceCounter(nsIRunnable* aEvent);
static nsThreadEnumerator Enumerate();
private:
void DoMainThreadSpecificProcessing(bool aReallyWait);
protected:
friend class nsThreadShutdownEvent;
friend class nsThreadEnumerator;
virtual ~nsThread();
static void ThreadFunc(void* aArg);
@@ -167,9 +152,6 @@ protected:
struct nsThreadShutdownContext* ShutdownInternal(bool aSync);
static mozilla::OffTheBooksMutex& ThreadListMutex();
static mozilla::LinkedList<nsThread>& ThreadList();
RefPtr<mozilla::SynchronizedEventQueue> mEvents;
RefPtr<mozilla::ThreadEventTarget> mEventTarget;
@@ -179,11 +161,9 @@ protected:
nsAutoTObserverArray<NotNull<nsCOMPtr<nsIThreadObserver>>, 2> mEventObservers;
int32_t mPriority;
uint32_t mThreadId;
PRThread* mThread;
uint32_t mNestedEventLoopDepth;
uint32_t mStackSize;
void* mStackBase = nullptr;
// The shutdown context for ourselves.
struct nsThreadShutdownContext* mShutdownContext;
@@ -204,20 +184,6 @@ protected:
RefPtr<mozilla::PerformanceCounter> mCurrentPerformanceCounter;
};
class MOZ_STACK_CLASS nsThreadEnumerator final
{
public:
nsThreadEnumerator()
: mMal(nsThread::ThreadListMutex())
{}
auto begin() { return nsThread::ThreadList().begin(); }
auto end() { return nsThread::ThreadList().end(); }
private:
mozilla::OffTheBooksMutexAutoLock mMal;
};
#if defined(XP_UNIX) && !defined(ANDROID) && !defined(DEBUG) && HAVE_UALARM \
&& defined(_GNU_SOURCE)
# define MOZ_CANARY