Bug 1481009 Part 7 - Treat inaccessible memory regions after thread stacks as untracked, r=froydnj.

--HG--
extra : rebase_source : 96ee59dd67123fa67014f554d7219f11555e16d4
This commit is contained in:
Brian Hackett 2018-08-13 20:48:14 +00:00
Parent ed1cb3c6c3
Commit 52b5bc2878
1 changed file: 80 additions and 17 deletions

View file

@@ -304,7 +304,7 @@ struct MemoryInfo {
// Untracked memory regions allocated before the first checkpoint. This is only
// accessed on the main thread, and is not a vector because of reentrancy
// issues.
static const size_t MaxInitialUntrackedRegions = 256;
static const size_t MaxInitialUntrackedRegions = 512;
AllocatedMemoryRegion mInitialUntrackedRegions[MaxInitialUntrackedRegions];
SpinLock mInitialUntrackedRegionsLock;
@@ -728,13 +728,80 @@ RemoveInitialUntrackedRegion(uint8_t* aBase, size_t aSize)
MOZ_CRASH();
}
// Get information about the mapped region containing *aAddress, or the next
// mapped region afterwards if aAddress is not mapped. aAddress is updated to
// the start of that region, and aSize, aProtection, and aMaxProtection are
// updated with the size and protection status of the region. Returns false if
// there are no more mapped regions after *aAddress.
static bool
QueryRegion(uint8_t** aAddress, size_t* aSize,
            int* aProtection = nullptr, int* aMaxProtection = nullptr)
{
  mach_vm_address_t addr = (mach_vm_address_t) *aAddress;
  mach_vm_size_t nbytes;
  vm_region_basic_info_64 info;
  // Use the 64-bit basic info flavor to match the vm_region_basic_info_64
  // struct above, and express the count in natural_t units via the
  // VM_REGION_BASIC_INFO_COUNT_64 constant, as mach_vm_region expects.
  // (Passing sizeof(info) here would give a byte count, which only works
  // because the kernel clamps oversized counts.)
  mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
  mach_port_t some_port;
  kern_return_t rv = mach_vm_region(mach_task_self(), &addr, &nbytes,
                                    VM_REGION_BASIC_INFO_64,
                                    (vm_region_info_t) &info, &info_count, &some_port);
  if (rv == KERN_INVALID_ADDRESS) {
    // No mapped region at or after *aAddress.
    return false;
  }
  MOZ_RELEASE_ASSERT(rv == KERN_SUCCESS);

  // mach_vm_region rounds addr up to the start of the containing/next region.
  *aAddress = (uint8_t*) addr;
  *aSize = nbytes;
  if (aProtection) {
    *aProtection = info.protection;
  }
  if (aMaxProtection) {
    *aMaxProtection = info.max_protection;
  }
  return true;
}
static void
MarkThreadStacksAsUntracked()
{
AutoPassThroughThreadEvents pt;
// Thread stacks are excluded from the tracked regions.
for (size_t i = MainThreadId; i <= MaxThreadId; i++) {
Thread* thread = Thread::GetById(i);
if (!thread->StackBase()) {
continue;
}
AddInitialUntrackedMemoryRegion(thread->StackBase(), thread->StackSize());
// Look for a mapped region with no access permissions immediately after
// the thread stack's allocated region, and include this in the untracked
// memory if found. This is done to avoid confusing breakpad, which will
// scan the allocated memory in this process and will not correctly
// determine stack boundaries if we track these trailing regions and end up
// marking them as readable.
// Find the mapped region containing the thread's stack.
uint8_t* base = thread->StackBase();
size_t size;
if (!QueryRegion(&base, &size)) {
MOZ_CRASH("Could not find memory region information for thread stack");
}
// Sanity check the region size. Note that we don't mark this entire region
// as untracked, since it may contain TLS data which should be tracked.
MOZ_RELEASE_ASSERT(base <= thread->StackBase());
MOZ_RELEASE_ASSERT(base + size >= thread->StackBase() + thread->StackSize());
uint8_t* trailing = base + size;
size_t trailingSize;
int protection;
if (QueryRegion(&trailing, &trailingSize, &protection)) {
if (trailing == base + size && protection == 0) {
AddInitialUntrackedMemoryRegion(trailing, trailingSize);
}
}
}
}
@@ -830,7 +897,8 @@ AddInitialTrackedMemoryRegions(uint8_t* aAddress, size_t aSize, bool aExecutable)
static void UpdateNumTrackedRegionsForSnapshot();
// Handle all initial untracked memory regions in the process.
// Fill in the set of tracked memory regions that are currently mapped within
// this process.
static void
ProcessAllInitialMemoryRegions()
{
@@ -838,26 +906,21 @@ ProcessAllInitialMemoryRegions()
{
AutoPassThroughThreadEvents pt;
for (mach_vm_address_t addr = 0;;) {
mach_vm_size_t nbytes;
vm_region_basic_info_64 info;
mach_msg_type_number_t info_count = sizeof(vm_region_basic_info_64);
mach_port_t some_port;
kern_return_t rv = mach_vm_region(mach_task_self(), &addr, &nbytes, VM_REGION_BASIC_INFO,
(vm_region_info_t) &info, &info_count, &some_port);
if (rv == KERN_INVALID_ADDRESS) {
for (uint8_t* addr = nullptr;;) {
size_t size;
int maxProtection;
if (!QueryRegion(&addr, &size, nullptr, &maxProtection)) {
break;
}
MOZ_RELEASE_ASSERT(rv == KERN_SUCCESS);
if (info.max_protection & VM_PROT_WRITE) {
MOZ_RELEASE_ASSERT(info.max_protection & VM_PROT_READ);
AddInitialTrackedMemoryRegions(reinterpret_cast<uint8_t*>(addr), nbytes,
info.max_protection & VM_PROT_EXECUTE);
// Consider all memory regions that can possibly be written to, even if
// they aren't currently writable.
if (maxProtection & VM_PROT_WRITE) {
MOZ_RELEASE_ASSERT(maxProtection & VM_PROT_READ);
AddInitialTrackedMemoryRegions(addr, size, maxProtection & VM_PROT_EXECUTE);
}
addr += nbytes;
addr += size;
}
}