Bug 1389464 - Share memory mapping code between SharedArrayRawBuffer and WasmArrayRawBuffer. r=luke

--HG--
extra : rebase_source : 160a7d3ea16d47bc266748001edf16af2da08ab2
Lars T Hansen 2017-10-26 13:32:15 +02:00
Parent cbb37dbb2b
Commit 8f157de25d
5 changed files: 185 additions and 180 deletions

View file

@@ -3164,7 +3164,7 @@ static bool
SharedArrayRawBufferCount(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
-args.rval().setInt32(SharedArrayRawBuffer::liveBuffers());
+args.rval().setInt32(LiveMappedBufferCount());
return true;
}

View file

@@ -89,6 +89,145 @@ js::ToClampedIndex(JSContext* cx, HandleValue v, uint32_t length, uint32_t* out)
return true;
}
// If there are too many 4GB buffers live we run up against system resource
// exhaustion (address space or number of memory map descriptors), see
// bug 1068684, bug 1073934 for details. The limiting case seems to be
// Windows Vista Home 64-bit, where the per-process address space is limited
// to 8TB. Thus we track the number of live objects, and set a limit of
// 1000 live objects per process; we run synchronous GC if necessary; and
// we throw an OOM error if the per-process limit is exceeded.
static mozilla::Atomic<int32_t, mozilla::ReleaseAcquire> liveBufferCount(0);
static const int32_t MaximumLiveMappedBuffers = 1000;
int32_t
js::LiveMappedBufferCount()
{
return liveBufferCount;
}
void*
js::MapBufferMemory(size_t mappedSize, size_t initialCommittedSize)
{
MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
MOZ_ASSERT(initialCommittedSize % gc::SystemPageSize() == 0);
MOZ_ASSERT(initialCommittedSize <= mappedSize);
// Test >= to guard against the case where multiple extant runtimes
// race to allocate.
if (++liveBufferCount >= MaximumLiveMappedBuffers) {
if (OnLargeAllocationFailure)
OnLargeAllocationFailure();
if (liveBufferCount >= MaximumLiveMappedBuffers) {
liveBufferCount--;
return nullptr;
}
}
#ifdef XP_WIN
void* data = VirtualAlloc(nullptr, mappedSize, MEM_RESERVE, PAGE_NOACCESS);
if (!data) {
liveBufferCount--;
return nullptr;
}
if (!VirtualAlloc(data, initialCommittedSize, MEM_COMMIT, PAGE_READWRITE)) {
VirtualFree(data, 0, MEM_RELEASE);
liveBufferCount--;
return nullptr;
}
#else // XP_WIN
void* data = MozTaggedAnonymousMmap(nullptr, mappedSize, PROT_NONE,
MAP_PRIVATE | MAP_ANON, -1, 0, "wasm-reserved");
if (data == MAP_FAILED) {
liveBufferCount--;
return nullptr;
}
// Note we will waste a page on zero-sized memories here
if (mprotect(data, initialCommittedSize, PROT_READ | PROT_WRITE)) {
munmap(data, mappedSize);
liveBufferCount--;
return nullptr;
}
#endif // !XP_WIN
#if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)data + initialCommittedSize,
mappedSize - initialCommittedSize);
#endif
return data;
}
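A worked instance of the limit check above: with liveBufferCount at 999, two runtimes allocating concurrently can both pass the increment, leaving the counter at 1001, and both enter the limit branch. OnLargeAllocationFailure may trigger a synchronous GC that releases mapped buffers, and each release decrements the counter in UnmapBufferMemory; on the re-check, any thread that still sees the counter at or above MaximumLiveMappedBuffers undoes its own increment and returns nullptr. The counter can therefore transiently overshoot the limit, but the number of successfully mapped buffers never does.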
bool
js::CommitBufferMemory(void* dataEnd, uint32_t delta)
{
MOZ_ASSERT(delta);
MOZ_ASSERT(delta % gc::SystemPageSize() == 0);
#ifdef XP_WIN
if (!VirtualAlloc(dataEnd, delta, MEM_COMMIT, PAGE_READWRITE))
return false;
#else // XP_WIN
if (mprotect(dataEnd, delta, PROT_READ | PROT_WRITE))
return false;
#endif // !XP_WIN
#if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)dataEnd, delta);
#endif
return true;
}
#ifndef WASM_HUGE_MEMORY
bool
js::ExtendBufferMapping(void* dataPointer, size_t mappedSize, size_t newMappedSize)
{
MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
MOZ_ASSERT(newMappedSize % gc::SystemPageSize() == 0);
MOZ_ASSERT(newMappedSize >= mappedSize);
#ifdef XP_WIN
void* mappedEnd = (char*)dataPointer + mappedSize;
uint32_t delta = newMappedSize - mappedSize;
if (!VirtualAlloc(mappedEnd, delta, MEM_RESERVE, PAGE_NOACCESS))
return false;
return true;
#elif defined(XP_LINUX)
// Note this will not move memory (no MREMAP_MAYMOVE specified)
if (MAP_FAILED == mremap(dataPointer, mappedSize, newMappedSize, 0))
return false;
return true;
#else
// No mechanism for remapping on MacOS and other Unices. Luckily
// shouldn't need it here as most of these are 64-bit.
return false;
#endif
}
#endif
void
js::UnmapBufferMemory(void* base, size_t mappedSize)
{
MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
#ifdef XP_WIN
VirtualFree(base, 0, MEM_RELEASE);
#else // XP_WIN
munmap(base, mappedSize);
#endif // !XP_WIN
#if defined(MOZ_VALGRIND) && defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)
VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)base, mappedSize);
#endif
// Decrement the buffer counter at the end -- otherwise, a race condition
// could enable the creation of unlimited buffers.
liveBufferCount--;
}
/*
* ArrayBufferObject
*
@@ -563,17 +702,9 @@ class js::WasmArrayRawBuffer
uint8_t* dataEnd = dataPointer() + oldSize;
MOZ_ASSERT(uintptr_t(dataEnd) % gc::SystemPageSize() == 0);
-# ifdef XP_WIN
-if (delta && !VirtualAlloc(dataEnd, delta, MEM_COMMIT, PAGE_READWRITE))
-return false;
-# else // XP_WIN
-if (delta && mprotect(dataEnd, delta, PROT_READ | PROT_WRITE))
-return false;
-# endif // !XP_WIN
-# if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
-VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)dataEnd, delta);
-# endif
+if (delta && !CommitBufferMemory(dataEnd, delta))
+return false;
return true;
}
@@ -585,20 +716,8 @@ class js::WasmArrayRawBuffer
if (mappedSize_ == newMappedSize)
return true;
-# ifdef XP_WIN
-uint8_t* mappedEnd = dataPointer() + mappedSize_;
-uint32_t delta = newMappedSize - mappedSize_;
-if (!VirtualAlloc(mappedEnd, delta, MEM_RESERVE, PAGE_NOACCESS))
+if (!ExtendBufferMapping(dataPointer(), mappedSize_, newMappedSize))
return false;
-# elif defined(XP_LINUX)
-// Note this will not move memory (no MREMAP_MAYMOVE specified)
-if (MAP_FAILED == mremap(dataPointer(), mappedSize_, newMappedSize, 0))
-return false;
-# else
-// No mechanism for remapping on MacOS and other Unices. Luckily
-// shouldn't need it here as most of these are 64-bit.
-return false;
-# endif
mappedSize_ = newMappedSize;
return true;
@@ -638,33 +757,10 @@ WasmArrayRawBuffer::Allocate(uint32_t numBytes, const Maybe<uint32_t>& maxSize)
uint64_t mappedSizeWithHeader = mappedSize + gc::SystemPageSize();
uint64_t numBytesWithHeader = numBytes + gc::SystemPageSize();
-# ifdef XP_WIN
-void* data = VirtualAlloc(nullptr, (size_t) mappedSizeWithHeader, MEM_RESERVE, PAGE_NOACCESS);
+void* data = MapBufferMemory((size_t) mappedSizeWithHeader, (size_t) numBytesWithHeader);
if (!data)
return nullptr;
-if (!VirtualAlloc(data, numBytesWithHeader, MEM_COMMIT, PAGE_READWRITE)) {
-VirtualFree(data, 0, MEM_RELEASE);
-return nullptr;
-}
-# else // XP_WIN
-void* data = MozTaggedAnonymousMmap(nullptr, (size_t) mappedSizeWithHeader, PROT_NONE,
-MAP_PRIVATE | MAP_ANON, -1, 0, "wasm-reserved");
-if (data == MAP_FAILED)
-return nullptr;
-// Note we will waste a page on zero-sized memories here
-if (mprotect(data, numBytesWithHeader, PROT_READ | PROT_WRITE)) {
-munmap(data, mappedSizeWithHeader);
-return nullptr;
-}
-# endif // !XP_WIN
-# if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
-VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)data + numBytesWithHeader,
-mappedSizeWithHeader - numBytesWithHeader);
-# endif
uint8_t* base = reinterpret_cast<uint8_t*>(data) + gc::SystemPageSize();
uint8_t* header = base - sizeof(WasmArrayRawBuffer);
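The layout these two lines establish, sketched with sizes not to scale (the region before `base` is exactly one system page, with the WasmArrayRawBuffer header occupying its tail):

data                           base = data + gc::SystemPageSize()
|<------ header page -------->|<---- numBytes committed ---->|<-- uncommitted, up to mappedSize -->|
                     header at base - sizeof(WasmArrayRawBuffer)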
@@ -676,19 +772,11 @@ WasmArrayRawBuffer::Allocate(uint32_t numBytes, const Maybe<uint32_t>& maxSize)
WasmArrayRawBuffer::Release(void* mem)
{
WasmArrayRawBuffer* header = (WasmArrayRawBuffer*)((uint8_t*)mem - sizeof(WasmArrayRawBuffer));
-uint8_t* base = header->basePointer();
MOZ_RELEASE_ASSERT(header->mappedSize() <= SIZE_MAX - gc::SystemPageSize());
-# ifdef XP_WIN
-VirtualFree(base, 0, MEM_RELEASE);
-# else // XP_WIN
size_t mappedSizeWithHeader = header->mappedSize() + gc::SystemPageSize();
-munmap(base, mappedSizeWithHeader);
-# endif // !XP_WIN
-# if defined(MOZ_VALGRIND) && defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)
-VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(base, mappedSizeWithHeader);
-# endif
+UnmapBufferMemory(header->basePointer(), mappedSizeWithHeader);
}
WasmArrayRawBuffer*

View file

@@ -23,6 +23,34 @@ namespace js {
class ArrayBufferViewObject;
class WasmArrayRawBuffer;
// Create a new mapping of size `mappedSize` with an initially committed prefix
// of size `initialCommittedSize`. Both arguments denote bytes and must be
// multiples of the page size, with `initialCommittedSize` <= `mappedSize`.
// Returns nullptr on failure.
void* MapBufferMemory(size_t mappedSize, size_t initialCommittedSize);
// Commit additional memory in an existing mapping. `dataEnd` must point at the
// end of the currently committed area, and `delta`, the number of bytes to
// commit beyond that point, must be a multiple of the page size. Returns false
// on failure.
bool CommitBufferMemory(void* dataEnd, uint32_t delta);
#ifndef WASM_HUGE_MEMORY
// Extend an existing mapping by adding uncommitted pages to it. `dataStart`
// must be the pointer to the start of the existing mapping, `mappedSize` the
// size of the existing mapping, and `newMappedSize` the size of the extended
// mapping (sizes in bytes), with `mappedSize` <= `newMappedSize`. Both sizes
// must be divisible by the page size. Returns false on failure.
bool ExtendBufferMapping(void* dataStart, size_t mappedSize, size_t newMappedSize);
#endif
// Remove an existing mapping. `dataStart` must be the pointer to the start of
// the mapping, and `mappedSize` the size of that mapping.
void UnmapBufferMemory(void* dataStart, size_t mappedSize);
// Return the number of currently live mapped buffers.
int32_t LiveMappedBufferCount();
// The inheritance hierarchy for the various classes relating to typed arrays
// is as follows.
//
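A usage sketch of the API declared above, as an illustrative driver only (not part of the patch): the sizes are invented, assumed to be multiples of the system page size, and the declarations above are taken to be in scope.

static bool
ExampleBufferLifecycle()
{
    const size_t mappedSize = 1 << 20;   // reserve 1 MiB of address space
    const size_t initial    = 1 << 16;   // commit the first 64 KiB

    void* base = js::MapBufferMemory(mappedSize, initial);
    if (!base)
        return false;                    // mapping failed or live-buffer limit hit

    // Commit the next 64 KiB; dataEnd must be the end of the currently
    // committed prefix.
    void* dataEnd = static_cast<char*>(base) + initial;
    if (!js::CommitBufferMemory(dataEnd, 1 << 16)) {
        js::UnmapBufferMemory(base, mappedSize);
        return false;
    }

    // ... use [base, base + 2 * initial) ...

    // Unmap with the full reservation size; this also decrements the
    // live-buffer count.
    js::UnmapBufferMemory(base, mappedSize);
    return true;
}

On builds without WASM_HUGE_MEMORY, ExtendBufferMapping would sit between such commit steps when the committed data approaches mappedSize, growing the reservation in place before further commits.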

View file

@@ -202,6 +202,7 @@ JS_ShutDown(void)
if (!JSRuntime::hasLiveRuntimes()) {
js::wasm::ReleaseBuiltinThunks();
js::jit::ReleaseProcessExecutableMemory();
+MOZ_ASSERT(!js::LiveMappedBufferCount());
}
js::ShutDownMallocAllocator();

View file

@@ -10,17 +10,7 @@
#include "jsfriendapi.h"
#include "jsprf.h"
-#ifdef XP_WIN
-# include "jswin.h"
-#endif
#include "jswrapper.h"
-#ifndef XP_WIN
-# include <sys/mman.h>
-#endif
-#ifdef MOZ_VALGRIND
-# include <valgrind/memcheck.h>
-#endif
#include "jit/AtomicOperations.h"
#include "vm/SharedMem.h"
@@ -33,46 +23,6 @@
using namespace js;
-static inline void*
-MapMemory(size_t length, bool commit)
-{
-#ifdef XP_WIN
-int prot = (commit ? MEM_COMMIT : MEM_RESERVE);
-int flags = (commit ? PAGE_READWRITE : PAGE_NOACCESS);
-return VirtualAlloc(nullptr, length, prot, flags);
-#else
-int prot = (commit ? (PROT_READ | PROT_WRITE) : PROT_NONE);
-void* p = mmap(nullptr, length, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-if (p == MAP_FAILED)
-return nullptr;
-return p;
-#endif
-}
-static inline void
-UnmapMemory(void* addr, size_t len)
-{
-#ifdef XP_WIN
-VirtualFree(addr, 0, MEM_RELEASE);
-#else
-munmap(addr, len);
-#endif
-}
-static inline bool
-MarkValidRegion(void* addr, size_t len)
-{
-#ifdef XP_WIN
-if (!VirtualAlloc(addr, len, MEM_COMMIT, PAGE_READWRITE))
-return false;
-return true;
-#else
-if (mprotect(addr, len, PROT_READ | PROT_WRITE))
-return false;
-return true;
-#endif
-}
// Since this SharedArrayBuffer will likely be used for asm.js code, prepare it
// for asm.js by mapping the 4gb protected zone described in WasmTypes.h.
// Since we want to put the SharedArrayBuffer header immediately before the
@@ -88,28 +38,12 @@ SharedArrayMappedSize(uint32_t allocSize)
#endif
}
-// If there are too many 4GB buffers live we run up against system resource
-// exhaustion (address space or number of memory map descriptors), see
-// bug 1068684, bug 1073934 for details. The limiting case seems to be
-// Windows Vista Home 64-bit, where the per-process address space is limited
-// to 8TB. Thus we track the number of live objects, and set a limit of
-// 1000 live objects per process; we run synchronous GC if necessary; and
-// we throw an OOM error if the per-process limit is exceeded.
-static mozilla::Atomic<int32_t, mozilla::ReleaseAcquire> liveBufferCount(0);
-static const int32_t MaximumLiveSharedArrayBuffers = 1000;
static uint32_t
SharedArrayAllocSize(uint32_t length)
{
return AlignBytes(length + gc::SystemPageSize(), gc::SystemPageSize());
}
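A worked example of this size computation, assuming a 4 KiB system page: SharedArrayAllocSize(65536) = AlignBytes(65536 + 4096, 4096) = 69632, that is, the user length plus exactly one extra page, which holds the SharedArrayRawBuffer header so that the data itself stays page-aligned. For a length that is not a page multiple, SharedArrayAllocSize(100) = AlignBytes(4196, 4096) = 8192.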
-int32_t
-SharedArrayRawBuffer::liveBuffers()
-{
-return liveBufferCount;
-}
SharedArrayRawBuffer*
SharedArrayRawBuffer::New(JSContext* cx, uint32_t length)
{
@@ -122,48 +56,15 @@ SharedArrayRawBuffer::New(JSContext* cx, uint32_t length)
if (allocSize <= length)
return nullptr;
-// Test >= to guard against the case where multiple extant runtimes
-// race to allocate.
-if (++liveBufferCount >= MaximumLiveSharedArrayBuffers) {
-if (OnLargeAllocationFailure)
-OnLargeAllocationFailure();
-if (liveBufferCount >= MaximumLiveSharedArrayBuffers) {
-liveBufferCount--;
-return nullptr;
-}
-}
bool preparedForAsmJS = jit::JitOptions.asmJSAtomicsEnable && IsValidAsmJSHeapLength(length);
void* p = nullptr;
-if (preparedForAsmJS) {
-uint32_t mappedSize = SharedArrayMappedSize(allocSize);
-// Get the entire reserved region (with all pages inaccessible)
-p = MapMemory(mappedSize, false);
-if (!p) {
-liveBufferCount--;
-return nullptr;
-}
-if (!MarkValidRegion(p, allocSize)) {
-UnmapMemory(p, mappedSize);
-liveBufferCount--;
-return nullptr;
-}
-# if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
-// Tell Valgrind/Memcheck to not report accesses in the inaccessible region.
-VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)p + allocSize,
-mappedSize - allocSize);
-# endif
-} else {
-p = MapMemory(allocSize, true);
-if (!p) {
-liveBufferCount--;
-return nullptr;
-}
-}
+if (preparedForAsmJS)
+p = MapBufferMemory(SharedArrayMappedSize(allocSize), allocSize);
+else
+p = MapBufferMemory(allocSize, allocSize);
+if (!p)
+return nullptr;
uint8_t* buffer = reinterpret_cast<uint8_t*>(p) + gc::SystemPageSize();
uint8_t* base = buffer - sizeof(SharedArrayRawBuffer);
@@ -207,23 +108,10 @@ SharedArrayRawBuffer::dropReference()
uint8_t* address = p.unwrap(/*safe - only reference*/);
uint32_t allocSize = SharedArrayAllocSize(this->length);
-if (this->preparedForAsmJS) {
-uint32_t mappedSize = SharedArrayMappedSize(allocSize);
-UnmapMemory(address, mappedSize);
-# if defined(MOZ_VALGRIND) && defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)
-// Tell Valgrind/Memcheck to recommence reporting accesses in the
-// previously-inaccessible region.
-VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(address, mappedSize);
-# endif
-} else {
-UnmapMemory(address, allocSize);
-}
-// Decrement the buffer counter at the end -- otherwise, a race condition
-// could enable the creation of unlimited buffers.
-liveBufferCount--;
+if (this->preparedForAsmJS)
+UnmapBufferMemory(address, SharedArrayMappedSize(allocSize));
+else
+UnmapBufferMemory(address, allocSize);
}