Bug 1814924: Implement memory.discard for shared memories. r=rhunt

Differential Revision: https://phabricator.services.mozilla.com/D170839
This commit is contained in:
Ben Visness 2023-03-22 16:31:10 +00:00
Parent 5717354dcd
Commit 2182e672ee
8 changed files: 169 additions and 57 deletions

View file

@ -442,7 +442,6 @@ MSG_DEF(JSMSG_WASM_BAD_CAST, 0, JSEXN_WASMRUNTIMEERROR, "bad cast")
MSG_DEF(JSMSG_WASM_MEM_IMP_LIMIT, 0, JSEXN_WASMRUNTIMEERROR, "too many memory pages")
MSG_DEF(JSMSG_WASM_TABLE_IMP_LIMIT, 0, JSEXN_WASMRUNTIMEERROR, "too many table elements")
MSG_DEF(JSMSG_WASM_ARRAY_IMP_LIMIT, 0, JSEXN_WASMRUNTIMEERROR, "too many array elements")
MSG_DEF(JSMSG_WASM_NOT_IMPLEMENTED, 0, JSEXN_WASMRUNTIMEERROR, "instruction not yet implemented")
MSG_DEF(JSMSG_WASM_BAD_RANGE, 2, JSEXN_RANGEERR, "bad {0} {1}")
MSG_DEF(JSMSG_WASM_BAD_GROW, 1, JSEXN_RANGEERR, "failed to grow {0}")
MSG_DEF(JSMSG_WASM_TABLE_OUT_OF_BOUNDS, 0, JSEXN_WASMRUNTIMEERROR, "table index out of bounds")

View file

@ -8,9 +8,10 @@
// The ultimate goal is to release physical pages of memory back to the
// operating system, but we can't really observe memory metrics here. Oh well.
function initModule(discardOffset, discardLen, discardViaJS, memType = 'i32') {
function initModule(discardOffset, discardLen, discardViaJS, shared, memType = 'i32') {
const memProps = shared ? '4 4 shared' : '4'; // 4 pages
const text = `(module
(memory (export "memory") ${memType} 4) ;; 4 pages
(memory (export "memory") ${memType} ${memProps})
(data "abcdefghijklmnopqrstuvwxyz")
(func (export "init")
;; splat alphabet halfway across the 3/4 page boundary.
@ -41,17 +42,21 @@ function checkSecondHalf(exp, expectLetters) { return checkRegion(exp, 13, 26, e
function checkWholeAlphabet(exp, expectLetters) { return checkRegion(exp, 0, 26, expectLetters) }
function testAll(func) {
func(false, 'i32');
func(true, 'i32');
func(false, false, 'i32');
func(false, true, 'i32');
func(true, false, 'i32');
func(true, true, 'i32');
if (wasmMemory64Enabled()) {
func(false, 'i64');
func(true, 'i64');
func(false, false, 'i64');
func(false, true, 'i64');
func(true, false, 'i64');
func(true, true, 'i64');
}
}
testAll(function testHappyPath(discardViaJS, memType) {
testAll(function testHappyPath(discardViaJS, shared, memType) {
// Only page 3 of memory, half the alphabet
const [exp, discard] = initModule(65536 * 2, 65536, discardViaJS, memType);
const [exp, discard] = initModule(65536 * 2, 65536, discardViaJS, shared, memType);
// All zero to start
checkWholeAlphabet(exp, false);
@ -75,9 +80,9 @@ testAll(function testHappyPath(discardViaJS, memType) {
checkSecondHalf(exp, true);
});
testAll(function testZeroLen(discardViaJS) {
testAll(function testZeroLen(discardViaJS, shared) {
// Discard zero bytes
const [exp, discard] = initModule(PageSizeInBytes * 2, 0, discardViaJS);
const [exp, discard] = initModule(PageSizeInBytes * 2, 0, discardViaJS, shared);
// Init the stuff
exp.init();
@ -90,10 +95,14 @@ testAll(function testZeroLen(discardViaJS) {
checkWholeAlphabet(exp, true);
});
testAll(function testWithGrow(discardViaJS, memType) {
testAll(function testWithGrow(discardViaJS, shared, memType) {
if (shared) {
return; // shared memories cannot grow
}
// Only page 3 of memory, half the alphabet. There is no max size on the
// memory, so it will be subject to moving grows in 32-bit mode.
const [exp, discard] = initModule(65536 * 2, 65536, discardViaJS, memType);
const [exp, discard] = initModule(65536 * 2, 65536, discardViaJS, false, memType);
// Start with the whole alphabet
exp.init();
@ -119,11 +128,11 @@ testAll(function testWithGrow(discardViaJS, memType) {
discard();
checkFirstHalf(exp, false);
checkSecondHalf(exp, true);
})
});
testAll(function testOOB(discardViaJS) {
testAll(function testOOB(discardViaJS, shared) {
// Discard two pages where there is only one
const [exp, discard] = initModule(PageSizeInBytes * 3, PageSizeInBytes * 2, discardViaJS);
const [exp, discard] = initModule(PageSizeInBytes * 3, PageSizeInBytes * 2, discardViaJS, shared);
exp.init();
checkWholeAlphabet(exp, true);
@ -133,10 +142,10 @@ testAll(function testOOB(discardViaJS) {
checkWholeAlphabet(exp, true);
});
testAll(function testOOB2(discardViaJS) {
testAll(function testOOB2(discardViaJS, shared) {
// Discard two pages starting near the end of 32-bit address space
// (would trigger an overflow in 32-bit world)
const [exp, discard] = initModule(2 ** 32 - PageSizeInBytes, PageSizeInBytes * 2, discardViaJS);
const [exp, discard] = initModule(2 ** 32 - PageSizeInBytes, PageSizeInBytes * 2, discardViaJS, shared);
exp.init();
checkWholeAlphabet(exp, true);
@ -146,9 +155,9 @@ testAll(function testOOB2(discardViaJS) {
checkWholeAlphabet(exp, true);
});
testAll(function testOOB3(discardViaJS) {
testAll(function testOOB3(discardViaJS, shared) {
// Discard nearly an entire 32-bit address space's worth of pages. Very exciting!
const [exp, discard] = initModule(0, 2 ** 32 - PageSizeInBytes, discardViaJS);
const [exp, discard] = initModule(0, 2 ** 32 - PageSizeInBytes, discardViaJS, shared);
exp.init();
checkWholeAlphabet(exp, true);
@ -179,7 +188,7 @@ if (wasmMemory64Enabled()) {
// This cannot be done with a JS discard because JS can't actually represent big enough integers.
// The big ol' number here is 2^64 - (65536 * 2)
const [exp, discard] = initModule(65536 * 3, `18_446_744_073_709_420_544`, false, 'i64');
const [exp, discard] = initModule(65536 * 3, `18_446_744_073_709_420_544`, false, false, 'i64');
// Init the stuff
exp.init();
@ -193,9 +202,9 @@ if (wasmMemory64Enabled()) {
})();
}
testAll(function testMisalignedStart(discardViaJS) {
testAll(function testMisalignedStart(discardViaJS, shared) {
// Discard only the first half of the alphabet (this misaligns the start)
const [exp, discard] = initModule(PageSizeInBytes * 3 - 13, 13, discardViaJS);
const [exp, discard] = initModule(PageSizeInBytes * 3 - 13, 13, discardViaJS, shared);
exp.init();
checkWholeAlphabet(exp, true);
@ -205,9 +214,9 @@ testAll(function testMisalignedStart(discardViaJS) {
checkWholeAlphabet(exp, true);
});
testAll(function testMisalignedEnd(discardViaJS) {
testAll(function testMisalignedEnd(discardViaJS, shared) {
// Discard only the second half of the alphabet (this misaligns the end)
const [exp, discard] = initModule(PageSizeInBytes * 3, 13, discardViaJS);
const [exp, discard] = initModule(PageSizeInBytes * 3, 13, discardViaJS, shared);
exp.init();
checkWholeAlphabet(exp, true);

View file

@ -8,14 +8,18 @@
#include "mozilla/Atomics.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/TaggedAnonymousMemory.h"
#include "gc/GCContext.h"
#include "gc/Memory.h"
#include "jit/AtomicOperations.h"
#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
#include "js/PropertySpec.h"
#include "js/SharedArrayBuffer.h"
#include "util/Memory.h"
#include "util/WindowsWrapper.h"
#include "vm/SharedMem.h"
#include "wasm/WasmConstants.h"
#include "wasm/WasmMemory.h"
#include "vm/ArrayBufferObject-inl.h"
@ -29,6 +33,7 @@ using mozilla::Nothing;
using mozilla::Some;
using namespace js;
using namespace js::jit;
static size_t WasmSharedArrayAccessibleSize(size_t length) {
return AlignBytes(length, gc::SystemPageSize());
@ -169,6 +174,67 @@ bool WasmSharedArrayRawBuffer::wasmGrowToPagesInPlace(const Lock&,
return true;
}
// Discard the byte range [byteOffset, byteOffset + byteLen) of this shared
// wasm memory: the range reads as zero afterwards, and physical pages are
// released back to the operating system where the platform allows it.
// byteOffset and byteLen must be wasm-page aligned and in bounds (asserted
// below); a zero-length discard is a no-op.
void WasmSharedArrayRawBuffer::discard(size_t byteOffset, size_t byteLen) {
SharedMem<uint8_t*> memBase = dataPointerShared();
// The caller is responsible for ensuring these conditions are met; see this
// function's comment in SharedArrayObject.h.
MOZ_ASSERT(byteOffset % wasm::PageSize == 0);
MOZ_ASSERT(byteLen % wasm::PageSize == 0);
MOZ_ASSERT(wasm::MemoryBoundsCheck(uint64_t(byteOffset), uint64_t(byteLen),
volatileByteLength()));
// Discarding zero bytes "succeeds" with no effect.
if (byteLen == 0) {
return;
}
// First byte of the region to discard.
SharedMem<uint8_t*> addr = memBase + uintptr_t(byteOffset);
// On POSIX-ish platforms, we discard memory by overwriting previously-mapped
// pages with freshly-mapped pages (which are all zeroed). The operating
// system recognizes this and decreases the process RSS, and eventually
// collects the abandoned physical pages.
//
// On Windows, committing over previously-committed pages has no effect. We
// could decommit and recommit, but this doesn't work for shared memories
// since other threads could access decommitted memory - causing a trap.
// Instead, we simply zero memory (memset 0), and then VirtualUnlock(), which
// for Historical Reasons immediately removes the pages from the working set.
// And then, because the pages were zeroed, Windows will actually reclaim the
// memory entirely instead of paging it out to disk. Naturally this behavior
// is not officially documented, but a Raymond Chen blog post is basically as
// good as MSDN, right?
//
// https://devblogs.microsoft.com/oldnewthing/20170113-00/?p=95185
#ifdef XP_WIN
// Discarding the entire region at once causes us to page the entire region
// into the working set, only to throw it out again. This can be actually
// disastrous when discarding already-discarded memory. To mitigate this, we
// discard a chunk of memory at a time - this comes at a small performance
// cost from syscalls and potentially less-optimal memsets.
size_t numPages = byteLen / wasm::PageSize;
for (size_t i = 0; i < numPages; i++) {
// Zero one wasm page with a race-safe memset (other threads may be
// touching this shared memory concurrently).
AtomicOperations::memsetSafeWhenRacy(addr + (i * wasm::PageSize), 0,
wasm::PageSize);
DebugOnly<bool> result =
VirtualUnlock(addr.unwrap() + (i * wasm::PageSize), wasm::PageSize);
MOZ_ASSERT(!result); // this always "fails" when unlocking unlocked
// memory...which is the only case we care about
}
#elif defined(__wasi__)
// WASI: no page-remapping is done here; the best we do is zero the range.
AtomicOperations::memsetSafeWhenRacy(addr, 0, byteLen);
#else // !XP_WIN
// POSIX: map fresh zeroed pages over the range in place (MAP_FIXED).
void* data = MozTaggedAnonymousMmap(
addr.unwrap(), byteLen, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0, "wasm-reserved");
if (data == MAP_FAILED) {
// A failed MAP_FIXED mmap may have unmapped the old pages already, so we
// cannot safely continue.
MOZ_CRASH("failed to discard wasm memory; memory mappings may be broken");
}
#endif
}
bool SharedArrayRawBuffer::addReference() {
MOZ_RELEASE_ASSERT(refcount_ > 0);
@ -417,6 +483,14 @@ SharedArrayBufferObject* SharedArrayBufferObject::createFromNewRawBuffer(
return obj;
}
/* static */
// Discard a page-aligned region of a shared wasm memory by forwarding the
// request to the underlying raw buffer. Only wasm-backed shared buffers
// support discarding, hence the assertion.
void SharedArrayBufferObject::wasmDiscard(HandleSharedArrayBufferObject buf,
                                          uint64_t byteOffset,
                                          uint64_t byteLen) {
  MOZ_ASSERT(buf->isWasm());
  auto* rawBuffer = buf->rawWasmBufferObject();
  rawBuffer->discard(byteOffset, byteLen);
}
static const JSClassOps SharedArrayBufferObjectClassOps = {
nullptr, // addProperty
nullptr, // delProperty

View file

@ -151,6 +151,11 @@ class WasmSharedArrayRawBuffer : public SharedArrayRawBuffer {
dataPtr - sizeof(WasmSharedArrayRawBuffer));
}
// Non-const overload: recover the WasmSharedArrayRawBuffer from its data
// pointer by stepping back over the header, which is assumed to be allocated
// immediately before the data (the subtraction below relies on that layout —
// see the matching const overload above).
static WasmSharedArrayRawBuffer* fromDataPtr(uint8_t* dataPtr) {
return reinterpret_cast<WasmSharedArrayRawBuffer*>(
dataPtr - sizeof(WasmSharedArrayRawBuffer));
}
wasm::IndexType wasmIndexType() const { return indexType_; }
wasm::Pages volatileWasmPages() const {
@ -166,6 +171,11 @@ class WasmSharedArrayRawBuffer : public SharedArrayRawBuffer {
bool wasmGrowToPagesInPlace(const Lock&, wasm::IndexType t,
wasm::Pages newPages);
// Discard a region of memory, zeroing the pages and releasing physical memory
// back to the operating system. byteOffset and byteLen must be wasm page
// aligned and in bounds. A discard of zero bytes will have no effect.
void discard(size_t byteOffset, size_t byteLen);
};
inline WasmSharedArrayRawBuffer* SharedArrayRawBuffer::toWasmBuffer() {
@ -298,6 +308,9 @@ class SharedArrayBufferObject : public ArrayBufferObjectMaybeShared {
size_t wasmMappedSize() const { return rawWasmBufferObject()->mappedSize(); }
static void wasmDiscard(Handle<SharedArrayBufferObject*> buf,
uint64_t byteOffset, uint64_t byteLength);
private:
[[nodiscard]] bool acceptRawBuffer(SharedArrayRawBuffer* buffer,
size_t length);

View file

@ -918,23 +918,47 @@ bool Instance::initElems(uint32_t tableIndex, const ElemSegment& seg,
}
template <typename I>
static int32_t MemDiscardNotShared(Instance* instance, I byteOffset, I byteLen,
uint8_t* memBase) {
static bool WasmDiscardCheck(Instance* instance, I byteOffset, I byteLen,
size_t memLen, bool shared) {
JSContext* cx = instance->cx();
if (byteOffset % wasm::PageSize != 0 || byteLen % wasm::PageSize != 0) {
ReportTrapError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
return -1;
return false;
}
WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->byteLength();
if (!MemoryBoundsCheck(byteOffset, byteLen, memLen)) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
return false;
}
return true;
}
// Perform a memory.discard on a non-shared wasm memory. Validates alignment
// and bounds via WasmDiscardCheck, then releases the pages through the raw
// buffer. Returns 0 on success, or -1 after reporting a trap.
template <typename I>
static int32_t MemDiscardNotShared(Instance* instance, I byteOffset, I byteLen,
                                   uint8_t* memBase) {
  WasmArrayRawBuffer* buffer = WasmArrayRawBuffer::fromDataPtr(memBase);
  const size_t memoryLength = buffer->byteLength();
  if (!WasmDiscardCheck(instance, byteOffset, byteLen, memoryLength,
                        /* shared = */ false)) {
    return -1;
  }
  buffer->discard(byteOffset, byteLen);
  return 0;
}
template <typename I>
static int32_t MemDiscardShared(Instance* instance, I byteOffset, I byteLen,
uint8_t* memBase) {
WasmSharedArrayRawBuffer* rawBuf =
WasmSharedArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->volatileByteLength();
if (!WasmDiscardCheck(instance, byteOffset, byteLen, memLen, true)) {
return -1;
}
rawBuf->discard(byteOffset, byteLen);
return 0;
@ -956,18 +980,16 @@ static int32_t MemDiscardNotShared(Instance* instance, I byteOffset, I byteLen,
/* static */ int32_t Instance::memDiscardShared_m32(Instance* instance,
uint32_t byteOffset,
uint32_t len,
uint32_t byteLen,
uint8_t* memBase) {
ReportTrapError(instance->cx(), JSMSG_WASM_NOT_IMPLEMENTED);
return -1;
return MemDiscardShared(instance, byteOffset, byteLen, memBase);
}
/* static */ int32_t Instance::memDiscardShared_m64(Instance* instance,
uint64_t byteOffset,
uint64_t len,
uint64_t byteLen,
uint8_t* memBase) {
ReportTrapError(instance->cx(), JSMSG_WASM_NOT_IMPLEMENTED);
return -1;
return MemDiscardShared(instance, byteOffset, byteLen, memBase);
}
/* static */ void* Instance::tableGet(Instance* instance, uint32_t index,

View file

@ -430,13 +430,13 @@ class alignas(16) Instance {
static int32_t tableFill(Instance* instance, uint32_t start, void* value,
uint32_t len, uint32_t tableIndex);
static int32_t memDiscard_m32(Instance* instance, uint32_t byteOffset,
uint32_t len, uint8_t* memBase);
uint32_t byteLen, uint8_t* memBase);
static int32_t memDiscardShared_m32(Instance* instance, uint32_t byteOffset,
uint32_t len, uint8_t* memBase);
uint32_t byteLen, uint8_t* memBase);
static int32_t memDiscard_m64(Instance* instance, uint64_t byteOffset,
uint64_t len, uint8_t* memBase);
uint64_t byteLen, uint8_t* memBase);
static int32_t memDiscardShared_m64(Instance* instance, uint64_t byteOffset,
uint64_t len, uint8_t* memBase);
uint64_t byteLen, uint8_t* memBase);
static void* tableGet(Instance* instance, uint32_t index,
uint32_t tableIndex);
static uint32_t tableGrow(Instance* instance, void* initValue, uint32_t delta,

View file

@ -54,6 +54,7 @@
#include "vm/JSFunction.h"
#include "vm/PlainObject.h" // js::PlainObject
#include "vm/PromiseObject.h" // js::PromiseObject
#include "vm/SharedArrayObject.h"
#include "vm/StringType.h"
#include "vm/Warnings.h" // js::WarnNumberASCII
#include "vm/WellKnownAtom.h" // js_*_str
@ -2724,9 +2725,7 @@ bool WasmMemoryObject::discardImpl(JSContext* cx, const CallArgs& args) {
return false;
}
if (!discard(memory, byteOffset, byteLen, cx)) {
return false;
}
discard(memory, byteOffset, byteLen, cx);
args.rval().setUndefined();
return true;
@ -2963,21 +2962,17 @@ uint64_t WasmMemoryObject::grow(Handle<WasmMemoryObject*> memory,
}
/* static */
bool WasmMemoryObject::discard(Handle<WasmMemoryObject*> memory,
void WasmMemoryObject::discard(Handle<WasmMemoryObject*> memory,
uint64_t byteOffset, uint64_t byteLen,
JSContext* cx) {
// TODO: Discard should never actually fail. Once we have implemented this for
// shared memories, change the return type of this function back to void and
// clean up the usage site.
if (memory->isShared()) {
JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
JSMSG_WASM_NOT_IMPLEMENTED);
return false;
RootedSharedArrayBufferObject buf(
cx, &memory->buffer().as<SharedArrayBufferObject>());
SharedArrayBufferObject::wasmDiscard(buf, byteOffset, byteLen);
} else {
RootedArrayBufferObject buf(cx, &memory->buffer().as<ArrayBufferObject>());
ArrayBufferObject::wasmDiscard(buf, byteOffset, byteLen);
}
RootedArrayBufferObject buf(cx, &memory->buffer().as<ArrayBufferObject>());
ArrayBufferObject::wasmDiscard(buf, byteOffset, byteLen);
return true;
}
bool js::wasm::IsSharedWasmMemoryObject(JSObject* obj) {

View file

@ -433,7 +433,7 @@ class WasmMemoryObject : public NativeObject {
bool addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance);
static uint64_t grow(Handle<WasmMemoryObject*> memory, uint64_t delta,
JSContext* cx);
static bool discard(Handle<WasmMemoryObject*> memory, uint64_t byteOffset,
static void discard(Handle<WasmMemoryObject*> memory, uint64_t byteOffset,
uint64_t len, JSContext* cx);
};