Bug 1701620 part 4 - Fix memory support for WASI. r=jandem,sfink

WASI lacks support for much of the memory functionality other platforms
provide, such as mmap and memory protection, but it does have malloc, so we
use that instead. This patch also stubs out all uses of the missing memory
functionality under WASI.

Differential Revision: https://phabricator.services.mozilla.com/D110075
Chris Fallin 2021-04-13 08:25:10 +00:00
Parent 0ba7033c5a
Commit 605271bd29
15 changed files: 228 additions and 46 deletions
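The approach is the same in every file touched below: an anonymous,
zero-filled mapping is emulated with posix_memalign plus memset, and
unmapping becomes free. A minimal sketch of that pattern, with illustrative
names rather than the actual SpiderMonkey entry points:

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

// Emulate an anonymous, zeroed "mapping": WASI has no mmap, but aligned
// heap memory plus memset is observably equivalent for this use.
static void* WasiMapPages(size_t alignment, size_t length) {
  void* region = nullptr;
  if (int err = posix_memalign(&region, alignment, length)) {
    assert(err == ENOMEM);  // the only failure the patch expects
    return nullptr;
  }
  memset(region, 0, length);  // fresh mmap'd pages are zeroed; match that
  return region;
}

// "Unmapping" is just free(); the length parameter is kept only for
// symmetry with munmap-shaped callers.
static void WasiUnmapPages(void* region, size_t /*length*/) { free(region); }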

View file

@@ -11,6 +11,8 @@
#if defined(XP_WIN)
# include "util/Windows.h"
#elif defined(__wasi__)
// Nothing.
#elif defined(XP_UNIX) && !defined(XP_DARWIN)
# include <signal.h>
# include <sys/types.h>
@@ -121,6 +123,8 @@ bool MemoryProtectionExceptionHandler::isDisabled() {
#elif !defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
// Disable the exception handler for Beta and Release builds.
return true;
#elif defined(__wasi__)
return true;
#else
return false;
#endif
@@ -232,6 +236,12 @@ void MemoryProtectionExceptionHandler::uninstall() {
}
}
#elif defined(__wasi__)
bool MemoryProtectionExceptionHandler::install() { return true; }
void MemoryProtectionExceptionHandler::uninstall() {}
#elif defined(XP_UNIX) && !defined(XP_DARWIN)
static struct sigaction sPrevSEGVHandler = {};

View file

@@ -206,7 +206,7 @@
#include <iterator>
#include <string.h>
#include <utility>
#ifndef XP_WIN
#if !defined(XP_WIN) && !defined(__wasi__)
# include <sys/mman.h>
# include <unistd.h>
#endif

View file

@@ -20,6 +20,10 @@
# include "util/Windows.h"
# include <psapi.h>
#elif defined(__wasi__)
/* nothing */
#else
# include <algorithm>
@@ -140,6 +144,15 @@ enum class PageAccess : DWORD {
ReadExecute = PAGE_EXECUTE_READ,
ReadWriteExecute = PAGE_EXECUTE_READWRITE,
};
#elif defined(__wasi__)
enum class PageAccess : int {
None = 0,
Read = 0,
ReadWrite = 0,
Execute = 0,
ReadExecute = 0,
ReadWriteExecute = 0,
};
#else
enum class PageAccess : int {
None = PROT_NONE,
@@ -155,7 +168,9 @@ template <bool AlwaysGetNew = true>
static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
size_t length, size_t alignment);
#ifndef __wasi__
static void* MapAlignedPagesSlow(size_t length, size_t alignment);
#endif // !__wasi__
static void* MapAlignedPagesLastDitch(size_t length, size_t alignment);
#ifdef JS_64BIT
@@ -180,6 +195,14 @@ static inline void* MapInternal(void* desired, size_t length) {
DWORD flags =
(commit == Commit::Yes ? MEM_RESERVE | MEM_COMMIT : MEM_RESERVE);
region = VirtualAlloc(desired, length, flags, DWORD(prot));
#elif defined(__wasi__)
if (int err = posix_memalign(&region, gc::SystemPageSize(), length)) {
MOZ_RELEASE_ASSERT(err == ENOMEM);
return nullptr;
}
if (region) {
memset(region, 0, length);
}
#else
int flags = MAP_PRIVATE | MAP_ANON;
region = MozTaggedAnonymousMmap(desired, length, int(prot), flags, -1, 0,
@@ -197,6 +220,8 @@ static inline void UnmapInternal(void* region, size_t length) {
#ifdef XP_WIN
MOZ_RELEASE_ASSERT(VirtualFree(region, 0, MEM_RELEASE) != 0);
#elif defined(__wasi__)
free(region);
#else
if (munmap(region, length)) {
MOZ_RELEASE_ASSERT(errno == ENOMEM);
@@ -407,7 +432,18 @@ void* MapAlignedPages(size_t length, size_t alignment) {
alignment = allocGranularity;
}
#ifdef JS_64BIT
#ifdef __wasi__
void* region = nullptr;
if (int err = posix_memalign(&region, alignment, length)) {
MOZ_ASSERT(err == ENOMEM);
return nullptr;
}
MOZ_ASSERT(region != nullptr);
memset(region, 0, length);
return region;
#else
# ifdef JS_64BIT
// Use the scattershot allocator if the address range is large enough.
if (UsingScattershotAllocator()) {
void* region = MapAlignedPagesRandom(length, alignment);
@@ -417,7 +453,7 @@ void* MapAlignedPages(size_t length, size_t alignment) {
return region;
}
#endif
# endif
// Try to allocate the region. If the returned address is aligned,
// either we OOMed (region is nullptr) or we're done.
@@ -460,6 +496,7 @@ void* MapAlignedPages(size_t length, size_t alignment) {
// At this point we should either have an aligned region or nullptr.
MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
return region;
#endif // !__wasi__
}
#ifdef JS_64BIT
@@ -546,27 +583,28 @@ static void* MapAlignedPagesRandom(size_t length, size_t alignment) {
#endif // defined(JS_64BIT)
#ifndef __wasi__
static void* MapAlignedPagesSlow(size_t length, size_t alignment) {
void* alignedRegion = nullptr;
do {
size_t reserveLength = length + alignment - pageSize;
#ifdef XP_WIN
# ifdef XP_WIN
// Don't commit the requested pages as we won't use the region directly.
void* region = MapMemory<Commit::No>(reserveLength);
#else
# else
void* region = MapMemory(reserveLength);
#endif
# endif
if (!region) {
return nullptr;
}
alignedRegion =
reinterpret_cast<void*>(AlignBytes(uintptr_t(region), alignment));
#ifdef XP_WIN
# ifdef XP_WIN
// Windows requires that map and unmap calls be matched, so deallocate
// and immediately reallocate at the desired (aligned) address.
UnmapInternal(region, reserveLength);
alignedRegion = MapMemoryAt(alignedRegion, length);
#else
# else
// munmap allows us to simply unmap the pages that don't interest us.
if (alignedRegion != region) {
UnmapInternal(region, uintptr_t(alignedRegion) - uintptr_t(region));
@@ -578,12 +616,13 @@ static void* MapAlignedPagesSlow(size_t length, size_t alignment) {
if (alignedEnd != regionEnd) {
UnmapInternal(alignedEnd, uintptr_t(regionEnd) - uintptr_t(alignedEnd));
}
#endif
# endif
// On Windows we may have raced with another thread; if so, try again.
} while (!alignedRegion);
return alignedRegion;
}
#endif // !__wasi__
/*
* In a low memory or high fragmentation situation, alignable chunks of the
@@ -767,6 +806,8 @@ bool MarkPagesUnusedSoft(void* region, size_t length) {
#if defined(XP_WIN)
return VirtualAlloc(region, length, MEM_RESET,
DWORD(PageAccess::ReadWrite)) == region;
#elif defined(__wasi__)
return false;
#else
int status;
do {
@@ -838,6 +879,8 @@ size_t GetPageFaultCount() {
return 0;
}
return pmc.PageFaultCount;
#elif defined(__wasi__)
return 0;
#else
struct rusage usage;
int err = getrusage(RUSAGE_SELF, &usage);
@@ -850,6 +893,9 @@ size_t GetPageFaultCount() {
void* AllocateMappedContent(int fd, size_t offset, size_t length,
size_t alignment) {
#ifdef __wasi__
MOZ_CRASH("Not yet supported for WASI");
#else
if (length == 0 || alignment == 0 || offset % alignment != 0 ||
std::max(alignment, allocGranularity) %
std::min(alignment, allocGranularity) !=
@@ -867,7 +913,7 @@ void* AllocateMappedContent(int fd, size_t offset, size_t length,
mappedLength += pageSize - alignedLength % pageSize;
}
#ifdef XP_WIN
# ifdef XP_WIN
HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd));
// This call will fail if the file does not exist.
@@ -906,7 +952,7 @@ void* AllocateMappedContent(int fd, size_t offset, size_t length,
if (!map) {
return nullptr;
}
#else // !defined(XP_WIN)
# else // !defined(XP_WIN)
// Sanity check the offset and length, as mmap does not do this for us.
struct stat st;
if (fstat(fd, &st) || offset >= uint64_t(st.st_size) ||
@@ -928,9 +974,9 @@ void* AllocateMappedContent(int fd, size_t offset, size_t length,
UnmapInternal(region, mappedLength);
return nullptr;
}
#endif
# endif
#ifdef DEBUG
# ifdef DEBUG
// Zero out data before and after the desired mapping to catch errors early.
if (offset != alignedOffset) {
memset(map, 0, offset - alignedOffset);
@@ -938,12 +984,16 @@ void* AllocateMappedContent(int fd, size_t offset, size_t length,
if (alignedLength % pageSize) {
memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
}
#endif
# endif
return map + (offset - alignedOffset);
#endif // __wasi__
}
void DeallocateMappedContent(void* region, size_t length) {
#ifdef __wasi__
MOZ_CRASH("Not yet supported for WASI");
#else
if (!region) {
return;
}
@@ -956,14 +1006,15 @@ void DeallocateMappedContent(void* region, size_t length) {
// that might be offset from the mapping, as the beginning of a
// mapping must be aligned with the allocation granularity.
uintptr_t map = uintptr_t(region) - (uintptr_t(region) % allocGranularity);
#ifdef XP_WIN
# ifdef XP_WIN
MOZ_RELEASE_ASSERT(UnmapViewOfFile(reinterpret_cast<void*>(map)) != 0);
#else
# else
size_t alignedLength = length + (uintptr_t(region) % allocGranularity);
if (munmap(reinterpret_cast<void*>(map), alignedLength)) {
MOZ_RELEASE_ASSERT(errno == ENOMEM);
}
#endif
# endif
#endif // __wasi__
}
static inline void ProtectMemory(void* region, size_t length, PageAccess prot) {
@@ -973,6 +1024,8 @@ static inline void ProtectMemory(void* region, size_t length, PageAccess prot) {
DWORD oldProtect;
MOZ_RELEASE_ASSERT(VirtualProtect(region, length, DWORD(prot), &oldProtect) !=
0);
#elif defined(__wasi__)
/* nothing */
#else
MOZ_RELEASE_ASSERT(mprotect(region, length, int(prot)) == 0);
#endif

View file

@@ -34,6 +34,8 @@
#ifdef XP_WIN
# include "mozilla/StackWalk_windows.h"
# include "mozilla/WindowsVersion.h"
#elif defined(__wasi__)
// Nothing.
#else
# include <sys/mman.h>
# include <unistd.h>
@@ -324,7 +326,20 @@ static void DecommitPages(void* addr, size_t bytes) {
MOZ_CRASH("DecommitPages failed");
}
}
#else // !XP_WIN
#elif defined(__wasi__)
static void* ReserveProcessExecutableMemory(size_t bytes) {
MOZ_CRASH("NYI for WASI.");
return nullptr;
}
[[nodiscard]] static bool CommitPages(void* addr, size_t bytes,
ProtectionSetting protection) {
MOZ_CRASH("NYI for WASI.");
return false;
}
static void DecommitPages(void* addr, size_t bytes) {
MOZ_CRASH("NYI for WASI.");
}
#else // !XP_WIN && !__wasi__
# ifndef MAP_NORESERVE
# define MAP_NORESERVE 0
# endif
@@ -556,6 +571,9 @@ class ProcessExecutableMemory {
}
void release() {
#if defined(__wasi__)
MOZ_ASSERT(!initialized());
#else
MOZ_ASSERT(initialized());
MOZ_ASSERT(pages_.empty());
MOZ_ASSERT(pagesAllocated_ == 0);
@@ -563,6 +581,7 @@ class ProcessExecutableMemory {
base_ = nullptr;
rng_.reset();
MOZ_ASSERT(!initialized());
#endif
}
void assertValidAddress(void* p, size_t bytes) const {
@@ -709,6 +728,8 @@ bool js::jit::InitProcessExecutableMemory() {
#ifdef JS_CODEGEN_ARM64
// Initialize instruction cache flushing.
vixl::CPU::SetUp();
#elif defined(__wasi__)
return true;
#endif
return execMemory.init();
}
@@ -772,20 +793,24 @@ bool js::jit::ReprotectRegion(void* start, size_t size,
// We use the C++ fence here -- and not AtomicOperations::fenceSeqCst() --
// primarily because ReprotectRegion will be called while we construct our own
// jitted atomics. But the C++ fence is sufficient and correct, too.
#ifdef __wasi__
MOZ_CRASH("NYI FOR WASI.");
#else
std::atomic_thread_fence(std::memory_order_seq_cst);
#ifdef XP_WIN
# ifdef XP_WIN
DWORD oldProtect;
DWORD flags = ProtectionSettingToFlags(protection);
if (!VirtualProtect(pageStart, size, flags, &oldProtect)) {
return false;
}
#else
# else
unsigned flags = ProtectionSettingToFlags(protection);
if (mprotect(pageStart, size, flags)) {
return false;
}
#endif
# endif
#endif // __wasi__
execMemory.assertValidAddress(pageStart, size);
return true;

View file

@@ -13,6 +13,8 @@
#if defined(XP_WIN)
# include "util/Windows.h"
# include <psapi.h>
#elif defined(__wasi__)
// Nothing.
#else
# include <algorithm>
# include <errno.h>
@@ -296,6 +298,22 @@ void unmapPages(void* p, size_t size) {
MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
}
#elif defined(__wasi__)
void* mapMemoryAt(void* desired, size_t length) { return nullptr; }
void* mapMemory(size_t length) {
void* addr = nullptr;
if (int err = posix_memalign(&addr, js::gc::SystemPageSize(), length)) {
MOZ_ASSERT(err == ENOMEM);
}
MOZ_ASSERT(addr);
memset(addr, 0, length);
return addr;
}
void unmapPages(void* p, size_t size) { free(p); }
#else
void* mapMemoryAt(void* desired, size_t length) {

View file

@@ -1505,21 +1505,28 @@ JS_GetExternalStringCallbacks(JSString* str) {
static void SetNativeStackLimit(JSContext* cx, JS::StackKind kind,
size_t stackSize) {
#if JS_STACK_GROWTH_DIRECTION > 0
#ifdef __wasi__
// WASI makes this easy: we build with the "stack-first" wasm-ld option, so
// the stack grows downward toward zero. Let's set a limit just a bit above
// this so that we catch an overflow before a Wasm trap occurs.
cx->nativeStackLimit[kind] = 1024;
#else // __wasi__
# if JS_STACK_GROWTH_DIRECTION > 0
if (stackSize == 0) {
cx->nativeStackLimit[kind] = UINTPTR_MAX;
} else {
MOZ_ASSERT(cx->nativeStackBase() <= size_t(-1) - stackSize);
cx->nativeStackLimit[kind] = cx->nativeStackBase() + stackSize - 1;
}
#else
# else // stack grows up
if (stackSize == 0) {
cx->nativeStackLimit[kind] = 0;
} else {
MOZ_ASSERT(cx->nativeStackBase() >= stackSize);
cx->nativeStackLimit[kind] = cx->nativeStackBase() - (stackSize - 1);
}
#endif
# endif // stack grows down
#endif // !__wasi__
}
JS_PUBLIC_API void JS_SetNativeStackQuota(JSContext* cx,

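To see why a limit of 1024 suffices here: with wasm-ld's stack-first layout
the stack occupies the lowest addresses and grows toward zero, so the stack
pointer dropping to within a kilobyte of address 0 means overflow is
imminent. A hypothetical check built on that invariant (not SpiderMonkey's
actual code):

#include <stdint.h>

static constexpr uintptr_t kStackLimit = 1024;

// The address of any local variable approximates the current stack pointer.
static bool NearStackOverflow() {
  char probe;
  // Stack-first layout: the stack ends at address 0, so an address at or
  // below the limit means further frames would underflow linear memory.
  return reinterpret_cast<uintptr_t>(&probe) <= kStackLimit;
}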
View file

@@ -47,9 +47,11 @@
#include <sys/types.h>
#include <utility>
#ifdef XP_UNIX
# include <sys/mman.h>
# ifndef __wasi__
# include <sys/mman.h>
# include <sys/wait.h>
# endif
# include <sys/stat.h>
# include <sys/wait.h>
# include <unistd.h>
#endif
#ifdef XP_LINUX

View file

@@ -10,6 +10,8 @@
#ifdef XP_WIN
# include "util/Windows.h"
#elif defined(__wasi__)
// Nothing
#elif defined(XP_DARWIN) || defined(DARWIN) || defined(XP_UNIX)
# include <pthread.h>
# if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
@@ -115,7 +117,18 @@ void* js::GetNativeStackBaseImpl() {
# endif
}
#else /* XP_UNIX */
#elif defined(__wasi__)
// Since we rearrange the layout for WASI via the --stack-first linker flag,
// the final layout is: 0x00 | <- stack | data | heap -> |.
static void* const NativeStackBase = __builtin_frame_address(0);
void* js::GetNativeStackBaseImpl() {
MOZ_ASSERT(JS_STACK_GROWTH_DIRECTION < 0);
return NativeStackBase;
}
#else // __wasi__
void* js::GetNativeStackBaseImpl() {
pthread_t thread = pthread_self();

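The layout comment above can be sanity-checked empirically. A hypothetical
probe, assuming the usual wasi-libc arrangement in which the malloc heap is
placed after the data segment:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

static int sStaticDatum;  // lives in the data segment

// Under --stack-first the expected address order is:
//   stack locals < static data < heap allocations
static void CheckStackFirstLayout() {
  char stackLocal;
  void* heapBlock = malloc(16);
  assert(reinterpret_cast<uintptr_t>(&stackLocal) <
         reinterpret_cast<uintptr_t>(&sStaticDatum));
  assert(reinterpret_cast<uintptr_t>(&sStaticDatum) <
         reinterpret_cast<uintptr_t>(heapBlock));
  free(heapBlock);
}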
View file

@@ -21,7 +21,7 @@
#include <algorithm> // std::max, std::min
#include <memory> // std::uninitialized_copy_n
#include <string.h>
#ifndef XP_WIN
#if !defined(XP_WIN) && !defined(__wasi__)
# include <sys/mman.h>
#endif
#include <tuple> // std::tuple
@@ -180,7 +186,15 @@ void* js::MapBufferMemory(size_t mappedSize, size_t initialCommittedSize) {
VirtualFree(data, 0, MEM_RELEASE);
return nullptr;
}
#else // XP_WIN
#elif defined(__wasi__)
void* data = nullptr;
if (int err = posix_memalign(&data, gc::SystemPageSize(), mappedSize)) {
MOZ_ASSERT(err == ENOMEM);
return nullptr;
}
MOZ_ASSERT(data);
memset(data, 0, mappedSize);
#else // !XP_WIN && !__wasi__
void* data =
MozTaggedAnonymousMmap(nullptr, mappedSize, PROT_NONE,
MAP_PRIVATE | MAP_ANON, -1, 0, "wasm-reserved");
@@ -193,7 +201,7 @@ void* js::MapBufferMemory(size_t mappedSize, size_t initialCommittedSize) {
munmap(data, mappedSize);
return nullptr;
}
#endif // !XP_WIN
#endif // !XP_WIN && !__wasi__
#if defined(MOZ_VALGRIND) && \
defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
@@ -214,11 +222,14 @@ bool js::CommitBufferMemory(void* dataEnd, size_t delta) {
if (!VirtualAlloc(dataEnd, delta, MEM_COMMIT, PAGE_READWRITE)) {
return false;
}
#else // XP_WIN
#elif defined(__wasi__)
// posix_memalign'd memory is already committed
return true;
#else
if (mprotect(dataEnd, delta, PROT_READ | PROT_WRITE)) {
return false;
}
#endif // !XP_WIN
#endif // XP_WIN
#if defined(MOZ_VALGRIND) && \
defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
@@ -241,6 +252,8 @@ bool js::ExtendBufferMapping(void* dataPointer, size_t mappedSize,
return false;
}
return true;
#elif defined(__wasi__)
return false;
#elif defined(XP_LINUX)
// Note this will not move memory (no MREMAP_MAYMOVE specified)
if (MAP_FAILED == mremap(dataPointer, mappedSize, newMappedSize, 0)) {
@@ -259,9 +272,11 @@ void js::UnmapBufferMemory(void* base, size_t mappedSize) {
#ifdef XP_WIN
VirtualFree(base, 0, MEM_RELEASE);
#else // XP_WIN
#elif defined(__wasi__)
free(base);
#else
munmap(base, mappedSize);
#endif // !XP_WIN
#endif // XP_WIN
#if defined(MOZ_VALGRIND) && \
defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)

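Taken together, the hunks above give the wasm buffer primitives these
semantics on WASI: the whole region is committed and zeroed when
MapBufferMemory returns, committing further pages trivially succeeds, and
growing a mapping in place always fails, so callers must size allocations
for their maximum up front. A usage sketch under those assumptions (the
js::* signatures as shown in this file, with Mozilla's assertion macros in
scope):

void WasiBufferLifecycle() {
  const size_t mappedSize = 1 << 20;
  // posix_memalign-backed: fully committed and zero-filled immediately.
  void* data = js::MapBufferMemory(mappedSize, /*initialCommittedSize=*/4096);
  if (!data) {
    return;  // ENOMEM is the only failure mode on this path
  }
  // Committing further pages is a no-op that reports success...
  MOZ_ALWAYS_TRUE(js::CommitBufferMemory(static_cast<char*>(data) + 4096, 4096));
  // ...but in-place growth is impossible without mremap, so this fails.
  MOZ_ASSERT(!js::ExtendBufferMapping(data, mappedSize, 2 * mappedSize));
  js::UnmapBufferMemory(data, mappedSize);  // free() underneath
}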
View file

@@ -14,7 +14,7 @@
#include <string>
#include <string.h>
#ifndef XP_WIN
#if !defined(XP_WIN) && !defined(__wasi__)
# include <sys/mman.h>
#endif

View file

@@ -23,7 +23,10 @@
#include <algorithm>
#include "jit/ProcessExecutableMemory.h"
#ifndef __wasi__
# include "jit/ProcessExecutableMemory.h"
#endif
#include "util/Text.h"
#include "vm/HelperThreadState.h"
#include "vm/Realm.h"

View file

@@ -16,12 +16,14 @@
# include <windows.h>
#elif !defined(__OS2__)
# include <unistd.h>
# include <sys/mman.h>
# ifndef MAP_ANON
# ifdef MAP_ANONYMOUS
# define MAP_ANON MAP_ANONYMOUS
# else
# error "Don't know how to get anonymous memory"
# ifndef __wasi__
# include <sys/mman.h>
# ifndef MAP_ANON
# ifdef MAP_ANONYMOUS
# define MAP_ANON MAP_ANONYMOUS
# else
# error "Don't know how to get anonymous memory"
# endif
# endif
# endif
#endif
@@ -84,7 +86,26 @@ static uintptr_t GetDesiredRegionSize() {
# define RESERVE_FAILED 0
#else // Unix
#elif defined(__wasi__)
# define RESERVE_FAILED 0
static void* ReserveRegion(uintptr_t aRegion, uintptr_t aSize) {
return RESERVE_FAILED;
}
static void ReleaseRegion(void* aRegion, uintptr_t aSize) { return; }
static bool ProbeRegion(uintptr_t aRegion, uintptr_t aSize) {
const auto pageSize = 1 << 16;
MOZ_ASSERT(pageSize == sysconf(_SC_PAGESIZE));
auto heapSize = __builtin_wasm_memory_size(0) * pageSize;
return aRegion + aSize < heapSize;
}
static uintptr_t GetDesiredRegionSize() { return 0; }
#else // __wasi__
# include "mozilla/TaggedAnonymousMemory.h"

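The ProbeRegion implementation above leans on a clang builtin:
__builtin_wasm_memory_size(0) reports the current size of linear memory 0 in
64 KiB wasm pages. A sketch of probing and growing the heap with the
matching builtins; it compiles only when targeting wasm, and in a real WASI
program the libc allocator normally manages growth itself:

#include <stddef.h>

static const size_t kWasmPageSize = 64 * 1024;  // fixed by the wasm spec

static size_t CurrentHeapBytes() {
  // Memory index 0 is the (only) linear memory; the builtin counts pages.
  return __builtin_wasm_memory_size(0) * kWasmPageSize;
}

static bool EnsureHeapBytes(size_t bytes) {
  size_t current = CurrentHeapBytes();
  if (bytes <= current) {
    return true;
  }
  size_t extraPages = (bytes - current + kWasmPageSize - 1) / kWasmPageSize;
  // __builtin_wasm_memory_grow returns the previous size in pages, or
  // (size_t)-1 if the engine refuses to grow the memory.
  return __builtin_wasm_memory_grow(0, extraPages) != size_t(-1);
}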
View file

@@ -34,8 +34,12 @@
#ifndef XP_WIN
# include <sys/types.h>
# include <sys/mman.h>
# ifdef __wasi__
# include <stdlib.h>
# else
# include <sys/types.h>
# include <sys/mman.h>
# endif // __wasi__
# include "mozilla/Types.h"
@@ -66,7 +70,12 @@ static inline void MozTagAnonymousMemory(const void* aPtr, size_t aLength,
static inline void* MozTaggedAnonymousMmap(void* aAddr, size_t aLength,
int aProt, int aFlags, int aFd,
off_t aOffset, const char* aTag) {
# ifdef __wasi__
MOZ_CRASH("We don't use this memory for WASI right now.");
return nullptr;
# else
return mmap(aAddr, aLength, aProt, aFlags, aFd, aOffset);
# endif
}
static inline int MozTaggedMemoryIsSupported(void) { return 0; }

View file

@@ -6,7 +6,7 @@
#include "MmapFaultHandler.h"
#if defined(XP_UNIX) && !defined(XP_DARWIN)
#if defined(XP_UNIX) && !defined(XP_DARWIN) && !defined(__wasi__)
# include "PlatformMutex.h"
# include "mozilla/Atomics.h"

View file

@@ -33,6 +33,12 @@
# define MMAP_FAULT_HANDLER_BEGIN_BUFFER(buf, bufLen) {
# define MMAP_FAULT_HANDLER_CATCH(retval) }
#elif defined(__wasi__)
# define MMAP_FAULT_HANDLER_BEGIN_HANDLE(fd) {
# define MMAP_FAULT_HANDLER_BEGIN_BUFFER(buf, bufLen) {
# define MMAP_FAULT_HANDLER_CATCH(retval) }
#else
// Linux
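For context on how the no-op WASI definitions above get used: the BEGIN
macro opens a guarded scope (note the trailing brace in its definition) and
CATCH closes it, returning retval on platforms where a real fault handler
fires inside the block. On WASI the pair degenerates to a plain scope, which
is sound because nothing is ever mmap'd. An illustrative caller, not from
this patch, assuming the macros above are in scope:

#include <string.h>

bool CopyFromMappedBuffer(char* dst, const char* buf, size_t bufLen) {
  MMAP_FAULT_HANDLER_BEGIN_BUFFER(buf, bufLen)
  memcpy(dst, buf, bufLen);
  MMAP_FAULT_HANDLER_CATCH(false)  // on WASI this is just a closing brace
  return true;
}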