Backed out changeset f48cb748519b (bug 1701620) for bustage on Memory.cpp. CLOSED TREE

Csoregi Natalia 2021-04-13 10:09:41 +03:00
Parent 4e5ae46c03
Commit 91bd333c25
15 changed files with 48 additions and 230 deletions

View file

@@ -11,8 +11,6 @@
#if defined(XP_WIN)
# include "util/Windows.h"
#elif defined(__wasi__)
// Nothing.
#elif defined(XP_UNIX) && !defined(XP_DARWIN)
# include <signal.h>
# include <sys/types.h>
@@ -123,8 +121,6 @@ bool MemoryProtectionExceptionHandler::isDisabled() {
#elif !defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
// Disable the exception handler for Beta and Release builds.
return true;
#elif defined(__wasi__)
return true;
#else
return false;
#endif
@@ -236,12 +232,6 @@ void MemoryProtectionExceptionHandler::uninstall() {
}
}
#elif defined(__wasi__)
bool MemoryProtectionExceptionHandler::install() { return true; }
void MemoryProtectionExceptionHandler::uninstall() {}
#elif defined(XP_UNIX) && !defined(XP_DARWIN)
static struct sigaction sPrevSEGVHandler = {};

View file

@@ -206,7 +206,7 @@
#include <iterator>
#include <string.h>
#include <utility>
#if !defined(XP_WIN) && !defined(__wasi__)
#ifndef XP_WIN
# include <sys/mman.h>
# include <unistd.h>
#endif

View file

@@ -20,10 +20,6 @@
# include "util/Windows.h"
# include <psapi.h>
#elif defined(__wasi__)
/* nothing */
#else
# include <algorithm>
@@ -144,15 +140,6 @@ enum class PageAccess : DWORD {
ReadExecute = PAGE_EXECUTE_READ,
ReadWriteExecute = PAGE_EXECUTE_READWRITE,
};
#elif defined(__wasi__)
enum class PageAccess : int {
None = 0,
Read = 0,
ReadWrite = 0,
Execute = 0,
ReadExecute = 0,
ReadWriteExecute = 0,
};
#else
enum class PageAccess : int {
None = PROT_NONE,
@@ -168,9 +155,7 @@ template <bool AlwaysGetNew = true>
static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
size_t length, size_t alignment);
#ifndef __wasi__
static void* MapAlignedPagesSlow(size_t length, size_t alignment);
#endif // wasi
static void* MapAlignedPagesLastDitch(size_t length, size_t alignment);
#ifdef JS_64BIT
@@ -195,14 +180,6 @@ static inline void* MapInternal(void* desired, size_t length) {
DWORD flags =
(commit == Commit::Yes ? MEM_RESERVE | MEM_COMMIT : MEM_RESERVE);
region = VirtualAlloc(desired, length, flags, DWORD(prot));
#elif defined(__wasi__)
if (int err = posix_memalign(&region, gc::SystemPageSize(), length)) {
MOZ_RELEASE_ASSERT(err == ENOMEM);
return nullptr;
}
if (region) {
memset(region, 0, length);
}
#else
int flags = MAP_PRIVATE | MAP_ANON;
region = MozTaggedAnonymousMmap(desired, length, int(prot), flags, -1, 0,
@@ -220,8 +197,6 @@ static inline void UnmapInternal(void* region, size_t length) {
#ifdef XP_WIN
MOZ_RELEASE_ASSERT(VirtualFree(region, 0, MEM_RELEASE) != 0);
#elif defined(__wasi__)
free(region);
#else
if (munmap(region, length)) {
MOZ_RELEASE_ASSERT(errno == ENOMEM);
@@ -432,18 +407,7 @@ void* MapAlignedPages(size_t length, size_t alignment) {
alignment = allocGranularity;
}
#ifdef __wasi__
void* region = nullptr;
if (int err = posix_memalign(&region, alignment, length)) {
MOZ_ASSERT(err == ENOMEM);
return nullptr;
}
MOZ_ASSERT(region != nullptr);
memset(region, 0, length);
return region;
#else
# ifdef JS_64BIT
#ifdef JS_64BIT
// Use the scattershot allocator if the address range is large enough.
if (UsingScattershotAllocator()) {
void* region = MapAlignedPagesRandom(length, alignment);
@@ -453,7 +417,7 @@ void* MapAlignedPages(size_t length, size_t alignment) {
return region;
}
# endif
#endif
// Try to allocate the region. If the returned address is aligned,
// either we OOMed (region is nullptr) or we're done.
@@ -496,7 +460,6 @@ void* MapAlignedPages(size_t length, size_t alignment) {
// At this point we should either have an aligned region or nullptr.
MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
return region;
#endif // !__wasi__
}
#ifdef JS_64BIT
@@ -583,28 +546,27 @@ static void* MapAlignedPagesRandom(size_t length, size_t alignment) {
#endif // defined(JS_64BIT)
#ifndef __wasi__
static void* MapAlignedPagesSlow(size_t length, size_t alignment) {
void* alignedRegion = nullptr;
do {
size_t reserveLength = length + alignment - pageSize;
# ifdef XP_WIN
#ifdef XP_WIN
// Don't commit the requested pages as we won't use the region directly.
void* region = MapMemory<Commit::No>(reserveLength);
# else
#else
void* region = MapMemory(reserveLength);
# endif
#endif
if (!region) {
return nullptr;
}
alignedRegion =
reinterpret_cast<void*>(AlignBytes(uintptr_t(region), alignment));
# ifdef XP_WIN
#ifdef XP_WIN
// Windows requires that map and unmap calls be matched, so deallocate
// and immediately reallocate at the desired (aligned) address.
UnmapInternal(region, reserveLength);
alignedRegion = MapMemoryAt(alignedRegion, length);
# else
#else
// munmap allows us to simply unmap the pages that don't interest us.
if (alignedRegion != region) {
UnmapInternal(region, uintptr_t(alignedRegion) - uintptr_t(region));
@@ -616,13 +578,12 @@ static void* MapAlignedPagesSlow(size_t length, size_t alignment) {
if (alignedEnd != regionEnd) {
UnmapInternal(alignedEnd, uintptr_t(regionEnd) - uintptr_t(alignedEnd));
}
# endif
#endif
// On Windows we may have raced with another thread; if so, try again.
} while (!alignedRegion);
return alignedRegion;
}
#endif // wasi
/*
* In a low memory or high fragmentation situation, alignable chunks of the
@@ -806,8 +767,6 @@ bool MarkPagesUnusedSoft(void* region, size_t length) {
#if defined(XP_WIN)
return VirtualAlloc(region, length, MEM_RESET,
DWORD(PageAccess::ReadWrite)) == region;
#elif defined(__wasi__)
return 0;
#else
int status;
do {
@@ -879,8 +838,6 @@ size_t GetPageFaultCount() {
return 0;
}
return pmc.PageFaultCount;
#elif defined(__wasi__)
return 0;
#else
struct rusage usage;
int err = getrusage(RUSAGE_SELF, &usage);
@@ -893,9 +850,6 @@ size_t GetPageFaultCount() {
void* AllocateMappedContent(int fd, size_t offset, size_t length,
size_t alignment) {
#ifdef __wasi__
MOZ_CRASH("Not yet supported for WASI");
#else
if (length == 0 || alignment == 0 || offset % alignment != 0 ||
std::max(alignment, allocGranularity) %
std::min(alignment, allocGranularity) !=
@@ -913,7 +867,7 @@ void* AllocateMappedContent(int fd, size_t offset, size_t length,
mappedLength += pageSize - alignedLength % pageSize;
}
# ifdef XP_WIN
#ifdef XP_WIN
HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd));
// This call will fail if the file does not exist.
@@ -952,7 +906,7 @@ void* AllocateMappedContent(int fd, size_t offset, size_t length,
if (!map) {
return nullptr;
}
# else // !defined(XP_WIN)
#else // !defined(XP_WIN)
// Sanity check the offset and length, as mmap does not do this for us.
struct stat st;
if (fstat(fd, &st) || offset >= uint64_t(st.st_size) ||
@@ -974,9 +928,9 @@ void* AllocateMappedContent(int fd, size_t offset, size_t length,
UnmapInternal(region, mappedLength);
return nullptr;
}
# endif
#endif
# ifdef DEBUG
#ifdef DEBUG
// Zero out data before and after the desired mapping to catch errors early.
if (offset != alignedOffset) {
memset(map, 0, offset - alignedOffset);
@@ -984,37 +938,32 @@ void* AllocateMappedContent(int fd, size_t offset, size_t length,
if (alignedLength % pageSize) {
memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
}
# endif
#endif
return map + (offset - alignedOffset);
#endif // __wasi__
}
void DeallocateMappedContent(void* region, size_t length) {
#ifdef __wasi__
MOZ_CRASH("Not yet supported for WASI");
#else
if (!region) {
return;
}
// Due to bug 1502562, the following assertion does not currently hold.
// MOZ_RELEASE_ASSERT(length > 0);
# ifdef XP_WIN
MOZ_RELEASE_ASSERT(UnmapViewOfFile(reinterpret_cast<void*>(map)) != 0);
# else
// Calculate the address originally returned by the system call.
// This is needed because AllocateMappedContent returns a pointer
// that might be offset from the mapping, as the beginning of a
// mapping must be aligned with the allocation granularity.
uintptr_t map = uintptr_t(region) - (uintptr_t(region) % allocGranularity);
#ifdef XP_WIN
MOZ_RELEASE_ASSERT(UnmapViewOfFile(reinterpret_cast<void*>(map)) != 0);
#else
size_t alignedLength = length + (uintptr_t(region) % allocGranularity);
if (munmap(reinterpret_cast<void*>(map), alignedLength)) {
MOZ_RELEASE_ASSERT(errno == ENOMEM);
}
# endif
#endif // __wasi__
#endif
}
static inline void ProtectMemory(void* region, size_t length, PageAccess prot) {
@@ -1024,8 +973,6 @@ static inline void ProtectMemory(void* region, size_t length, PageAccess prot) {
DWORD oldProtect;
MOZ_RELEASE_ASSERT(VirtualProtect(region, length, DWORD(prot), &oldProtect) !=
0);
#elif defined(__wasi__)
/* nothing */
#else
MOZ_RELEASE_ASSERT(mprotect(region, length, int(prot)) == 0);
#endif
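
The slow path restored in this file (MapAlignedPagesSlow) over-reserves, aligns the start, and trims the excess, as its comments describe. A minimal sketch of that technique for the POSIX branch, assuming a power-of-two alignment of at least one page; the names here are illustrative, not the tree's:

#include <cstdint>
#include <sys/mman.h>

static void* MapAlignedSlowSketch(size_t length, size_t alignment,
                                  size_t pageSize) {
  // Over-reserve so that some alignment boundary must fall inside the region.
  size_t reserveLength = length + alignment - pageSize;
  void* region = mmap(nullptr, reserveLength, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANON, -1, 0);
  if (region == MAP_FAILED) {
    return nullptr;
  }
  uintptr_t base = uintptr_t(region);
  uintptr_t aligned = (base + alignment - 1) & ~uintptr_t(alignment - 1);
  // munmap can trim just the pages that don't interest us: the unaligned
  // head and the unused tail. (Windows must instead unmap everything and
  // re-map at the aligned address, as the comments in this diff note.)
  if (aligned != base) {
    munmap(region, aligned - base);
  }
  if (aligned + length != base + reserveLength) {
    munmap(reinterpret_cast<void*>(aligned + length),
           (base + reserveLength) - (aligned + length));
  }
  return reinterpret_cast<void*>(aligned);
}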

View file

@@ -34,8 +34,6 @@
#ifdef XP_WIN
# include "mozilla/StackWalk_windows.h"
# include "mozilla/WindowsVersion.h"
#elif defined(__wasi__)
// Nothing.
#else
# include <sys/mman.h>
# include <unistd.h>
@@ -326,20 +324,7 @@ static void DecommitPages(void* addr, size_t bytes) {
MOZ_CRASH("DecommitPages failed");
}
}
#elif defined(__wasi__)
static void* ReserveProcessExecutableMemory(size_t bytes) {
MOZ_CRASH("NYI for WASI.");
return nullptr;
}
[[nodiscard]] static bool CommitPages(void* addr, size_t bytes,
ProtectionSetting protection) {
MOZ_CRASH("NYI for WASI.");
return false;
}
static void DecommitPages(void* addr, size_t bytes) {
MOZ_CRASH("NYI for WASI.");
}
#else // !XP_WIN && !__wasi__
#else // !XP_WIN
# ifndef MAP_NORESERVE
# define MAP_NORESERVE 0
# endif
@@ -571,9 +556,6 @@ class ProcessExecutableMemory {
}
void release() {
#if defined(__wasi__)
MOZ_ASSERT(!initialized());
#else
MOZ_ASSERT(initialized());
MOZ_ASSERT(pages_.empty());
MOZ_ASSERT(pagesAllocated_ == 0);
@@ -581,7 +563,6 @@ class ProcessExecutableMemory {
base_ = nullptr;
rng_.reset();
MOZ_ASSERT(!initialized());
#endif
}
void assertValidAddress(void* p, size_t bytes) const {
@@ -728,8 +709,6 @@ bool js::jit::InitProcessExecutableMemory() {
#ifdef JS_CODEGEN_ARM64
// Initialize instruction cache flushing.
vixl::CPU::SetUp();
#elif defined(__wasi__)
return true;
#endif
return execMemory.init();
}
@@ -793,24 +772,20 @@ bool js::jit::ReprotectRegion(void* start, size_t size,
// We use the C++ fence here -- and not AtomicOperations::fenceSeqCst() --
// primarily because ReprotectRegion will be called while we construct our own
// jitted atomics. But the C++ fence is sufficient and correct, too.
#ifdef __wasi__
MOZ_CRASH("NYI FOR WASI.");
#else
std::atomic_thread_fence(std::memory_order_seq_cst);
# ifdef XP_WIN
#ifdef XP_WIN
DWORD oldProtect;
DWORD flags = ProtectionSettingToFlags(protection);
if (!VirtualProtect(pageStart, size, flags, &oldProtect)) {
return false;
}
# else
#else
unsigned flags = ProtectionSettingToFlags(protection);
if (mprotect(pageStart, size, flags)) {
return false;
}
# endif
#endif // __wasi__
#endif
execMemory.assertValidAddress(pageStart, size);
return true;
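
The comment above motivates issuing a fence before changing protections while JIT code is being produced. The ordering is easy to show in isolation; a hedged sketch of the POSIX path only, with a hypothetical helper name:

#include <atomic>
#include <sys/mman.h>

// Order all prior writes (e.g. freshly emitted JIT code) before the
// protection change, then flip the pages, e.g. from RW to R+X.
static bool ReprotectSketch(void* pageStart, size_t size, int prot) {
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return mprotect(pageStart, size, prot) == 0;
}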

View file

@@ -13,8 +13,6 @@
#if defined(XP_WIN)
# include "util/Windows.h"
# include <psapi.h>
#elif defined(__wasi__)
// Nothing.
#else
# include <algorithm>
# include <errno.h>
@@ -298,22 +296,6 @@ void unmapPages(void* p, size_t size) {
MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
}
#elif defined(__wasi__)
void* mapMemoryAt(void* desired, size_t length) { return nullptr; }
void* mapMemory(size_t length) {
void* addr = nullptr;
if (int err = posix_memalign(&addr, js::gc::SystemPageSize(), length)) {
MOZ_ASSERT(err == ENOMEM);
}
MOZ_ASSERT(addr);
memset(addr, 0, length);
return addr;
}
void unmapPages(void* p, size_t size) { free(p); }
#else
void* mapMemoryAt(void* desired, size_t length) {

View file

@@ -1505,28 +1505,21 @@ JS_GetExternalStringCallbacks(JSString* str) {
static void SetNativeStackLimit(JSContext* cx, JS::StackKind kind,
size_t stackSize) {
#ifdef __wasi__
// WASI makes this easy: we build with the "stack-first" wasm-ld option, so
// the stack grows downward toward zero. Let's set a limit just a bit above
// this so that we catch an overflow before a Wasm trap occurs.
cx->nativeStackLimit[kind] = 1024;
#else // __wasi__
# if JS_STACK_GROWTH_DIRECTION > 0
#if JS_STACK_GROWTH_DIRECTION > 0
if (stackSize == 0) {
cx->nativeStackLimit[kind] = UINTPTR_MAX;
} else {
MOZ_ASSERT(cx->nativeStackBase() <= size_t(-1) - stackSize);
cx->nativeStackLimit[kind] = cx->nativeStackBase() + stackSize - 1;
}
# else // stack grows up
#else
if (stackSize == 0) {
cx->nativeStackLimit[kind] = 0;
} else {
MOZ_ASSERT(cx->nativeStackBase() >= stackSize);
cx->nativeStackLimit[kind] = cx->nativeStackBase() - (stackSize - 1);
}
# endif // stack grows down
#endif // !__wasi__
#endif
}
JS_PUBLIC_API void JS_SetNativeStackQuota(JSContext* cx,
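
For context on the removed WASI branch: with wasm-ld's stack-first layout the native stack sits at the low end of linear memory and grows toward address 0, so a small constant limit suffices. A hypothetical illustration of the resulting check (not the actual SpiderMonkey recursion check):

#include <cstdint>

// With JS_STACK_GROWTH_DIRECTION < 0, the stack pointer decreases as frames
// are pushed; any sp still above the limit (e.g. 1024) has headroom before
// it would run past address 0 and trap.
static bool StackHasHeadroom(uintptr_t sp, uintptr_t limit) {
  return sp > limit;
}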

View file

@@ -47,11 +47,9 @@
#include <sys/types.h>
#include <utility>
#ifdef XP_UNIX
# ifndef __wasi__
# include <sys/mman.h>
# include <sys/wait.h>
# endif
# include <sys/mman.h>
# include <sys/stat.h>
# include <sys/wait.h>
# include <unistd.h>
#endif
#ifdef XP_LINUX

View file

@@ -10,8 +10,6 @@
#ifdef XP_WIN
# include "util/Windows.h"
#elif defined(__wasi__)
// Nothing
#elif defined(XP_DARWIN) || defined(DARWIN) || defined(XP_UNIX)
# include <pthread.h>
# if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
@@ -117,18 +115,7 @@ void* js::GetNativeStackBaseImpl() {
# endif
}
#elif defined(__wasi__)
// Since we rearrange the layout for wasi via --stack-first flag for the linker
// the final layout is: 0x00 | <- stack | data | heap -> |.
static void* NativeStackBase = __builtin_frame_address(0);
void* js::GetNativeStackBaseImpl() {
MOZ_ASSERT(JS_STACK_GROWTH_DIRECTION < 0);
return NativeStackBase;
}
#else // __wasi__
#else /* XP_UNIX */
void* js::GetNativeStackBaseImpl() {
pthread_t thread = pthread_self();

View file

@@ -21,7 +21,7 @@
#include <algorithm> // std::max, std::min
#include <memory> // std::uninitialized_copy_n
#include <string.h>
#if !defined(XP_WIN) && !defined(__wasi__)
#ifndef XP_WIN
# include <sys/mman.h>
#endif
#include <tuple> // std::tuple
@@ -180,15 +180,7 @@ void* js::MapBufferMemory(size_t mappedSize, size_t initialCommittedSize) {
VirtualFree(data, 0, MEM_RELEASE);
return nullptr;
}
#elif defined(__wasi__)
void* data = nullptr;
if (int err = posix_memalign(&data, gc::SystemPageSize(), mappedSize)) {
MOZ_ASSERT(err == ENOMEM);
return nullptr;
}
MOZ_ASSERT(data);
memset(data, 0, mappedSize);
#else // !XP_WIN && !__wasi__
#else // XP_WIN
void* data =
MozTaggedAnonymousMmap(nullptr, mappedSize, PROT_NONE,
MAP_PRIVATE | MAP_ANON, -1, 0, "wasm-reserved");
@@ -201,7 +193,7 @@ void* js::MapBufferMemory(size_t mappedSize, size_t initialCommittedSize) {
munmap(data, mappedSize);
return nullptr;
}
#endif // !XP_WIN && !__wasi__
#endif // !XP_WIN
#if defined(MOZ_VALGRIND) && \
defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
@@ -222,14 +214,11 @@ bool js::CommitBufferMemory(void* dataEnd, size_t delta) {
if (!VirtualAlloc(dataEnd, delta, MEM_COMMIT, PAGE_READWRITE)) {
return false;
}
#elif defined(__wasi__)
// posix_memalign'd memory is already committed
return true;
#else
#else // XP_WIN
if (mprotect(dataEnd, delta, PROT_READ | PROT_WRITE)) {
return false;
}
#endif // XP_WIN
#endif // !XP_WIN
#if defined(MOZ_VALGRIND) && \
defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
@@ -252,8 +241,6 @@ bool js::ExtendBufferMapping(void* dataPointer, size_t mappedSize,
return false;
}
return true;
#elif defined(__wasi__)
return false;
#elif defined(XP_LINUX)
// Note this will not move memory (no MREMAP_MAYMOVE specified)
if (MAP_FAILED == mremap(dataPointer, mappedSize, newMappedSize, 0)) {
@@ -272,11 +259,9 @@ void js::UnmapBufferMemory(void* base, size_t mappedSize) {
#ifdef XP_WIN
VirtualFree(base, 0, MEM_RELEASE);
#elif defined(__wasi__)
free(base);
#else
#else // XP_WIN
munmap(base, mappedSize);
#endif // XP_WIN
#endif // !XP_WIN
#if defined(MOZ_VALGRIND) && \
defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)
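
The non-WASI path in this file uses the usual reserve-then-commit pattern: reserve the maximal mapping with PROT_NONE, then commit pages by raising protections as the buffer grows. A minimal POSIX sketch of that pattern (illustrative only, not the tree's helpers):

#include <cstddef>
#include <sys/mman.h>

static void* ReserveBufferSketch(size_t mappedSize) {
  // Reserve address space only; any access faults until pages are committed.
  void* p = mmap(nullptr, mappedSize, PROT_NONE, MAP_PRIVATE | MAP_ANON,
                 -1, 0);
  return p == MAP_FAILED ? nullptr : p;
}

static bool CommitBufferSketch(void* dataEnd, size_t delta) {
  // Commit the next `delta` bytes by raising their protections.
  return mprotect(dataEnd, delta, PROT_READ | PROT_WRITE) == 0;
}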

View file

@@ -14,7 +14,7 @@
#include <string>
#include <string.h>
#if !defined(XP_WIN) && !defined(__wasi__)
#ifndef XP_WIN
# include <sys/mman.h>
#endif

View file

@@ -23,10 +23,7 @@
#include <algorithm>
#ifndef __wasi__
# include "jit/ProcessExecutableMemory.h"
#endif
#include "jit/ProcessExecutableMemory.h"
#include "util/Text.h"
#include "vm/HelperThreadState.h"
#include "vm/Realm.h"

View file

@@ -16,14 +16,12 @@
# include <windows.h>
#elif !defined(__OS2__)
# include <unistd.h>
# ifndef __wasi__
# include <sys/mman.h>
# ifndef MAP_ANON
# ifdef MAP_ANONYMOUS
# define MAP_ANON MAP_ANONYMOUS
# else
# error "Don't know how to get anonymous memory"
# endif
# include <sys/mman.h>
# ifndef MAP_ANON
# ifdef MAP_ANONYMOUS
# define MAP_ANON MAP_ANONYMOUS
# else
# error "Don't know how to get anonymous memory"
# endif
# endif
#endif
@@ -86,26 +84,7 @@ static uintptr_t GetDesiredRegionSize() {
# define RESERVE_FAILED 0
#elif defined(__wasi__)
# define RESERVE_FAILED 0
static void* ReserveRegion(uintptr_t aRegion, uintptr_t aSize) {
return RESERVE_FAILED;
}
static void ReleaseRegion(void* aRegion, uintptr_t aSize) { return; }
static bool ProbeRegion(uintptr_t aRegion, uintptr_t aSize) {
const auto pageSize = 1 << 16;
MOZ_ASSERT(pageSize == sysconf(_SC_PAGESIZE));
auto heapSize = __builtin_wasm_memory_size(0) * pageSize;
return aRegion + aSize < heapSize;
}
static uintptr_t GetDesiredRegionSize() { return 0; }
#else // __wasi__
#else // Unix
# include "mozilla/TaggedAnonymousMemory.h"

View file

@@ -34,12 +34,8 @@
#ifndef XP_WIN
# ifdef __wasi__
# include <stdlib.h>
# else
# include <sys/types.h>
# include <sys/mman.h>
# endif // __wasi__
# include <sys/types.h>
# include <sys/mman.h>
# include "mozilla/Types.h"
@@ -70,12 +66,7 @@ static inline void MozTagAnonymousMemory(const void* aPtr, size_t aLength,
static inline void* MozTaggedAnonymousMmap(void* aAddr, size_t aLength,
int aProt, int aFlags, int aFd,
off_t aOffset, const char* aTag) {
# ifdef __wasi__
MOZ_CRASH("We don't use this memory for WASI right now.");
return nullptr;
# else
return mmap(aAddr, aLength, aProt, aFlags, aFd, aOffset);
# endif
}
static inline int MozTaggedMemoryIsSupported(void) { return 0; }

View file

@@ -6,7 +6,7 @@
#include "MmapFaultHandler.h"
#if defined(XP_UNIX) && !defined(XP_DARWIN) && !defined(__wasi__)
#if defined(XP_UNIX) && !defined(XP_DARWIN)
# include "PlatformMutex.h"
# include "mozilla/Atomics.h"

View file

@@ -33,12 +33,6 @@
# define MMAP_FAULT_HANDLER_BEGIN_BUFFER(buf, bufLen) {
# define MMAP_FAULT_HANDLER_CATCH(retval) }
#elif defined(__wasi__)
# define MMAP_FAULT_HANDLER_BEGIN_HANDLE(fd) {
# define MMAP_FAULT_HANDLER_BEGIN_BUFFER(buf, bufLen) {
# define MMAP_FAULT_HANDLER_CATCH(retval) }
#else
// Linux