Bug 1268024: split HeapAccess into MemoryAccess and BoundsCheck; r=luke

MozReview-Commit-ID: 5F3fFNACx7u

--HG--
extra : rebase_source : 8de8fab7dcdafcbc7123465bb411f9b230c3e68e
Benjamin Bouvier 2016-06-13 10:24:51 +02:00
Parent 6d8a7a35d7
Commit 8bd74b2915
27 changed files with 299 additions and 338 deletions

View file

@ -108,43 +108,17 @@ StaticallyLink(CodeSegment& cs, const LinkData& linkData, ExclusiveContext* cx)
static void
SpecializeToHeap(CodeSegment& cs, const Metadata& metadata, uint8_t* heapBase, uint32_t heapLength)
{
#if defined(JS_CODEGEN_X86)
for (const BoundsCheck& check : metadata.boundsChecks)
Assembler::UpdateBoundsCheck(check.patchAt(cs.code()), heapLength);
// An access is out-of-bounds iff
//      ptr + offset + data-type-byte-size > heapLength
// i.e. ptr > heapLength - data-type-byte-size - offset.
// data-type-byte-size and offset are already included in the addend, so we
// just have to add the heap length here.
for (const HeapAccess& access : metadata.heapAccesses) {
if (access.hasLengthCheck())
X86Encoding::AddInt32(access.patchLengthAt(cs.code()), heapLength);
#if defined(JS_CODEGEN_X86)
for (const MemoryAccess& access : metadata.memoryAccesses) {
// Patch memory pointer immediate.
void* addr = access.patchHeapPtrImmAt(cs.code());
uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr));
MOZ_ASSERT(disp <= INT32_MAX);
X86Encoding::SetPointer(addr, (void*)(heapBase + disp));
}
#elif defined(JS_CODEGEN_X64)
// Even with signal handling being used for most bounds checks, there may be
// atomic operations that depend on explicit checks.
//
// If we have any explicit bounds checks, we need to patch the heap length
// checks at the right places. The accesses that have been recorded are the
// only ones that need bounds checks (see also
// CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,Exchange,AtomicBinop}Heap)
for (const HeapAccess& access : metadata.heapAccesses) {
// See comment above for x86 codegen.
if (access.hasLengthCheck())
X86Encoding::AddInt32(access.patchLengthAt(cs.code()), heapLength);
}
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
for (const HeapAccess& access : metadata.heapAccesses)
Assembler::UpdateBoundsCheck(heapLength, (Instruction*)(access.insnOffset() + cs.code()));
#endif
}
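As an aside for readers new to this code: the x86 loop above rewrites each access's absolute-address field from a plain constant offset into heapBase + offset. A minimal self-contained sketch of that one step, where GetPointer32/SetPointer32 are illustrative stand-ins for X86Encoding::GetPointer/SetPointer, not the real API:

#include <cassert>
#include <cstdint>
#include <cstring>

// Read/write the 4-byte absolute-address field baked into an instruction.
// (Illustrative only; assumes a 32-bit address space, as on x86.)
static void* GetPointer32(uint8_t* field) {
    uint32_t bits;
    memcpy(&bits, field, sizeof(bits));
    return reinterpret_cast<void*>(uintptr_t(bits));
}
static void SetPointer32(uint8_t* field, void* value) {
    uint32_t bits = uint32_t(reinterpret_cast<uintptr_t>(value));
    memcpy(field, &bits, sizeof(bits));
}

// The per-MemoryAccess step performed by SpecializeToHeap above.
static void PatchHeapPtrImm(uint8_t* field, uint8_t* heapBase) {
    uint32_t disp = uint32_t(reinterpret_cast<uintptr_t>(GetPointer32(field)));
    assert(disp <= INT32_MAX);        // still just the small constant offset
    SetPointer32(field, heapBase + disp);
}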
@ -503,7 +477,8 @@ Metadata::serializedSize() const
return sizeof(pod()) +
SerializedVectorSize(imports) +
SerializedVectorSize(exports) +
SerializedPodVectorSize(heapAccesses) +
SerializedPodVectorSize(memoryAccesses) +
SerializedPodVectorSize(boundsChecks) +
SerializedPodVectorSize(codeRanges) +
SerializedPodVectorSize(callSites) +
SerializedPodVectorSize(callThunks) +
@ -517,7 +492,8 @@ Metadata::serialize(uint8_t* cursor) const
cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
cursor = SerializeVector(cursor, imports);
cursor = SerializeVector(cursor, exports);
cursor = SerializePodVector(cursor, heapAccesses);
cursor = SerializePodVector(cursor, memoryAccesses);
cursor = SerializePodVector(cursor, boundsChecks);
cursor = SerializePodVector(cursor, codeRanges);
cursor = SerializePodVector(cursor, callSites);
cursor = SerializePodVector(cursor, callThunks);
@ -532,7 +508,8 @@ Metadata::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
(cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
(cursor = DeserializeVector(cx, cursor, &imports)) &&
(cursor = DeserializeVector(cx, cursor, &exports)) &&
(cursor = DeserializePodVector(cx, cursor, &heapAccesses)) &&
(cursor = DeserializePodVector(cx, cursor, &memoryAccesses)) &&
(cursor = DeserializePodVector(cx, cursor, &boundsChecks)) &&
(cursor = DeserializePodVector(cx, cursor, &codeRanges)) &&
(cursor = DeserializePodVector(cx, cursor, &callSites)) &&
(cursor = DeserializePodVector(cx, cursor, &callThunks)) &&
@ -546,7 +523,8 @@ Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
return SizeOfVectorExcludingThis(imports, mallocSizeOf) +
SizeOfVectorExcludingThis(exports, mallocSizeOf) +
heapAccesses.sizeOfExcludingThis(mallocSizeOf) +
memoryAccesses.sizeOfExcludingThis(mallocSizeOf) +
boundsChecks.sizeOfExcludingThis(mallocSizeOf) +
codeRanges.sizeOfExcludingThis(mallocSizeOf) +
callSites.sizeOfExcludingThis(mallocSizeOf) +
callThunks.sizeOfExcludingThis(mallocSizeOf) +

View file

@ -409,7 +409,8 @@ struct Metadata : ShareableBase<Metadata>, MetadataCacheablePod
ImportVector imports;
ExportVector exports;
HeapAccessVector heapAccesses;
MemoryAccessVector memoryAccesses;
BoundsCheckVector boundsChecks;
CodeRangeVector codeRanges;
CallSiteVector callSites;
CallThunkVector callThunks;

View file

@ -915,11 +915,13 @@ ModuleGenerator::finish(ImportNameVector&& importNames, const ShareableBytes& by
return nullptr;
// The MacroAssembler has accumulated all the heap accesses during codegen.
metadata_->heapAccesses = masm_.extractHeapAccesses();
metadata_->memoryAccesses = masm_.extractMemoryAccesses();
metadata_->boundsChecks = masm_.extractBoundsChecks();
// These Vectors can get large and the excess capacity can be significant,
// so realloc them down to size.
metadata_->heapAccesses.podResizeToFit();
metadata_->memoryAccesses.podResizeToFit();
metadata_->boundsChecks.podResizeToFit();
metadata_->codeRanges.podResizeToFit();
metadata_->callSites.podResizeToFit();
metadata_->callThunks.podResizeToFit();

View file

@ -834,30 +834,32 @@ Instance::lookupCodeRange(void* pc) const
return &metadata_->codeRanges[match];
}
struct HeapAccessOffset
#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS
struct MemoryAccessOffset
{
const HeapAccessVector& accesses;
explicit HeapAccessOffset(const HeapAccessVector& accesses) : accesses(accesses) {}
const MemoryAccessVector& accesses;
explicit MemoryAccessOffset(const MemoryAccessVector& accesses) : accesses(accesses) {}
uintptr_t operator[](size_t index) const {
return accesses[index].insnOffset();
}
};
const HeapAccess*
Instance::lookupHeapAccess(void* pc) const
const MemoryAccess*
Instance::lookupMemoryAccess(void* pc) const
{
MOZ_ASSERT(codeSegment_->containsFunctionPC(pc));
uint32_t target = ((uint8_t*)pc) - codeSegment_->code();
size_t lowerBound = 0;
size_t upperBound = metadata_->heapAccesses.length();
size_t upperBound = metadata_->memoryAccesses.length();
size_t match;
if (!BinarySearch(HeapAccessOffset(metadata_->heapAccesses), lowerBound, upperBound, target, &match))
if (!BinarySearch(MemoryAccessOffset(metadata_->memoryAccesses), lowerBound, upperBound, target, &match))
return nullptr;
return &metadata_->heapAccesses[match];
return &metadata_->memoryAccesses[match];
}
#endif // ASMJS_MAY_USE_SIGNAL_HANDLERS
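The adapter above deserves a note: mozilla::BinarySearch accepts any object indexable with operator[], so MemoryAccessOffset projects each element to its instruction offset without copying the vector. A minimal stand-alone sketch of the same pattern (the names and the search routine here are illustrative, not the mfbt API):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Access { uint32_t insnOffset; };

// Projection adapter: make a vector of structs look like an array of offsets.
struct AccessOffset {
    const std::vector<Access>& accesses;
    uintptr_t operator[](size_t i) const { return accesses[i].insnOffset; }
};

// Plain binary search over anything indexable; returns true on exact match.
template <typename View>
static bool BinarySearchView(const View& view, size_t lo, size_t hi,
                             uintptr_t target, size_t* match) {
    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        if (view[mid] == target) { *match = mid; return true; }
        if (view[mid] < target) lo = mid + 1; else hi = mid;
    }
    return false;
}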
void
Instance::addSizeOfMisc(MallocSizeOf mallocSizeOf,

View file

@ -142,7 +142,9 @@ class Instance
const CallSite* lookupCallSite(void* returnAddress) const;
const CodeRange* lookupCodeRange(void* pc) const;
const HeapAccess* lookupHeapAccess(void* pc) const;
#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS
const MemoryAccess* lookupMemoryAccess(void* pc) const;
#endif
// about:memory reporting:

View file

@ -135,7 +135,7 @@ struct ExportMap
WASM_DECLARE_SERIALIZABLE(ExportMap)
};
// Module represents a compiled wasm module and primarily provides two
// operations: instantiation and serialization. A Module can be instantiated any
// number of times to produce new Instance objects. A Module can be serialized
// any number of times such that the serialized bytes can be deserialized later

View file

@ -602,12 +602,11 @@ ComputeAccessAddress(EMULATOR_CONTEXT* context, const Disassembler::ComplexAddre
MOZ_COLD static uint8_t*
EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
const HeapAccess* heapAccess, const Instance& instance)
const MemoryAccess* memoryAccess, const Instance& instance)
{
MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(pc));
MOZ_RELEASE_ASSERT(instance.metadata().compileArgs.useSignalHandlersForOOB);
MOZ_RELEASE_ASSERT(!heapAccess->hasLengthCheck());
MOZ_RELEASE_ASSERT(heapAccess->insnOffset() == (pc - instance.codeSegment().code()));
MOZ_RELEASE_ASSERT(memoryAccess->insnOffset() == (pc - instance.codeSegment().code()));
// Disassemble the instruction which caused the trap so that we can extract
// information about it and decide what to do.
@ -674,8 +673,8 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
// If this is storing Z of an XYZ, check whether X is also in bounds, so
// that we don't store anything before throwing.
MOZ_RELEASE_ASSERT(unwrappedOffset > heapAccess->offsetWithinWholeSimdVector());
uint32_t wrappedBaseOffset = uint32_t(unwrappedOffset - heapAccess->offsetWithinWholeSimdVector());
MOZ_RELEASE_ASSERT(unwrappedOffset > memoryAccess->offsetWithinWholeSimdVector());
uint32_t wrappedBaseOffset = uint32_t(unwrappedOffset - memoryAccess->offsetWithinWholeSimdVector());
if (wrappedBaseOffset >= instance.heapLength())
inBounds = false;
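A worked example of the wrap-back arithmetic above, with invented numbers: for a float32 XYZ store, the Z component sits 8 bytes past the start of the vector, so offsetWithinWholeSimdVector() is 8.

#include <cstdint>

// Illustrative values only.
constexpr uint32_t heapLength = 65536;
constexpr uint32_t offsetWithinWholeSimdVector = 8;  // Z of a float32 XYZ
constexpr uint32_t unwrappedOffset = heapLength + 4; // where the Z write faulted
// Back up to the start of the whole SIMD access:
constexpr uint32_t wrappedBaseOffset = unwrappedOffset - offsetWithinWholeSimdVector;
static_assert(wrappedBaseOffset == 65532, "start of the whole XYZ access");
// 65532 < heapLength, so the XY part is in bounds: a partial OOB. The guard
// 'wrappedBaseOffset >= heapLength' only rejects the access outright when
// even the first component would fall past the end of the heap.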
@ -704,7 +703,7 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
// We now know that this is an out-of-bounds access made by an asm.js
// load/store that we should handle.
if (heapAccess->throwOnOOB())
if (memoryAccess->throwOnOOB())
return instance.codeSegment().outOfBoundsCode();
switch (access.kind()) {
@ -733,7 +732,7 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
MOZ_COLD static uint8_t*
EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
const HeapAccess* heapAccess, const Instance& instance)
const MemoryAccess* memoryAccess, const Instance& instance)
{
// TODO: Implement unaligned accesses.
return instance.codeSegment().outOfBoundsCode();
@ -804,14 +803,14 @@ HandleFault(PEXCEPTION_POINTERS exception)
// retrigger after the interrupt jumps back to resumePC).
return pc == instance.codeSegment().interruptCode() &&
instance.codeSegment().containsFunctionPC(activation->resumePC()) &&
instance.lookupHeapAccess(activation->resumePC());
instance.lookupMemoryAccess(activation->resumePC());
}
const HeapAccess* heapAccess = instance.lookupHeapAccess(pc);
if (!heapAccess)
const MemoryAccess* memoryAccess = instance.lookupMemoryAccess(pc);
if (!memoryAccess)
return false;
*ppc = EmulateHeapAccess(context, pc, faultingAddress, heapAccess, instance);
*ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, instance);
return true;
}
@ -934,11 +933,11 @@ HandleMachException(JSRuntime* rt, const ExceptionRequest& request)
if (!IsHeapAccessAddress(instance, faultingAddress))
return false;
const HeapAccess* heapAccess = instance.lookupHeapAccess(pc);
if (!heapAccess)
const MemoryAccess* memoryAccess = instance.lookupMemoryAccess(pc);
if (!memoryAccess)
return false;
*ppc = EmulateHeapAccess(&context, pc, faultingAddress, heapAccess, instance);
*ppc = EmulateHeapAccess(&context, pc, faultingAddress, memoryAccess, instance);
// Update the thread state with the new pc and register values.
kret = thread_set_state(rtThread, float_state, (thread_state_t)&context.float_, float_state_count);
@ -1136,11 +1135,11 @@ HandleFault(int signum, siginfo_t* info, void* ctx)
if (!IsHeapAccessAddress(instance, faultingAddress))
return false;
const HeapAccess* heapAccess = instance.lookupHeapAccess(pc);
if (!heapAccess)
const MemoryAccess* memoryAccess = instance.lookupMemoryAccess(pc);
if (!memoryAccess)
return false;
*ppc = EmulateHeapAccess(context, pc, faultingAddress, heapAccess, instance);
*ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, instance);
return true;
}

View file

@ -579,106 +579,94 @@ class CallSiteAndTarget : public CallSite
typedef Vector<CallSiteAndTarget, 0, SystemAllocPolicy> CallSiteAndTargetVector;
// Metadata for a bounds check that may need patching later.
class BoundsCheck
{
public:
BoundsCheck() = default;
explicit BoundsCheck(uint32_t cmpOffset)
: cmpOffset_(cmpOffset)
{ }
uint8_t* patchAt(uint8_t* code) const { return code + cmpOffset_; }
void offsetBy(uint32_t offset) { cmpOffset_ += offset; }
private:
uint32_t cmpOffset_; // absolute offset of the comparison
};
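BoundsCheck is deliberately tiny: codegen records where a patchable cmp immediate lives, and heap specialization rewrites every recorded immediate later. A self-contained toy of that record-then-patch flow (not SpiderMonkey code; UpdateImm32 plays the role of X86Encoding::AddInt32 / Assembler::UpdateBoundsCheck):

#include <cstdint>
#include <cstring>
#include <vector>

struct Check { uint32_t cmpOffset; };   // mirrors BoundsCheck's one field

static void UpdateImm32(uint8_t* patchAt, uint32_t heapLength) {
    int32_t imm;
    memcpy(&imm, patchAt, sizeof(imm));
    imm += int32_t(heapLength);         // addend becomes heapLength - endOffset
    memcpy(patchAt, &imm, sizeof(imm));
}

int main() {
    std::vector<uint8_t> code(16, 0);
    std::vector<Check> checks;
    // "Codegen": plant a placeholder immediate of -endOffset at offset 4.
    int32_t placeholder = -8;
    memcpy(code.data() + 4, &placeholder, sizeof(placeholder));
    checks.push_back(Check{4});
    // "Specialization": patch every recorded immediate for this heap.
    for (const Check& c : checks)
        UpdateImm32(code.data() + c.cmpOffset, 4096);
    return 0;                           // the immediate now reads 4088
}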
// Summarizes a heap access made by wasm code that needs to be patched later
// and/or looked up by the wasm signal handlers. Different architectures need
// to know different things (x64: offset and length, ARM: where to patch in
// heap length, x86: where to patch in heap length and base).
#if defined(JS_CODEGEN_X86)
class HeapAccess
class MemoryAccess
{
uint32_t insnOffset_;
uint8_t opLength_; // the length of the load/store instruction
uint8_t cmpDelta_; // the number of bytes from the cmp to the load/store instruction
uint32_t nextInsOffset_;
public:
HeapAccess() = default;
static const uint32_t NoLengthCheck = UINT32_MAX;
MemoryAccess() = default;
// If 'cmp' equals 'insnOffset', or if it is not supplied, then cmpDelta_
// is zero, indicating that there is no length to patch.
HeapAccess(uint32_t insnOffset, uint32_t after, uint32_t cmp = NoLengthCheck) {
mozilla::PodZero(this); // zero padding for Valgrind
insnOffset_ = insnOffset;
opLength_ = after - insnOffset;
cmpDelta_ = cmp == NoLengthCheck ? 0 : insnOffset - cmp;
}
explicit MemoryAccess(uint32_t nextInsOffset)
: nextInsOffset_(nextInsOffset)
{ }
uint32_t insnOffset() const { return insnOffset_; }
void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
void offsetInsnOffsetBy(uint32_t offset) { insnOffset_ += offset; }
void* patchHeapPtrImmAt(uint8_t* code) const { return code + (insnOffset_ + opLength_); }
bool hasLengthCheck() const { return cmpDelta_ > 0; }
void* patchLengthAt(uint8_t* code) const {
MOZ_ASSERT(hasLengthCheck());
return code + (insnOffset_ - cmpDelta_);
}
void* patchHeapPtrImmAt(uint8_t* code) const { return code + nextInsOffset_; }
void offsetBy(uint32_t offset) { nextInsOffset_ += offset; }
};
#elif defined(JS_CODEGEN_X64)
class HeapAccess
class MemoryAccess
{
public:
enum WhatToDoOnOOB {
CarryOn, // loads return undefined, stores do nothing.
Throw // throw a RangeError
};
private:
uint32_t insnOffset_;
uint8_t offsetWithinWholeSimdVector_; // if this is e.g. the Z of an XYZ
bool throwOnOOB_; // should we throw on OOB?
uint8_t cmpDelta_; // the number of bytes from the cmp to the load/store instruction
bool wrapOffset_; // should we wrap the offset on OOB?
public:
HeapAccess() = default;
static const uint32_t NoLengthCheck = UINT32_MAX;
enum OutOfBoundsBehavior {
Throw,
CarryOn,
};
enum WrappingBehavior {
WrapOffset,
DontWrapOffset,
};
// If 'cmp' equals 'insnOffset', or if it is not supplied, then cmpDelta_
// is zero, indicating that there is no length to patch.
HeapAccess(uint32_t insnOffset, WhatToDoOnOOB oob,
uint32_t cmp = NoLengthCheck,
uint32_t offsetWithinWholeSimdVector = 0)
MemoryAccess() = default;
MemoryAccess(uint32_t insnOffset, OutOfBoundsBehavior onOOB, WrappingBehavior onWrap,
uint32_t offsetWithinWholeSimdVector = 0)
: insnOffset_(insnOffset),
offsetWithinWholeSimdVector_(offsetWithinWholeSimdVector),
throwOnOOB_(onOOB == OutOfBoundsBehavior::Throw),
wrapOffset_(onWrap == WrappingBehavior::WrapOffset)
{
mozilla::PodZero(this); // zero padding for Valgrind
insnOffset_ = insnOffset;
offsetWithinWholeSimdVector_ = offsetWithinWholeSimdVector;
throwOnOOB_ = oob == Throw;
cmpDelta_ = cmp == NoLengthCheck ? 0 : insnOffset - cmp;
MOZ_ASSERT(offsetWithinWholeSimdVector_ == offsetWithinWholeSimdVector);
MOZ_ASSERT(offsetWithinWholeSimdVector_ == offsetWithinWholeSimdVector, "fits in uint8");
}
uint32_t insnOffset() const { return insnOffset_; }
void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
void offsetInsnOffsetBy(uint32_t offset) { insnOffset_ += offset; }
bool throwOnOOB() const { return throwOnOOB_; }
uint32_t offsetWithinWholeSimdVector() const { return offsetWithinWholeSimdVector_; }
bool hasLengthCheck() const { return cmpDelta_ > 0; }
void* patchLengthAt(uint8_t* code) const {
MOZ_ASSERT(hasLengthCheck());
return code + (insnOffset_ - cmpDelta_);
}
bool throwOnOOB() const { return throwOnOOB_; }
bool wrapOffset() const { return wrapOffset_; }
void offsetBy(uint32_t offset) { insnOffset_ += offset; }
};
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
class HeapAccess
{
uint32_t insnOffset_;
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
defined(JS_CODEGEN_NONE)
// Nothing! We just want bounds checks on these platforms.
class MemoryAccess {
public:
HeapAccess() = default;
explicit HeapAccess(uint32_t insnOffset) : insnOffset_(insnOffset) {}
uint32_t insnOffset() const { return insnOffset_; }
void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
void offsetInsnOffsetBy(uint32_t offset) { insnOffset_ += offset; }
};
#elif defined(JS_CODEGEN_NONE)
class HeapAccess {
public:
void offsetInsnOffsetBy(uint32_t) { MOZ_CRASH(); }
void offsetBy(uint32_t) { MOZ_CRASH(); }
uint32_t insnOffset() const { MOZ_CRASH(); }
};
#endif
WASM_DECLARE_POD_VECTOR(HeapAccess, HeapAccessVector)
WASM_DECLARE_POD_VECTOR(MemoryAccess, MemoryAccessVector)
WASM_DECLARE_POD_VECTOR(BoundsCheck, BoundsCheckVector)
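One readability payoff of the split is visible at x64 call sites: the old positional arguments gave no hint what a trailing NoLengthCheck or 8 meant, while the new enums name each behavior. The first line below is taken from a hunk elsewhere in this commit; the second expands the AsmJSMemoryAccess helper this commit adds in CodeGeneratorX64:

// Before:
masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw,
                             wasm::HeapAccess::NoLengthCheck, 8));
// After:
masm.append(wasm::MemoryAccess(before, wasm::MemoryAccess::Throw,
                               wasm::MemoryAccess::WrapOffset, 8));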
// A wasm::SymbolicAddress represents a pointer to a well-known function or
// object that is embedded in wasm code. Since wasm code is serialized and

View file

@ -1367,7 +1367,7 @@ function test_int8(heap) {
var i8m = loadModule_int8(this, {}, heap);
for ( var i=0 ; i < i8a.length ; i++ )
i8a[i] = 0;
var size = Int8Array.BYTES_PER_ELEMENT;

View file

@ -9,10 +9,10 @@ function m(stdlib, ffi, heap) {
var add = stdlib.Atomics.add;
var load = stdlib.Atomics.load;
function add_sharedEv(i1) {
i1 = i1 | 0;
load(HEAP32, i1 >> 2);
add(HEAP32, i1 >> 2, 1);
load(HEAP32, i1 >> 2);
}
return {add_sharedEv:add_sharedEv};
}
@ -23,4 +23,3 @@ if (isAsmJSCompilationAvailable())
var sab = new SharedArrayBuffer(65536);
var {add_sharedEv} = m(this, {}, sab);
assertErrorMessage(() => add_sharedEv(sab.byteLength), RangeError, /out-of-range index/);

View file

@ -3306,20 +3306,19 @@ Assembler::BailoutTableStart(uint8_t* code)
return (uint8_t*) inst;
}
void Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction* inst)
void
Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength)
{
Instruction* inst = (Instruction*) patchAt;
MOZ_ASSERT(inst->is<InstCMP>());
InstCMP* cmp = inst->as<InstCMP>();
Register index;
cmp->extractOp1(&index);
#ifdef DEBUG
Operand2 op = cmp->extractOp2();
MOZ_ASSERT(op.isImm8());
#endif
MOZ_ASSERT(cmp->extractOp2().isImm8());
Imm8 imm8 = Imm8(heapSize);
Imm8 imm8 = Imm8(heapLength);
MOZ_ASSERT(!imm8.invalid);
*inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCC, Always);

View file

@ -1952,7 +1952,7 @@ class Assembler : public AssemblerShared
static size_t ToggledCallSize(uint8_t* code);
static void ToggleCall(CodeLocationLabel inst_, bool enabled);
static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction* inst);
static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength);
void processCodeLabels(uint8_t* rawCode);
bool bailed() {

View file

@ -2277,7 +2277,9 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
return;
}
BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
masm.append(wasm::BoundsCheck(cmpOffset));
if (isFloat) {
FloatRegister dst = ToFloatRegister(ins->output());
VFPRegister vd(dst);
@ -2299,7 +2301,6 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, d, Offset, Assembler::Below);
}
memoryBarrier(mir->barrierAfter());
masm.append(wasm::HeapAccess(bo.getOffset()));
}
void
@ -2360,7 +2361,9 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
return;
}
BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
masm.append(wasm::BoundsCheck(cmpOffset));
if (isFloat) {
VFPRegister vd(ToFloatRegister(ins->value()));
if (size == 32)
@ -2374,7 +2377,6 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
ToRegister(ins->value()), Offset, Assembler::Below);
}
memoryBarrier(mir->barrierAfter());
masm.append(wasm::HeapAccess(bo.getOffset()));
}
void
@ -2390,17 +2392,14 @@ CodeGeneratorARM::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
Register oldval = ToRegister(ins->oldValue());
Register newval = ToRegister(ins->newValue());
uint32_t maybeCmpOffset = 0;
if (mir->needsBoundsCheck()) {
BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
maybeCmpOffset = bo.getOffset();
uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
masm.append(wasm::BoundsCheck(cmpOffset));
}
masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
srcAddr, oldval, newval, InvalidReg,
ToAnyRegister(ins->output()));
if (mir->needsBoundsCheck())
masm.append(wasm::HeapAccess(maybeCmpOffset));
}
void
@ -2436,18 +2435,14 @@ CodeGeneratorARM::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
uint32_t maybeCmpOffset = 0;
if (mir->needsBoundsCheck()) {
BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
maybeCmpOffset = bo.getOffset();
uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
masm.append(wasm::BoundsCheck(cmpOffset));
}
masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
srcAddr, value, InvalidReg, ToAnyRegister(ins->output()));
if (mir->needsBoundsCheck())
masm.append(wasm::HeapAccess(maybeCmpOffset));
}
void
@ -2487,11 +2482,10 @@ CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
uint32_t maybeCmpOffset = 0;
if (mir->needsBoundsCheck()) {
BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
maybeCmpOffset = bo.getOffset();
uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
masm.append(wasm::BoundsCheck(cmpOffset));
}
if (value->isConstant())
@ -2502,9 +2496,6 @@ CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
ToRegister(value), srcAddr, flagTemp, InvalidReg,
ToAnyRegister(ins->output()));
if (mir->needsBoundsCheck())
masm.append(wasm::HeapAccess(maybeCmpOffset));
}
void
@ -2522,20 +2513,16 @@ CodeGeneratorARM::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEff
BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
uint32_t maybeCmpOffset = 0;
if (mir->needsBoundsCheck()) {
BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
maybeCmpOffset = bo.getOffset();
uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
masm.append(wasm::BoundsCheck(cmpOffset));
}
if (value->isConstant())
atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp);
else
atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp);
if (mir->needsBoundsCheck())
masm.append(wasm::HeapAccess(maybeCmpOffset));
}
void

View file

@ -635,9 +635,10 @@ Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
}
void
Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction* inst)
Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength)
{
int32_t mask = ~(heapSize - 1);
Instruction* inst = (Instruction*) patchAt;
int32_t mask = ~(heapLength - 1);
unsigned n, imm_s, imm_r;
if (!IsImmLogical(mask, 32, &n, &imm_s, &imm_r))
MOZ_CRASH("Could not encode immediate!?");

View file

@ -373,7 +373,7 @@ class Assembler : public vixl::Assembler
static const size_t OffsetOfJumpTableEntryPointer = 8;
public:
static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction* inst);
static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength);
void writeCodePointer(AbsoluteLabel* absoluteLabel) {
MOZ_ASSERT(!absoluteLabel->bound());

View file

@ -1812,7 +1812,7 @@ CodeGeneratorMIPSShared::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
masm.bind(&done);
memoryBarrier(mir->barrierAfter());
masm.append(wasm::HeapAccess(bo.getOffset()));
masm.append(wasm::BoundsCheck(bo.getOffset()));
}
void
@ -1900,7 +1900,7 @@ CodeGeneratorMIPSShared::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
masm.bind(&done);
memoryBarrier(mir->barrierAfter());
masm.append(wasm::HeapAccess(bo.getOffset()));
masm.append(wasm::BoundsCheck(bo.getOffset()));
}
void
@ -1930,7 +1930,7 @@ CodeGeneratorMIPSShared::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap
valueTemp, offsetTemp, maskTemp,
ToAnyRegister(ins->output()));
if (mir->needsBoundsCheck())
masm.append(wasm::HeapAccess(maybeCmpOffset));
masm.append(wasm::BoundsCheck(maybeCmpOffset));
}
void
@ -1957,7 +1957,7 @@ CodeGeneratorMIPSShared::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap*
srcAddr, value, InvalidReg, valueTemp,
offsetTemp, maskTemp, ToAnyRegister(ins->output()));
if (mir->needsBoundsCheck())
masm.append(wasm::HeapAccess(maybeCmpOffset));
masm.append(wasm::BoundsCheck(maybeCmpOffset));
}
void
@ -1995,7 +1995,7 @@ CodeGeneratorMIPSShared::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
valueTemp, offsetTemp, maskTemp,
ToAnyRegister(ins->output()));
if (mir->needsBoundsCheck())
masm.append(wasm::HeapAccess(maybeCmpOffset));
masm.append(wasm::BoundsCheck(maybeCmpOffset));
}
void
@ -2031,7 +2031,7 @@ CodeGeneratorMIPSShared::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHea
valueTemp, offsetTemp, maskTemp);
if (mir->needsBoundsCheck())
masm.append(wasm::HeapAccess(maybeCmpOffset));
masm.append(wasm::BoundsCheck(maybeCmpOffset));
}
void

View file

@ -449,8 +449,9 @@ Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
}
void
Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction* inst)
Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength)
{
Instruction* inst = (Instruction*) patchAt;
InstImm* i0 = (InstImm*) inst;
InstImm* i1 = (InstImm*) i0->next();

View file

@ -166,7 +166,7 @@ class Assembler : public AssemblerMIPSShared
static void ToggleCall(CodeLocationLabel inst_, bool enabled);
static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction* inst);
static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength);
}; // Assembler
static const uint32_t NumIntArgRegs = 4;

View file

@ -490,8 +490,8 @@ Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
}
void
Assembler::UpdateBoundsCheck(uint64_t heapSize, Instruction* inst)
Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength)
{
// Replace with new value
Assembler::UpdateLoad64Value(inst, heapSize);
Assembler::UpdateLoad64Value((Instruction*) patchAt, heapLength);
}

View file

@ -168,7 +168,7 @@ class Assembler : public AssemblerMIPSShared
static void ToggleCall(CodeLocationLabel inst_, bool enabled);
static void UpdateBoundsCheck(uint64_t logHeapSize, Instruction* inst);
static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength);
}; // Assembler
static const uint32_t NumIntArgRegs = 8;

View file

@ -147,6 +147,8 @@ class Assembler : public AssemblerShared
static void ToggleToCmp(CodeLocationLabel) { MOZ_CRASH(); }
static void ToggleCall(CodeLocationLabel, bool) { MOZ_CRASH(); }
static void UpdateBoundsCheck(uint8_t*, uint32_t) { MOZ_CRASH(); }
static uintptr_t GetPointer(uint8_t*) { MOZ_CRASH(); }
void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,

View file

@ -707,7 +707,8 @@ class AssemblerShared
{
wasm::CallSiteAndTargetVector callsites_;
wasm::JumpSiteArray jumpsites_;
wasm::HeapAccessVector heapAccesses_;
wasm::MemoryAccessVector memoryAccesses_;
wasm::BoundsCheckVector boundsChecks_;
Vector<AsmJSGlobalAccess, 0, SystemAllocPolicy> asmJSGlobalAccesses_;
Vector<AsmJSAbsoluteAddress, 0, SystemAllocPolicy> asmJSAbsoluteAddresses_;
@ -755,8 +756,11 @@ class AssemblerShared
const wasm::JumpSiteArray& jumpSites() { return jumpsites_; }
void clearJumpSites() { for (auto& v : jumpsites_) v.clear(); }
void append(wasm::HeapAccess access) { enoughMemory_ &= heapAccesses_.append(access); }
wasm::HeapAccessVector&& extractHeapAccesses() { return Move(heapAccesses_); }
void append(wasm::MemoryAccess access) { enoughMemory_ &= memoryAccesses_.append(access); }
wasm::MemoryAccessVector&& extractMemoryAccesses() { return Move(memoryAccesses_); }
void append(wasm::BoundsCheck check) { enoughMemory_ &= boundsChecks_.append(check); }
wasm::BoundsCheckVector&& extractBoundsChecks() { return Move(boundsChecks_); }
void append(AsmJSGlobalAccess access) { enoughMemory_ &= asmJSGlobalAccesses_.append(access); }
size_t numAsmJSGlobalAccesses() const { return asmJSGlobalAccesses_.length(); }
@ -794,10 +798,15 @@ class AssemblerShared
offsets[i] += delta;
}
i = heapAccesses_.length();
enoughMemory_ &= heapAccesses_.appendAll(other.heapAccesses_);
for (; i < heapAccesses_.length(); i++)
heapAccesses_[i].offsetInsnOffsetBy(delta);
i = memoryAccesses_.length();
enoughMemory_ &= memoryAccesses_.appendAll(other.memoryAccesses_);
for (; i < memoryAccesses_.length(); i++)
memoryAccesses_[i].offsetBy(delta);
i = boundsChecks_.length();
enoughMemory_ &= boundsChecks_.appendAll(other.boundsChecks_);
for (; i < boundsChecks_.length(); i++)
boundsChecks_[i].offsetBy(delta);
i = asmJSGlobalAccesses_.length();
enoughMemory_ &= asmJSGlobalAccesses_.appendAll(other.asmJSGlobalAccesses_);
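The merge pattern repeated above (remember the old length, appendAll the other assembler's entries, then shift only the newly appended tail by the code-size delta) generalizes; a minimal sketch with invented names:

#include <cstdint>
#include <vector>

// Illustrative helper: append src to dst, offsetting only the new entries.
template <typename T, typename OffsetBy>
void AppendShifted(std::vector<T>& dst, const std::vector<T>& src,
                   uint32_t delta, OffsetBy offsetBy) {
    size_t i = dst.size();                    // first index of the new tail
    dst.insert(dst.end(), src.begin(), src.end());
    for (; i < dst.size(); i++)
        offsetBy(dst[i], delta);              // shift only what we appended
}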

View file

@ -620,6 +620,14 @@ CodeGeneratorX64::loadSimd(Scalar::Type type, unsigned numElems, const Operand&
}
}
static wasm::MemoryAccess
AsmJSMemoryAccess(uint32_t before, wasm::MemoryAccess::OutOfBoundsBehavior throwBehavior,
uint32_t offsetWithinWholeSimdVector = 0)
{
return wasm::MemoryAccess(before, throwBehavior, wasm::MemoryAccess::WrapOffset,
offsetWithinWholeSimdVector);
}
void
CodeGeneratorX64::emitSimdLoad(LAsmJSLoadHeap* ins)
{
@ -631,7 +639,7 @@ CodeGeneratorX64::emitSimdLoad(LAsmJSLoadHeap* ins)
? Operand(HeapReg, mir->offset())
: Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
uint32_t maybeCmpOffset = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
unsigned numElems = mir->numSimdElems();
if (numElems == 3) {
@ -648,7 +656,7 @@ CodeGeneratorX64::emitSimdLoad(LAsmJSLoadHeap* ins)
uint32_t after = masm.size();
verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, 2, srcAddr,
*ins->output()->output());
masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw));
// Load Z (W is zeroed)
// This is still in bounds, as we've checked with a manual bounds check
@ -658,8 +666,7 @@ CodeGeneratorX64::emitSimdLoad(LAsmJSLoadHeap* ins)
after = masm.size();
verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, 1, srcAddrZ,
LFloatReg(ScratchSimd128Reg));
masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw,
wasm::HeapAccess::NoLengthCheck, 8));
masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw, 8));
// Move ZW atop XY
masm.vmovlhps(ScratchSimd128Reg, out, out);
@ -667,11 +674,12 @@ CodeGeneratorX64::emitSimdLoad(LAsmJSLoadHeap* ins)
uint32_t before = masm.size();
loadSimd(type, numElems, srcAddr, out);
uint32_t after = masm.size();
verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, numElems, srcAddr, *ins->output()->output());
masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, numElems, srcAddr,
*ins->output()->output());
masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw));
}
if (maybeCmpOffset != wasm::HeapAccess::NoLengthCheck)
if (hasBoundsCheck)
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
}
@ -693,7 +701,7 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
memoryBarrier(mir->barrierBefore());
OutOfLineLoadTypedArrayOutOfBounds* ool;
uint32_t maybeCmpOffset = maybeEmitAsmJSLoadBoundsCheck(mir, ins, &ool);
DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSLoadBoundsCheck(mir, ins, &ool);
uint32_t before = masm.size();
switch (accessType) {
@ -718,13 +726,14 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, accessType, 0, srcAddr, *out->output());
if (ool) {
MOZ_ASSERT(hasBoundsCheck);
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
masm.bind(ool->rejoin());
}
memoryBarrier(mir->barrierAfter());
masm.append(wasm::HeapAccess(before, wasm::HeapAccess::CarryOn, maybeCmpOffset));
masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::CarryOn));
}
void
@ -787,7 +796,7 @@ CodeGeneratorX64::emitSimdStore(LAsmJSStoreHeap* ins)
? Operand(HeapReg, mir->offset())
: Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
uint32_t maybeCmpOffset = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
unsigned numElems = mir->numSimdElems();
if (numElems == 3) {
@ -800,7 +809,7 @@ CodeGeneratorX64::emitSimdStore(LAsmJSStoreHeap* ins)
// It's possible that the Z could be out of bounds when the XY is in
// bounds. To avoid storing the XY before the exception is thrown, we
// store the Z first, and record its offset in the HeapAccess so
// store the Z first, and record its offset in the MemoryAccess so
// that the signal handler knows to check the bounds of the full
// access, rather than just the Z.
masm.vmovhlps(in, ScratchSimd128Reg, ScratchSimd128Reg);
@ -809,23 +818,23 @@ CodeGeneratorX64::emitSimdStore(LAsmJSStoreHeap* ins)
uint32_t after = masm.size();
verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, type, 1, dstAddrZ,
LFloatReg(ScratchSimd128Reg));
masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset, 8));
masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw, 8));
// Store XY
before = after;
storeSimd(type, 2, in, dstAddr);
after = masm.size();
verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, type, 2, dstAddr, *ins->value());
masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw));
masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw));
} else {
uint32_t before = masm.size();
storeSimd(type, numElems, in, dstAddr);
uint32_t after = masm.size();
verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, type, numElems, dstAddr, *ins->value());
masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw));
}
if (maybeCmpOffset != wasm::HeapAccess::NoLengthCheck)
if (hasBoundsCheck)
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
}
@ -849,7 +858,7 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
memoryBarrier(mir->barrierBefore());
Label* rejoin;
uint32_t maybeCmpOffset = maybeEmitAsmJSStoreBoundsCheck(mir, ins, &rejoin);
DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSStoreBoundsCheck(mir, ins, &rejoin);
uint32_t before = masm.size();
if (value->isConstant()) {
@ -905,37 +914,45 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, accessType, 0, dstAddr, *value);
if (rejoin) {
MOZ_ASSERT(hasBoundsCheck);
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
masm.bind(rejoin);
}
memoryBarrier(mir->barrierAfter());
masm.append(wasm::HeapAccess(before, wasm::HeapAccess::CarryOn, maybeCmpOffset));
masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::CarryOn));
}
static void
MaybeAddAtomicsBoundsCheck(MacroAssemblerX64& masm, MAsmJSHeapAccess* mir, Register ptr)
{
if (!mir->needsBoundsCheck())
return;
// Note that we can't use the same machinery as normal asm.js loads/stores
// since signal-handler bounds checking is not yet implemented for atomic
// accesses.
uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(-mir->endOffset())).offset();
masm.append(wasm::BoundsCheck(cmpOffset));
masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
}
void
CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
{
MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
MAsmJSCompareExchangeHeap* mir = ins->mir();
Scalar::Type accessType = mir->accessType();
const LAllocation* ptr = ins->ptr();
MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
MOZ_ASSERT(ptr->isRegister());
BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
Register ptr = ToRegister(ins->ptr());
BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
Register oldval = ToRegister(ins->oldValue());
Register newval = ToRegister(ins->newValue());
// Note that we can't use the same machinery as normal asm.js loads/stores
// since signal-handler bounds checking is not yet implemented for atomic accesses.
uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(-mir->endOffset())).offset();
masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
}
uint32_t before = masm.size();
MaybeAddAtomicsBoundsCheck(masm, mir, ptr);
masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
srcAddr,
oldval,
@ -945,31 +962,23 @@ CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
MOZ_ASSERT(mir->offset() == 0,
"The AsmJS signal handler doesn't yet support emulating "
"atomic accesses in the case of a fault from an unwrapped offset");
masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
}
void
CodeGeneratorX64::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
{
MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
MOZ_ASSERT(ins->mir()->accessType() <= Scalar::Uint32);
MAsmJSAtomicExchangeHeap* mir = ins->mir();
Scalar::Type accessType = mir->accessType();
const LAllocation* ptr = ins->ptr();
MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
MOZ_ASSERT(ptr->isRegister());
MOZ_ASSERT(accessType <= Scalar::Uint32);
BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
Register ptr = ToRegister(ins->ptr());
BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
Register value = ToRegister(ins->value());
// Note that we can't use the same machinery as normal asm.js loads/stores
// since signal-handler bounds checking is not yet implemented for atomic accesses.
uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(-mir->endOffset())).offset();
masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
}
uint32_t before = masm.size();
MaybeAddAtomicsBoundsCheck(masm, mir, ptr);
masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
srcAddr,
value,
@ -978,7 +987,6 @@ CodeGeneratorX64::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
MOZ_ASSERT(mir->offset() == 0,
"The AsmJS signal handler doesn't yet support emulating "
"atomic accesses in the case of a fault from an unwrapped offset");
masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
}
void
@ -989,40 +997,29 @@ CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
MAsmJSAtomicBinopHeap* mir = ins->mir();
Scalar::Type accessType = mir->accessType();
Register ptrReg = ToRegister(ins->ptr());
Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
const LAllocation* value = ins->value();
accessType = accessType == Scalar::Uint32 ? Scalar::Int32 : accessType;
AtomicOp op = mir->operation();
BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->offset());
Register ptr = ToRegister(ins->ptr());
Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
// Note that we can't use the same machinery as normal asm.js loads/stores
// since signal-handler bounds checking is not yet implemented for atomic accesses.
uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-mir->endOffset())).offset();
masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
}
uint32_t before = masm.size();
const LAllocation* value = ins->value();
MaybeAddAtomicsBoundsCheck(masm, mir, ptr);
AnyRegister output = ToAnyRegister(ins->output());
if (value->isConstant()) {
atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
Imm32(ToInt32(value)),
srcAddr,
temp,
InvalidReg,
ToAnyRegister(ins->output()));
atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr, temp, InvalidReg,
output);
} else {
atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
ToRegister(value),
srcAddr,
temp,
InvalidReg,
ToAnyRegister(ins->output()));
atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr, temp, InvalidReg,
output);
}
MOZ_ASSERT(mir->offset() == 0,
"The AsmJS signal handler doesn't yet support emulating "
"atomic accesses in the case of a fault from an unwrapped offset");
masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
}
void
@ -1033,21 +1030,14 @@ CodeGeneratorX64::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEff
MAsmJSAtomicBinopHeap* mir = ins->mir();
Scalar::Type accessType = mir->accessType();
Register ptrReg = ToRegister(ins->ptr());
const LAllocation* value = ins->value();
AtomicOp op = mir->operation();
BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->offset());
Register ptr = ToRegister(ins->ptr());
BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
const LAllocation* value = ins->value();
// Note that we can't use the same machinery as normal asm.js loads/stores
// since signal-handler bounds checking is not yet implemented for atomic accesses.
uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-mir->endOffset())).offset();
masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
}
MaybeAddAtomicsBoundsCheck(masm, mir, ptr);
uint32_t before = masm.size();
if (value->isConstant())
atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr);
else
@ -1055,7 +1045,6 @@ CodeGeneratorX64::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEff
MOZ_ASSERT(mir->offset() == 0,
"The AsmJS signal handler doesn't yet support emulating "
"atomic accesses in the case of a fault from an unwrapped offset");
masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
}
void

View file

@ -1079,6 +1079,21 @@ class AssemblerX86Shared : public AssemblerShared
X86Encoding::BaseAssembler::patchJumpToTwoByteNop(jump);
}
static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength) {
// An access is out-of-bounds iff
// ptr + offset + data-type-byte-size > heapLength
// i.e. ptr > heapLength - data-type-byte-size - offset.
// data-type-byte-size and offset are already included in the addend so
// we just have to add the heap length here.
//
// On x64, even with signal handling being used for most bounds checks,
// there may be atomic operations that depend on explicit checks. The
// accesses that have been recorded are the only ones that need bounds
// checks (see also
// CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,Exchange,AtomicBinop}Heap)
X86Encoding::AddInt32(patchAt, heapLength);
}
void breakpoint() {
masm.int3();
}
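To make the addend arithmetic above concrete, a worked example with invented numbers: a 4-byte access at constant offset 16 has endOffset 20, so codegen plants -20 as the cmp immediate.

#include <cstdint>

constexpr uint32_t offset = 16, size = 4;            // illustrative access
constexpr int32_t addend = -int32_t(offset + size);  // planted immediate: -20
constexpr uint32_t heapLength = 65536;               // patched in later
constexpr int32_t patched = addend + int32_t(heapLength);
// cmp ptr, 65516 / ja OutOfBounds now rejects exactly
// ptr > heapLength - size - offset.
static_assert(patched == int32_t(heapLength - size - offset), "patched addend");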

View file

@ -424,7 +424,7 @@ CodeGeneratorX86Shared::visitOffsetBoundsCheck(OffsetBoundsCheck* oolCheck)
masm.jmp(oolCheck->rejoin());
}
uint32_t
void
CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* access,
const MInstruction* mir,
Register ptr, Label* maybeFail)
@ -460,21 +460,22 @@ CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* acces
if (pass)
masm.bind(pass);
return cmpOffset;
masm.append(wasm::BoundsCheck(cmpOffset));
}
uint32_t
bool
CodeGeneratorX86Shared::maybeEmitThrowingAsmJSBoundsCheck(const MAsmJSHeapAccess* access,
const MInstruction* mir,
const LAllocation* ptr)
{
if (!gen->needsAsmJSBoundsCheckBranch(access))
return wasm::HeapAccess::NoLengthCheck;
return false;
return emitAsmJSBoundsCheckBranch(access, mir, ToRegister(ptr), nullptr);
emitAsmJSBoundsCheckBranch(access, mir, ToRegister(ptr), nullptr);
return true;
}
uint32_t
bool
CodeGeneratorX86Shared::maybeEmitAsmJSLoadBoundsCheck(const MAsmJSLoadHeap* mir, LAsmJSLoadHeap* ins,
OutOfLineLoadTypedArrayOutOfBounds** ool)
{
@ -482,33 +483,35 @@ CodeGeneratorX86Shared::maybeEmitAsmJSLoadBoundsCheck(const MAsmJSLoadHeap* mir,
*ool = nullptr;
if (!gen->needsAsmJSBoundsCheckBranch(mir))
return wasm::HeapAccess::NoLengthCheck;
return false;
if (mir->isAtomicAccess())
return emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), nullptr);
Label* rejoin = nullptr;
if (!mir->isAtomicAccess()) {
*ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(ins->output()),
mir->accessType());
addOutOfLineCode(*ool, mir);
rejoin = (*ool)->entry();
}
*ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(ins->output()),
mir->accessType());
addOutOfLineCode(*ool, mir);
return emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), (*ool)->entry());
emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), rejoin);
return true;
}
uint32_t
bool
CodeGeneratorX86Shared::maybeEmitAsmJSStoreBoundsCheck(const MAsmJSStoreHeap* mir, LAsmJSStoreHeap* ins,
Label** rejoin)
{
MOZ_ASSERT(!Scalar::isSimdType(mir->accessType()));
*rejoin = nullptr;
if (!gen->needsAsmJSBoundsCheckBranch(mir))
return wasm::HeapAccess::NoLengthCheck;
return false;
if (mir->isAtomicAccess())
return emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), nullptr);
if (!mir->isAtomicAccess())
*rejoin = alloc().lifoAlloc()->newInfallible<Label>();
*rejoin = alloc().lifoAlloc()->newInfallible<Label>();
return emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), *rejoin);
emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), *rejoin);
return true;
}
void

View file

@ -94,25 +94,24 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
};
private:
MOZ_MUST_USE uint32_t
void
emitAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* mir, const MInstruction* ins,
Register ptr, Label* fail);
public:
// For SIMD and atomic loads and stores (which throw on out-of-bounds):
MOZ_MUST_USE uint32_t
bool
maybeEmitThrowingAsmJSBoundsCheck(const MAsmJSHeapAccess* mir, const MInstruction* ins,
const LAllocation* ptr);
// For asm.js plain and atomic loads that possibly require a bounds check:
MOZ_MUST_USE uint32_t
bool
maybeEmitAsmJSLoadBoundsCheck(const MAsmJSLoadHeap* mir, LAsmJSLoadHeap* ins,
OutOfLineLoadTypedArrayOutOfBounds** ool);
// For asm.js plain and atomic stores that possibly require a bounds check:
MOZ_MUST_USE uint32_t
maybeEmitAsmJSStoreBoundsCheck(const MAsmJSStoreHeap* mir, LAsmJSStoreHeap* ins,
Label** rejoin);
bool
maybeEmitAsmJSStoreBoundsCheck(const MAsmJSStoreHeap* mir, LAsmJSStoreHeap* ins, Label** rejoin);
void cleanupAfterAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* mir, Register ptr);

View file

@ -405,7 +405,7 @@ CodeGeneratorX86::emitSimdLoad(LAsmJSLoadHeap* ins)
? Operand(PatchedAbsoluteAddress(mir->offset()))
: Operand(ToRegister(ptr), mir->offset());
uint32_t maybeCmpOffset = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
unsigned numElems = mir->numSimdElems();
if (numElems == 3) {
@ -417,29 +417,23 @@ CodeGeneratorX86::emitSimdLoad(LAsmJSLoadHeap* ins)
: Operand(ToRegister(ptr), 2 * sizeof(float) + mir->offset());
// Load XY
uint32_t before = masm.size();
loadSimd(type, 2, srcAddr, out);
uint32_t after = masm.size();
masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
masm.append(wasm::MemoryAccess(masm.size()));
// Load Z (W is zeroed)
// This is still in bounds, as we've checked with a manual bounds check
// or we had enough space for sure when removing the bounds check.
before = after;
loadSimd(type, 1, srcAddrZ, ScratchSimd128Reg);
after = masm.size();
masm.append(wasm::HeapAccess(before, after));
masm.append(wasm::MemoryAccess(masm.size()));
// Move ZW atop XY
masm.vmovlhps(ScratchSimd128Reg, out, out);
} else {
uint32_t before = masm.size();
loadSimd(type, numElems, srcAddr, out);
uint32_t after = masm.size();
masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
masm.append(wasm::MemoryAccess(masm.size()));
}
if (maybeCmpOffset != wasm::HeapAccess::NoLengthCheck)
if (hasBoundsCheck)
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
}
@ -461,20 +455,20 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
memoryBarrier(mir->barrierBefore());
OutOfLineLoadTypedArrayOutOfBounds* ool;
uint32_t maybeCmpOffset = maybeEmitAsmJSLoadBoundsCheck(mir, ins, &ool);
DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSLoadBoundsCheck(mir, ins, &ool);
uint32_t before = masm.size();
load(accessType, srcAddr, out);
uint32_t after = masm.size();
if (ool) {
MOZ_ASSERT(hasBoundsCheck);
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
masm.bind(ool->rejoin());
}
memoryBarrier(mir->barrierAfter());
masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
masm.append(wasm::MemoryAccess(after));
}
void
@ -603,7 +597,7 @@ CodeGeneratorX86::emitSimdStore(LAsmJSStoreHeap* ins)
? Operand(PatchedAbsoluteAddress(mir->offset()))
: Operand(ToRegister(ptr), mir->offset());
uint32_t maybeCmpOffset = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
unsigned numElems = mir->numSimdElems();
if (numElems == 3) {
@ -615,28 +609,22 @@ CodeGeneratorX86::emitSimdStore(LAsmJSStoreHeap* ins)
: Operand(ToRegister(ptr), 2 * sizeof(float) + mir->offset());
// Store XY
uint32_t before = masm.size();
storeSimd(type, 2, in, dstAddr);
uint32_t after = masm.size();
masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
masm.append(wasm::MemoryAccess(masm.size()));
masm.vmovhlps(in, ScratchSimd128Reg, ScratchSimd128Reg);
// Store Z (W is zeroed)
// This is still in bounds, as we've checked with a manual bounds check
// or we had enough space for sure when removing the bounds check.
before = masm.size();
storeSimd(type, 1, ScratchSimd128Reg, dstAddrZ);
after = masm.size();
masm.append(wasm::HeapAccess(before, after));
masm.append(wasm::MemoryAccess(masm.size()));
} else {
uint32_t before = masm.size();
storeSimd(type, numElems, in, dstAddr);
uint32_t after = masm.size();
masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
masm.append(wasm::MemoryAccess(masm.size()));
}
if (maybeCmpOffset != wasm::HeapAccess::NoLengthCheck)
if (hasBoundsCheck)
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
}
@ -660,20 +648,20 @@ CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
memoryBarrier(mir->barrierBefore());
Label* rejoin;
uint32_t maybeCmpOffset = maybeEmitAsmJSStoreBoundsCheck(mir, ins, &rejoin);
DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSStoreBoundsCheck(mir, ins, &rejoin);
uint32_t before = masm.size();
store(accessType, value, dstAddr);
uint32_t after = masm.size();
if (rejoin) {
MOZ_ASSERT(hasBoundsCheck);
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
masm.bind(rejoin);
}
memoryBarrier(mir->barrierAfter());
masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
masm.append(wasm::MemoryAccess(after));
}
void
@ -706,20 +694,17 @@ void
CodeGeneratorX86::asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg, bool boundsCheck,
uint32_t offset, uint32_t endOffset)
{
uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
if (boundsCheck) {
maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-endOffset)).offset();
uint32_t cmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-endOffset)).offset();
masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
masm.append(wasm::BoundsCheck(cmpOffset));
}
// Add in the actual heap pointer explicitly, to avoid opening up
// the abstraction that is atomicBinopToTypedIntArray at this time.
masm.movl(ptrReg, addrTemp);
uint32_t before = masm.size();
masm.addlWithPatch(Imm32(offset), addrTemp);
uint32_t after = masm.size();
masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
masm.append(wasm::MemoryAccess(masm.size()));
}
void