diff --git a/js/src/asmjs/AsmJSModule.cpp b/js/src/asmjs/AsmJSModule.cpp
index d7259f95aebe..dcff4d1f28e3 100644
--- a/js/src/asmjs/AsmJSModule.cpp
+++ b/js/src/asmjs/AsmJSModule.cpp
@@ -762,29 +762,6 @@ AsmJSModule::staticallyLink(ExclusiveContext *cx)
     MOZ_ASSERT(isStaticallyLinked());
 }
 
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
-static inline size_t
-ViewTypeByteSize(AsmJSHeapAccess::ViewType vt)
-{
-    switch (vt) {
-      case AsmJSHeapAccess::Int8:
-      case AsmJSHeapAccess::Uint8:
-      case AsmJSHeapAccess::Uint8Clamped:
-      case AsmJSHeapAccess::Int16:
-      case AsmJSHeapAccess::Uint16:
-      case AsmJSHeapAccess::Int32:
-      case AsmJSHeapAccess::Uint32:
-      case AsmJSHeapAccess::Float32:
-      case AsmJSHeapAccess::Float64:
-        return 1 << TypedArrayShift(Scalar::Type(vt));
-      case AsmJSHeapAccess::Float32x4:
-      case AsmJSHeapAccess::Int32x4:
-        return 16;
-    }
-    MOZ_CRASH("unexpected view type");
-}
-#endif // JS_CODEGEN_X86 || JS_CODEGEN_X64
-
 void
 AsmJSModule::initHeap(Handle heap, JSContext *cx)
 {
@@ -805,9 +782,9 @@ AsmJSModule::initHeap(Handle heap, JSContext *cx
             // ptr + data-type-byte-size > heapLength
             // i.e. ptr >= heapLength + 1 - data-type-byte-size
             // (Note that we need >= as this is what codegen uses.)
-            AsmJSHeapAccess::ViewType vt = access.viewType();
+            size_t scalarByteSize = 1 << TypedArrayShift(access.type());
             X86Assembler::setPointer(access.patchLengthAt(code_),
-                                     (void*)(heap->byteLength() + 1 - ViewTypeByteSize(vt)));
+                                     (void*)(heap->byteLength() + 1 - scalarByteSize));
         }
         void *addr = access.patchOffsetAt(code_);
         uint32_t disp = reinterpret_cast<uint32_t>(X86Assembler::getPointer(addr));
@@ -827,8 +804,8 @@ AsmJSModule::initHeap(Handle heap, JSContext *cx
         const jit::AsmJSHeapAccess &access = heapAccesses_[i];
         if (access.hasLengthCheck()) {
             // See comment above for x86 codegen.
-            X86Assembler::setInt32(access.patchLengthAt(code_),
-                                   heapLength + 1 - ViewTypeByteSize(access.viewType()));
+            size_t scalarByteSize = 1 << TypedArrayShift(access.type());
+            X86Assembler::setInt32(access.patchLengthAt(code_), heapLength + 1 - scalarByteSize);
         }
     }
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
diff --git a/js/src/asmjs/AsmJSSignalHandlers.cpp b/js/src/asmjs/AsmJSSignalHandlers.cpp
index cee0810d722f..e7aea7e599bf 100644
--- a/js/src/asmjs/AsmJSSignalHandlers.cpp
+++ b/js/src/asmjs/AsmJSSignalHandlers.cpp
@@ -331,10 +331,10 @@ ContextToPC(CONTEXT *context)
 #if defined(JS_CODEGEN_X64)
 template <class T>
 static void
-SetXMMRegToNaN(AsmJSHeapAccess::ViewType viewType, T *xmm_reg)
+SetXMMRegToNaN(Scalar::Type viewType, T *xmm_reg)
 {
     switch (viewType) {
-      case AsmJSHeapAccess::Float32: {
+      case Scalar::Float32: {
         JS_STATIC_ASSERT(sizeof(T) == 4 * sizeof(float));
         float *floats = reinterpret_cast<float *>(xmm_reg);
         floats[0] = GenericNaN();
@@ -343,41 +343,42 @@ SetXMMRegToNaN(AsmJSHeapAccess::ViewType viewType, T *xmm_reg)
         floats[3] = 0;
         break;
       }
-      case AsmJSHeapAccess::Float64: {
+      case Scalar::Float64: {
         JS_STATIC_ASSERT(sizeof(T) == 2 * sizeof(double));
         double *dbls = reinterpret_cast<double *>(xmm_reg);
         dbls[0] = GenericNaN();
         dbls[1] = 0;
         break;
       }
-      case AsmJSHeapAccess::Float32x4: {
+      case Scalar::Float32x4: {
         JS_STATIC_ASSERT(sizeof(T) == 4 * sizeof(float));
         float *floats = reinterpret_cast<float *>(xmm_reg);
         for (unsigned i = 0; i < 4; i++)
             floats[i] = GenericNaN();
         break;
       }
-      case AsmJSHeapAccess::Int32x4: {
+      case Scalar::Int32x4: {
         JS_STATIC_ASSERT(sizeof(T) == 4 * sizeof(int32_t));
         int32_t *ints = reinterpret_cast<int32_t *>(xmm_reg);
         for (unsigned i = 0; i < 4; i++)
             ints[i] = 0;
         break;
       }
-      case AsmJSHeapAccess::Int8:
-      case AsmJSHeapAccess::Uint8:
-      case AsmJSHeapAccess::Int16:
-      case AsmJSHeapAccess::Uint16:
-      case AsmJSHeapAccess::Int32:
-      case AsmJSHeapAccess::Uint32:
-      case AsmJSHeapAccess::Uint8Clamped:
+      case Scalar::Int8:
+      case Scalar::Uint8:
+      case Scalar::Int16:
+      case Scalar::Uint16:
+      case Scalar::Int32:
+      case Scalar::Uint32:
+      case Scalar::Uint8Clamped:
+      case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected type in SetXMMRegToNaN");
     }
 }
 
 # if !defined(XP_MACOSX)
 static void
-SetRegisterToCoercedUndefined(CONTEXT *context, AsmJSHeapAccess::ViewType viewType, AnyRegister reg)
+SetRegisterToCoercedUndefined(CONTEXT *context, Scalar::Type viewType, AnyRegister reg)
 {
     if (reg.isFloat()) {
         switch (reg.fpu().code()) {
@@ -482,7 +483,7 @@ HandleFault(PEXCEPTION_POINTERS exception)
     // register) and set the PC to the next op. Upon return from the handler,
     // execution will resume at this next PC.
     if (heapAccess->isLoad())
-        SetRegisterToCoercedUndefined(context, heapAccess->viewType(), heapAccess->loadedReg());
+        SetRegisterToCoercedUndefined(context, heapAccess->type(), heapAccess->loadedReg());
     *ppc += heapAccess->opLength();
 
     return true;
@@ -532,7 +533,7 @@ SetRegisterToCoercedUndefined(mach_port_t rtThread, x86_thread_state64_t &state,
     if (kret != KERN_SUCCESS)
         return false;
 
-    AsmJSHeapAccess::ViewType viewType = heapAccess.viewType();
+    Scalar::Type viewType = heapAccess.type();
     switch (heapAccess.loadedReg().fpu().code()) {
       case X86Registers::xmm0:  SetXMMRegToNaN(viewType, &fstate.__fpu_xmm0); break;
       case X86Registers::xmm1:  SetXMMRegToNaN(viewType, &fstate.__fpu_xmm1); break;
@@ -874,7 +875,7 @@ HandleFault(int signum, siginfo_t *info, void *ctx)
     // register) and set the PC to the next op. Upon return from the handler,
     // execution will resume at this next PC.
     if (heapAccess->isLoad())
-        SetRegisterToCoercedUndefined(context, heapAccess->viewType(), heapAccess->loadedReg());
+        SetRegisterToCoercedUndefined(context, heapAccess->type(), heapAccess->loadedReg());
     *ppc += heapAccess->opLength();
 
     return true;
diff --git a/js/src/asmjs/AsmJSValidate.cpp b/js/src/asmjs/AsmJSValidate.cpp
index 9ce290c1298f..678bae537929 100644
--- a/js/src/asmjs/AsmJSValidate.cpp
+++ b/js/src/asmjs/AsmJSValidate.cpp
@@ -2756,7 +2756,7 @@ class FunctionCompiler
         curBlock_->setSlot(info().localSlot(local.slot), def);
     }
 
-    MDefinition *loadHeap(AsmJSHeapAccess::ViewType vt, MDefinition *ptr, NeedsBoundsCheck chk)
+    MDefinition *loadHeap(Scalar::Type vt, MDefinition *ptr, NeedsBoundsCheck chk)
     {
         if (inDeadCode())
             return nullptr;
@@ -2767,8 +2767,7 @@ class FunctionCompiler
         return load;
     }
 
-    void storeHeap(AsmJSHeapAccess::ViewType vt, MDefinition *ptr, MDefinition *v,
-                   NeedsBoundsCheck chk)
+    void storeHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk)
     {
         if (inDeadCode())
             return;
@@ -2786,7 +2785,7 @@ class FunctionCompiler
         curBlock_->add(ins);
     }
 
-    MDefinition *atomicLoadHeap(AsmJSHeapAccess::ViewType vt, MDefinition *ptr, NeedsBoundsCheck chk)
+    MDefinition *atomicLoadHeap(Scalar::Type vt, MDefinition *ptr, NeedsBoundsCheck chk)
    {
         if (inDeadCode())
             return nullptr;
@@ -2798,8 +2797,7 @@ class FunctionCompiler
         return load;
     }
 
-    void atomicStoreHeap(AsmJSHeapAccess::ViewType vt, MDefinition *ptr, MDefinition *v,
-                         NeedsBoundsCheck chk)
+    void atomicStoreHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk)
     {
         if (inDeadCode())
             return;
@@ -2810,8 +2808,8 @@ class FunctionCompiler
         curBlock_->add(store);
     }
 
-    MDefinition *atomicCompareExchangeHeap(AsmJSHeapAccess::ViewType vt, MDefinition *ptr,
-                                           MDefinition *oldv, MDefinition *newv, NeedsBoundsCheck chk)
+    MDefinition *atomicCompareExchangeHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *oldv,
+                                           MDefinition *newv, NeedsBoundsCheck chk)
     {
         if (inDeadCode())
             return nullptr;
@@ -2824,8 +2822,8 @@ class FunctionCompiler
         return cas;
     }
 
-    MDefinition *atomicBinopHeap(js::jit::AtomicOp op, AsmJSHeapAccess::ViewType vt,
-                                 MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk)
+    MDefinition *atomicBinopHeap(js::jit::AtomicOp op, Scalar::Type vt, MDefinition *ptr,
+                                 MDefinition *v, NeedsBoundsCheck chk)
     {
         if (inDeadCode())
             return nullptr;
@@ -4453,7 +4451,7 @@ CheckLoadArray(FunctionCompiler &f, ParseNode *elem, MDefinition **def, Type *ty
     if (!CheckArrayAccess(f, ElemBase(elem), ElemIndex(elem), &viewType, &pointerDef, &needsBoundsCheck))
         return false;
 
-    *def = f.loadHeap(AsmJSHeapAccess::ViewType(viewType), pointerDef, needsBoundsCheck);
+    *def = f.loadHeap(viewType, pointerDef, needsBoundsCheck);
     *type = TypedArrayLoadType(viewType);
     return true;
 }
@@ -4547,7 +4545,7 @@ CheckStoreArray(FunctionCompiler &f, ParseNode *lhs, ParseNode *rhs, MDefinition
         MOZ_CRASH("Unexpected view type");
     }
 
-    f.storeHeap(AsmJSHeapAccess::ViewType(viewType), pointerDef, rhsDef, needsBoundsCheck);
+    f.storeHeap(viewType, pointerDef, rhsDef, needsBoundsCheck);
 
     *def = rhsDef;
     *type = rhsType;
@@ -4815,7 +4813,7 @@ CheckAtomicsLoad(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *
     if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck))
         return false;
 
-    *def = f.atomicLoadHeap(AsmJSHeapAccess::ViewType(viewType), pointerDef, needsBoundsCheck);
+    *def = f.atomicLoadHeap(viewType, pointerDef, needsBoundsCheck);
     *type = Type::Signed;
     return true;
 }
@@ -4844,7 +4842,7 @@ CheckAtomicsStore(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type
     if (!rhsType.isIntish())
         return f.failf(arrayArg, "%s is not a subtype of intish", rhsType.toChars());
 
-    f.atomicStoreHeap(AsmJSHeapAccess::ViewType(viewType), pointerDef, rhsDef, needsBoundsCheck);
+    f.atomicStoreHeap(viewType, pointerDef, rhsDef, needsBoundsCheck);
 
     *def = rhsDef;
     *type = Type::Signed;
@@ -4875,8 +4873,7 @@ CheckAtomicsBinop(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type
     if (!valueArgType.isIntish())
         return f.failf(valueArg, "%s is not a subtype of intish", valueArgType.toChars());
 
-    *def = f.atomicBinopHeap(op, AsmJSHeapAccess::ViewType(viewType), pointerDef, valueArgDef,
-                             needsBoundsCheck);
+    *def = f.atomicBinopHeap(op, viewType, pointerDef, valueArgDef, needsBoundsCheck);
     *type = Type::Signed;
     return true;
 }
@@ -4914,8 +4911,8 @@ CheckAtomicsCompareExchange(FunctionCompiler &f, ParseNode *call, MDefinition **
     if (!newValueArgType.isIntish())
         return f.failf(newValueArg, "%s is not a subtype of intish", newValueArgType.toChars());
 
-    *def = f.atomicCompareExchangeHeap(AsmJSHeapAccess::ViewType(viewType), pointerDef,
-                                       oldValueArgDef, newValueArgDef, needsBoundsCheck);
+    *def = f.atomicCompareExchangeHeap(viewType, pointerDef, oldValueArgDef, newValueArgDef,
+                                       needsBoundsCheck);
     *type = Type::Signed;
     return true;
 }
@@ -5615,7 +5612,7 @@ CheckSimdShuffle(FunctionCompiler &f, ParseNode *call, AsmJSSimdType opType, MDe
 
 static bool
 CheckSimdLoadStoreArgs(FunctionCompiler &f, ParseNode *call, AsmJSSimdType opType,
-                       AsmJSHeapAccess::ViewType *viewType, MDefinition **index,
+                       Scalar::Type *viewType, MDefinition **index,
                        NeedsBoundsCheck *needsBoundsCheck)
 {
     ParseNode *view = CallArgList(call);
@@ -5633,8 +5630,8 @@ CheckSimdLoadStoreArgs(FunctionCompiler &f, ParseNode *call, AsmJSSimdType opTyp
     *needsBoundsCheck = NEEDS_BOUNDS_CHECK;
 
     switch (opType) {
-      case AsmJSSimdType_int32x4:   *viewType = AsmJSHeapAccess::Int32x4;   break;
-      case AsmJSSimdType_float32x4: *viewType = AsmJSHeapAccess::Float32x4; break;
+      case AsmJSSimdType_int32x4:   *viewType = Scalar::Int32x4;   break;
+      case AsmJSSimdType_float32x4: *viewType = Scalar::Float32x4; break;
     }
 
     ParseNode *indexExpr = NextNode(view);
@@ -5674,7 +5671,7 @@ CheckSimdLoad(FunctionCompiler &f, ParseNode *call, AsmJSSimdType opType, MDefin
     if (numArgs != 2)
         return f.failf(call, "expected 2 arguments to SIMD load, got %u", numArgs);
 
-    AsmJSHeapAccess::ViewType viewType;
+    Scalar::Type viewType;
     MDefinition *index;
     NeedsBoundsCheck needsBoundsCheck;
     if (!CheckSimdLoadStoreArgs(f, call, opType, &viewType, &index, &needsBoundsCheck))
@@ -5692,7 +5689,7 @@ CheckSimdStore(FunctionCompiler &f, ParseNode *call, AsmJSSimdType opType, MDefi
     if (numArgs != 3)
         return f.failf(call, "expected 3 arguments to SIMD load, got %u", numArgs);
 
-    AsmJSHeapAccess::ViewType viewType;
+    Scalar::Type viewType;
     MDefinition *index;
     NeedsBoundsCheck needsBoundsCheck;
     if (!CheckSimdLoadStoreArgs(f, call, opType, &viewType, &index, &needsBoundsCheck))
diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h
index bc993476f79b..52c3bc1f6dcb 100644
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -12151,19 +12151,15 @@ class MAsmJSNeg : public MUnaryInstruction
 
 class MAsmJSHeapAccess
 {
-  protected:
-    typedef AsmJSHeapAccess::ViewType ViewType;
-
-  private:
-    ViewType viewType_;
+    Scalar::Type viewType_;
     bool needsBoundsCheck_;
 
   public:
-    MAsmJSHeapAccess(ViewType vt, bool needsBoundsCheck)
+    MAsmJSHeapAccess(Scalar::Type vt, bool needsBoundsCheck)
       : viewType_(vt), needsBoundsCheck_(needsBoundsCheck)
     {}
 
-    ViewType viewType() const { return viewType_; }
+    Scalar::Type viewType() const { return viewType_; }
     bool needsBoundsCheck() const { return needsBoundsCheck_; }
     void removeBoundsCheck() { needsBoundsCheck_ = false; }
 };
@@ -12173,7 +12169,7 @@ class MAsmJSLoadHeap : public MUnaryInstruction, public MAsmJSHeapAccess
     MemoryBarrierBits barrierBefore_;
     MemoryBarrierBits barrierAfter_;
 
-    MAsmJSLoadHeap(ViewType vt, MDefinition *ptr, bool needsBoundsCheck,
+    MAsmJSLoadHeap(Scalar::Type vt, MDefinition *ptr, bool needsBoundsCheck,
                    MemoryBarrierBits before, MemoryBarrierBits after)
       : MUnaryInstruction(ptr),
         MAsmJSHeapAccess(vt, needsBoundsCheck),
@@ -12186,27 +12182,28 @@ class MAsmJSLoadHeap : public MUnaryInstruction, public MAsmJSHeapAccess
         setMovable();
 
         switch (vt) {
-          case AsmJSHeapAccess::Int8:
-          case AsmJSHeapAccess::Uint8:
-          case AsmJSHeapAccess::Int16:
-          case AsmJSHeapAccess::Uint16:
-          case AsmJSHeapAccess::Int32:
-          case AsmJSHeapAccess::Uint32:
+          case Scalar::Int8:
+          case Scalar::Uint8:
+          case Scalar::Int16:
+          case Scalar::Uint16:
+          case Scalar::Int32:
+          case Scalar::Uint32:
             setResultType(MIRType_Int32);
             break;
-          case AsmJSHeapAccess::Float32:
+          case Scalar::Float32:
             setResultType(MIRType_Float32);
             break;
-          case AsmJSHeapAccess::Float64:
+          case Scalar::Float64:
             setResultType(MIRType_Double);
             break;
-          case AsmJSHeapAccess::Float32x4:
+          case Scalar::Float32x4:
             setResultType(MIRType_Float32x4);
             break;
-          case AsmJSHeapAccess::Int32x4:
+          case Scalar::Int32x4:
             setResultType(MIRType_Int32x4);
             break;
-          case AsmJSHeapAccess::Uint8Clamped:
+          case Scalar::Uint8Clamped:
+          case Scalar::MaxTypedArrayViewType:
             MOZ_CRASH("unexpected uint8clamped load heap in asm.js");
         }
     }
@@ -12214,7 +12211,7 @@ class MAsmJSLoadHeap : public MUnaryInstruction, public MAsmJSHeapAccess
   public:
     INSTRUCTION_HEADER(AsmJSLoadHeap);
 
-    static MAsmJSLoadHeap *New(TempAllocator &alloc, ViewType vt,
+    static MAsmJSLoadHeap *New(TempAllocator &alloc, Scalar::Type vt,
                                MDefinition *ptr, bool needsBoundsCheck,
                                MemoryBarrierBits barrierBefore = MembarNobits,
                                MemoryBarrierBits barrierAfter = MembarNobits)
@@ -12238,7 +12235,7 @@ class MAsmJSStoreHeap : public MBinaryInstruction, public MAsmJSHeapAccess
     MemoryBarrierBits barrierBefore_;
     MemoryBarrierBits barrierAfter_;
 
-    MAsmJSStoreHeap(ViewType vt, MDefinition *ptr, MDefinition *v, bool needsBoundsCheck,
+    MAsmJSStoreHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *v, bool needsBoundsCheck,
                     MemoryBarrierBits before, MemoryBarrierBits after)
       : MBinaryInstruction(ptr, v),
         MAsmJSHeapAccess(vt, needsBoundsCheck),
@@ -12252,7 +12249,7 @@ class MAsmJSStoreHeap : public MBinaryInstruction, public MAsmJSHeapAccess
   public:
     INSTRUCTION_HEADER(AsmJSStoreHeap);
 
-    static MAsmJSStoreHeap *New(TempAllocator &alloc, ViewType vt,
+    static MAsmJSStoreHeap *New(TempAllocator &alloc, Scalar::Type vt,
                                 MDefinition *ptr, MDefinition *v, bool needsBoundsCheck,
                                 MemoryBarrierBits barrierBefore = MembarNobits,
                                 MemoryBarrierBits barrierAfter = MembarNobits)
@@ -12273,7 +12270,7 @@ class MAsmJSStoreHeap : public MBinaryInstruction, public MAsmJSHeapAccess
 
 class MAsmJSCompareExchangeHeap : public MTernaryInstruction, public MAsmJSHeapAccess
 {
-    MAsmJSCompareExchangeHeap(ViewType vt, MDefinition *ptr, MDefinition *oldv, MDefinition *newv,
+    MAsmJSCompareExchangeHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *oldv, MDefinition *newv,
                               bool needsBoundsCheck)
       : MTernaryInstruction(ptr, oldv, newv),
         MAsmJSHeapAccess(vt, needsBoundsCheck)
@@ -12285,7 +12282,7 @@ class MAsmJSCompareExchangeHeap : public MTernaryInstruction, public MAsmJSHeapA
   public:
     INSTRUCTION_HEADER(AsmJSCompareExchangeHeap);
 
-    static MAsmJSCompareExchangeHeap *New(TempAllocator &alloc, ViewType vt,
+    static MAsmJSCompareExchangeHeap *New(TempAllocator &alloc, Scalar::Type vt,
                                           MDefinition *ptr, MDefinition *oldv,
                                           MDefinition *newv, bool needsBoundsCheck)
     {
@@ -12305,7 +12302,7 @@ class MAsmJSAtomicBinopHeap : public MBinaryInstruction, public MAsmJSHeapAccess
 {
     AtomicOp op_;
 
-    MAsmJSAtomicBinopHeap(AtomicOp op, ViewType vt, MDefinition *ptr, MDefinition *v,
+    MAsmJSAtomicBinopHeap(AtomicOp op, Scalar::Type vt, MDefinition *ptr, MDefinition *v,
                           bool needsBoundsCheck)
       : MBinaryInstruction(ptr, v),
         MAsmJSHeapAccess(vt, needsBoundsCheck),
@@ -12318,7 +12315,7 @@ class MAsmJSAtomicBinopHeap : public MBinaryInstruction, public MAsmJSHeapAccess
   public:
     INSTRUCTION_HEADER(AsmJSAtomicBinopHeap);
 
-    static MAsmJSAtomicBinopHeap *New(TempAllocator &alloc, AtomicOp op, ViewType vt,
+    static MAsmJSAtomicBinopHeap *New(TempAllocator &alloc, AtomicOp op, Scalar::Type vt,
                                       MDefinition *ptr, MDefinition *v, bool needsBoundsCheck)
     {
         return new(alloc) MAsmJSAtomicBinopHeap(op, vt, ptr, v, needsBoundsCheck);
diff --git a/js/src/jit/arm/CodeGenerator-arm.cpp b/js/src/jit/arm/CodeGenerator-arm.cpp
index 217a37db52c3..b8b8a4843bc8 100644
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -1911,10 +1911,7 @@ void
 CodeGeneratorARM::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
 {
     MAsmJSCompareExchangeHeap *mir = ins->mir();
-
-    MOZ_ASSERT(mir->viewType() <= AsmJSHeapAccess::Uint32);
-    Scalar::Type vt = Scalar::Type(mir->viewType());
-
+    Scalar::Type vt = mir->viewType();
     const LAllocation *ptr = ins->ptr();
     Register ptrReg = ToRegister(ptr);
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
@@ -1948,10 +1945,7 @@ void
 CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
 {
     MAsmJSAtomicBinopHeap *mir = ins->mir();
-
-    MOZ_ASSERT(mir->viewType() <= AsmJSHeapAccess::Uint32);
-    Scalar::Type vt = Scalar::Type(mir->viewType());
-
+    Scalar::Type vt = mir->viewType();
     const LAllocation *ptr = ins->ptr();
     Register ptrReg = ToRegister(ptr);
     Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
diff --git a/js/src/jit/arm/Lowering-arm.cpp b/js/src/jit/arm/Lowering-arm.cpp
index 8ba858d1391b..87a0ea0f9c1b 100644
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -651,7 +651,7 @@ LIRGeneratorARM::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArra
 bool
 LIRGeneratorARM::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins)
 {
-    MOZ_ASSERT(ins->viewType() < AsmJSHeapAccess::Float32);
+    MOZ_ASSERT(ins->viewType() < Scalar::Float32);
 
     MDefinition *ptr = ins->ptr();
     MOZ_ASSERT(ptr->type() == MIRType_Int32);
@@ -667,7 +667,7 @@ LIRGeneratorARM::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins)
 bool
 LIRGeneratorARM::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins)
 {
-    MOZ_ASSERT(ins->viewType() < AsmJSHeapAccess::Float32);
+    MOZ_ASSERT(ins->viewType() < Scalar::Float32);
 
     MDefinition *ptr = ins->ptr();
     MOZ_ASSERT(ptr->type() == MIRType_Int32);
diff --git a/js/src/jit/shared/Assembler-shared.h b/js/src/jit/shared/Assembler-shared.h
index a54bacd570cb..3316400a0648 100644
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -735,27 +735,12 @@ static const unsigned AsmJSNaN32GlobalDataOffset = 2 * sizeof(void*) + sizeof(do
 // #ifdefery.
 class AsmJSHeapAccess
 {
-  public:
-    enum ViewType {
-        Int8 = Scalar::Int8,
-        Uint8 = Scalar::Uint8,
-        Int16 = Scalar::Int16,
-        Uint16 = Scalar::Uint16,
-        Int32 = Scalar::Int32,
-        Uint32 = Scalar::Uint32,
-        Float32 = Scalar::Float32,
-        Float64 = Scalar::Float64,
-        Uint8Clamped = Scalar::Uint8Clamped,
-        Float32x4,
-        Int32x4
-    };
-
-  private:
     uint32_t offset_;
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
     uint8_t cmpDelta_; // the number of bytes from the cmp to the load/store instruction
     uint8_t opLength_; // the length of the load/store instruction
-    ViewType viewType_;
+    Scalar::Type type_;
     AnyRegister::Code loadedReg_ : 8;
 #endif
@@ -768,20 +753,19 @@ class AsmJSHeapAccess
     // If 'cmp' equals 'offset' or if it is not supplied then the
     // cmpDelta_ is zero indicating that there is no length to patch.
-    AsmJSHeapAccess(uint32_t offset, uint32_t after, ViewType viewType,
-                    AnyRegister loadedReg, uint32_t cmp = NoLengthCheck)
-      : offset_(offset),
-        cmpDelta_(cmp == NoLengthCheck ? 0 : offset - cmp),
-        opLength_(after - offset),
-        viewType_(viewType),
-        loadedReg_(loadedReg.code())
-    {}
-    AsmJSHeapAccess(uint32_t offset, uint8_t after, ViewType viewType,
+    AsmJSHeapAccess(uint32_t offset, uint32_t after, Scalar::Type type,
                     AnyRegister loadedReg, uint32_t cmp = NoLengthCheck)
       : offset_(offset),
         cmpDelta_(cmp == NoLengthCheck ? 0 : offset - cmp),
         opLength_(after - offset),
-        viewType_(viewType),
+        type_(type),
+        loadedReg_(loadedReg.code())
+    {}
+    AsmJSHeapAccess(uint32_t offset, uint8_t after, Scalar::Type type, uint32_t cmp = NoLengthCheck)
+      : offset_(offset),
+        cmpDelta_(cmp == NoLengthCheck ? 0 : offset - cmp),
+        opLength_(after - offset),
+        type_(type),
         loadedReg_(UINT8_MAX)
     {}
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
@@ -800,7 +784,7 @@ class AsmJSHeapAccess
     void *patchLengthAt(uint8_t *code) const { return code + (offset_ - cmpDelta_); }
     unsigned opLength() const { return opLength_; }
     bool isLoad() const { return loadedReg_ != UINT8_MAX; }
-    ViewType viewType() const { return viewType_; }
+    Scalar::Type type() const { return type_; }
     AnyRegister loadedReg() const { return AnyRegister::FromCode(loadedReg_); }
 #endif
 };
diff --git a/js/src/jit/shared/CodeGenerator-x86-shared.cpp b/js/src/jit/shared/CodeGenerator-x86-shared.cpp
index 9faa99be6420..2d51a689ae3c 100644
--- a/js/src/jit/shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-x86-shared.cpp
@@ -331,25 +331,27 @@ void
 CodeGeneratorX86Shared::visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds *ool)
 {
     switch (ool->viewType()) {
-      case AsmJSHeapAccess::Float32:
+      case Scalar::MaxTypedArrayViewType:
+        MOZ_CRASH("unexpected array type");
+      case Scalar::Float32:
         masm.loadConstantFloat32(float(GenericNaN()), ool->dest().fpu());
         break;
-      case AsmJSHeapAccess::Float64:
+      case Scalar::Float64:
         masm.loadConstantDouble(GenericNaN(), ool->dest().fpu());
         break;
-      case AsmJSHeapAccess::Float32x4:
+      case Scalar::Float32x4:
         masm.loadConstantFloat32x4(SimdConstant::SplatX4(float(GenericNaN())), ool->dest().fpu());
         break;
-      case AsmJSHeapAccess::Int32x4:
+      case Scalar::Int32x4:
         masm.loadConstantInt32x4(SimdConstant::SplatX4(0), ool->dest().fpu());
         break;
-      case AsmJSHeapAccess::Int8:
-      case AsmJSHeapAccess::Uint8:
-      case AsmJSHeapAccess::Int16:
-      case AsmJSHeapAccess::Uint16:
-      case AsmJSHeapAccess::Int32:
-      case AsmJSHeapAccess::Uint32:
-      case AsmJSHeapAccess::Uint8Clamped:
+      case Scalar::Int8:
+      case Scalar::Uint8:
+      case Scalar::Int16:
+      case Scalar::Uint16:
+      case Scalar::Int32:
+      case Scalar::Uint32:
+      case Scalar::Uint8Clamped:
         Register destReg = ool->dest().gpr();
         masm.mov(ImmWord(0), destReg);
         break;
     }
 }
diff --git a/js/src/jit/shared/CodeGenerator-x86-shared.h b/js/src/jit/shared/CodeGenerator-x86-shared.h
index 8ae59cc6c202..80de9af703c5 100644
--- a/js/src/jit/shared/CodeGenerator-x86-shared.h
+++ b/js/src/jit/shared/CodeGenerator-x86-shared.h
@@ -38,14 +38,14 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
     class OutOfLineLoadTypedArrayOutOfBounds : public OutOfLineCodeBase<CodeGeneratorX86Shared>
     {
         AnyRegister dest_;
-        AsmJSHeapAccess::ViewType viewType_;
+        Scalar::Type viewType_;
       public:
-        OutOfLineLoadTypedArrayOutOfBounds(AnyRegister dest, AsmJSHeapAccess::ViewType viewType)
+        OutOfLineLoadTypedArrayOutOfBounds(AnyRegister dest, Scalar::Type viewType)
           : dest_(dest), viewType_(viewType)
         {}
 
         AnyRegister dest() const { return dest_; }
-        AsmJSHeapAccess::ViewType viewType() const { return viewType_; }
+        Scalar::Type viewType() const { return viewType_; }
         void accept(CodeGeneratorX86Shared *codegen) {
             codegen->visitOutOfLineLoadTypedArrayOutOfBounds(this);
         }
diff --git a/js/src/jit/x64/CodeGenerator-x64.cpp b/js/src/jit/x64/CodeGenerator-x64.cpp
index 76e3629a00fa..5d0e8f03c43e 100644
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -258,7 +258,7 @@ void
 CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
 {
     MAsmJSLoadHeap *mir = ins->mir();
-    AsmJSHeapAccess::ViewType vt = mir->viewType();
+    Scalar::Type vt = mir->viewType();
     const LAllocation *ptr = ins->ptr();
     const LDefinition *out = ins->output();
     Operand srcAddr(HeapReg);
@@ -285,17 +285,19 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
 
     uint32_t before = masm.size();
     switch (vt) {
-      case AsmJSHeapAccess::Int8:      masm.movsbl(srcAddr, ToRegister(out)); break;
-      case AsmJSHeapAccess::Uint8:     masm.movzbl(srcAddr, ToRegister(out)); break;
-      case AsmJSHeapAccess::Int16:     masm.movswl(srcAddr, ToRegister(out)); break;
-      case AsmJSHeapAccess::Uint16:    masm.movzwl(srcAddr, ToRegister(out)); break;
-      case AsmJSHeapAccess::Int32:
-      case AsmJSHeapAccess::Uint32:    masm.movl(srcAddr, ToRegister(out)); break;
-      case AsmJSHeapAccess::Float32:   masm.loadFloat32(srcAddr, ToFloatRegister(out)); break;
-      case AsmJSHeapAccess::Float64:   masm.loadDouble(srcAddr, ToFloatRegister(out)); break;
-      case AsmJSHeapAccess::Float32x4: masm.loadUnalignedFloat32x4(srcAddr, ToFloatRegister(out)); break;
-      case AsmJSHeapAccess::Int32x4:   masm.loadUnalignedInt32x4(srcAddr, ToFloatRegister(out)); break;
-      case AsmJSHeapAccess::Uint8Clamped: MOZ_CRASH("unexpected array type");
+      case Scalar::Int8:      masm.movsbl(srcAddr, ToRegister(out)); break;
+      case Scalar::Uint8:     masm.movzbl(srcAddr, ToRegister(out)); break;
+      case Scalar::Int16:     masm.movswl(srcAddr, ToRegister(out)); break;
+      case Scalar::Uint16:    masm.movzwl(srcAddr, ToRegister(out)); break;
+      case Scalar::Int32:
+      case Scalar::Uint32:    masm.movl(srcAddr, ToRegister(out)); break;
+      case Scalar::Float32:   masm.loadFloat32(srcAddr, ToFloatRegister(out)); break;
+      case Scalar::Float64:   masm.loadDouble(srcAddr, ToFloatRegister(out)); break;
+      case Scalar::Float32x4: masm.loadUnalignedFloat32x4(srcAddr, ToFloatRegister(out)); break;
+      case Scalar::Int32x4:   masm.loadUnalignedInt32x4(srcAddr, ToFloatRegister(out)); break;
+      case Scalar::Uint8Clamped:
+      case Scalar::MaxTypedArrayViewType:
+        MOZ_CRASH("unexpected array type");
     }
     uint32_t after = masm.size();
     if (ool)
@@ -308,7 +310,7 @@ void
 CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
 {
     MAsmJSStoreHeap *mir = ins->mir();
-    AsmJSHeapAccess::ViewType vt = mir->viewType();
+    Scalar::Type vt = mir->viewType();
     const LAllocation *ptr = ins->ptr();
     Operand dstAddr(HeapReg);
 
@@ -332,31 +334,35 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
     uint32_t before = masm.size();
     if (ins->value()->isConstant()) {
         switch (vt) {
-          case AsmJSHeapAccess::Int8:
-          case AsmJSHeapAccess::Uint8:        masm.movb(Imm32(ToInt32(ins->value())), dstAddr); break;
-          case AsmJSHeapAccess::Int16:
-          case AsmJSHeapAccess::Uint16:       masm.movw(Imm32(ToInt32(ins->value())), dstAddr); break;
-          case AsmJSHeapAccess::Int32:
-          case AsmJSHeapAccess::Uint32:       masm.movl(Imm32(ToInt32(ins->value())), dstAddr); break;
-          case AsmJSHeapAccess::Float32:
-          case AsmJSHeapAccess::Float64:
-          case AsmJSHeapAccess::Float32x4:
-          case AsmJSHeapAccess::Int32x4:
-          case AsmJSHeapAccess::Uint8Clamped: MOZ_CRASH("unexpected array type");
+          case Scalar::Int8:
+          case Scalar::Uint8:        masm.movb(Imm32(ToInt32(ins->value())), dstAddr); break;
+          case Scalar::Int16:
+          case Scalar::Uint16:       masm.movw(Imm32(ToInt32(ins->value())), dstAddr); break;
+          case Scalar::Int32:
+          case Scalar::Uint32:       masm.movl(Imm32(ToInt32(ins->value())), dstAddr); break;
+          case Scalar::Float32:
+          case Scalar::Float64:
+          case Scalar::Float32x4:
+          case Scalar::Int32x4:
+          case Scalar::Uint8Clamped:
+          case Scalar::MaxTypedArrayViewType:
+            MOZ_CRASH("unexpected array type");
         }
     } else {
         switch (vt) {
-          case AsmJSHeapAccess::Int8:
-          case AsmJSHeapAccess::Uint8:        masm.movb(ToRegister(ins->value()), dstAddr); break;
-          case AsmJSHeapAccess::Int16:
-          case AsmJSHeapAccess::Uint16:       masm.movw(ToRegister(ins->value()), dstAddr); break;
-          case AsmJSHeapAccess::Int32:
-          case AsmJSHeapAccess::Uint32:       masm.movl(ToRegister(ins->value()), dstAddr); break;
-          case AsmJSHeapAccess::Float32:      masm.storeFloat32(ToFloatRegister(ins->value()), dstAddr); break;
-          case AsmJSHeapAccess::Float64:      masm.storeDouble(ToFloatRegister(ins->value()), dstAddr); break;
-          case AsmJSHeapAccess::Float32x4:    masm.storeUnalignedFloat32x4(ToFloatRegister(ins->value()), dstAddr); break;
-          case AsmJSHeapAccess::Int32x4:      masm.storeUnalignedInt32x4(ToFloatRegister(ins->value()), dstAddr); break;
-          case AsmJSHeapAccess::Uint8Clamped: MOZ_CRASH("unexpected array type");
+          case Scalar::Int8:
+          case Scalar::Uint8:        masm.movb(ToRegister(ins->value()), dstAddr); break;
+          case Scalar::Int16:
+          case Scalar::Uint16:       masm.movw(ToRegister(ins->value()), dstAddr); break;
+          case Scalar::Int32:
+          case Scalar::Uint32:       masm.movl(ToRegister(ins->value()), dstAddr); break;
+          case Scalar::Float32:      masm.storeFloat32(ToFloatRegister(ins->value()), dstAddr); break;
+          case Scalar::Float64:      masm.storeDouble(ToFloatRegister(ins->value()), dstAddr); break;
+          case Scalar::Float32x4:    masm.storeUnalignedFloat32x4(ToFloatRegister(ins->value()), dstAddr); break;
+          case Scalar::Int32x4:      masm.storeUnalignedInt32x4(ToFloatRegister(ins->value()), dstAddr); break;
+          case Scalar::Uint8Clamped:
+          case Scalar::MaxTypedArrayViewType:
+            MOZ_CRASH("unexpected array type");
         }
     }
     uint32_t after = masm.size();
@@ -370,10 +376,7 @@ void
 CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
 {
     MAsmJSCompareExchangeHeap *mir = ins->mir();
-
-    MOZ_ASSERT(mir->viewType() <= AsmJSHeapAccess::Uint32);
-    Scalar::Type vt = Scalar::Type(mir->viewType());
-
+    Scalar::Type vt = mir->viewType();
     const LAllocation *ptr = ins->ptr();
 
     MOZ_ASSERT(ptr->isRegister());
@@ -411,10 +414,7 @@ void
 CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
 {
     MAsmJSAtomicBinopHeap *mir = ins->mir();
-
-    MOZ_ASSERT(mir->viewType() <= AsmJSHeapAccess::Uint32);
-    Scalar::Type vt = Scalar::Type(mir->viewType());
-
+    Scalar::Type vt = mir->viewType();
     const LAllocation *ptr = ins->ptr();
     Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
     const LAllocation* value = ins->value();
diff --git a/js/src/jit/x64/Lowering-x64.cpp b/js/src/jit/x64/Lowering-x64.cpp
index ec678fd46d50..a99b106826d7 100644
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -174,21 +174,22 @@ LIRGeneratorX64::visitAsmJSStoreHeap(MAsmJSStoreHeap *ins)
 
     LAsmJSStoreHeap *lir;
     switch (ins->viewType()) {
-      case AsmJSHeapAccess::Int8:
-      case AsmJSHeapAccess::Uint8:
-      case AsmJSHeapAccess::Int16:
-      case AsmJSHeapAccess::Uint16:
-      case AsmJSHeapAccess::Int32:
-      case AsmJSHeapAccess::Uint32:
+      case Scalar::Int8:
+      case Scalar::Uint8:
+      case Scalar::Int16:
+      case Scalar::Uint16:
+      case Scalar::Int32:
+      case Scalar::Uint32:
         lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterOrConstantAtStart(ins->value()));
         break;
-      case AsmJSHeapAccess::Float32:
-      case AsmJSHeapAccess::Float64:
-      case AsmJSHeapAccess::Float32x4:
-      case AsmJSHeapAccess::Int32x4:
+      case Scalar::Float32:
+      case Scalar::Float64:
+      case Scalar::Float32x4:
+      case Scalar::Int32x4:
         lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value()));
         break;
-      case AsmJSHeapAccess::Uint8Clamped:
+      case Scalar::Uint8Clamped:
+      case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected array type");
     }
 
diff --git a/js/src/jit/x86/CodeGenerator-x86.cpp b/js/src/jit/x86/CodeGenerator-x86.cpp
index 353c59651a69..768a57ff8f38 100644
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -255,31 +255,31 @@ CodeGeneratorX86::visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32 *lir)
 
 template <typename T>
 void
-CodeGeneratorX86::loadViewTypeElement(AsmJSHeapAccess::ViewType vt, const T &srcAddr,
-                                      const LDefinition *out)
+CodeGeneratorX86::load(Scalar::Type vt, const T &srcAddr, const LDefinition *out)
 {
     switch (vt) {
-      case AsmJSHeapAccess::Int8:         masm.movsblWithPatch(srcAddr, ToRegister(out)); break;
-      case AsmJSHeapAccess::Uint8Clamped:
-      case AsmJSHeapAccess::Uint8:        masm.movzblWithPatch(srcAddr, ToRegister(out)); break;
-      case AsmJSHeapAccess::Int16:        masm.movswlWithPatch(srcAddr, ToRegister(out)); break;
-      case AsmJSHeapAccess::Uint16:       masm.movzwlWithPatch(srcAddr, ToRegister(out)); break;
-      case AsmJSHeapAccess::Int32:
-      case AsmJSHeapAccess::Uint32:       masm.movlWithPatch(srcAddr, ToRegister(out)); break;
-      case AsmJSHeapAccess::Float32:      masm.movssWithPatch(srcAddr, ToFloatRegister(out)); break;
-      case AsmJSHeapAccess::Float64:      masm.movsdWithPatch(srcAddr, ToFloatRegister(out)); break;
-      case AsmJSHeapAccess::Float32x4:    masm.movupsWithPatch(srcAddr, ToFloatRegister(out)); break;
-      case AsmJSHeapAccess::Int32x4:      masm.movdquWithPatch(srcAddr, ToFloatRegister(out)); break;
+      case Scalar::Int8:         masm.movsblWithPatch(srcAddr, ToRegister(out)); break;
+      case Scalar::Uint8Clamped:
+      case Scalar::Uint8:        masm.movzblWithPatch(srcAddr, ToRegister(out)); break;
+      case Scalar::Int16:        masm.movswlWithPatch(srcAddr, ToRegister(out)); break;
+      case Scalar::Uint16:       masm.movzwlWithPatch(srcAddr, ToRegister(out)); break;
+      case Scalar::Int32:
+      case Scalar::Uint32:       masm.movlWithPatch(srcAddr, ToRegister(out)); break;
+      case Scalar::Float32:      masm.movssWithPatch(srcAddr, ToFloatRegister(out)); break;
+      case Scalar::Float64:      masm.movsdWithPatch(srcAddr, ToFloatRegister(out)); break;
+      case Scalar::Float32x4:    masm.movupsWithPatch(srcAddr, ToFloatRegister(out)); break;
+      case Scalar::Int32x4:      masm.movdquWithPatch(srcAddr, ToFloatRegister(out)); break;
+      case Scalar::MaxTypedArrayViewType: MOZ_CRASH("unexpected type");
     }
 }
 
 template <typename T>
 void
-CodeGeneratorX86::loadAndNoteViewTypeElement(AsmJSHeapAccess::ViewType vt, const T &srcAddr,
+CodeGeneratorX86::loadAndNoteViewTypeElement(Scalar::Type vt, const T &srcAddr,
                                              const LDefinition *out)
 {
     uint32_t before = masm.size();
-    loadViewTypeElement(vt, srcAddr, out);
+    load(vt, srcAddr, out);
     uint32_t after = masm.size();
     masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(out)));
 }
@@ -288,8 +288,8 @@ void
 CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins)
 {
     const MLoadTypedArrayElementStatic *mir = ins->mir();
-    AsmJSHeapAccess::ViewType vt = AsmJSHeapAccess::ViewType(mir->viewType());
-    MOZ_ASSERT_IF(vt == AsmJSHeapAccess::Float32, mir->type() == MIRType_Float32);
+    Scalar::Type vt = mir->viewType();
+    MOZ_ASSERT_IF(vt == Scalar::Float32, mir->type() == MIRType_Float32);
 
     Register ptr = ToRegister(ins->ptr());
     const LDefinition *out = ins->output();
@@ -307,10 +307,10 @@ CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic
         bailoutIf(Assembler::AboveOrEqual, ins->snapshot());
 
     Address srcAddr(ptr, (int32_t) mir->base());
-    loadViewTypeElement(vt, srcAddr, out);
-    if (vt == AsmJSHeapAccess::Float64)
+    load(vt, srcAddr, out);
+    if (vt == Scalar::Float64)
         masm.canonicalizeDouble(ToFloatRegister(out));
-    if (vt == AsmJSHeapAccess::Float32)
+    if (vt == Scalar::Float32)
         masm.canonicalizeFloat(ToFloatRegister(out));
     if (ool)
         masm.bind(ool->rejoin());
@@ -352,7 +352,7 @@ void
 CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
 {
     const MAsmJSLoadHeap *mir = ins->mir();
-    AsmJSHeapAccess::ViewType vt = mir->viewType();
+    Scalar::Type vt = mir->viewType();
     const LAllocation *ptr = ins->ptr();
     const LDefinition *out = ins->output();
 
@@ -385,7 +385,7 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
     masm.j(Assembler::AboveOrEqual, ool->entry());
 
     uint32_t before = masm.size();
-    loadViewTypeElement(vt, srcAddr, out);
+    load(vt, srcAddr, out);
     uint32_t after = masm.size();
     masm.bind(ool->rejoin());
     memoryBarrier(ins->mir()->barrierAfter());
@@ -394,31 +394,31 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
 
 template <typename T>
 void
-CodeGeneratorX86::storeViewTypeElement(AsmJSHeapAccess::ViewType vt, const LAllocation *value,
-                                       const T &dstAddr)
+CodeGeneratorX86::store(Scalar::Type vt, const LAllocation *value, const T &dstAddr)
 {
     switch (vt) {
-      case AsmJSHeapAccess::Int8:
-      case AsmJSHeapAccess::Uint8Clamped:
-      case AsmJSHeapAccess::Uint8:        masm.movbWithPatch(ToRegister(value), dstAddr); break;
-      case AsmJSHeapAccess::Int16:
-      case AsmJSHeapAccess::Uint16:       masm.movwWithPatch(ToRegister(value), dstAddr); break;
-      case AsmJSHeapAccess::Int32:
-      case AsmJSHeapAccess::Uint32:       masm.movlWithPatch(ToRegister(value), dstAddr); break;
-      case AsmJSHeapAccess::Float32:      masm.movssWithPatch(ToFloatRegister(value), dstAddr); break;
-      case AsmJSHeapAccess::Float64:      masm.movsdWithPatch(ToFloatRegister(value), dstAddr); break;
-      case AsmJSHeapAccess::Float32x4:    masm.movupsWithPatch(ToFloatRegister(value), dstAddr); break;
-      case AsmJSHeapAccess::Int32x4:      masm.movdquWithPatch(ToFloatRegister(value), dstAddr); break;
+      case Scalar::Int8:
+      case Scalar::Uint8Clamped:
+      case Scalar::Uint8:        masm.movbWithPatch(ToRegister(value), dstAddr); break;
+      case Scalar::Int16:
+      case Scalar::Uint16:       masm.movwWithPatch(ToRegister(value), dstAddr); break;
+      case Scalar::Int32:
+      case Scalar::Uint32:       masm.movlWithPatch(ToRegister(value), dstAddr); break;
+      case Scalar::Float32:      masm.movssWithPatch(ToFloatRegister(value), dstAddr); break;
+      case Scalar::Float64:      masm.movsdWithPatch(ToFloatRegister(value), dstAddr); break;
+      case Scalar::Float32x4:    masm.movupsWithPatch(ToFloatRegister(value), dstAddr); break;
+      case Scalar::Int32x4:      masm.movdquWithPatch(ToFloatRegister(value), dstAddr); break;
+      case Scalar::MaxTypedArrayViewType: MOZ_CRASH("unexpected type");
     }
 }
 
 template <typename T>
 void
-CodeGeneratorX86::storeAndNoteViewTypeElement(AsmJSHeapAccess::ViewType vt,
-                                              const LAllocation *value, const T &dstAddr)
+CodeGeneratorX86::storeAndNoteViewTypeElement(Scalar::Type vt, const LAllocation *value,
+                                              const T &dstAddr)
 {
     uint32_t before = masm.size();
-    storeViewTypeElement(vt, value, dstAddr);
+    store(vt, value, dstAddr);
     uint32_t after = masm.size();
     masm.append(AsmJSHeapAccess(before, after, vt));
 }
@@ -427,8 +427,7 @@ void
 CodeGeneratorX86::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins)
 {
     MStoreTypedArrayElementStatic *mir = ins->mir();
-    AsmJSHeapAccess::ViewType vt = AsmJSHeapAccess::ViewType(mir->viewType());
-
+    Scalar::Type vt = Scalar::Type(mir->viewType());
     Register ptr = ToRegister(ins->ptr());
     const LAllocation *value = ins->value();
 
@@ -437,7 +436,7 @@ CodeGeneratorX86::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStati
     masm.j(Assembler::AboveOrEqual, &rejoin);
 
     Address dstAddr(ptr, (int32_t) mir->base());
-    storeViewTypeElement(vt, value, dstAddr);
+    store(vt, value, dstAddr);
     masm.bind(&rejoin);
 }
 
@@ -445,7 +444,7 @@ void
 CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
 {
     MAsmJSStoreHeap *mir = ins->mir();
-    AsmJSHeapAccess::ViewType vt = mir->viewType();
+    Scalar::Type vt = mir->viewType();
     const LAllocation *value = ins->value();
     const LAllocation *ptr = ins->ptr();
 
@@ -476,7 +475,7 @@ CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
     masm.j(Assembler::AboveOrEqual, &rejoin);
 
     uint32_t before = masm.size();
-    storeViewTypeElement(vt, value, dstAddr);
+    store(vt, value, dstAddr);
     uint32_t after = masm.size();
     masm.bind(&rejoin);
     memoryBarrier(ins->mir()->barrierAfter());
@@ -487,10 +486,7 @@ void
 CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
 {
     MAsmJSCompareExchangeHeap *mir = ins->mir();
-
-    MOZ_ASSERT(mir->viewType() <= AsmJSHeapAccess::Uint32);
-    Scalar::Type vt = Scalar::Type(mir->viewType());
-
+    Scalar::Type vt = mir->viewType();
     const LAllocation *ptr = ins->ptr();
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
@@ -535,10 +531,7 @@ void
 CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
 {
     MAsmJSAtomicBinopHeap *mir = ins->mir();
-
-    MOZ_ASSERT(mir->viewType() <= AsmJSHeapAccess::Uint32);
-    Scalar::Type vt = Scalar::Type(mir->viewType());
-
+    Scalar::Type vt = mir->viewType();
     const LAllocation *ptr = ins->ptr();
     Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
     const LAllocation* value = ins->value();
diff --git a/js/src/jit/x86/CodeGenerator-x86.h b/js/src/jit/x86/CodeGenerator-x86.h
index 158e566058cf..368a889d8da6 100644
--- a/js/src/jit/x86/CodeGenerator-x86.h
+++ b/js/src/jit/x86/CodeGenerator-x86.h
@@ -29,17 +29,13 @@ class CodeGeneratorX86 : public CodeGeneratorX86Shared
     ValueOperand ToTempValue(LInstruction *ins, size_t pos);
 
     template <typename T>
-    void loadAndNoteViewTypeElement(AsmJSHeapAccess::ViewType vt, const T &srcAddr,
-                                    const LDefinition *out);
+    void loadAndNoteViewTypeElement(Scalar::Type vt, const T &srcAddr, const LDefinition *out);
     template <typename T>
-    void loadViewTypeElement(AsmJSHeapAccess::ViewType vt, const T &srcAddr,
-                             const LDefinition *out);
+    void load(Scalar::Type vt, const T &srcAddr, const LDefinition *out);
     template <typename T>
-    void storeAndNoteViewTypeElement(AsmJSHeapAccess::ViewType vt, const LAllocation *value,
-                                     const T &dstAddr);
+    void storeAndNoteViewTypeElement(Scalar::Type vt, const LAllocation *value, const T &dstAddr);
     template <typename T>
-    void storeViewTypeElement(AsmJSHeapAccess::ViewType vt, const LAllocation *value,
-                              const T &dstAddr);
+    void store(Scalar::Type vt, const LAllocation *value, const T &dstAddr);
 
     void memoryBarrier(MemoryBarrierBits barrier);
diff --git a/js/src/jit/x86/Lowering-x86.cpp b/js/src/jit/x86/Lowering-x86.cpp
index 347eab932dce..08e148200ece 100644
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -247,37 +247,39 @@ LIRGeneratorX86::visitAsmJSStoreHeap(MAsmJSStoreHeap *ins)
         MOZ_ASSERT(ptr->toConstant()->value().toInt32() >= 0);
         LAllocation ptrAlloc = LAllocation(ptr->toConstant()->vp());
 
         switch (ins->viewType()) {
-          case AsmJSHeapAccess::Int8: case AsmJSHeapAccess::Uint8:
+          case Scalar::Int8: case Scalar::Uint8:
            // See comment below.
             lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useFixed(ins->value(), eax));
             break;
-          case AsmJSHeapAccess::Int16: case AsmJSHeapAccess::Uint16:
-          case AsmJSHeapAccess::Int32: case AsmJSHeapAccess::Uint32:
-          case AsmJSHeapAccess::Float32: case AsmJSHeapAccess::Float64:
-          case AsmJSHeapAccess::Float32x4: case AsmJSHeapAccess::Int32x4:
+          case Scalar::Int16: case Scalar::Uint16:
+          case Scalar::Int32: case Scalar::Uint32:
+          case Scalar::Float32: case Scalar::Float64:
+          case Scalar::Float32x4: case Scalar::Int32x4:
            // See comment below.
             lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value()));
             break;
-          case AsmJSHeapAccess::Uint8Clamped:
+          case Scalar::Uint8Clamped:
+          case Scalar::MaxTypedArrayViewType:
            MOZ_CRASH("unexpected array type");
         }
         return add(lir, ins);
     }
 
     switch (ins->viewType()) {
-      case AsmJSHeapAccess::Int8: case AsmJSHeapAccess::Uint8:
+      case Scalar::Int8: case Scalar::Uint8:
         // See comment for LIRGeneratorX86::useByteOpRegister.
         lir = new(alloc()) LAsmJSStoreHeap(useRegister(ins->ptr()), useFixed(ins->value(), eax));
         break;
-      case AsmJSHeapAccess::Int16: case AsmJSHeapAccess::Uint16:
-      case AsmJSHeapAccess::Int32: case AsmJSHeapAccess::Uint32:
-      case AsmJSHeapAccess::Float32: case AsmJSHeapAccess::Float64:
-      case AsmJSHeapAccess::Float32x4: case AsmJSHeapAccess::Int32x4:
+      case Scalar::Int16: case Scalar::Uint16:
+      case Scalar::Int32: case Scalar::Uint32:
+      case Scalar::Float32: case Scalar::Float64:
+      case Scalar::Float32x4: case Scalar::Int32x4:
         // For now, don't allow constant values. The immediate operand
         // affects instruction layout which affects patching.
         lir = new(alloc()) LAsmJSStoreHeap(useRegisterAtStart(ptr), useRegisterAtStart(ins->value()));
         break;
-      case AsmJSHeapAccess::Uint8Clamped:
+      case Scalar::Uint8Clamped:
+      case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected array type");
     }
diff --git a/js/src/vm/TypedArrayObject.h b/js/src/vm/TypedArrayObject.h
index ca27ef5e0b85..65bf0bca01bc 100644
--- a/js/src/vm/TypedArrayObject.h
+++ b/js/src/vm/TypedArrayObject.h
@@ -296,6 +296,9 @@ TypedArrayShift(Scalar::Type viewType)
         return 2;
       case Scalar::Float64:
         return 3;
+      case Scalar::Float32x4:
+      case Scalar::Int32x4:
+        return 4;
       default:;
     }
     MOZ_CRASH("Unexpected array type");
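
The heap-length patching in `AsmJSModule::initHeap` above depends on `1 << TypedArrayShift(type)` yielding the byte width of every access, which is why the last hunk teaches `TypedArrayShift` to return a shift of 4 (16 bytes) for the SIMD views. The following standalone sketch (not part of the patch; `SketchType` and `shiftFor` are illustrative stand-ins for `Scalar::Type` and `TypedArrayShift`) shows the arithmetic the patched length check uses:

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative stand-ins for the Scalar::Type cases the patch touches.
enum SketchType { Int8, Uint16, Int32, Float64, Float32x4, Int32x4 };

// Mirrors TypedArrayShift() after the patch: scalar types return log2 of their
// byte size; the 16-byte SIMD views return 4.
static unsigned shiftFor(SketchType t)
{
    switch (t) {
      case Int8:      return 0;
      case Uint16:    return 1;
      case Int32:     return 2;
      case Float64:   return 3;
      case Float32x4:
      case Int32x4:   return 4;
    }
    return 0;
}

int main()
{
    const uint32_t heapLength = 0x10000;
    const SketchType types[] = { Int8, Float64, Float32x4 };
    for (SketchType t : types) {
        // Same arithmetic as initHeap(): an access of byteSize bytes at ptr is out of
        // bounds when ptr >= heapLength + 1 - byteSize (codegen compares with >=).
        uint32_t byteSize = 1u << shiftFor(t);
        uint32_t patchedLimit = heapLength + 1 - byteSize;
        std::printf("byteSize=%u  patchedLimit=%u\n", unsigned(byteSize), unsigned(patchedLimit));
    }
    return 0;
}
```

For example, a 16-byte `Float32x4` access against a 64 KiB heap gives a patched comparison value of 0x10000 + 1 - 16 = 0xFFF1, so any pointer at or above 0xFFF1 takes the out-of-bounds path.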