Bug 1079361: Refactor AsmJSHeapAccess to include the view type of the heap access; r=luke

This commit is contained in:
Benjamin Bouvier 2014-11-21 12:12:29 +01:00
Parent bcdd89cb6e
Commit 3d99d11936
11 changed files: 248 additions and 177 deletions

Просмотреть файл

@ -331,45 +331,58 @@ ContextToPC(CONTEXT *context)
#if defined(JS_CODEGEN_X64)
template <class T>
static void
SetXMMRegToNaN(bool isFloat32, T *xmm_reg)
SetXMMRegToNaN(AsmJSHeapAccess::ViewType viewType, T *xmm_reg)
{
if (isFloat32) {
switch (viewType) {
case AsmJSHeapAccess::Float32: {
JS_STATIC_ASSERT(sizeof(T) == 4 * sizeof(float));
float *floats = reinterpret_cast<float*>(xmm_reg);
floats[0] = GenericNaN();
floats[1] = 0;
floats[2] = 0;
floats[3] = 0;
} else {
break;
}
case AsmJSHeapAccess::Float64: {
JS_STATIC_ASSERT(sizeof(T) == 2 * sizeof(double));
double *dbls = reinterpret_cast<double*>(xmm_reg);
dbls[0] = GenericNaN();
dbls[1] = 0;
break;
}
case AsmJSHeapAccess::Int8:
case AsmJSHeapAccess::Uint8:
case AsmJSHeapAccess::Int16:
case AsmJSHeapAccess::Uint16:
case AsmJSHeapAccess::Int32:
case AsmJSHeapAccess::Uint32:
case AsmJSHeapAccess::Uint8Clamped:
MOZ_CRASH("unexpected type in SetXMMRegToNaN");
}
}
# if !defined(XP_MACOSX)
static void
SetRegisterToCoercedUndefined(CONTEXT *context, bool isFloat32, AnyRegister reg)
SetRegisterToCoercedUndefined(CONTEXT *context, AsmJSHeapAccess::ViewType viewType, AnyRegister reg)
{
if (reg.isFloat()) {
switch (reg.fpu().code()) {
case X86Registers::xmm0: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 0)); break;
case X86Registers::xmm1: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 1)); break;
case X86Registers::xmm2: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 2)); break;
case X86Registers::xmm3: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 3)); break;
case X86Registers::xmm4: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 4)); break;
case X86Registers::xmm5: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 5)); break;
case X86Registers::xmm6: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 6)); break;
case X86Registers::xmm7: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 7)); break;
case X86Registers::xmm8: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 8)); break;
case X86Registers::xmm9: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 9)); break;
case X86Registers::xmm10: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 10)); break;
case X86Registers::xmm11: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 11)); break;
case X86Registers::xmm12: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 12)); break;
case X86Registers::xmm13: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 13)); break;
case X86Registers::xmm14: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 14)); break;
case X86Registers::xmm15: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 15)); break;
case X86Registers::xmm0: SetXMMRegToNaN(viewType, &XMM_sig(context, 0)); break;
case X86Registers::xmm1: SetXMMRegToNaN(viewType, &XMM_sig(context, 1)); break;
case X86Registers::xmm2: SetXMMRegToNaN(viewType, &XMM_sig(context, 2)); break;
case X86Registers::xmm3: SetXMMRegToNaN(viewType, &XMM_sig(context, 3)); break;
case X86Registers::xmm4: SetXMMRegToNaN(viewType, &XMM_sig(context, 4)); break;
case X86Registers::xmm5: SetXMMRegToNaN(viewType, &XMM_sig(context, 5)); break;
case X86Registers::xmm6: SetXMMRegToNaN(viewType, &XMM_sig(context, 6)); break;
case X86Registers::xmm7: SetXMMRegToNaN(viewType, &XMM_sig(context, 7)); break;
case X86Registers::xmm8: SetXMMRegToNaN(viewType, &XMM_sig(context, 8)); break;
case X86Registers::xmm9: SetXMMRegToNaN(viewType, &XMM_sig(context, 9)); break;
case X86Registers::xmm10: SetXMMRegToNaN(viewType, &XMM_sig(context, 10)); break;
case X86Registers::xmm11: SetXMMRegToNaN(viewType, &XMM_sig(context, 11)); break;
case X86Registers::xmm12: SetXMMRegToNaN(viewType, &XMM_sig(context, 12)); break;
case X86Registers::xmm13: SetXMMRegToNaN(viewType, &XMM_sig(context, 13)); break;
case X86Registers::xmm14: SetXMMRegToNaN(viewType, &XMM_sig(context, 14)); break;
case X86Registers::xmm15: SetXMMRegToNaN(viewType, &XMM_sig(context, 15)); break;
default: MOZ_CRASH();
}
} else {
@ -455,7 +468,7 @@ HandleFault(PEXCEPTION_POINTERS exception)
// register) and set the PC to the next op. Upon return from the handler,
// execution will resume at this next PC.
if (heapAccess->isLoad())
SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
SetRegisterToCoercedUndefined(context, heapAccess->viewType(), heapAccess->loadedReg());
*ppc += heapAccess->opLength();
return true;
@ -505,24 +518,24 @@ SetRegisterToCoercedUndefined(mach_port_t rtThread, x86_thread_state64_t &state,
if (kret != KERN_SUCCESS)
return false;
bool f32 = heapAccess.isFloat32Load();
AsmJSHeapAccess::ViewType viewType = heapAccess.viewType();
switch (heapAccess.loadedReg().fpu().code()) {
case X86Registers::xmm0: SetXMMRegToNaN(f32, &fstate.__fpu_xmm0); break;
case X86Registers::xmm1: SetXMMRegToNaN(f32, &fstate.__fpu_xmm1); break;
case X86Registers::xmm2: SetXMMRegToNaN(f32, &fstate.__fpu_xmm2); break;
case X86Registers::xmm3: SetXMMRegToNaN(f32, &fstate.__fpu_xmm3); break;
case X86Registers::xmm4: SetXMMRegToNaN(f32, &fstate.__fpu_xmm4); break;
case X86Registers::xmm5: SetXMMRegToNaN(f32, &fstate.__fpu_xmm5); break;
case X86Registers::xmm6: SetXMMRegToNaN(f32, &fstate.__fpu_xmm6); break;
case X86Registers::xmm7: SetXMMRegToNaN(f32, &fstate.__fpu_xmm7); break;
case X86Registers::xmm8: SetXMMRegToNaN(f32, &fstate.__fpu_xmm8); break;
case X86Registers::xmm9: SetXMMRegToNaN(f32, &fstate.__fpu_xmm9); break;
case X86Registers::xmm10: SetXMMRegToNaN(f32, &fstate.__fpu_xmm10); break;
case X86Registers::xmm11: SetXMMRegToNaN(f32, &fstate.__fpu_xmm11); break;
case X86Registers::xmm12: SetXMMRegToNaN(f32, &fstate.__fpu_xmm12); break;
case X86Registers::xmm13: SetXMMRegToNaN(f32, &fstate.__fpu_xmm13); break;
case X86Registers::xmm14: SetXMMRegToNaN(f32, &fstate.__fpu_xmm14); break;
case X86Registers::xmm15: SetXMMRegToNaN(f32, &fstate.__fpu_xmm15); break;
case X86Registers::xmm0: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm0); break;
case X86Registers::xmm1: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm1); break;
case X86Registers::xmm2: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm2); break;
case X86Registers::xmm3: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm3); break;
case X86Registers::xmm4: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm4); break;
case X86Registers::xmm5: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm5); break;
case X86Registers::xmm6: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm6); break;
case X86Registers::xmm7: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm7); break;
case X86Registers::xmm8: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm8); break;
case X86Registers::xmm9: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm9); break;
case X86Registers::xmm10: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm10); break;
case X86Registers::xmm11: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm11); break;
case X86Registers::xmm12: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm12); break;
case X86Registers::xmm13: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm13); break;
case X86Registers::xmm14: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm14); break;
case X86Registers::xmm15: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm15); break;
default: MOZ_CRASH();
}
@ -847,7 +860,7 @@ HandleFault(int signum, siginfo_t *info, void *ctx)
// register) and set the PC to the next op. Upon return from the handler,
// execution will resume at this next PC.
if (heapAccess->isLoad())
SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
SetRegisterToCoercedUndefined(context, heapAccess->viewType(), heapAccess->loadedReg());
*ppc += heapAccess->opLength();
return true;

Просмотреть файл

@ -2781,7 +2781,7 @@ class FunctionCompiler
curBlock_->setSlot(info().localSlot(local.slot), def);
}
MDefinition *loadHeap(Scalar::Type vt, MDefinition *ptr, NeedsBoundsCheck chk)
MDefinition *loadHeap(AsmJSHeapAccess::ViewType vt, MDefinition *ptr, NeedsBoundsCheck chk)
{
if (inDeadCode())
return nullptr;
@ -2792,7 +2792,8 @@ class FunctionCompiler
return load;
}
void storeHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk)
void storeHeap(AsmJSHeapAccess::ViewType vt, MDefinition *ptr, MDefinition *v,
NeedsBoundsCheck chk)
{
if (inDeadCode())
return;
@ -2810,7 +2811,7 @@ class FunctionCompiler
curBlock_->add(ins);
}
MDefinition *atomicLoadHeap(Scalar::Type vt, MDefinition *ptr, NeedsBoundsCheck chk)
MDefinition *atomicLoadHeap(AsmJSHeapAccess::ViewType vt, MDefinition *ptr, NeedsBoundsCheck chk)
{
if (inDeadCode())
return nullptr;
@ -2822,7 +2823,8 @@ class FunctionCompiler
return load;
}
void atomicStoreHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk)
void atomicStoreHeap(AsmJSHeapAccess::ViewType vt, MDefinition *ptr, MDefinition *v,
NeedsBoundsCheck chk)
{
if (inDeadCode())
return;
@ -2833,7 +2835,8 @@ class FunctionCompiler
curBlock_->add(store);
}
MDefinition *atomicCompareExchangeHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *oldv, MDefinition *newv, NeedsBoundsCheck chk)
MDefinition *atomicCompareExchangeHeap(AsmJSHeapAccess::ViewType vt, MDefinition *ptr,
MDefinition *oldv, MDefinition *newv, NeedsBoundsCheck chk)
{
if (inDeadCode())
return nullptr;
@ -2846,7 +2849,8 @@ class FunctionCompiler
return cas;
}
MDefinition *atomicBinopHeap(js::jit::AtomicOp op, Scalar::Type vt, MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk)
MDefinition *atomicBinopHeap(js::jit::AtomicOp op, AsmJSHeapAccess::ViewType vt,
MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk)
{
if (inDeadCode())
return nullptr;
@ -4474,7 +4478,7 @@ CheckLoadArray(FunctionCompiler &f, ParseNode *elem, MDefinition **def, Type *ty
if (!CheckArrayAccess(f, ElemBase(elem), ElemIndex(elem), &viewType, &pointerDef, &needsBoundsCheck))
return false;
*def = f.loadHeap(viewType, pointerDef, needsBoundsCheck);
*def = f.loadHeap(AsmJSHeapAccess::ViewType(viewType), pointerDef, needsBoundsCheck);
*type = TypedArrayLoadType(viewType);
return true;
}
@ -4564,7 +4568,7 @@ CheckStoreArray(FunctionCompiler &f, ParseNode *lhs, ParseNode *rhs, MDefinition
MOZ_CRASH("Unexpected view type");
}
f.storeHeap(viewType, pointerDef, rhsDef, needsBoundsCheck);
f.storeHeap(AsmJSHeapAccess::ViewType(viewType), pointerDef, rhsDef, needsBoundsCheck);
*def = rhsDef;
*type = rhsType;
@ -4832,7 +4836,7 @@ CheckAtomicsLoad(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *
if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck))
return false;
*def = f.atomicLoadHeap(viewType, pointerDef, needsBoundsCheck);
*def = f.atomicLoadHeap(AsmJSHeapAccess::ViewType(viewType), pointerDef, needsBoundsCheck);
*type = Type::Signed;
return true;
}
@ -4861,7 +4865,7 @@ CheckAtomicsStore(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type
if (!rhsType.isIntish())
return f.failf(arrayArg, "%s is not a subtype of intish", rhsType.toChars());
f.atomicStoreHeap(viewType, pointerDef, rhsDef, needsBoundsCheck);
f.atomicStoreHeap(AsmJSHeapAccess::ViewType(viewType), pointerDef, rhsDef, needsBoundsCheck);
*def = rhsDef;
*type = Type::Signed;
@ -4892,7 +4896,8 @@ CheckAtomicsBinop(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type
if (!valueArgType.isIntish())
return f.failf(valueArg, "%s is not a subtype of intish", valueArgType.toChars());
*def = f.atomicBinopHeap(op, viewType, pointerDef, valueArgDef, needsBoundsCheck);
*def = f.atomicBinopHeap(op, AsmJSHeapAccess::ViewType(viewType), pointerDef, valueArgDef,
needsBoundsCheck);
*type = Type::Signed;
return true;
}
@ -4930,7 +4935,8 @@ CheckAtomicsCompareExchange(FunctionCompiler &f, ParseNode *call, MDefinition **
if (!newValueArgType.isIntish())
return f.failf(newValueArg, "%s is not a subtype of intish", newValueArgType.toChars());
*def = f.atomicCompareExchangeHeap(viewType, pointerDef, oldValueArgDef, newValueArgDef, needsBoundsCheck);
*def = f.atomicCompareExchangeHeap(AsmJSHeapAccess::ViewType(viewType), pointerDef,
oldValueArgDef, newValueArgDef, needsBoundsCheck);
*type = Type::Signed;
return true;
}

Просмотреть файл

@ -12092,15 +12092,19 @@ class MAsmJSNeg : public MUnaryInstruction
// Base mix-in for MIR instructions that access the asm.js heap. Records
// the typed-array view type of the access (exposed to subclasses via the
// ViewType typedef) and whether a bounds check is still required.
//
// Stale pre-refactor lines (duplicate Scalar::Type member, constructor,
// and getter left over from the diff rendering) have been removed; this
// is the well-formed post-refactor class.
class MAsmJSHeapAccess
{
  protected:
    typedef AsmJSHeapAccess::ViewType ViewType;

  private:
    ViewType viewType_;
    bool needsBoundsCheck_;

  public:
    MAsmJSHeapAccess(ViewType vt, bool needsBoundsCheck)
      : viewType_(vt), needsBoundsCheck_(needsBoundsCheck)
    {}

    // The typed-array view type (Int8 ... Float64, Uint8Clamped) of this access.
    ViewType viewType() const { return viewType_; }
    bool needsBoundsCheck() const { return needsBoundsCheck_; }
    // Called by bounds-check elimination once the access is proven in range.
    void removeBoundsCheck() { needsBoundsCheck_ = false; }
};
@ -12110,7 +12114,7 @@ class MAsmJSLoadHeap : public MUnaryInstruction, public MAsmJSHeapAccess
MemoryBarrierBits barrierBefore_;
MemoryBarrierBits barrierAfter_;
MAsmJSLoadHeap(Scalar::Type vt, MDefinition *ptr, bool needsBoundsCheck,
MAsmJSLoadHeap(ViewType vt, MDefinition *ptr, bool needsBoundsCheck,
MemoryBarrierBits before, MemoryBarrierBits after)
: MUnaryInstruction(ptr),
MAsmJSHeapAccess(vt, needsBoundsCheck),
@ -12121,18 +12125,31 @@ class MAsmJSLoadHeap : public MUnaryInstruction, public MAsmJSHeapAccess
setGuard(); // Not removable
else
setMovable();
if (vt == Scalar::Float32)
setResultType(MIRType_Float32);
else if (vt == Scalar::Float64)
setResultType(MIRType_Double);
else
switch (vt) {
case AsmJSHeapAccess::Int8:
case AsmJSHeapAccess::Uint8:
case AsmJSHeapAccess::Int16:
case AsmJSHeapAccess::Uint16:
case AsmJSHeapAccess::Int32:
case AsmJSHeapAccess::Uint32:
setResultType(MIRType_Int32);
break;
case AsmJSHeapAccess::Float32:
setResultType(MIRType_Float32);
break;
case AsmJSHeapAccess::Float64:
setResultType(MIRType_Double);
break;
case AsmJSHeapAccess::Uint8Clamped:
MOZ_CRASH("unexpected uint8clamped load heap in asm.js");
}
}
public:
INSTRUCTION_HEADER(AsmJSLoadHeap);
static MAsmJSLoadHeap *New(TempAllocator &alloc, Scalar::Type vt,
static MAsmJSLoadHeap *New(TempAllocator &alloc, ViewType vt,
MDefinition *ptr, bool needsBoundsCheck,
MemoryBarrierBits barrierBefore = MembarNobits,
MemoryBarrierBits barrierAfter = MembarNobits)
@ -12156,7 +12173,7 @@ class MAsmJSStoreHeap : public MBinaryInstruction, public MAsmJSHeapAccess
MemoryBarrierBits barrierBefore_;
MemoryBarrierBits barrierAfter_;
MAsmJSStoreHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *v, bool needsBoundsCheck,
MAsmJSStoreHeap(ViewType vt, MDefinition *ptr, MDefinition *v, bool needsBoundsCheck,
MemoryBarrierBits before, MemoryBarrierBits after)
: MBinaryInstruction(ptr, v),
MAsmJSHeapAccess(vt, needsBoundsCheck),
@ -12170,7 +12187,7 @@ class MAsmJSStoreHeap : public MBinaryInstruction, public MAsmJSHeapAccess
public:
INSTRUCTION_HEADER(AsmJSStoreHeap);
static MAsmJSStoreHeap *New(TempAllocator &alloc, Scalar::Type vt,
static MAsmJSStoreHeap *New(TempAllocator &alloc, ViewType vt,
MDefinition *ptr, MDefinition *v, bool needsBoundsCheck,
MemoryBarrierBits barrierBefore = MembarNobits,
MemoryBarrierBits barrierAfter = MembarNobits)
@ -12191,7 +12208,7 @@ class MAsmJSStoreHeap : public MBinaryInstruction, public MAsmJSHeapAccess
class MAsmJSCompareExchangeHeap : public MTernaryInstruction, public MAsmJSHeapAccess
{
MAsmJSCompareExchangeHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *oldv, MDefinition *newv,
MAsmJSCompareExchangeHeap(ViewType vt, MDefinition *ptr, MDefinition *oldv, MDefinition *newv,
bool needsBoundsCheck)
: MTernaryInstruction(ptr, oldv, newv),
MAsmJSHeapAccess(vt, needsBoundsCheck)
@ -12203,7 +12220,7 @@ class MAsmJSCompareExchangeHeap : public MTernaryInstruction, public MAsmJSHeapA
public:
INSTRUCTION_HEADER(AsmJSCompareExchangeHeap);
static MAsmJSCompareExchangeHeap *New(TempAllocator &alloc, Scalar::Type vt,
static MAsmJSCompareExchangeHeap *New(TempAllocator &alloc, ViewType vt,
MDefinition *ptr, MDefinition *oldv,
MDefinition *newv, bool needsBoundsCheck)
{
@ -12223,7 +12240,7 @@ class MAsmJSAtomicBinopHeap : public MBinaryInstruction, public MAsmJSHeapAccess
{
AtomicOp op_;
MAsmJSAtomicBinopHeap(AtomicOp op, Scalar::Type vt, MDefinition *ptr, MDefinition *v,
MAsmJSAtomicBinopHeap(AtomicOp op, ViewType vt, MDefinition *ptr, MDefinition *v,
bool needsBoundsCheck)
: MBinaryInstruction(ptr, v),
MAsmJSHeapAccess(vt, needsBoundsCheck),
@ -12236,7 +12253,7 @@ class MAsmJSAtomicBinopHeap : public MBinaryInstruction, public MAsmJSHeapAccess
public:
INSTRUCTION_HEADER(AsmJSAtomicBinopHeap);
static MAsmJSAtomicBinopHeap *New(TempAllocator &alloc, AtomicOp op, Scalar::Type vt,
static MAsmJSAtomicBinopHeap *New(TempAllocator &alloc, AtomicOp op, ViewType vt,
MDefinition *ptr, MDefinition *v, bool needsBoundsCheck)
{
return new(alloc) MAsmJSAtomicBinopHeap(op, vt, ptr, v, needsBoundsCheck);

Просмотреть файл

@ -731,11 +731,25 @@ static const unsigned AsmJSNaN32GlobalDataOffset = 2 * sizeof(void*) + sizeof(do
// #ifdefery.
class AsmJSHeapAccess
{
public:
enum ViewType {
Int8 = Scalar::Int8,
Uint8 = Scalar::Uint8,
Int16 = Scalar::Int16,
Uint16 = Scalar::Uint16,
Int32 = Scalar::Int32,
Uint32 = Scalar::Uint32,
Float32 = Scalar::Float32,
Float64 = Scalar::Float64,
Uint8Clamped = Scalar::Uint8Clamped,
};
private:
uint32_t offset_;
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
uint8_t cmpDelta_; // the number of bytes from the cmp to the load/store instruction
uint8_t opLength_; // the length of the load/store instruction
uint8_t isFloat32Load_;
ViewType viewType_;
AnyRegister::Code loadedReg_ : 8;
#endif
@ -748,19 +762,19 @@ class AsmJSHeapAccess
// If 'cmp' equals 'offset' or if it is not supplied then the
// cmpDelta_ is zero indicating that there is no length to patch.
AsmJSHeapAccess(uint32_t offset, uint32_t after, Scalar::Type vt,
AsmJSHeapAccess(uint32_t offset, uint32_t after, ViewType viewType,
AnyRegister loadedReg, uint32_t cmp = NoLengthCheck)
: offset_(offset),
cmpDelta_(cmp == NoLengthCheck ? 0 : offset - cmp),
opLength_(after - offset),
isFloat32Load_(vt == Scalar::Float32),
viewType_(viewType),
loadedReg_(loadedReg.code())
{}
AsmJSHeapAccess(uint32_t offset, uint8_t after, uint32_t cmp = NoLengthCheck)
: offset_(offset),
cmpDelta_(cmp == NoLengthCheck ? 0 : offset - cmp),
opLength_(after - offset),
isFloat32Load_(false),
viewType_(ViewType(-1)),
loadedReg_(UINT8_MAX)
{}
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
@ -779,7 +793,7 @@ class AsmJSHeapAccess
void *patchLengthAt(uint8_t *code) const { return code + (offset_ - cmpDelta_); }
unsigned opLength() const { return opLength_; }
bool isLoad() const { return loadedReg_ != UINT8_MAX; }
bool isFloat32Load() const { return isFloat32Load_; }
ViewType viewType() const { return viewType_; }
AnyRegister loadedReg() const { return AnyRegister::FromCode(loadedReg_); }
#endif
};

Просмотреть файл

@ -346,14 +346,23 @@ CodeGeneratorX86Shared::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins)
bool
CodeGeneratorX86Shared::visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds *ool)
{
if (ool->dest().isFloat()) {
if (ool->isFloat32Load())
masm.loadConstantFloat32(float(GenericNaN()), ool->dest().fpu());
else
masm.loadConstantDouble(GenericNaN(), ool->dest().fpu());
} else {
switch (ool->viewType()) {
case AsmJSHeapAccess::Float32:
masm.loadConstantFloat32(float(GenericNaN()), ool->dest().fpu());
break;
case AsmJSHeapAccess::Float64:
masm.loadConstantDouble(GenericNaN(), ool->dest().fpu());
break;
case AsmJSHeapAccess::Int8:
case AsmJSHeapAccess::Uint8:
case AsmJSHeapAccess::Int16:
case AsmJSHeapAccess::Uint16:
case AsmJSHeapAccess::Int32:
case AsmJSHeapAccess::Uint32:
case AsmJSHeapAccess::Uint8Clamped:
Register destReg = ool->dest().gpr();
masm.mov(ImmWord(0), destReg);
break;
}
masm.jmp(ool->rejoin());
return true;

Просмотреть файл

@ -38,14 +38,14 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
class OutOfLineLoadTypedArrayOutOfBounds : public OutOfLineCodeBase<CodeGeneratorX86Shared>
{
AnyRegister dest_;
bool isFloat32Load_;
AsmJSHeapAccess::ViewType viewType_;
public:
OutOfLineLoadTypedArrayOutOfBounds(AnyRegister dest, bool isFloat32Load)
: dest_(dest), isFloat32Load_(isFloat32Load)
OutOfLineLoadTypedArrayOutOfBounds(AnyRegister dest, AsmJSHeapAccess::ViewType viewType)
: dest_(dest), viewType_(viewType)
{}
AnyRegister dest() const { return dest_; }
bool isFloat32Load() const { return isFloat32Load_; }
AsmJSHeapAccess::ViewType viewType() const { return viewType_; }
bool accept(CodeGeneratorX86Shared *codegen) {
return codegen->visitOutOfLineLoadTypedArrayOutOfBounds(this);
}

Просмотреть файл

@ -269,7 +269,7 @@ bool
CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
{
MAsmJSLoadHeap *mir = ins->mir();
Scalar::Type vt = mir->viewType();
AsmJSHeapAccess::ViewType vt = mir->viewType();
const LAllocation *ptr = ins->ptr();
const LDefinition *out = ins->output();
Operand srcAddr(HeapReg);
@ -286,8 +286,7 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
OutOfLineLoadTypedArrayOutOfBounds *ool = nullptr;
uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
bool isFloat32Load = vt == Scalar::Float32;
ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), isFloat32Load);
ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), vt);
if (!addOutOfLineCode(ool, ins->mir()))
return false;
@ -298,15 +297,15 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
uint32_t before = masm.size();
switch (vt) {
case Scalar::Int8: masm.movsbl(srcAddr, ToRegister(out)); break;
case Scalar::Uint8: masm.movzbl(srcAddr, ToRegister(out)); break;
case Scalar::Int16: masm.movswl(srcAddr, ToRegister(out)); break;
case Scalar::Uint16: masm.movzwl(srcAddr, ToRegister(out)); break;
case Scalar::Int32:
case Scalar::Uint32: masm.movl(srcAddr, ToRegister(out)); break;
case Scalar::Float32: masm.loadFloat32(srcAddr, ToFloatRegister(out)); break;
case Scalar::Float64: masm.loadDouble(srcAddr, ToFloatRegister(out)); break;
default: MOZ_CRASH("unexpected array type");
case AsmJSHeapAccess::Int8: masm.movsbl(srcAddr, ToRegister(out)); break;
case AsmJSHeapAccess::Uint8: masm.movzbl(srcAddr, ToRegister(out)); break;
case AsmJSHeapAccess::Int16: masm.movswl(srcAddr, ToRegister(out)); break;
case AsmJSHeapAccess::Uint16: masm.movzwl(srcAddr, ToRegister(out)); break;
case AsmJSHeapAccess::Int32:
case AsmJSHeapAccess::Uint32: masm.movl(srcAddr, ToRegister(out)); break;
case AsmJSHeapAccess::Float32: masm.loadFloat32(srcAddr, ToFloatRegister(out)); break;
case AsmJSHeapAccess::Float64: masm.loadDouble(srcAddr, ToFloatRegister(out)); break;
case AsmJSHeapAccess::Uint8Clamped: MOZ_CRASH("unexpected array type");
}
uint32_t after = masm.size();
if (ool)
@ -320,7 +319,7 @@ bool
CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
{
MAsmJSStoreHeap *mir = ins->mir();
Scalar::Type vt = mir->viewType();
AsmJSHeapAccess::ViewType vt = mir->viewType();
const LAllocation *ptr = ins->ptr();
Operand dstAddr(HeapReg);
@ -344,25 +343,27 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
uint32_t before = masm.size();
if (ins->value()->isConstant()) {
switch (vt) {
case Scalar::Int8:
case Scalar::Uint8: masm.movb(Imm32(ToInt32(ins->value())), dstAddr); break;
case Scalar::Int16:
case Scalar::Uint16: masm.movw(Imm32(ToInt32(ins->value())), dstAddr); break;
case Scalar::Int32:
case Scalar::Uint32: masm.movl(Imm32(ToInt32(ins->value())), dstAddr); break;
default: MOZ_CRASH("unexpected array type");
case AsmJSHeapAccess::Int8:
case AsmJSHeapAccess::Uint8: masm.movb(Imm32(ToInt32(ins->value())), dstAddr); break;
case AsmJSHeapAccess::Int16:
case AsmJSHeapAccess::Uint16: masm.movw(Imm32(ToInt32(ins->value())), dstAddr); break;
case AsmJSHeapAccess::Int32:
case AsmJSHeapAccess::Uint32: masm.movl(Imm32(ToInt32(ins->value())), dstAddr); break;
case AsmJSHeapAccess::Float32:
case AsmJSHeapAccess::Float64:
case AsmJSHeapAccess::Uint8Clamped: MOZ_CRASH("unexpected array type");
}
} else {
switch (vt) {
case Scalar::Int8:
case Scalar::Uint8: masm.movb(ToRegister(ins->value()), dstAddr); break;
case Scalar::Int16:
case Scalar::Uint16: masm.movw(ToRegister(ins->value()), dstAddr); break;
case Scalar::Int32:
case Scalar::Uint32: masm.movl(ToRegister(ins->value()), dstAddr); break;
case Scalar::Float32: masm.storeFloat32(ToFloatRegister(ins->value()), dstAddr); break;
case Scalar::Float64: masm.storeDouble(ToFloatRegister(ins->value()), dstAddr); break;
default: MOZ_CRASH("unexpected array type");
case AsmJSHeapAccess::Int8:
case AsmJSHeapAccess::Uint8: masm.movb(ToRegister(ins->value()), dstAddr); break;
case AsmJSHeapAccess::Int16:
case AsmJSHeapAccess::Uint16: masm.movw(ToRegister(ins->value()), dstAddr); break;
case AsmJSHeapAccess::Int32:
case AsmJSHeapAccess::Uint32: masm.movl(ToRegister(ins->value()), dstAddr); break;
case AsmJSHeapAccess::Float32: masm.storeFloat32(ToFloatRegister(ins->value()), dstAddr); break;
case AsmJSHeapAccess::Float64: masm.storeDouble(ToFloatRegister(ins->value()), dstAddr); break;
case AsmJSHeapAccess::Uint8Clamped: MOZ_CRASH("unexpected array type");
}
}
uint32_t after = masm.size();
@ -377,7 +378,10 @@ bool
CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
{
MAsmJSCompareExchangeHeap *mir = ins->mir();
Scalar::Type vt = mir->viewType();
MOZ_ASSERT(mir->viewType() <= AsmJSHeapAccess::Uint32);
Scalar::Type vt = Scalar::Type(mir->viewType());
const LAllocation *ptr = ins->ptr();
MOZ_ASSERT(ptr->isRegister());
@ -395,7 +399,7 @@ CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
masm.j(Assembler::LessThan, &goahead);
memoryBarrier(MembarFull);
Register out = ToRegister(ins->output());
masm.xorl(out,out);
masm.xorl(out, out);
masm.jmp(&rejoin);
masm.bind(&goahead);
}
@ -416,7 +420,10 @@ bool
CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
{
MAsmJSAtomicBinopHeap *mir = ins->mir();
Scalar::Type vt = mir->viewType();
MOZ_ASSERT(mir->viewType() <= AsmJSHeapAccess::Uint32);
Scalar::Type vt = Scalar::Type(mir->viewType());
const LAllocation *ptr = ins->ptr();
Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
const LAllocation* value = ins->value();

Просмотреть файл

@ -163,19 +163,19 @@ LIRGeneratorX64::visitAsmJSStoreHeap(MAsmJSStoreHeap *ins)
LAsmJSStoreHeap *lir;
switch (ins->viewType()) {
case Scalar::Int8:
case Scalar::Uint8:
case Scalar::Int16:
case Scalar::Uint16:
case Scalar::Int32:
case Scalar::Uint32:
case AsmJSHeapAccess::Int8:
case AsmJSHeapAccess::Uint8:
case AsmJSHeapAccess::Int16:
case AsmJSHeapAccess::Uint16:
case AsmJSHeapAccess::Int32:
case AsmJSHeapAccess::Uint32:
lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterOrConstantAtStart(ins->value()));
break;
case Scalar::Float32:
case Scalar::Float64:
case AsmJSHeapAccess::Float32:
case AsmJSHeapAccess::Float64:
lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value()));
break;
default:
case AsmJSHeapAccess::Uint8Clamped:
MOZ_CRASH("unexpected array type");
}

Просмотреть файл

@ -267,26 +267,25 @@ CodeGeneratorX86::visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32 *lir)
template<typename T>
void
CodeGeneratorX86::loadViewTypeElement(Scalar::Type vt, const T &srcAddr,
CodeGeneratorX86::loadViewTypeElement(AsmJSHeapAccess::ViewType vt, const T &srcAddr,
const LDefinition *out)
{
switch (vt) {
case Scalar::Int8: masm.movsblWithPatch(srcAddr, ToRegister(out)); break;
case Scalar::Uint8Clamped:
case Scalar::Uint8: masm.movzblWithPatch(srcAddr, ToRegister(out)); break;
case Scalar::Int16: masm.movswlWithPatch(srcAddr, ToRegister(out)); break;
case Scalar::Uint16: masm.movzwlWithPatch(srcAddr, ToRegister(out)); break;
case Scalar::Int32:
case Scalar::Uint32: masm.movlWithPatch(srcAddr, ToRegister(out)); break;
case Scalar::Float32: masm.movssWithPatch(srcAddr, ToFloatRegister(out)); break;
case Scalar::Float64: masm.movsdWithPatch(srcAddr, ToFloatRegister(out)); break;
default: MOZ_CRASH("unexpected array type");
case AsmJSHeapAccess::Int8: masm.movsblWithPatch(srcAddr, ToRegister(out)); break;
case AsmJSHeapAccess::Uint8Clamped:
case AsmJSHeapAccess::Uint8: masm.movzblWithPatch(srcAddr, ToRegister(out)); break;
case AsmJSHeapAccess::Int16: masm.movswlWithPatch(srcAddr, ToRegister(out)); break;
case AsmJSHeapAccess::Uint16: masm.movzwlWithPatch(srcAddr, ToRegister(out)); break;
case AsmJSHeapAccess::Int32:
case AsmJSHeapAccess::Uint32: masm.movlWithPatch(srcAddr, ToRegister(out)); break;
case AsmJSHeapAccess::Float32: masm.movssWithPatch(srcAddr, ToFloatRegister(out)); break;
case AsmJSHeapAccess::Float64: masm.movsdWithPatch(srcAddr, ToFloatRegister(out)); break;
}
}
template<typename T>
bool
CodeGeneratorX86::loadAndNoteViewTypeElement(Scalar::Type vt, const T &srcAddr,
CodeGeneratorX86::loadAndNoteViewTypeElement(AsmJSHeapAccess::ViewType vt, const T &srcAddr,
const LDefinition *out)
{
uint32_t before = masm.size();
@ -300,16 +299,15 @@ bool
CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins)
{
const MLoadTypedArrayElementStatic *mir = ins->mir();
Scalar::Type vt = mir->viewType();
MOZ_ASSERT_IF(vt == Scalar::Float32, mir->type() == MIRType_Float32);
AsmJSHeapAccess::ViewType vt = AsmJSHeapAccess::ViewType(mir->viewType());
MOZ_ASSERT_IF(vt == AsmJSHeapAccess::Float32, mir->type() == MIRType_Float32);
Register ptr = ToRegister(ins->ptr());
const LDefinition *out = ins->output();
OutOfLineLoadTypedArrayOutOfBounds *ool = nullptr;
bool isFloat32Load = (vt == Scalar::Float32);
if (!mir->fallible()) {
ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), isFloat32Load);
ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), vt);
if (!addOutOfLineCode(ool, ins->mir()))
return false;
}
@ -322,9 +320,9 @@ CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic
Address srcAddr(ptr, (int32_t) mir->base());
loadViewTypeElement(vt, srcAddr, out);
if (vt == Scalar::Float64)
if (vt == AsmJSHeapAccess::Float64)
masm.canonicalizeDouble(ToFloatRegister(out));
if (vt == Scalar::Float32)
if (vt == AsmJSHeapAccess::Float32)
masm.canonicalizeFloat(ToFloatRegister(out));
if (ool)
masm.bind(ool->rejoin());
@ -369,7 +367,7 @@ bool
CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
{
const MAsmJSLoadHeap *mir = ins->mir();
Scalar::Type vt = mir->viewType();
AsmJSHeapAccess::ViewType vt = mir->viewType();
const LAllocation *ptr = ins->ptr();
const LDefinition *out = ins->output();
@ -395,8 +393,7 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
return true;
}
bool isFloat32Load = vt == Scalar::Float32;
OutOfLineLoadTypedArrayOutOfBounds *ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), isFloat32Load);
OutOfLineLoadTypedArrayOutOfBounds *ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), vt);
if (!addOutOfLineCode(ool, mir))
return false;
@ -414,27 +411,26 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
template<typename T>
void
CodeGeneratorX86::storeViewTypeElement(Scalar::Type vt, const LAllocation *value,
CodeGeneratorX86::storeViewTypeElement(AsmJSHeapAccess::ViewType vt, const LAllocation *value,
const T &dstAddr)
{
switch (vt) {
case Scalar::Int8:
case Scalar::Uint8Clamped:
case Scalar::Uint8: masm.movbWithPatch(ToRegister(value), dstAddr); break;
case Scalar::Int16:
case Scalar::Uint16: masm.movwWithPatch(ToRegister(value), dstAddr); break;
case Scalar::Int32:
case Scalar::Uint32: masm.movlWithPatch(ToRegister(value), dstAddr); break;
case Scalar::Float32: masm.movssWithPatch(ToFloatRegister(value), dstAddr); break;
case Scalar::Float64: masm.movsdWithPatch(ToFloatRegister(value), dstAddr); break;
default: MOZ_CRASH("unexpected array type");
case AsmJSHeapAccess::Int8:
case AsmJSHeapAccess::Uint8Clamped:
case AsmJSHeapAccess::Uint8: masm.movbWithPatch(ToRegister(value), dstAddr); break;
case AsmJSHeapAccess::Int16:
case AsmJSHeapAccess::Uint16: masm.movwWithPatch(ToRegister(value), dstAddr); break;
case AsmJSHeapAccess::Int32:
case AsmJSHeapAccess::Uint32: masm.movlWithPatch(ToRegister(value), dstAddr); break;
case AsmJSHeapAccess::Float32: masm.movssWithPatch(ToFloatRegister(value), dstAddr); break;
case AsmJSHeapAccess::Float64: masm.movsdWithPatch(ToFloatRegister(value), dstAddr); break;
}
}
template<typename T>
void
CodeGeneratorX86::storeAndNoteViewTypeElement(Scalar::Type vt, const LAllocation *value,
const T &dstAddr)
CodeGeneratorX86::storeAndNoteViewTypeElement(AsmJSHeapAccess::ViewType vt,
const LAllocation *value, const T &dstAddr)
{
uint32_t before = masm.size();
storeViewTypeElement(vt, value, dstAddr);
@ -446,7 +442,7 @@ bool
CodeGeneratorX86::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins)
{
MStoreTypedArrayElementStatic *mir = ins->mir();
Scalar::Type vt = mir->viewType();
AsmJSHeapAccess::ViewType vt = AsmJSHeapAccess::ViewType(mir->viewType());
Register ptr = ToRegister(ins->ptr());
const LAllocation *value = ins->value();
@ -465,7 +461,7 @@ bool
CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
{
MAsmJSStoreHeap *mir = ins->mir();
Scalar::Type vt = mir->viewType();
AsmJSHeapAccess::ViewType vt = mir->viewType();
const LAllocation *value = ins->value();
const LAllocation *ptr = ins->ptr();
@ -508,7 +504,10 @@ bool
CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
{
MAsmJSCompareExchangeHeap *mir = ins->mir();
Scalar::Type vt = mir->viewType();
MOZ_ASSERT(mir->viewType() <= AsmJSHeapAccess::Uint32);
Scalar::Type vt = Scalar::Type(mir->viewType());
const LAllocation *ptr = ins->ptr();
Register oldval = ToRegister(ins->oldValue());
Register newval = ToRegister(ins->newValue());
@ -555,7 +554,10 @@ bool
CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
{
MAsmJSAtomicBinopHeap *mir = ins->mir();
Scalar::Type vt = mir->viewType();
MOZ_ASSERT(mir->viewType() <= AsmJSHeapAccess::Uint32);
Scalar::Type vt = Scalar::Type(mir->viewType());
const LAllocation *ptr = ins->ptr();
Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
const LAllocation* value = ins->value();

Просмотреть файл

@ -29,17 +29,18 @@ class CodeGeneratorX86 : public CodeGeneratorX86Shared
ValueOperand ToTempValue(LInstruction *ins, size_t pos);
template<typename T>
bool loadAndNoteViewTypeElement(Scalar::Type vt, const T &srcAddr,
bool loadAndNoteViewTypeElement(AsmJSHeapAccess::ViewType vt, const T &srcAddr,
const LDefinition *out);
template<typename T>
void loadViewTypeElement(AsmJSHeapAccess::ViewType vt, const T &srcAddr,
const LDefinition *out);
template<typename T>
void loadViewTypeElement(Scalar::Type vt, const T &srcAddr,
const LDefinition *out);
template<typename T>
void storeAndNoteViewTypeElement(Scalar::Type vt, const LAllocation *value,
void storeAndNoteViewTypeElement(AsmJSHeapAccess::ViewType vt, const LAllocation *value,
const T &dstAddr);
template<typename T>
void storeViewTypeElement(Scalar::Type vt, const LAllocation *value,
void storeViewTypeElement(AsmJSHeapAccess::ViewType vt, const LAllocation *value,
const T &dstAddr);
void memoryBarrier(MemoryBarrierBits barrier);
public:

Просмотреть файл

@ -243,34 +243,36 @@ LIRGeneratorX86::visitAsmJSStoreHeap(MAsmJSStoreHeap *ins)
MOZ_ASSERT(ptrValue >= 0);
LAllocation ptrAlloc = LAllocation(ptr->toConstant()->vp());
switch (ins->viewType()) {
case Scalar::Int8: case Scalar::Uint8:
case AsmJSHeapAccess::Int8: case AsmJSHeapAccess::Uint8:
// See comment below.
lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useFixed(ins->value(), eax));
break;
case Scalar::Int16: case Scalar::Uint16:
case Scalar::Int32: case Scalar::Uint32:
case Scalar::Float32: case Scalar::Float64:
case AsmJSHeapAccess::Int16: case AsmJSHeapAccess::Uint16:
case AsmJSHeapAccess::Int32: case AsmJSHeapAccess::Uint32:
case AsmJSHeapAccess::Float32: case AsmJSHeapAccess::Float64:
// See comment below.
lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value()));
break;
default: MOZ_CRASH("unexpected array type");
case AsmJSHeapAccess::Uint8Clamped:
MOZ_CRASH("unexpected array type");
}
return add(lir, ins);
}
switch (ins->viewType()) {
case Scalar::Int8: case Scalar::Uint8:
case AsmJSHeapAccess::Int8: case AsmJSHeapAccess::Uint8:
// See comment for LIRGeneratorX86::useByteOpRegister.
lir = new(alloc()) LAsmJSStoreHeap(useRegister(ins->ptr()), useFixed(ins->value(), eax));
break;
case Scalar::Int16: case Scalar::Uint16:
case Scalar::Int32: case Scalar::Uint32:
case Scalar::Float32: case Scalar::Float64:
case AsmJSHeapAccess::Int16: case AsmJSHeapAccess::Uint16:
case AsmJSHeapAccess::Int32: case AsmJSHeapAccess::Uint32:
case AsmJSHeapAccess::Float32: case AsmJSHeapAccess::Float64:
// For now, don't allow constant values. The immediate operand
// affects instruction layout which affects patching.
lir = new(alloc()) LAsmJSStoreHeap(useRegisterAtStart(ptr), useRegisterAtStart(ins->value()));
break;
default: MOZ_CRASH("unexpected array type");
case AsmJSHeapAccess::Uint8Clamped:
MOZ_CRASH("unexpected array type");
}
return add(lir, ins);