Bug 1267269 - Make MIRType an enum class. r=bbouvier

--HG--
extra : rebase_source : 803c40311e1c879dc55109bb79fb446bd62cb3f9
Jan de Mooij 2016-04-26 16:42:24 +02:00
Parent 2751db2b05
Commit 7f9a1a1ca5
75 changed files with 2748 additions and 2744 deletions
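The change in this patch is mechanical: the plain C-style enum MIRType, whose enumerators carry a hand-written MIRType_ prefix, becomes a C++11 enum class, so every use site switches from MIRType_Foo to the scoped MIRType::Foo. The sketch below only illustrates the pattern; the member list is abbreviated and the IsNumericType helper is invented for the example rather than taken from the tree.

// Before (simplified): a plain enum whose enumerators are prefixed by hand
// and leak into the enclosing namespace.
//
//   enum MIRType {
//       MIRType_None,
//       MIRType_Int32,
//       MIRType_Double,
//       MIRType_Value
//   };
//
// After: an enum class. The prefix becomes real scoping, and the implicit
// conversion to int disappears, so type confusion is caught at compile time.
enum class MIRType {
    None,
    Int32,
    Double,
    Value
    // ... (abbreviated; the real enum has many more members)
};

// Call sites then change exactly the way the hunks below do: every value is
// written with the MIRType:: qualifier. (Illustrative helper only.)
inline bool IsNumericType(MIRType type) {
    return type == MIRType::Int32 || type == MIRType::Double;
}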

View file

@ -127,26 +127,26 @@ class FunctionCompiler
MInstruction* ins = nullptr;
switch (locals_[i]) {
case ValType::I32:
ins = MConstant::NewAsmJS(alloc(), Int32Value(0), MIRType_Int32);
ins = MConstant::NewAsmJS(alloc(), Int32Value(0), MIRType::Int32);
break;
case ValType::I64:
ins = MConstant::NewInt64(alloc(), 0);
break;
case ValType::F32:
ins = MConstant::NewAsmJS(alloc(), Float32Value(0.f), MIRType_Float32);
ins = MConstant::NewAsmJS(alloc(), Float32Value(0.f), MIRType::Float32);
break;
case ValType::F64:
ins = MConstant::NewAsmJS(alloc(), DoubleValue(0.0), MIRType_Double);
ins = MConstant::NewAsmJS(alloc(), DoubleValue(0.0), MIRType::Double);
break;
case ValType::I32x4:
ins = MSimdConstant::New(alloc(), SimdConstant::SplatX4(0), MIRType_Int32x4);
ins = MSimdConstant::New(alloc(), SimdConstant::SplatX4(0), MIRType::Int32x4);
break;
case ValType::F32x4:
ins = MSimdConstant::New(alloc(), SimdConstant::SplatX4(0.f), MIRType_Float32x4);
ins = MSimdConstant::New(alloc(), SimdConstant::SplatX4(0.f), MIRType::Float32x4);
break;
case ValType::B32x4:
// Bool32x4 uses the same data layout as Int32x4.
ins = MSimdConstant::New(alloc(), SimdConstant::SplatX4(0), MIRType_Bool32x4);
ins = MSimdConstant::New(alloc(), SimdConstant::SplatX4(0), MIRType::Bool32x4);
break;
case ValType::Limit:
MOZ_CRASH("Limit");
@ -376,7 +376,7 @@ class FunctionCompiler
if (inDeadCode())
return nullptr;
MSimdAllTrue* ins = MSimdAllTrue::New(alloc(), boolVector, MIRType_Int32);
MSimdAllTrue* ins = MSimdAllTrue::New(alloc(), boolVector, MIRType::Int32);
curBlock_->add(ins);
return ins;
}
@ -386,7 +386,7 @@ class FunctionCompiler
if (inDeadCode())
return nullptr;
MSimdAnyTrue* ins = MSimdAnyTrue::New(alloc(), boolVector, MIRType_Int32);
MSimdAnyTrue* ins = MSimdAnyTrue::New(alloc(), boolVector, MIRType::Int32);
curBlock_->add(ins);
return ins;
}
@ -851,7 +851,7 @@ class FunctionCompiler
MOZ_ASSERT(IsPowerOfTwo(length));
MConstant* mask = MConstant::New(alloc(), Int32Value(length - 1));
curBlock_->add(mask);
MBitAnd* maskedIndex = MBitAnd::NewAsmJS(alloc(), index, mask, MIRType_Int32);
MBitAnd* maskedIndex = MBitAnd::NewAsmJS(alloc(), index, mask, MIRType::Int32);
curBlock_->add(maskedIndex);
ptrFun = MAsmJSLoadFuncPtr::New(alloc(), maskedIndex, globalDataOffset);
curBlock_->add(ptrFun);
@ -970,7 +970,7 @@ class FunctionCompiler
if (inDeadCode())
return;
MOZ_ASSERT(!hasPushed(curBlock_));
if (def && def->type() != MIRType_None)
if (def && def->type() != MIRType::None)
curBlock_->push(def);
}
@ -979,7 +979,7 @@ class FunctionCompiler
if (!hasPushed(curBlock_))
return nullptr;
MDefinition* def = curBlock_->pop();
MOZ_ASSERT(def->type() != MIRType_Value);
MOZ_ASSERT(def->type() != MIRType::Value);
return def;
}
@ -1434,7 +1434,7 @@ EmitLiteral(FunctionCompiler& f, ValType type, MDefinition** def)
switch (type) {
case ValType::I32: {
int32_t val = f.readVarS32();
*def = f.constant(Int32Value(val), MIRType_Int32);
*def = f.constant(Int32Value(val), MIRType::Int32);
return true;
}
case ValType::I64: {
@ -1444,28 +1444,28 @@ EmitLiteral(FunctionCompiler& f, ValType type, MDefinition** def)
}
case ValType::F32: {
float val = f.readF32();
*def = f.constant(Float32Value(val), MIRType_Float32);
*def = f.constant(Float32Value(val), MIRType::Float32);
return true;
}
case ValType::F64: {
double val = f.readF64();
*def = f.constant(DoubleValue(val), MIRType_Double);
*def = f.constant(DoubleValue(val), MIRType::Double);
return true;
}
case ValType::I32x4: {
SimdConstant lit(f.readI32X4());
*def = f.constant(lit, MIRType_Int32x4);
*def = f.constant(lit, MIRType::Int32x4);
return true;
}
case ValType::F32x4: {
SimdConstant lit(f.readF32X4());
*def = f.constant(lit, MIRType_Float32x4);
*def = f.constant(lit, MIRType::Float32x4);
return true;
}
case ValType::B32x4: {
// Boolean vectors are stored as an Int vector with -1 / 0 lanes.
SimdConstant lit(f.readI32X4());
*def = f.constant(lit, MIRType_Bool32x4);
*def = f.constant(lit, MIRType::Bool32x4);
return true;
}
case ValType::Limit:
@ -1518,8 +1518,8 @@ EmitHeapAddress(FunctionCompiler& f, MDefinition** base, MAsmJSHeapAccess* acces
// Assume worst case.
bool atomicAccess = true;
if (endOffset > f.mirGen().foldableOffsetRange(accessNeedsBoundsCheck, atomicAccess)) {
MDefinition* rhs = f.constant(Int32Value(offset), MIRType_Int32);
*base = f.binary<MAdd>(*base, rhs, MIRType_Int32);
MDefinition* rhs = f.constant(Int32Value(offset), MIRType::Int32);
*base = f.binary<MAdd>(*base, rhs, MIRType::Int32);
offset = 0;
access->setOffset(offset);
}
@ -1979,7 +1979,7 @@ EmitSimdBooleanLaneExpr(FunctionCompiler& f, MDefinition** def)
return false;
// Now compute !i32 - 1 to force the value range into {0, -1}.
MDefinition* noti32 = f.unary<MNot>(i32);
*def = f.binary<MSub>(noti32, f.constant(Int32Value(1), MIRType_Int32), MIRType_Int32);
*def = f.binary<MSub>(noti32, f.constant(Int32Value(1), MIRType::Int32), MIRType::Int32);
return true;
}
@ -2184,7 +2184,7 @@ EmitSimdCtor(FunctionCompiler& f, ValType type, MDefinition** def)
if (!EmitExpr(f, &args[i]))
return false;
}
*def = f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3], MIRType_Int32x4);
*def = f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3], MIRType::Int32x4);
return true;
}
case ValType::F32x4: {
@ -2193,7 +2193,7 @@ EmitSimdCtor(FunctionCompiler& f, ValType type, MDefinition** def)
if (!EmitExpr(f, &args[i]))
return false;
}
*def = f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3], MIRType_Float32x4);
*def = f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3], MIRType::Float32x4);
return true;
}
case ValType::B32x4: {
@ -2202,7 +2202,7 @@ EmitSimdCtor(FunctionCompiler& f, ValType type, MDefinition** def)
if (!EmitSimdBooleanLaneExpr(f, &args[i]))
return false;
}
*def = f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3], MIRType_Bool32x4);
*def = f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3], MIRType::Bool32x4);
return true;
}
case ValType::I32:
@ -2247,7 +2247,7 @@ EmitMultiply(FunctionCompiler& f, ValType type, MDefinition** def)
if (!EmitExpr(f, &rhs))
return false;
MIRType mirType = ToMIRType(type);
*def = f.mul(lhs, rhs, mirType, mirType == MIRType_Int32 ? MMul::Integer : MMul::Normal);
*def = f.mul(lhs, rhs, mirType, mirType == MIRType::Int32 ? MMul::Integer : MMul::Normal);
return true;
}
@ -2268,7 +2268,7 @@ EmitSelect(FunctionCompiler& f, MDefinition** def)
if (trueExpr && falseExpr &&
trueExpr->type() == falseExpr->type() &&
trueExpr->type() != MIRType_None)
trueExpr->type() != MIRType::None)
{
*def = f.select(trueExpr, falseExpr, condExpr);
} else {

View file

@ -166,12 +166,12 @@ wasm::GenerateEntry(MacroAssembler& masm, unsigned target, const Sig& sig, bool
unsigned argOffset = iter.index() * Module::SizeOfEntryArg;
Address src(argv, argOffset);
MIRType type = iter.mirType();
MOZ_ASSERT_IF(type == MIRType_Int64, JitOptions.wasmTestMode);
MOZ_ASSERT_IF(type == MIRType::Int64, JitOptions.wasmTestMode);
switch (iter->kind()) {
case ABIArg::GPR:
if (type == MIRType_Int32)
if (type == MIRType::Int32)
masm.load32(src, iter->gpr());
else if (type == MIRType_Int64)
else if (type == MIRType::Int64)
masm.load64(src, iter->gpr64());
break;
#ifdef JS_CODEGEN_REGISTER_PAIR
@ -183,17 +183,17 @@ wasm::GenerateEntry(MacroAssembler& masm, unsigned target, const Sig& sig, bool
static_assert(Module::SizeOfEntryArg >= jit::Simd128DataSize,
"EntryArg must be big enough to store SIMD values");
switch (type) {
case MIRType_Int32x4:
case MIRType_Bool32x4:
case MIRType::Int32x4:
case MIRType::Bool32x4:
masm.loadUnalignedInt32x4(src, iter->fpu());
break;
case MIRType_Float32x4:
case MIRType::Float32x4:
masm.loadUnalignedFloat32x4(src, iter->fpu());
break;
case MIRType_Double:
case MIRType::Double:
masm.loadDouble(src, iter->fpu());
break;
case MIRType_Float32:
case MIRType::Float32:
masm.loadFloat32(src, iter->fpu());
break;
default:
@ -204,29 +204,29 @@ wasm::GenerateEntry(MacroAssembler& masm, unsigned target, const Sig& sig, bool
}
case ABIArg::Stack:
switch (type) {
case MIRType_Int32:
case MIRType::Int32:
masm.load32(src, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
case MIRType_Int64:
case MIRType::Int64:
masm.load64(src, scratch64);
masm.store64(scratch64, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
case MIRType_Double:
case MIRType::Double:
masm.loadDouble(src, ScratchDoubleReg);
masm.storeDouble(ScratchDoubleReg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
case MIRType_Float32:
case MIRType::Float32:
masm.loadFloat32(src, ScratchFloat32Reg);
masm.storeFloat32(ScratchFloat32Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
case MIRType_Int32x4:
case MIRType_Bool32x4:
case MIRType::Int32x4:
case MIRType::Bool32x4:
masm.loadUnalignedInt32x4(src, ScratchSimd128Reg);
masm.storeAlignedInt32x4(ScratchSimd128Reg,
Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
case MIRType_Float32x4:
case MIRType::Float32x4:
masm.loadUnalignedFloat32x4(src, ScratchSimd128Reg);
masm.storeAlignedFloat32x4(ScratchSimd128Reg,
Address(masm.getStackPointer(), iter->offsetFromArgBase()));
@ -303,16 +303,16 @@ FillArgumentArray(MacroAssembler& masm, const ValTypeVector& args, unsigned argO
Address dstAddr(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));
MIRType type = i.mirType();
MOZ_ASSERT_IF(type == MIRType_Int64, JitOptions.wasmTestMode);
MOZ_ASSERT_IF(type == MIRType::Int64, JitOptions.wasmTestMode);
switch (i->kind()) {
case ABIArg::GPR:
if (type == MIRType_Int32) {
if (type == MIRType::Int32) {
if (toValue)
masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dstAddr);
else
masm.store32(i->gpr(), dstAddr);
} else if (type == MIRType_Int64) {
} else if (type == MIRType::Int64) {
// We can't box int64 into Values (yet).
if (toValue)
masm.breakpoint();
@ -331,7 +331,7 @@ FillArgumentArray(MacroAssembler& masm, const ValTypeVector& args, unsigned argO
MOZ_ASSERT(IsFloatingPointType(type));
FloatRegister srcReg = i->fpu();
if (toValue) {
if (type == MIRType_Float32) {
if (type == MIRType::Float32) {
masm.convertFloat32ToDouble(i->fpu(), ScratchDoubleReg);
srcReg = ScratchDoubleReg;
}
@ -341,14 +341,14 @@ FillArgumentArray(MacroAssembler& masm, const ValTypeVector& args, unsigned argO
break;
}
case ABIArg::Stack:
if (type == MIRType_Int32) {
if (type == MIRType::Int32) {
Address src(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
masm.load32(src, scratch);
if (toValue)
masm.storeValue(JSVAL_TYPE_INT32, scratch, dstAddr);
else
masm.store32(scratch, dstAddr);
} else if (type == MIRType_Int64) {
} else if (type == MIRType::Int64) {
// We can't box int64 into Values (yet).
if (toValue) {
masm.breakpoint();
@ -361,7 +361,7 @@ FillArgumentArray(MacroAssembler& masm, const ValTypeVector& args, unsigned argO
MOZ_ASSERT(IsFloatingPointType(type));
Address src(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
if (toValue) {
if (type == MIRType_Float32) {
if (type == MIRType::Float32) {
masm.loadFloat32(src, ScratchFloat32Reg);
masm.convertFloat32ToDouble(ScratchFloat32Reg, ScratchDoubleReg);
} else {
@ -389,9 +389,9 @@ wasm::GenerateInterpExit(MacroAssembler& masm, const Import& import, uint32_t im
masm.setFramePushed(0);
// Argument types for InvokeImport_*:
static const MIRType typeArray[] = { MIRType_Pointer, // ImportExit
MIRType_Int32, // argc
MIRType_Pointer }; // argv
static const MIRType typeArray[] = { MIRType::Pointer, // ImportExit
MIRType::Int32, // argc
MIRType::Pointer }; // argv
MIRTypeVector invokeArgTypes;
MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, ArrayLength(typeArray)));
@ -745,7 +745,7 @@ wasm::GenerateJitExit(MacroAssembler& masm, const Import& import, bool usesHeap)
// Coercion calls use the following stack layout (sp grows to the left):
// | args | padding | Value argv[1] | padding | exit AsmJSFrame |
MIRTypeVector coerceArgTypes;
JS_ALWAYS_TRUE(coerceArgTypes.append(MIRType_Pointer));
JS_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
unsigned offsetToCoerceArgv = AlignBytes(StackArgBytes(coerceArgTypes), sizeof(Value));
MOZ_ASSERT(nativeFramePushed >= offsetToCoerceArgv + sizeof(Value));
AssertStackAlignment(masm, ABIStackAlignment);

View file

@ -89,13 +89,13 @@ static inline jit::MIRType
ToMIRType(ValType vt)
{
switch (vt) {
case ValType::I32: return jit::MIRType_Int32;
case ValType::I64: return jit::MIRType_Int64;
case ValType::F32: return jit::MIRType_Float32;
case ValType::F64: return jit::MIRType_Double;
case ValType::I32x4: return jit::MIRType_Int32x4;
case ValType::F32x4: return jit::MIRType_Float32x4;
case ValType::B32x4: return jit::MIRType_Bool32x4;
case ValType::I32: return jit::MIRType::Int32;
case ValType::I64: return jit::MIRType::Int64;
case ValType::F32: return jit::MIRType::Float32;
case ValType::F64: return jit::MIRType::Double;
case ValType::I32x4: return jit::MIRType::Int32x4;
case ValType::F32x4: return jit::MIRType::Float32x4;
case ValType::B32x4: return jit::MIRType::Bool32x4;
case ValType::Limit: break;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("bad type");
@ -104,7 +104,7 @@ ToMIRType(ValType vt)
static inline jit::MIRType
ToMIRType(ExprType et)
{
return IsVoid(et) ? jit::MIRType_None : ToMIRType(ValType(et));
return IsVoid(et) ? jit::MIRType::None : ToMIRType(ValType(et));
}
static inline const char*

View file

@ -37,7 +37,7 @@ AnalyzeAsmHeapAddress(MDefinition* ptr, MIRGraph& graph)
// Putting the add on the outside might seem like it exposes other users of
// the expression to the possibility of i32 overflow, if we aren't in asm.js
// and they aren't naturally truncating. However, since we use MAdd::NewAsmJS
// with MIRType_Int32, we make sure that the value is truncated, just as it
// with MIRType::Int32, we make sure that the value is truncated, just as it
// would be by the MBitAnd.
MOZ_ASSERT(IsCompilingAsmJS());
@ -65,9 +65,9 @@ AnalyzeAsmHeapAddress(MDefinition* ptr, MIRGraph& graph)
return;
// The pattern was matched! Produce the replacement expression.
MInstruction* and_ = MBitAnd::NewAsmJS(graph.alloc(), op0, rhs, MIRType_Int32);
MInstruction* and_ = MBitAnd::NewAsmJS(graph.alloc(), op0, rhs, MIRType::Int32);
ptr->block()->insertBefore(ptr->toBitAnd(), and_);
MInstruction* add = MAdd::NewAsmJS(graph.alloc(), and_, op1, MIRType_Int32);
MInstruction* add = MAdd::NewAsmJS(graph.alloc(), and_, op1, MIRType::Int32);
ptr->block()->insertBefore(ptr->toBitAnd(), add);
ptr->replaceAllUsesWith(add);
ptr->block()->discard(ptr->toBitAnd());

View file

@ -2495,7 +2495,7 @@ BaselineCompiler::emit_JSOP_SETALIASEDVAR()
getScopeCoordinateObject(objReg);
Address address = getScopeCoordinateAddressFromObject(objReg, R1.scratchReg());
masm.patchableCallPreBarrier(address, MIRType_Value);
masm.patchableCallPreBarrier(address, MIRType::Value);
masm.storeValue(R0, address);
frame.push(R0);
@ -2905,7 +2905,7 @@ BaselineCompiler::emitFormalArgAccess(uint32_t arg, bool get)
masm.loadValue(argAddr, R0);
frame.push(R0);
} else {
masm.patchableCallPreBarrier(argAddr, MIRType_Value);
masm.patchableCallPreBarrier(argAddr, MIRType::Value);
masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
masm.storeValue(R0, argAddr);
@ -3941,7 +3941,7 @@ BaselineCompiler::emit_JSOP_INITIALYIELD()
Register scopeObj = R0.scratchReg();
Address scopeChainSlot(genObj, GeneratorObject::offsetOfScopeChainSlot());
masm.loadPtr(frame.addressOfScopeChain(), scopeObj);
masm.patchableCallPreBarrier(scopeChainSlot, MIRType_Value);
masm.patchableCallPreBarrier(scopeChainSlot, MIRType::Value);
masm.storeValue(JSVAL_TYPE_OBJECT, scopeObj, scopeChainSlot);
Register temp = R1.scratchReg();
@ -3986,7 +3986,7 @@ BaselineCompiler::emit_JSOP_YIELD()
Register scopeObj = R0.scratchReg();
Address scopeChainSlot(genObj, GeneratorObject::offsetOfScopeChainSlot());
masm.loadPtr(frame.addressOfScopeChain(), scopeObj);
masm.patchableCallPreBarrier(scopeChainSlot, MIRType_Value);
masm.patchableCallPreBarrier(scopeChainSlot, MIRType::Value);
masm.storeValue(JSVAL_TYPE_OBJECT, scopeObj, scopeChainSlot);
Register temp = R1.scratchReg();
@ -4214,7 +4214,7 @@ BaselineCompiler::emit_JSOP_RESUME()
}
masm.bind(&loopDone);
masm.patchableCallPreBarrier(exprStackSlot, MIRType_Value);
masm.patchableCallPreBarrier(exprStackSlot, MIRType::Value);
masm.storeValue(NullValue(), exprStackSlot);
regs.add(initLength);
}

View file

@ -2737,9 +2737,9 @@ void
EmitUnboxedPreBarrierForBaseline(MacroAssembler &masm, T address, JSValueType type)
{
if (type == JSVAL_TYPE_OBJECT)
EmitPreBarrier(masm, address, MIRType_Object);
EmitPreBarrier(masm, address, MIRType::Object);
else if (type == JSVAL_TYPE_STRING)
EmitPreBarrier(masm, address, MIRType_String);
EmitPreBarrier(masm, address, MIRType::String);
else
MOZ_ASSERT(!UnboxedTypeNeedsPreBarrier(type));
}
@ -2856,7 +2856,7 @@ ICSetElem_DenseOrUnboxedArray::Compiler::generateStubCode(MacroAssembler& masm)
ValueOperand tmpVal = regs.takeAnyValue();
masm.loadValue(valueAddr, tmpVal);
EmitPreBarrier(masm, element, MIRType_Value);
EmitPreBarrier(masm, element, MIRType::Value);
masm.storeValue(tmpVal, element);
} else {
// Set element on an unboxed array.
@ -4749,7 +4749,7 @@ ICSetProp_Native::Compiler::generateStubCode(MacroAssembler& masm)
// Perform the store.
masm.load32(Address(ICStubReg, ICSetProp_Native::offsetOfOffset()), scratch);
EmitPreBarrier(masm, BaseIndex(holderReg, scratch, TimesOne), MIRType_Value);
EmitPreBarrier(masm, BaseIndex(holderReg, scratch, TimesOne), MIRType::Value);
masm.storeValue(R1, BaseIndex(holderReg, scratch, TimesOne));
if (holderReg != objReg)
regs.add(holderReg);
@ -4867,7 +4867,7 @@ ICSetPropNativeAddCompiler::generateStubCode(MacroAssembler& masm)
// Change the object's group.
Address groupAddr(objReg, JSObject::offsetOfGroup());
EmitPreBarrier(masm, groupAddr, MIRType_ObjectGroup);
EmitPreBarrier(masm, groupAddr, MIRType::ObjectGroup);
masm.storePtr(scratch, groupAddr);
masm.bind(&noGroupChange);
@ -4883,7 +4883,7 @@ ICSetPropNativeAddCompiler::generateStubCode(MacroAssembler& masm)
// Write the expando object's new shape.
Address shapeAddr(holderReg, JSObject::offsetOfShape());
EmitPreBarrier(masm, shapeAddr, MIRType_Shape);
EmitPreBarrier(masm, shapeAddr, MIRType::Shape);
masm.loadPtr(Address(ICStubReg, ICSetProp_NativeAdd::offsetOfNewShape()), scratch);
masm.storePtr(scratch, shapeAddr);
@ -4892,7 +4892,7 @@ ICSetPropNativeAddCompiler::generateStubCode(MacroAssembler& masm)
} else {
// Write the object's new shape.
Address shapeAddr(objReg, JSObject::offsetOfShape());
EmitPreBarrier(masm, shapeAddr, MIRType_Shape);
EmitPreBarrier(masm, shapeAddr, MIRType::Shape);
masm.loadPtr(Address(ICStubReg, ICSetProp_NativeAdd::offsetOfNewShape()), scratch);
masm.storePtr(scratch, shapeAddr);
@ -5078,12 +5078,12 @@ ICSetProp_TypedObject::Compiler::generateStubCode(MacroAssembler& masm)
switch (type) {
case ReferenceTypeDescr::TYPE_ANY:
EmitPreBarrier(masm, dest, MIRType_Value);
EmitPreBarrier(masm, dest, MIRType::Value);
masm.storeValue(R1, dest);
break;
case ReferenceTypeDescr::TYPE_OBJECT: {
EmitPreBarrier(masm, dest, MIRType_Object);
EmitPreBarrier(masm, dest, MIRType::Object);
Label notObject;
masm.branchTestObject(Assembler::NotEqual, R1, &notObject);
Register rhsObject = masm.extractObject(R1, ExtractTemp0);
@ -5096,7 +5096,7 @@ ICSetProp_TypedObject::Compiler::generateStubCode(MacroAssembler& masm)
}
case ReferenceTypeDescr::TYPE_STRING: {
EmitPreBarrier(masm, dest, MIRType_String);
EmitPreBarrier(masm, dest, MIRType::String);
masm.branchTestString(Assembler::NotEqual, R1, &failure);
Register rhsString = masm.extractString(R1, ExtractTemp0);
masm.storePtr(rhsString, dest);

View file

@ -254,29 +254,29 @@ MIRType
BaselineInspector::expectedResultType(jsbytecode* pc)
{
// Look at the IC entries for this op to guess what type it will produce,
// returning MIRType_None otherwise.
// returning MIRType::None otherwise.
ICStub* stub = monomorphicStub(pc);
if (!stub)
return MIRType_None;
return MIRType::None;
switch (stub->kind()) {
case ICStub::BinaryArith_Int32:
if (stub->toBinaryArith_Int32()->allowDouble())
return MIRType_Double;
return MIRType_Int32;
return MIRType::Double;
return MIRType::Int32;
case ICStub::BinaryArith_BooleanWithInt32:
case ICStub::UnaryArith_Int32:
case ICStub::BinaryArith_DoubleWithInt32:
return MIRType_Int32;
return MIRType::Int32;
case ICStub::BinaryArith_Double:
case ICStub::UnaryArith_Double:
return MIRType_Double;
return MIRType::Double;
case ICStub::BinaryArith_StringConcat:
case ICStub::BinaryArith_StringObjectConcat:
return MIRType_String;
return MIRType::String;
default:
return MIRType_None;
return MIRType::None;
}
}
@ -376,12 +376,12 @@ TryToSpecializeBinaryArithOp(ICStub** stubs,
return false;
if (sawDouble) {
*result = MIRType_Double;
*result = MIRType::Double;
return true;
}
MOZ_ASSERT(sawInt32);
*result = MIRType_Int32;
*result = MIRType::Int32;
return true;
}
@ -389,7 +389,7 @@ MIRType
BaselineInspector::expectedBinaryArithSpecialization(jsbytecode* pc)
{
if (!hasBaselineScript())
return MIRType_None;
return MIRType::None;
MIRType result;
ICStub* stubs[2];
@ -399,7 +399,7 @@ BaselineInspector::expectedBinaryArithSpecialization(jsbytecode* pc)
if (stub->isBinaryArith_Fallback() &&
stub->toBinaryArith_Fallback()->hadUnoptimizableOperands())
{
return MIRType_None;
return MIRType::None;
}
stubs[0] = monomorphicStub(pc);
@ -413,7 +413,7 @@ BaselineInspector::expectedBinaryArithSpecialization(jsbytecode* pc)
return result;
}
return MIRType_None;
return MIRType::None;
}
bool
@ -772,38 +772,38 @@ GetCacheIRExpectedInputType(ICCacheIR_Monitored* stub)
// For now, all CacheIR stubs expect an object.
MOZ_ALWAYS_TRUE(reader.matchOp(CacheOp::GuardIsObject, ObjOperandId(0)));
return MIRType_Object;
return MIRType::Object;
}
MIRType
BaselineInspector::expectedPropertyAccessInputType(jsbytecode* pc)
{
if (!hasBaselineScript())
return MIRType_Value;
return MIRType::Value;
const ICEntry& entry = icEntryFromPC(pc);
MIRType type = MIRType_None;
MIRType type = MIRType::None;
for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
MIRType stubType;
switch (stub->kind()) {
case ICStub::GetProp_Fallback:
if (stub->toGetProp_Fallback()->hadUnoptimizableAccess())
return MIRType_Value;
return MIRType::Value;
continue;
case ICStub::GetElem_Fallback:
if (stub->toGetElem_Fallback()->hadUnoptimizableAccess())
return MIRType_Value;
return MIRType::Value;
continue;
case ICStub::GetProp_Generic:
return MIRType_Value;
return MIRType::Value;
case ICStub::GetProp_ArgumentsLength:
case ICStub::GetElem_Arguments:
// Either an object or magic arguments.
return MIRType_Value;
return MIRType::Value;
case ICStub::GetProp_Unboxed:
case ICStub::GetProp_TypedObject:
@ -825,7 +825,7 @@ BaselineInspector::expectedPropertyAccessInputType(jsbytecode* pc)
case ICStub::GetElem_Dense:
case ICStub::GetElem_TypedArray:
case ICStub::GetElem_UnboxedArray:
stubType = MIRType_Object;
stubType = MIRType::Object;
break;
case ICStub::GetProp_Primitive:
@ -833,28 +833,28 @@ BaselineInspector::expectedPropertyAccessInputType(jsbytecode* pc)
break;
case ICStub::GetProp_StringLength:
stubType = MIRType_String;
stubType = MIRType::String;
break;
case ICStub::CacheIR_Monitored:
stubType = GetCacheIRExpectedInputType(stub->toCacheIR_Monitored());
if (stubType == MIRType_Value)
return MIRType_Value;
if (stubType == MIRType::Value)
return MIRType::Value;
break;
default:
MOZ_CRASH("Unexpected stub");
}
if (type != MIRType_None) {
if (type != MIRType::None) {
if (type != stubType)
return MIRType_Value;
return MIRType::Value;
} else {
type = stubType;
}
}
return (type == MIRType_None) ? MIRType_Value : type;
return (type == MIRType::None) ? MIRType::Value : type;
}
bool

View file

@ -197,7 +197,7 @@ CodeGenerator::visitValueToInt32(LValueToInt32* lir)
Label* stringEntry;
Label* stringRejoin;
Register stringReg;
if (input->mightBeType(MIRType_String)) {
if (input->mightBeType(MIRType::String)) {
stringReg = ToRegister(lir->temp());
OutOfLineCode* oolString = oolCallVM(StringToNumberInfo, lir, ArgList(stringReg),
StoreFloatRegisterTo(temp));
@ -516,14 +516,14 @@ CodeGenerator::testValueTruthyKernel(const ValueOperand& value,
// last one. In particular, whenever tagCount is 1 that means we've tried
// all but one of them already so we know exactly what's left based on the
// mightBe* booleans.
bool mightBeUndefined = valueMIR->mightBeType(MIRType_Undefined);
bool mightBeNull = valueMIR->mightBeType(MIRType_Null);
bool mightBeBoolean = valueMIR->mightBeType(MIRType_Boolean);
bool mightBeInt32 = valueMIR->mightBeType(MIRType_Int32);
bool mightBeObject = valueMIR->mightBeType(MIRType_Object);
bool mightBeString = valueMIR->mightBeType(MIRType_String);
bool mightBeSymbol = valueMIR->mightBeType(MIRType_Symbol);
bool mightBeDouble = valueMIR->mightBeType(MIRType_Double);
bool mightBeUndefined = valueMIR->mightBeType(MIRType::Undefined);
bool mightBeNull = valueMIR->mightBeType(MIRType::Null);
bool mightBeBoolean = valueMIR->mightBeType(MIRType::Boolean);
bool mightBeInt32 = valueMIR->mightBeType(MIRType::Int32);
bool mightBeObject = valueMIR->mightBeType(MIRType::Object);
bool mightBeString = valueMIR->mightBeType(MIRType::String);
bool mightBeSymbol = valueMIR->mightBeType(MIRType::Symbol);
bool mightBeDouble = valueMIR->mightBeType(MIRType::Double);
int tagCount = int(mightBeUndefined) + int(mightBeNull) +
int(mightBeBoolean) + int(mightBeInt32) + int(mightBeObject) +
int(mightBeString) + int(mightBeSymbol) + int(mightBeDouble);
@ -677,7 +677,7 @@ void
CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir)
{
MIRType inputType = lir->mir()->input()->type();
MOZ_ASSERT(inputType == MIRType_ObjectOrNull || lir->mir()->operandMightEmulateUndefined(),
MOZ_ASSERT(inputType == MIRType::ObjectOrNull || lir->mir()->operandMightEmulateUndefined(),
"If the object couldn't emulate undefined, this should have been folded.");
Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
@ -685,7 +685,7 @@ CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir)
Register input = ToRegister(lir->input());
if (lir->mir()->operandMightEmulateUndefined()) {
if (inputType == MIRType_ObjectOrNull)
if (inputType == MIRType::ObjectOrNull)
masm.branchTestPtr(Assembler::Zero, input, input, falsy);
OutOfLineTestObject* ool = new(alloc()) OutOfLineTestObject();
@ -693,7 +693,7 @@ CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir)
testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()), ool);
} else {
MOZ_ASSERT(inputType == MIRType_ObjectOrNull);
MOZ_ASSERT(inputType == MIRType::ObjectOrNull);
testZeroEmitBranch(Assembler::NotEqual, input, lir->ifTruthy(), lir->ifFalsy());
}
}
@ -707,7 +707,7 @@ CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir)
// out our input after we did cacheOperandMightEmulateUndefined. So we
// might think it can emulate undefined _and_ know that it can't be an
// object.
if (lir->mir()->operandMightEmulateUndefined() && input->mightBeType(MIRType_Object)) {
if (lir->mir()->operandMightEmulateUndefined() && input->mightBeType(MIRType::Object)) {
ool = new(alloc()) OutOfLineTestObject();
addOutOfLineCode(ool, lir->mir());
}
@ -905,7 +905,7 @@ CodeGenerator::visitValueToString(LValueToString* lir)
const JSAtomState& names = GetJitContext()->runtime->names();
// String
if (lir->mir()->input()->mightBeType(MIRType_String)) {
if (lir->mir()->input()->mightBeType(MIRType::String)) {
Label notString;
masm.branchTestString(Assembler::NotEqual, tag, &notString);
masm.unboxString(input, output);
@ -914,7 +914,7 @@ CodeGenerator::visitValueToString(LValueToString* lir)
}
// Integer
if (lir->mir()->input()->mightBeType(MIRType_Int32)) {
if (lir->mir()->input()->mightBeType(MIRType::Int32)) {
Label notInteger;
masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
Register unboxed = ToTempUnboxRegister(lir->tempToUnbox());
@ -925,14 +925,14 @@ CodeGenerator::visitValueToString(LValueToString* lir)
}
// Double
if (lir->mir()->input()->mightBeType(MIRType_Double)) {
if (lir->mir()->input()->mightBeType(MIRType::Double)) {
// Note: no fastpath. Need two extra registers and can only convert doubles
// that fit integers and are smaller than StaticStrings::INT_STATIC_LIMIT.
masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
}
// Undefined
if (lir->mir()->input()->mightBeType(MIRType_Undefined)) {
if (lir->mir()->input()->mightBeType(MIRType::Undefined)) {
Label notUndefined;
masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
masm.movePtr(ImmGCPtr(names.undefined), output);
@ -941,7 +941,7 @@ CodeGenerator::visitValueToString(LValueToString* lir)
}
// Null
if (lir->mir()->input()->mightBeType(MIRType_Null)) {
if (lir->mir()->input()->mightBeType(MIRType::Null)) {
Label notNull;
masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
masm.movePtr(ImmGCPtr(names.null), output);
@ -950,7 +950,7 @@ CodeGenerator::visitValueToString(LValueToString* lir)
}
// Boolean
if (lir->mir()->input()->mightBeType(MIRType_Boolean)) {
if (lir->mir()->input()->mightBeType(MIRType::Boolean)) {
Label notBoolean, true_;
masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
masm.branchTestBooleanTruthy(true, input, &true_);
@ -963,7 +963,7 @@ CodeGenerator::visitValueToString(LValueToString* lir)
}
// Object
if (lir->mir()->input()->mightBeType(MIRType_Object)) {
if (lir->mir()->input()->mightBeType(MIRType::Object)) {
// Bail.
MOZ_ASSERT(lir->mir()->fallible());
Label bail;
@ -972,7 +972,7 @@ CodeGenerator::visitValueToString(LValueToString* lir)
}
// Symbol
if (lir->mir()->input()->mightBeType(MIRType_Symbol))
if (lir->mir()->input()->mightBeType(MIRType::Symbol))
masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());
#ifdef DEBUG
@ -1218,9 +1218,9 @@ PrepareAndExecuteRegExp(JSContext* cx, MacroAssembler& masm, Register regexp, Re
Address lazySourceAddress(temp1, RegExpStatics::offsetOfLazySource());
Address lazyIndexAddress(temp1, RegExpStatics::offsetOfLazyIndex());
masm.patchableCallPreBarrier(pendingInputAddress, MIRType_String);
masm.patchableCallPreBarrier(matchesInputAddress, MIRType_String);
masm.patchableCallPreBarrier(lazySourceAddress, MIRType_String);
masm.patchableCallPreBarrier(pendingInputAddress, MIRType::String);
masm.patchableCallPreBarrier(matchesInputAddress, MIRType::String);
masm.patchableCallPreBarrier(lazySourceAddress, MIRType::String);
masm.storePtr(input, pendingInputAddress);
masm.storePtr(input, matchesInputAddress);
@ -2669,7 +2669,7 @@ CodeGenerator::visitTableSwitch(LTableSwitch* ins)
Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
const LAllocation* temp;
if (mir->getOperand(0)->type() != MIRType_Int32) {
if (mir->getOperand(0)->type() != MIRType::Int32) {
temp = ins->tempInt()->output();
// The input is a double, so try and convert it to an integer.
@ -3009,7 +3009,7 @@ CodeGenerator::visitStoreSlotT(LStoreSlotT* lir)
MIRType valueType = lir->mir()->value()->type();
if (valueType == MIRType_ObjectOrNull) {
if (valueType == MIRType::ObjectOrNull) {
masm.storeObjectOrNull(ToRegister(lir->value()), dest);
} else {
ConstantOrRegister value;
@ -3117,7 +3117,7 @@ CodeGenerator::visitGetPropertyPolymorphicT(LGetPropertyPolymorphicT* ins)
{
Register obj = ToRegister(ins->obj());
TypedOrValueRegister output(ins->mir()->type(), ToAnyRegister(ins->output()));
Register temp = (output.type() == MIRType_Double)
Register temp = (output.type() == MIRType::Double)
? ToRegister(ins->temp())
: output.typedReg().gpr();
emitGetPropertyPolymorphic(ins, obj, temp, output);
@ -3128,9 +3128,9 @@ static void
EmitUnboxedPreBarrier(MacroAssembler &masm, T address, JSValueType type)
{
if (type == JSVAL_TYPE_OBJECT)
masm.patchableCallPreBarrier(address, MIRType_Object);
masm.patchableCallPreBarrier(address, MIRType::Object);
else if (type == JSVAL_TYPE_STRING)
masm.patchableCallPreBarrier(address, MIRType_String);
masm.patchableCallPreBarrier(address, MIRType::String);
else
MOZ_ASSERT(!UnboxedTypeNeedsPreBarrier(type));
}
@ -3376,11 +3376,11 @@ CodeGenerator::visitTypeBarrierO(LTypeBarrierO* lir)
Register scratch = ToTempRegisterOrInvalid(lir->temp());
Label miss, ok;
if (lir->mir()->type() == MIRType_ObjectOrNull) {
Label* nullTarget = lir->mir()->resultTypeSet()->mightBeMIRType(MIRType_Null) ? &ok : &miss;
if (lir->mir()->type() == MIRType::ObjectOrNull) {
Label* nullTarget = lir->mir()->resultTypeSet()->mightBeMIRType(MIRType::Null) ? &ok : &miss;
masm.branchTestPtr(Assembler::Zero, obj, obj, nullTarget);
} else {
MOZ_ASSERT(lir->mir()->type() == MIRType_Object);
MOZ_ASSERT(lir->mir()->type() == MIRType::Object);
MOZ_ASSERT(lir->mir()->barrierKind() != BarrierKind::TypeTagOnly);
}
@ -4765,8 +4765,8 @@ CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated)
void
CodeGenerator::emitAssertObjectOrStringResult(Register input, MIRType type, const TemporaryTypeSet* typeset)
{
MOZ_ASSERT(type == MIRType_Object || type == MIRType_ObjectOrNull ||
type == MIRType_String || type == MIRType_Symbol);
MOZ_ASSERT(type == MIRType::Object || type == MIRType::ObjectOrNull ||
type == MIRType::String || type == MIRType::Symbol);
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
regs.take(input);
@ -4779,12 +4779,12 @@ CodeGenerator::emitAssertObjectOrStringResult(Register input, MIRType type, cons
Label done;
branchIfInvalidated(temp, &done);
if ((type == MIRType_Object || type == MIRType_ObjectOrNull) &&
if ((type == MIRType::Object || type == MIRType::ObjectOrNull) &&
typeset && !typeset->unknownObject())
{
// We have a result TypeSet, assert this object is in it.
Label miss, ok;
if (type == MIRType_ObjectOrNull)
if (type == MIRType::ObjectOrNull)
masm.branchPtr(Assembler::Equal, input, ImmWord(0), &ok);
if (typeset->getObjectCount() > 0)
masm.guardObjectType(input, typeset, temp, &miss);
@ -4809,16 +4809,16 @@ CodeGenerator::emitAssertObjectOrStringResult(Register input, MIRType type, cons
void* callee;
switch (type) {
case MIRType_Object:
case MIRType::Object:
callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidObjectPtr);
break;
case MIRType_ObjectOrNull:
case MIRType::ObjectOrNull:
callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidObjectOrNullPtr);
break;
case MIRType_String:
case MIRType::String:
callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidStringPtr);
break;
case MIRType_Symbol:
case MIRType::Symbol:
callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidSymbolPtr);
break;
default:
@ -4926,13 +4926,13 @@ CodeGenerator::emitDebugResultChecks(LInstruction* ins)
return;
switch (mir->type()) {
case MIRType_Object:
case MIRType_ObjectOrNull:
case MIRType_String:
case MIRType_Symbol:
case MIRType::Object:
case MIRType::ObjectOrNull:
case MIRType::String:
case MIRType::Symbol:
emitObjectOrStringResultChecks(ins, mir);
break;
case MIRType_Value:
case MIRType::Value:
emitValueResultChecks(ins, mir);
break;
default:
@ -5508,11 +5508,11 @@ CodeGenerator::visitSimdBox(LSimdBox* lir)
Address objectData(object, InlineTypedObject::offsetOfDataStart());
switch (type) {
case MIRType_Bool32x4:
case MIRType_Int32x4:
case MIRType::Bool32x4:
case MIRType::Int32x4:
masm.storeUnalignedInt32x4(in, objectData);
break;
case MIRType_Float32x4:
case MIRType::Float32x4:
masm.storeUnalignedFloat32x4(in, objectData);
break;
default:
@ -5586,11 +5586,11 @@ CodeGenerator::visitSimdUnbox(LSimdUnbox* lir)
// Load the value from the data of the InlineTypedObject.
Address objectData(object, InlineTypedObject::offsetOfDataStart());
switch (lir->mir()->type()) {
case MIRType_Bool32x4:
case MIRType_Int32x4:
case MIRType::Bool32x4:
case MIRType::Int32x4:
masm.loadUnalignedInt32x4(objectData, simd);
break;
case MIRType_Float32x4:
case MIRType::Float32x4:
masm.loadUnalignedFloat32x4(objectData, simd);
break;
default:
@ -6086,8 +6086,8 @@ CodeGenerator::visitGetNextMapEntryForIterator(LGetNextMapEntryForIterator* lir)
Address valueAddress(front, ValueMap::Entry::offsetOfValue());
Address keyElemAddress(result, elementsOffset);
Address valueElemAddress(result, elementsOffset + sizeof(Value));
masm.patchableCallPreBarrier(keyElemAddress, MIRType_Value);
masm.patchableCallPreBarrier(valueElemAddress, MIRType_Value);
masm.patchableCallPreBarrier(keyElemAddress, MIRType::Value);
masm.patchableCallPreBarrier(valueElemAddress, MIRType::Value);
masm.storeValue(keyAddress, keyElemAddress, temp);
masm.storeValue(valueAddress, valueElemAddress, temp);
@ -6621,7 +6621,7 @@ CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir)
Register output = ToRegister(lir->output());
if (op == JSOP_EQ || op == JSOP_NE) {
MOZ_ASSERT(lir->mir()->lhs()->type() != MIRType_Object ||
MOZ_ASSERT(lir->mir()->lhs()->type() != MIRType::Object ||
lir->mir()->operandMightEmulateUndefined(),
"Operands which can't emulate undefined should have been folded");
@ -6643,9 +6643,9 @@ CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir)
Register tag = masm.splitTagForTest(value);
MDefinition* input = lir->mir()->lhs();
if (input->mightBeType(MIRType_Null))
if (input->mightBeType(MIRType::Null))
masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
if (input->mightBeType(MIRType_Undefined))
if (input->mightBeType(MIRType::Undefined))
masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);
if (ool) {
@ -6707,7 +6707,7 @@ CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(LIsNullOrLikeUndefinedAndBra
op = JSOP_EQ;
}
MOZ_ASSERT(lir->cmpMir()->lhs()->type() != MIRType_Object ||
MOZ_ASSERT(lir->cmpMir()->lhs()->type() != MIRType::Object ||
lir->cmpMir()->operandMightEmulateUndefined(),
"Operands which can't emulate undefined should have been folded");
@ -6723,9 +6723,9 @@ CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(LIsNullOrLikeUndefinedAndBra
Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
MDefinition* input = lir->cmpMir()->lhs();
if (input->mightBeType(MIRType_Null))
if (input->mightBeType(MIRType::Null))
masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
if (input->mightBeType(MIRType_Undefined))
if (input->mightBeType(MIRType::Undefined))
masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);
if (ool) {
@ -6757,13 +6757,13 @@ CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT * lir)
lir->mir()->compareType() == MCompare::Compare_Null);
MIRType lhsType = lir->mir()->lhs()->type();
MOZ_ASSERT(lhsType == MIRType_Object || lhsType == MIRType_ObjectOrNull);
MOZ_ASSERT(lhsType == MIRType::Object || lhsType == MIRType::ObjectOrNull);
JSOp op = lir->mir()->jsop();
MOZ_ASSERT(lhsType == MIRType_ObjectOrNull || op == JSOP_EQ || op == JSOP_NE,
MOZ_ASSERT(lhsType == MIRType::ObjectOrNull || op == JSOP_EQ || op == JSOP_NE,
"Strict equality should have been folded");
MOZ_ASSERT(lhsType == MIRType_ObjectOrNull || lir->mir()->operandMightEmulateUndefined(),
MOZ_ASSERT(lhsType == MIRType::ObjectOrNull || lir->mir()->operandMightEmulateUndefined(),
"If the object couldn't emulate undefined, this should have been folded.");
Register objreg = ToRegister(lir->input());
@ -6776,7 +6776,7 @@ CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT * lir)
Label* emulatesUndefined = ool->label1();
Label* doesntEmulateUndefined = ool->label2();
if (lhsType == MIRType_ObjectOrNull)
if (lhsType == MIRType::ObjectOrNull)
masm.branchTestPtr(Assembler::Zero, objreg, objreg, emulatesUndefined);
branchTestObjectEmulatesUndefined(objreg, emulatesUndefined, doesntEmulateUndefined,
@ -6791,7 +6791,7 @@ CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT * lir)
masm.move32(Imm32(op == JSOP_EQ), output);
masm.bind(&done);
} else {
MOZ_ASSERT(lhsType == MIRType_ObjectOrNull);
MOZ_ASSERT(lhsType == MIRType::ObjectOrNull);
Label isNull, done;
@ -6815,13 +6815,13 @@ CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(LIsNullOrLikeUndefinedAndBra
compareType == MCompare::Compare_Null);
MIRType lhsType = lir->cmpMir()->lhs()->type();
MOZ_ASSERT(lhsType == MIRType_Object || lhsType == MIRType_ObjectOrNull);
MOZ_ASSERT(lhsType == MIRType::Object || lhsType == MIRType::ObjectOrNull);
JSOp op = lir->cmpMir()->jsop();
MOZ_ASSERT(lhsType == MIRType_ObjectOrNull || op == JSOP_EQ || op == JSOP_NE,
MOZ_ASSERT(lhsType == MIRType::ObjectOrNull || op == JSOP_EQ || op == JSOP_NE,
"Strict equality should have been folded");
MOZ_ASSERT(lhsType == MIRType_ObjectOrNull || lir->cmpMir()->operandMightEmulateUndefined(),
MOZ_ASSERT(lhsType == MIRType::ObjectOrNull || lir->cmpMir()->operandMightEmulateUndefined(),
"If the object couldn't emulate undefined, this should have been folded.");
MBasicBlock* ifTrue;
@ -6845,14 +6845,14 @@ CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(LIsNullOrLikeUndefinedAndBra
Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
if (lhsType == MIRType_ObjectOrNull)
if (lhsType == MIRType::ObjectOrNull)
masm.branchTestPtr(Assembler::Zero, input, input, ifTrueLabel);
// Objects that emulate undefined are loosely equal to null/undefined.
Register scratch = ToRegister(lir->temp());
testObjectEmulatesUndefined(input, ifTrueLabel, ifFalseLabel, scratch, ool);
} else {
MOZ_ASSERT(lhsType == MIRType_ObjectOrNull);
MOZ_ASSERT(lhsType == MIRType::ObjectOrNull);
testZeroEmitBranch(Assembler::Equal, input, ifTrue, ifFalse);
}
}
@ -7554,7 +7554,7 @@ CodeGenerator::visitNotV(LNotV* lir)
// out our operand after we did cacheOperandMightEmulateUndefined. So we
// might think it can emulate undefined _and_ know that it can't be an
// object.
if (lir->mir()->operandMightEmulateUndefined() && operand->mightBeType(MIRType_Object)) {
if (lir->mir()->operandMightEmulateUndefined() && operand->mightBeType(MIRType::Object)) {
ool = new(alloc()) OutOfLineTestObjectWithLabels();
addOutOfLineCode(ool, lir->mir());
ifTruthy = ool->label1();
@ -7992,11 +7992,11 @@ CodeGenerator::visitOutOfLineStoreElementHole(OutOfLineStoreElementHole* ool)
masm.bind(&dontUpdate);
}
if (ins->isStoreElementHoleT() && unboxedType == JSVAL_TYPE_MAGIC && valueType != MIRType_Double) {
if (ins->isStoreElementHoleT() && unboxedType == JSVAL_TYPE_MAGIC && valueType != MIRType::Double) {
// The inline path for StoreElementHoleT does not always store the type tag,
// so we do the store on the OOL path. We use MIRType_None for the element type
// so we do the store on the OOL path. We use MIRType::None for the element type
// so that storeElementTyped will always store the type tag.
emitStoreElementTyped(ins->toStoreElementHoleT()->value(), valueType, MIRType_None,
emitStoreElementTyped(ins->toStoreElementHoleT()->value(), valueType, MIRType::None,
elements, index, 0);
masm.jump(ool->rejoin());
} else {
@ -8047,11 +8047,11 @@ CodeGenerator::visitStoreUnboxedPointer(LStoreUnboxedPointer* lir)
int32_t offsetAdjustment;
bool preBarrier;
if (lir->mir()->isStoreUnboxedObjectOrNull()) {
type = MIRType_Object;
type = MIRType::Object;
offsetAdjustment = lir->mir()->toStoreUnboxedObjectOrNull()->offsetAdjustment();
preBarrier = lir->mir()->toStoreUnboxedObjectOrNull()->preBarrier();
} else if (lir->mir()->isStoreUnboxedString()) {
type = MIRType_String;
type = MIRType::String;
offsetAdjustment = lir->mir()->toStoreUnboxedString()->offsetAdjustment();
preBarrier = lir->mir()->toStoreUnboxedString()->preBarrier();
} else {
@ -8635,7 +8635,7 @@ CodeGenerator::visitSetFrameArgumentT(LSetFrameArgumentT* lir)
MIRType type = lir->mir()->value()->type();
if (type == MIRType_Double) {
if (type == MIRType::Double) {
// Store doubles directly.
FloatRegister input = ToFloatRegister(lir->input());
masm.storeDouble(input, Address(masm.getStackPointer(), argOffset));
@ -9269,7 +9269,7 @@ CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint* lir)
FloatRegister resultReg = ToFloatRegister(result);
masm.branchTestDouble(Assembler::NotEqual, box, ool->entry());
masm.unboxDouble(box, resultReg);
if (lir->type() == MIRType_Float32)
if (lir->type() == MIRType::Float32)
masm.convertDoubleToFloat32(resultReg, resultReg);
masm.bind(ool->rejoin());
}
@ -9390,7 +9390,7 @@ CodeGenerator::visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* ins)
Address address(input, NativeObject::getFixedSlotOffset(slot));
Label bail;
if (type == MIRType_Double) {
if (type == MIRType::Double) {
MOZ_ASSERT(result.isFloat());
masm.ensureDouble(address, result.fpu(), &bail);
if (mir->fallible())
@ -9399,10 +9399,10 @@ CodeGenerator::visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* ins)
}
if (mir->fallible()) {
switch (type) {
case MIRType_Int32:
case MIRType::Int32:
masm.branchTestInt32(Assembler::NotEqual, address, &bail);
break;
case MIRType_Boolean:
case MIRType::Boolean:
masm.branchTestBoolean(Assembler::NotEqual, address, &bail);
break;
default:
@ -9441,7 +9441,7 @@ CodeGenerator::visitStoreFixedSlotT(LStoreFixedSlotT* ins)
if (ins->mir()->needsBarrier())
emitPreBarrier(address);
if (valueType == MIRType_ObjectOrNull) {
if (valueType == MIRType::ObjectOrNull) {
Register nvalue = ToRegister(value);
masm.storeObjectOrNull(nvalue, address);
} else {
@ -9511,7 +9511,7 @@ CodeGenerator::addSetPropertyCache(LInstruction* ins, LiveRegisterSet liveRegs,
ConstantOrRegister
CodeGenerator::toConstantOrRegister(LInstruction* lir, size_t n, MIRType type)
{
if (type == MIRType_Value)
if (type == MIRType::Value)
return TypedOrValueRegister(ToValue(lir, n));
const LAllocation* value = lir->getOperand(n);
@ -9789,13 +9789,13 @@ CodeGenerator::visitTypeOfV(LTypeOfV* lir)
MDefinition* input = lir->mir()->input();
bool testObject = input->mightBeType(MIRType_Object);
bool testNumber = input->mightBeType(MIRType_Int32) || input->mightBeType(MIRType_Double);
bool testBoolean = input->mightBeType(MIRType_Boolean);
bool testUndefined = input->mightBeType(MIRType_Undefined);
bool testNull = input->mightBeType(MIRType_Null);
bool testString = input->mightBeType(MIRType_String);
bool testSymbol = input->mightBeType(MIRType_Symbol);
bool testObject = input->mightBeType(MIRType::Object);
bool testNumber = input->mightBeType(MIRType::Int32) || input->mightBeType(MIRType::Double);
bool testBoolean = input->mightBeType(MIRType::Boolean);
bool testUndefined = input->mightBeType(MIRType::Undefined);
bool testNull = input->mightBeType(MIRType::Null);
bool testString = input->mightBeType(MIRType::String);
bool testSymbol = input->mightBeType(MIRType::Symbol);
unsigned numTests = unsigned(testObject) + unsigned(testNumber) + unsigned(testBoolean) +
unsigned(testUndefined) + unsigned(testNull) + unsigned(testString) + unsigned(testSymbol);
@ -10365,7 +10365,7 @@ CodeGenerator::visitClampVToUint8(LClampVToUint8* lir)
Label* stringEntry;
Label* stringRejoin;
if (input->mightBeType(MIRType_String)) {
if (input->mightBeType(MIRType::String)) {
OutOfLineCode* oolString = oolCallVM(StringToNumberInfo, lir, ArgList(output),
StoreFloatRegisterTo(tempFloat));
stringEntry = oolString->entry();

View file

@ -18,7 +18,7 @@ namespace jit {
static bool
CanUnboxSimdPhi(const JitCompartment* jitCompartment, MPhi* phi, SimdType unboxType)
{
MOZ_ASSERT(phi->type() == MIRType_Object);
MOZ_ASSERT(phi->type() == MIRType::Object);
// If we are unboxing, we are more than likely to have boxed this SIMD type
// once in baseline, otherwise, we cannot create a MSimdBox as we have no

View file

@ -14,20 +14,20 @@ using namespace jit;
static void
AnalyzeLsh(TempAllocator& alloc, MLsh* lsh)
{
if (lsh->specialization() != MIRType_Int32)
if (lsh->specialization() != MIRType::Int32)
return;
if (lsh->isRecoveredOnBailout())
return;
MDefinition* index = lsh->lhs();
MOZ_ASSERT(index->type() == MIRType_Int32);
MOZ_ASSERT(index->type() == MIRType::Int32);
MConstant* shiftValue = lsh->rhs()->maybeConstantValue();
if (!shiftValue)
return;
if (shiftValue->type() != MIRType_Int32 || !IsShiftInScaleRange(shiftValue->toInt32()))
if (shiftValue->type() != MIRType::Int32 || !IsShiftInScaleRange(shiftValue->toInt32()))
return;
Scale scale = ShiftToScale(shiftValue->toInt32());
@ -44,7 +44,7 @@ AnalyzeLsh(TempAllocator& alloc, MLsh* lsh)
break;
MAdd* add = use->consumer()->toDefinition()->toAdd();
if (add->specialization() != MIRType_Int32 || !add->isTruncated())
if (add->specialization() != MIRType::Int32 || !add->isTruncated())
break;
MDefinition* other = add->getOperand(1 - add->indexOf(*use));
@ -80,7 +80,7 @@ AnalyzeLsh(TempAllocator& alloc, MLsh* lsh)
MDefinition* other = bitAnd->getOperand(1 - bitAnd->indexOf(*use));
MConstant* otherConst = other->maybeConstantValue();
if (!otherConst || otherConst->type() != MIRType_Int32)
if (!otherConst || otherConst->type() != MIRType::Int32)
return;
uint32_t bitsClearedByShift = elemSize - 1;

View file

@ -275,27 +275,27 @@ JitRuntime::initialize(JSContext* cx)
return false;
JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for Value");
valuePreBarrier_ = generatePreBarrier(cx, MIRType_Value);
valuePreBarrier_ = generatePreBarrier(cx, MIRType::Value);
if (!valuePreBarrier_)
return false;
JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for String");
stringPreBarrier_ = generatePreBarrier(cx, MIRType_String);
stringPreBarrier_ = generatePreBarrier(cx, MIRType::String);
if (!stringPreBarrier_)
return false;
JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for Object");
objectPreBarrier_ = generatePreBarrier(cx, MIRType_Object);
objectPreBarrier_ = generatePreBarrier(cx, MIRType::Object);
if (!objectPreBarrier_)
return false;
JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for Shape");
shapePreBarrier_ = generatePreBarrier(cx, MIRType_Shape);
shapePreBarrier_ = generatePreBarrier(cx, MIRType::Shape);
if (!shapePreBarrier_)
return false;
JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for ObjectGroup");
objectGroupPreBarrier_ = generatePreBarrier(cx, MIRType_ObjectGroup);
objectGroupPreBarrier_ = generatePreBarrier(cx, MIRType::ObjectGroup);
if (!objectGroupPreBarrier_)
return false;
@ -1376,7 +1376,7 @@ OptimizeSinCos(MIRGenerator *mir, MIRGraph &graph)
continue;
// Check if sin/cos is already optimized.
if (insFunc->getOperand(0)->type() == MIRType_SinCosDouble)
if (insFunc->getOperand(0)->type() == MIRType::SinCosDouble)
continue;
// insFunc is either a |sin(x)| or |cos(x)| instruction. The

View file

@ -1208,14 +1208,14 @@ GuessPhiType(MPhi* phi, bool* hasInputsWithEmptyTypes)
// Check that different magic constants aren't flowing together. Ignore
// JS_OPTIMIZED_OUT, since an operand could be legitimately optimized
// away.
MIRType magicType = MIRType_None;
MIRType magicType = MIRType::None;
for (size_t i = 0; i < phi->numOperands(); i++) {
MDefinition* in = phi->getOperand(i);
if (in->type() == MIRType_MagicOptimizedArguments ||
in->type() == MIRType_MagicHole ||
in->type() == MIRType_MagicIsConstructing)
if (in->type() == MIRType::MagicOptimizedArguments ||
in->type() == MIRType::MagicHole ||
in->type() == MIRType::MagicIsConstructing)
{
if (magicType == MIRType_None)
if (magicType == MIRType::None)
magicType = in->type();
MOZ_ASSERT(magicType == in->type());
}
@ -1224,7 +1224,7 @@ GuessPhiType(MPhi* phi, bool* hasInputsWithEmptyTypes)
*hasInputsWithEmptyTypes = false;
MIRType type = MIRType_None;
MIRType type = MIRType::None;
bool convertibleToFloat32 = false;
bool hasPhiInputs = false;
for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
@ -1233,7 +1233,7 @@ GuessPhiType(MPhi* phi, bool* hasInputsWithEmptyTypes)
hasPhiInputs = true;
if (!in->toPhi()->triedToSpecialize())
continue;
if (in->type() == MIRType_None) {
if (in->type() == MIRType::None) {
// The operand is a phi we tried to specialize, but we were
// unable to guess its type. propagateSpecialization will
// propagate the type to this phi when it becomes known.
@ -1247,34 +1247,34 @@ GuessPhiType(MPhi* phi, bool* hasInputsWithEmptyTypes)
continue;
}
if (type == MIRType_None) {
if (type == MIRType::None) {
type = in->type();
if (in->canProduceFloat32())
convertibleToFloat32 = true;
continue;
}
if (type != in->type()) {
if (convertibleToFloat32 && in->type() == MIRType_Float32) {
if (convertibleToFloat32 && in->type() == MIRType::Float32) {
// If we only saw definitions that can be converted into Float32 before and
// encounter a Float32 value, promote previous values to Float32
type = MIRType_Float32;
type = MIRType::Float32;
} else if (IsTypeRepresentableAsDouble(type) &&
IsTypeRepresentableAsDouble(in->type()))
{
// Specialize phis with int32 and double operands as double.
type = MIRType_Double;
type = MIRType::Double;
convertibleToFloat32 &= in->canProduceFloat32();
} else {
return MIRType_Value;
return MIRType::Value;
}
}
}
if (type == MIRType_None && !hasPhiInputs) {
// All inputs are non-phis with empty typesets. Use MIRType_Value
if (type == MIRType::None && !hasPhiInputs) {
// All inputs are non-phis with empty typesets. Use MIRType::Value
// in this case, as it's impossible to get better type information.
MOZ_ASSERT(*hasInputsWithEmptyTypes);
type = MIRType_Value;
type = MIRType::Value;
}
return type;
@ -1292,7 +1292,7 @@ TypeAnalyzer::respecialize(MPhi* phi, MIRType type)
bool
TypeAnalyzer::propagateSpecialization(MPhi* phi)
{
MOZ_ASSERT(phi->type() != MIRType_None);
MOZ_ASSERT(phi->type() != MIRType::None);
// Verify that this specialization matches any phis depending on it.
for (MUseDefIterator iter(phi); iter; iter++) {
@ -1301,7 +1301,7 @@ TypeAnalyzer::propagateSpecialization(MPhi* phi)
MPhi* use = iter.def()->toPhi();
if (!use->triedToSpecialize())
continue;
if (use->type() == MIRType_None) {
if (use->type() == MIRType::None) {
// We tried to specialize this phi, but were unable to guess its
// type. Now that we know the type of one of its operands, we can
// specialize it.
@ -1311,10 +1311,10 @@ TypeAnalyzer::propagateSpecialization(MPhi* phi)
}
if (use->type() != phi->type()) {
// Specialize phis with int32 that can be converted to float and float operands as floats.
if ((use->type() == MIRType_Int32 && use->canProduceFloat32() && phi->type() == MIRType_Float32) ||
(phi->type() == MIRType_Int32 && phi->canProduceFloat32() && use->type() == MIRType_Float32))
if ((use->type() == MIRType::Int32 && use->canProduceFloat32() && phi->type() == MIRType::Float32) ||
(phi->type() == MIRType::Int32 && phi->canProduceFloat32() && use->type() == MIRType::Float32))
{
if (!respecialize(use, MIRType_Float32))
if (!respecialize(use, MIRType::Float32))
return false;
continue;
}
@ -1323,13 +1323,13 @@ TypeAnalyzer::propagateSpecialization(MPhi* phi)
if (IsTypeRepresentableAsDouble(use->type()) &&
IsTypeRepresentableAsDouble(phi->type()))
{
if (!respecialize(use, MIRType_Double))
if (!respecialize(use, MIRType::Double))
return false;
continue;
}
// This phi in our use chain can now no longer be specialized.
if (!respecialize(use, MIRType_Value))
if (!respecialize(use, MIRType::Value))
return false;
}
}
@ -1350,7 +1350,7 @@ TypeAnalyzer::specializePhis()
bool hasInputsWithEmptyTypes;
MIRType type = GuessPhiType(*phi, &hasInputsWithEmptyTypes);
phi->specialize(type);
if (type == MIRType_None) {
if (type == MIRType::None) {
// We tried to guess the type but failed because all operands are
// phis we still have to visit. Set the triedToSpecialize flag but
// don't propagate the type to other phis, propagateSpecialization
@ -1358,8 +1358,8 @@ TypeAnalyzer::specializePhis()
// Edge case: when this phi has a non-phi input with an empty
// typeset, it's possible for two phis to have a cyclic
// dependency and they will both have MIRType_None. Specialize
// such phis to MIRType_Value later on.
// dependency and they will both have MIRType::None. Specialize
// such phis to MIRType::Value later on.
if (hasInputsWithEmptyTypes && !phisWithEmptyInputTypes.append(*phi))
return false;
continue;
@ -1381,14 +1381,14 @@ TypeAnalyzer::specializePhis()
// When two phis have a cyclic dependency and inputs that have an empty
// typeset (which are ignored by GuessPhiType), we may still have to
// specialize these to MIRType_Value.
// specialize these to MIRType::Value.
while (!phisWithEmptyInputTypes.empty()) {
if (mir->shouldCancel("Specialize Phis (phisWithEmptyInputTypes)"))
return false;
MPhi* phi = phisWithEmptyInputTypes.popCopy();
if (phi->type() == MIRType_None) {
phi->specialize(MIRType_Value);
if (phi->type() == MIRType::None) {
phi->specialize(MIRType::Value);
if (!propagateSpecialization(phi))
return false;
}
@ -1402,13 +1402,13 @@ bool
TypeAnalyzer::adjustPhiInputs(MPhi* phi)
{
MIRType phiType = phi->type();
MOZ_ASSERT(phiType != MIRType_None);
MOZ_ASSERT(phiType != MIRType::None);
// If we specialized a type that's not Value, there are 3 cases:
// 1. Every input is of that type.
// 2. Every observed input is of that type (i.e., some inputs haven't been executed yet).
// 3. Inputs were doubles and int32s, and was specialized to double.
if (phiType != MIRType_Value) {
if (phiType != MIRType::Value) {
for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
MDefinition* in = phi->getOperand(i);
if (in->type() == phiType)
@ -1422,21 +1422,21 @@ TypeAnalyzer::adjustPhiInputs(MPhi* phi)
} else {
MInstruction* replacement;
if (phiType == MIRType_Double && IsFloatType(in->type())) {
if (phiType == MIRType::Double && IsFloatType(in->type())) {
// Convert int32 operands to double.
replacement = MToDouble::New(alloc(), in);
} else if (phiType == MIRType_Float32) {
if (in->type() == MIRType_Int32 || in->type() == MIRType_Double) {
} else if (phiType == MIRType::Float32) {
if (in->type() == MIRType::Int32 || in->type() == MIRType::Double) {
replacement = MToFloat32::New(alloc(), in);
} else {
// See comment below
if (in->type() != MIRType_Value) {
if (in->type() != MIRType::Value) {
MBox* box = MBox::New(alloc(), in);
in->block()->insertBefore(in->block()->lastIns(), box);
in = box;
}
MUnbox* unbox = MUnbox::New(alloc(), in, MIRType_Double, MUnbox::Fallible);
MUnbox* unbox = MUnbox::New(alloc(), in, MIRType::Double, MUnbox::Fallible);
in->block()->insertBefore(in->block()->lastIns(), unbox);
replacement = MToFloat32::New(alloc(), in);
}
@ -1444,7 +1444,7 @@ TypeAnalyzer::adjustPhiInputs(MPhi* phi)
// If we know this branch will fail to convert to phiType,
// insert a box that'll immediately fail in the fallible unbox
// below.
if (in->type() != MIRType_Value) {
if (in->type() != MIRType::Value) {
MBox* box = MBox::New(alloc(), in);
in->block()->insertBefore(in->block()->lastIns(), box);
in = box;
@ -1466,7 +1466,7 @@ TypeAnalyzer::adjustPhiInputs(MPhi* phi)
// Box every typed input.
for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
MDefinition* in = phi->getOperand(i);
if (in->type() == MIRType_Value)
if (in->type() == MIRType::Value)
continue;
// The input is being explicitly unboxed, so sneak past and grab
@ -1474,7 +1474,7 @@ TypeAnalyzer::adjustPhiInputs(MPhi* phi)
if (in->isUnbox() && phi->typeIncludes(in->toUnbox()->input()))
in = in->toUnbox()->input();
if (in->type() != MIRType_Value) {
if (in->type() != MIRType::Value) {
if (!alloc().ensureBallast())
return false;
@ -1508,19 +1508,19 @@ TypeAnalyzer::replaceRedundantPhi(MPhi* phi)
MBasicBlock* block = phi->block();
js::Value v;
switch (phi->type()) {
case MIRType_Undefined:
case MIRType::Undefined:
v = UndefinedValue();
break;
case MIRType_Null:
case MIRType::Null:
v = NullValue();
break;
case MIRType_MagicOptimizedArguments:
case MIRType::MagicOptimizedArguments:
v = MagicValue(JS_OPTIMIZED_ARGUMENTS);
break;
case MIRType_MagicOptimizedOut:
case MIRType::MagicOptimizedOut:
v = MagicValue(JS_OPTIMIZED_OUT);
break;
case MIRType_MagicUninitializedLexical:
case MIRType::MagicUninitializedLexical:
v = MagicValue(JS_UNINITIALIZED_LEXICAL);
break;
default:
@ -1544,11 +1544,11 @@ TypeAnalyzer::insertConversions()
for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ) {
MPhi* phi = *iter++;
if (phi->type() == MIRType_Undefined ||
phi->type() == MIRType_Null ||
phi->type() == MIRType_MagicOptimizedArguments ||
phi->type() == MIRType_MagicOptimizedOut ||
phi->type() == MIRType_MagicUninitializedLexical)
if (phi->type() == MIRType::Undefined ||
phi->type() == MIRType::Null ||
phi->type() == MIRType::MagicOptimizedArguments ||
phi->type() == MIRType::MagicOptimizedOut ||
phi->type() == MIRType::MagicUninitializedLexical)
{
replaceRedundantPhi(phi);
block->discardPhi(phi);
@ -1729,7 +1729,7 @@ TypeAnalyzer::specializeValidFloatOps()
if (!ins->isFloat32Commutative())
continue;
if (ins->type() == MIRType_Float32)
if (ins->type() == MIRType::Float32)
continue;
// This call will try to specialize the instruction iff all uses are consumers and
@ -1748,7 +1748,7 @@ TypeAnalyzer::graphContainsFloat32()
return false;
for (MDefinitionIterator def(*block); def; def++) {
if (def->type() == MIRType_Float32)
if (def->type() == MIRType::Float32)
return true;
}
}
@ -1788,7 +1788,7 @@ TypeAnalyzer::checkFloatCoherency()
return false;
for (MDefinitionIterator def(*block); def; def++) {
if (def->type() != MIRType_Float32)
if (def->type() != MIRType::Float32)
continue;
for (MUseDefIterator use(*def); use; use++) {
@ -2352,12 +2352,12 @@ jit::AssertBasicGraphCoherency(MIRGraph& graph)
for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
MOZ_ASSERT(phi->numOperands() == block->numPredecessors());
MOZ_ASSERT(!phi->isRecoveredOnBailout());
MOZ_ASSERT(phi->type() != MIRType_None);
MOZ_ASSERT(phi->type() != MIRType::None);
MOZ_ASSERT(phi->dependency() == nullptr);
}
for (MDefinitionIterator iter(*block); iter; iter++) {
MOZ_ASSERT(iter->block() == *block);
MOZ_ASSERT_IF(iter->hasUses(), iter->type() != MIRType_None);
MOZ_ASSERT_IF(iter->hasUses(), iter->type() != MIRType::None);
MOZ_ASSERT(!iter->isDiscarded());
MOZ_ASSERT_IF(iter->isStart(),
*block == graph.entryBlock() || *block == graph.osrBlock());
@ -2388,7 +2388,7 @@ jit::AssertBasicGraphCoherency(MIRGraph& graph)
MControlInstruction* control = block->lastIns();
MOZ_ASSERT(control->block() == *block);
MOZ_ASSERT(!control->hasUses());
MOZ_ASSERT(control->type() == MIRType_None);
MOZ_ASSERT(control->type() == MIRType::None);
MOZ_ASSERT(!control->isDiscarded());
MOZ_ASSERT(!control->isRecoveredOnBailout());
MOZ_ASSERT(control->resumePoint() == nullptr);
@ -2499,36 +2499,36 @@ IsResumableMIRType(MIRType type)
{
// see CodeGeneratorShared::encodeAllocation
switch (type) {
case MIRType_Undefined:
case MIRType_Null:
case MIRType_Boolean:
case MIRType_Int32:
case MIRType_Double:
case MIRType_Float32:
case MIRType_String:
case MIRType_Symbol:
case MIRType_Object:
case MIRType_MagicOptimizedArguments:
case MIRType_MagicOptimizedOut:
case MIRType_MagicUninitializedLexical:
case MIRType_Value:
case MIRType_Float32x4:
case MIRType_Int32x4:
case MIRType_Bool32x4:
case MIRType::Undefined:
case MIRType::Null:
case MIRType::Boolean:
case MIRType::Int32:
case MIRType::Double:
case MIRType::Float32:
case MIRType::String:
case MIRType::Symbol:
case MIRType::Object:
case MIRType::MagicOptimizedArguments:
case MIRType::MagicOptimizedOut:
case MIRType::MagicUninitializedLexical:
case MIRType::Value:
case MIRType::Float32x4:
case MIRType::Int32x4:
case MIRType::Bool32x4:
return true;
case MIRType_MagicHole:
case MIRType_MagicIsConstructing:
case MIRType_ObjectOrNull:
case MIRType_None:
case MIRType_Slots:
case MIRType_Elements:
case MIRType_Pointer:
case MIRType_Shape:
case MIRType_ObjectGroup:
case MIRType_Doublex2: // NYI, see also RSimdBox::recover
case MIRType_SinCosDouble:
case MIRType_Int64:
case MIRType::MagicHole:
case MIRType::MagicIsConstructing:
case MIRType::ObjectOrNull:
case MIRType::None:
case MIRType::Slots:
case MIRType::Elements:
case MIRType::Pointer:
case MIRType::Shape:
case MIRType::ObjectGroup:
case MIRType::Doublex2: // NYI, see also RSimdBox::recover
case MIRType::SinCosDouble:
case MIRType::Int64:
return false;
}
MOZ_CRASH("Unknown MIRType.");
@ -2559,7 +2559,7 @@ AssertResumePointDominatedByOperands(MResumePoint* resume)
{
for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
MDefinition* op = resume->getOperand(i);
if (op->type() == MIRType_MagicOptimizedArguments)
if (op->type() == MIRType::MagicOptimizedArguments)
continue;
MOZ_ASSERT(op->block()->dominates(resume->block()),
"Resume point is not dominated by its operands");
@ -2630,7 +2630,7 @@ jit::AssertExtendedGraphCoherency(MIRGraph& graph, bool underValueNumberer)
// We sometimes see a phi with a magic-optimized-arguments
// operand defined in the normal entry block, while the phi is
// also reachable from the OSR entry (auto-regress/bug779818.js)
if (phi->getOperand(i)->type() == MIRType_MagicOptimizedArguments)
if (phi->getOperand(i)->type() == MIRType::MagicOptimizedArguments)
continue;
MOZ_ASSERT(phi->getOperand(i)->block()->dominates(block->getPredecessor(i)),
@ -2734,7 +2734,7 @@ jit::ExtractLinearSum(MDefinition* ins)
if (ins->isBeta())
ins = ins->getOperand(0);
if (ins->type() != MIRType_Int32)
if (ins->type() != MIRType::Int32)
return SimpleLinearSum(ins, 0);
if (ins->isConstant())
@ -2743,7 +2743,7 @@ jit::ExtractLinearSum(MDefinition* ins)
if (ins->isAdd() || ins->isSub()) {
MDefinition* lhs = ins->getOperand(0);
MDefinition* rhs = ins->getOperand(1);
if (lhs->type() == MIRType_Int32 && rhs->type() == MIRType_Int32) {
if (lhs->type() == MIRType::Int32 && rhs->type() == MIRType::Int32) {
SimpleLinearSum lsum = ExtractLinearSum(lhs);
SimpleLinearSum rsum = ExtractLinearSum(rhs);
@ -2787,8 +2787,8 @@ jit::ExtractLinearInequality(MTest* test, BranchDirection direction,
if (!compare->isInt32Comparison())
return false;
MOZ_ASSERT(lhs->type() == MIRType_Int32);
MOZ_ASSERT(rhs->type() == MIRType_Int32);
MOZ_ASSERT(lhs->type() == MIRType::Int32);
MOZ_ASSERT(rhs->type() == MIRType::Int32);
JSOp jsop = compare->jsop();
if (direction == FALSE_BRANCH)
@ -2993,7 +2993,7 @@ TryEliminateTypeBarrier(MTypeBarrier* barrier, bool* eliminated)
static bool
TryOptimizeLoadObjectOrNull(MDefinition* def, MDefinitionVector* peliminateList)
{
if (def->type() != MIRType_Value)
if (def->type() != MIRType::Value)
return true;
// Check if this definition can only produce object or null values.
@ -3025,13 +3025,13 @@ TryOptimizeLoadObjectOrNull(MDefinition* def, MDefinitionVector* peliminateList)
return false;
break;
case MDefinition::Op_Unbox:
if (ndef->type() != MIRType_Object)
if (ndef->type() != MIRType::Object)
return true;
break;
case MDefinition::Op_TypeBarrier:
// For now, only handle type barriers which are not consumed
// anywhere and only test that the value is null.
if (ndef->hasUses() || ndef->resultTypeSet()->getKnownMIRType() != MIRType_Null)
if (ndef->hasUses() || ndef->resultTypeSet()->getKnownMIRType() != MIRType::Null)
return true;
break;
default:
@ -3054,13 +3054,13 @@ TryOptimizeLoadObjectOrNull(MDefinition* def, MDefinitionVector* peliminateList)
return true;
#endif // JS_PUNBOX64
def->setResultType(MIRType_ObjectOrNull);
def->setResultType(MIRType::ObjectOrNull);
// Fixup the result type of MTypeBarrier uses.
for (MUseDefIterator iter(def); iter; ++iter) {
MDefinition* ndef = iter.def();
if (ndef->isTypeBarrier())
ndef->setResultType(MIRType_ObjectOrNull);
ndef->setResultType(MIRType::ObjectOrNull);
}
// Eliminate MToObjectOrNull instruction uses.
@ -3183,8 +3183,8 @@ jit::EliminateRedundantChecks(MIRGraph& graph)
static bool
NeedsKeepAlive(MInstruction* slotsOrElements, MInstruction* use)
{
MOZ_ASSERT(slotsOrElements->type() == MIRType_Elements ||
slotsOrElements->type() == MIRType_Slots);
MOZ_ASSERT(slotsOrElements->type() == MIRType::Elements ||
slotsOrElements->type() == MIRType::Slots);
if (slotsOrElements->block() != use->block())
return true;
@ -3230,7 +3230,7 @@ jit::AddKeepAliveInstructions(MIRGraph& graph)
for (MInstructionIterator insIter(block->begin()); insIter != block->end(); insIter++) {
MInstruction* ins = *insIter;
if (ins->type() != MIRType_Elements && ins->type() != MIRType_Slots)
if (ins->type() != MIRType::Elements && ins->type() != MIRType::Slots)
continue;
MDefinition* ownerObject;
@ -3254,7 +3254,7 @@ jit::AddKeepAliveInstructions(MIRGraph& graph)
MOZ_CRASH("Unexpected op");
}
MOZ_ASSERT(ownerObject->type() == MIRType_Object);
MOZ_ASSERT(ownerObject->type() == MIRType::Object);
if (ownerObject->isConstant()) {
// Constants are kept alive by other pointers, for instance

Diff for this file not shown because of its large size.


@ -416,9 +416,9 @@ class IonBuilder
BarrierKind barrier, TemporaryTypeSet* types);
bool storeSlot(MDefinition* obj, size_t slot, size_t nfixed,
MDefinition* value, bool needsBarrier,
MIRType slotType = MIRType_None);
MIRType slotType = MIRType::None);
bool storeSlot(MDefinition* obj, Shape* shape, MDefinition* value, bool needsBarrier,
MIRType slotType = MIRType_None);
MIRType slotType = MIRType::None);
bool shouldAbortOnPreliminaryGroups(MDefinition *obj);
MDefinition* tryInnerizeWindow(MDefinition* obj);


@ -782,7 +782,7 @@ GenerateReadSlot(JSContext* cx, IonScript* ion, MacroAssembler& masm,
{
if (output.hasValue()) {
scratchReg = output.valueReg().scratchReg();
} else if (output.type() == MIRType_Double) {
} else if (output.type() == MIRType::Double) {
scratchReg = object;
masm.push(scratchReg);
restoreScratch = true;
@ -933,7 +933,7 @@ EmitGetterCall(JSContext* cx, MacroAssembler& masm,
// Construct vp array:
// Push object value for |this|
masm.Push(TypedOrValueRegister(MIRType_Object, AnyRegister(object)));
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
// Push callee/outparam.
masm.Push(ObjectValue(*target));
@ -1049,7 +1049,7 @@ EmitGetterCall(JSContext* cx, MacroAssembler& masm,
for (size_t i = 0; i < target->nargs(); i++)
masm.Push(UndefinedValue());
masm.Push(TypedOrValueRegister(MIRType_Object, AnyRegister(object)));
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
masm.movePtr(ImmGCPtr(target), scratchReg);
@ -1160,7 +1160,7 @@ GenerateArrayLength(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher&
if (output.hasValue()) {
outReg = output.valueReg().scratchReg();
} else {
MOZ_ASSERT(output.type() == MIRType_Int32);
MOZ_ASSERT(output.type() == MIRType::Int32);
outReg = output.typedReg().gpr();
}
@ -1193,7 +1193,7 @@ GenerateUnboxedArrayLength(JSContext* cx, MacroAssembler& masm, IonCache::StubAt
if (output.hasValue()) {
outReg = output.valueReg().scratchReg();
} else {
MOZ_ASSERT(output.type() == MIRType_Int32);
MOZ_ASSERT(output.type() == MIRType::Int32);
outReg = output.typedReg().gpr();
}
MOZ_ASSERT(object != outReg);
@ -1228,7 +1228,7 @@ GenerateTypedArrayLength(JSContext* cx, MacroAssembler& masm, IonCache::StubAtta
if (output.hasValue()) {
tmpReg = output.valueReg().scratchReg();
} else {
MOZ_ASSERT(output.type() == MIRType_Int32);
MOZ_ASSERT(output.type() == MIRType::Int32);
tmpReg = output.typedReg().gpr();
}
MOZ_ASSERT(object != tmpReg);
@ -1258,7 +1258,7 @@ IsCacheableArrayLength(JSContext* cx, HandleObject obj, TypedOrValueRegister out
if (!obj->is<ArrayObject>())
return false;
if (output.type() != MIRType_Value && output.type() != MIRType_Int32) {
if (output.type() != MIRType::Value && output.type() != MIRType::Int32) {
// The stub assumes that we always output Int32, so make sure our output
// is equipped to handle that.
return false;
@ -1365,12 +1365,12 @@ EmitIdGuard(MacroAssembler& masm, jsid id, TypedOrValueRegister idReg, Register
{
MOZ_ASSERT(JSID_IS_STRING(id) || JSID_IS_SYMBOL(id));
MOZ_ASSERT(idReg.type() == MIRType_String ||
idReg.type() == MIRType_Symbol ||
idReg.type() == MIRType_Value);
MOZ_ASSERT(idReg.type() == MIRType::String ||
idReg.type() == MIRType::Symbol ||
idReg.type() == MIRType::Value);
Register payloadReg;
if (idReg.type() == MIRType_Value) {
if (idReg.type() == MIRType::Value) {
ValueOperand val = idReg.valueReg();
if (JSID_IS_SYMBOL(id)) {
masm.branchTestSymbol(Assembler::NotEqual, val, failures);
@ -1640,7 +1640,7 @@ GetPropertyIC::tryAttachTypedArrayLength(JSContext* cx, HandleScript outerScript
if (hasTypedArrayLengthStub(obj))
return true;
if (output().type() != MIRType_Value && output().type() != MIRType_Int32) {
if (output().type() != MIRType::Value && output().type() != MIRType::Int32) {
// The next execution should cause an invalidation because the type
// does not fit.
return true;
@ -1993,7 +1993,7 @@ GetPropertyIC::tryAttachArgumentsLength(JSContext* cx, HandleScript outerScript,
return true;
MIRType outputType = output().type();
if (!(outputType == MIRType_Value || outputType == MIRType_Int32))
if (!(outputType == MIRType::Value || outputType == MIRType::Int32))
return true;
if (hasArgumentsLengthStub(obj->is<MappedArgumentsObject>()))
@ -2013,7 +2013,7 @@ GetPropertyIC::tryAttachArgumentsLength(JSContext* cx, HandleScript outerScript,
if (output().hasValue()) {
tmpReg = output().valueReg().scratchReg();
} else {
MOZ_ASSERT(output().type() == MIRType_Int32);
MOZ_ASSERT(output().type() == MIRType::Int32);
tmpReg = output().typedReg().gpr();
}
MOZ_ASSERT(object() != tmpReg);
@ -2076,7 +2076,7 @@ GenerateReadModuleNamespace(JSContext* cx, IonScript* ion, MacroAssembler& masm,
if (output.hasValue()) {
scratchReg = output.valueReg().scratchReg();
} else if (output.type() == MIRType_Double) {
} else if (output.type() == MIRType::Double) {
masm.push(object);
scratchReg = object;
restoreScratch = true;
@ -2390,7 +2390,7 @@ GenerateSetSlot(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& att
Address addr(object, NativeObject::getFixedSlotOffset(shape->slot()));
if (cx->zone()->needsIncrementalBarrier())
masm.callPreBarrier(addr, MIRType_Value);
masm.callPreBarrier(addr, MIRType::Value);
masm.storeConstantOrRegister(value, addr);
} else {
@ -2399,7 +2399,7 @@ GenerateSetSlot(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& att
Address addr(tempReg, obj->as<NativeObject>().dynamicSlotIndex(shape->slot()) * sizeof(Value));
if (cx->zone()->needsIncrementalBarrier())
masm.callPreBarrier(addr, MIRType_Value);
masm.callPreBarrier(addr, MIRType::Value);
masm.storeConstantOrRegister(value, addr);
}
@ -2744,7 +2744,7 @@ GenerateCallSetter(JSContext* cx, IonScript* ion, MacroAssembler& masm,
// Build vp and move the base into argVpReg.
masm.Push(value);
masm.Push(TypedOrValueRegister(MIRType_Object, AnyRegister(object)));
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
masm.Push(ObjectValue(*target));
masm.moveStackPtrTo(argVpReg);
@ -2872,7 +2872,7 @@ GenerateCallSetter(JSContext* cx, IonScript* ion, MacroAssembler& masm,
for (size_t i = 1; i < target->nargs(); i++)
masm.Push(UndefinedValue());
masm.Push(value);
masm.Push(TypedOrValueRegister(MIRType_Object, AnyRegister(object)));
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
masm.movePtr(ImmGCPtr(target), tempReg);
@ -3105,7 +3105,7 @@ GenerateAddSlot(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& att
// Write the object or expando object's new shape.
Address shapeAddr(object, JSObject::offsetOfShape());
if (cx->zone()->needsIncrementalBarrier())
masm.callPreBarrier(shapeAddr, MIRType_Shape);
masm.callPreBarrier(shapeAddr, MIRType::Shape);
masm.storePtr(ImmGCPtr(newShape), shapeAddr);
if (oldGroup != obj->group()) {
@ -3124,7 +3124,7 @@ GenerateAddSlot(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& att
Address groupAddr(object, JSObject::offsetOfGroup());
if (cx->zone()->needsIncrementalBarrier())
masm.callPreBarrier(groupAddr, MIRType_ObjectGroup);
masm.callPreBarrier(groupAddr, MIRType::ObjectGroup);
masm.storePtr(ImmGCPtr(obj->group()), groupAddr);
masm.bind(&noTypeChange);
@ -3198,7 +3198,7 @@ CanInlineSetPropTypeCheck(JSObject* obj, jsid id, ConstantOrRegister val, bool*
// TIs handling of objects is complicated enough to warrant a runtime
// check, as we can't statically handle the case where the typeset
// contains the specific object, but doesn't have ANYOBJECT set.
if (reg.hasTyped() && reg.type() != MIRType_Object) {
if (reg.hasTyped() && reg.type() != MIRType::Object) {
JSValueType valType = ValueTypeFromMIRType(reg.type());
if (!propTypes->hasType(TypeSet::PrimitiveType(valType)))
return false;
@ -3369,9 +3369,9 @@ GenerateSetUnboxed(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher&
if (cx->zone()->needsIncrementalBarrier()) {
if (unboxedType == JSVAL_TYPE_OBJECT)
masm.callPreBarrier(address, MIRType_Object);
masm.callPreBarrier(address, MIRType::Object);
else if (unboxedType == JSVAL_TYPE_STRING)
masm.callPreBarrier(address, MIRType_String);
masm.callPreBarrier(address, MIRType::String);
else
MOZ_ASSERT(!UnboxedTypeNeedsPreBarrier(unboxedType));
}
@ -3926,7 +3926,7 @@ GenerateDenseElementHole(JSContext* cx, MacroAssembler& masm, IonCache::StubAtta
indexReg = scratchReg;
masm.unboxInt32(val, indexReg);
} else {
MOZ_ASSERT(index.type() == MIRType_Int32);
MOZ_ASSERT(index.type() == MIRType::Int32);
indexReg = index.typedReg().gpr();
}
@ -4198,7 +4198,7 @@ GetPropertyIC::tryAttachArgumentsElement(JSContext* cx, HandleScript outerScript
return true;
TypedOrValueRegister index = id().reg();
if (index.type() != MIRType_Value && index.type() != MIRType_Int32)
if (index.type() != MIRType::Value && index.type() != MIRType::Int32)
return true;
MOZ_ASSERT(output().hasValue());
@ -4236,7 +4236,7 @@ GetPropertyIC::tryAttachArgumentsElement(JSContext* cx, HandleScript outerScript
masm.unboxInt32(val, indexReg);
masm.branch32(Assembler::AboveOrEqual, indexReg, tmpReg, &failureRestoreIndex);
} else {
MOZ_ASSERT(index.type() == MIRType_Int32);
MOZ_ASSERT(index.type() == MIRType::Int32);
indexReg = index.typedReg().gpr();
masm.branch32(Assembler::AboveOrEqual, indexReg, tmpReg, &failures);
}
@ -4371,7 +4371,7 @@ StoreDenseElement(MacroAssembler& masm, ConstantOrRegister value, Register eleme
}
TypedOrValueRegister reg = value.reg();
if (reg.hasTyped() && reg.type() != MIRType_Int32) {
if (reg.hasTyped() && reg.type() != MIRType::Int32) {
masm.storeTypedOrValue(reg, target);
return;
}
@ -4390,7 +4390,7 @@ StoreDenseElement(MacroAssembler& masm, ConstantOrRegister value, Register eleme
masm.int32ValueToDouble(reg.valueReg(), ScratchDoubleReg);
masm.storeDouble(ScratchDoubleReg, target);
} else {
MOZ_ASSERT(reg.type() == MIRType_Int32);
MOZ_ASSERT(reg.type() == MIRType::Int32);
masm.convertInt32ToDouble(reg.typedReg().gpr(), ScratchDoubleReg);
masm.storeDouble(ScratchDoubleReg, target);
}
@ -4486,7 +4486,7 @@ GenerateSetDenseElement(JSContext* cx, MacroAssembler& masm, IonCache::StubAttac
}
if (cx->zone()->needsIncrementalBarrier())
masm.callPreBarrier(target, MIRType_Value);
masm.callPreBarrier(target, MIRType::Value);
// Store the value.
if (guardHoles)


@ -388,41 +388,41 @@ class SimdConstant {
// The ordering of this enumeration is important: Anything < Value is a
// specialized type. Furthermore, anything < String has trivial conversion to
// a number.
enum MIRType
enum class MIRType
{
MIRType_Undefined,
MIRType_Null,
MIRType_Boolean,
MIRType_Int32,
MIRType_Int64,
MIRType_Double,
MIRType_Float32,
Undefined,
Null,
Boolean,
Int32,
Int64,
Double,
Float32,
// Types above have trivial conversion to a number.
MIRType_String,
MIRType_Symbol,
String,
Symbol,
// Types above are primitive (including undefined and null).
MIRType_Object,
MIRType_MagicOptimizedArguments, // JS_OPTIMIZED_ARGUMENTS magic value.
MIRType_MagicOptimizedOut, // JS_OPTIMIZED_OUT magic value.
MIRType_MagicHole, // JS_ELEMENTS_HOLE magic value.
MIRType_MagicIsConstructing, // JS_IS_CONSTRUCTING magic value.
MIRType_MagicUninitializedLexical, // JS_UNINITIALIZED_LEXICAL magic value.
Object,
MagicOptimizedArguments, // JS_OPTIMIZED_ARGUMENTS magic value.
MagicOptimizedOut, // JS_OPTIMIZED_OUT magic value.
MagicHole, // JS_ELEMENTS_HOLE magic value.
MagicIsConstructing, // JS_IS_CONSTRUCTING magic value.
MagicUninitializedLexical, // JS_UNINITIALIZED_LEXICAL magic value.
// Types above are specialized.
MIRType_Value,
MIRType_SinCosDouble, // Optimizing a sin/cos to sincos.
MIRType_ObjectOrNull,
MIRType_None, // Invalid, used as a placeholder.
MIRType_Slots, // A slots vector
MIRType_Elements, // An elements vector
MIRType_Pointer, // An opaque pointer that receives no special treatment
MIRType_Shape, // A Shape pointer.
MIRType_ObjectGroup, // An ObjectGroup pointer.
MIRType_Last = MIRType_ObjectGroup,
MIRType_Float32x4 = MIRType_Float32 | (2 << VECTOR_SCALE_SHIFT),
Value,
SinCosDouble, // Optimizing a sin/cos to sincos.
ObjectOrNull,
None, // Invalid, used as a placeholder.
Slots, // A slots vector
Elements, // An elements vector
Pointer, // An opaque pointer that receives no special treatment
Shape, // A Shape pointer.
ObjectGroup, // An ObjectGroup pointer.
Last = ObjectGroup,
Float32x4 = Float32 | (2 << VECTOR_SCALE_SHIFT),
// Representing both SIMD.Int32x4 and SIMD.Uint32x4.
MIRType_Int32x4 = MIRType_Int32 | (2 << VECTOR_SCALE_SHIFT),
MIRType_Bool32x4 = MIRType_Boolean | (2 << VECTOR_SCALE_SHIFT),
MIRType_Doublex2 = MIRType_Double | (1 << VECTOR_SCALE_SHIFT)
Int32x4 = Int32 | (2 << VECTOR_SCALE_SHIFT),
Bool32x4 = Boolean | (2 << VECTOR_SCALE_SHIFT),
Doublex2 = Double | (1 << VECTOR_SCALE_SHIFT)
};
static inline MIRType
@ -432,23 +432,23 @@ MIRTypeFromValueType(JSValueType type)
// filtered out in MIRTypeFromValue.
switch (type) {
case JSVAL_TYPE_DOUBLE:
return MIRType_Double;
return MIRType::Double;
case JSVAL_TYPE_INT32:
return MIRType_Int32;
return MIRType::Int32;
case JSVAL_TYPE_UNDEFINED:
return MIRType_Undefined;
return MIRType::Undefined;
case JSVAL_TYPE_STRING:
return MIRType_String;
return MIRType::String;
case JSVAL_TYPE_SYMBOL:
return MIRType_Symbol;
return MIRType::Symbol;
case JSVAL_TYPE_BOOLEAN:
return MIRType_Boolean;
return MIRType::Boolean;
case JSVAL_TYPE_NULL:
return MIRType_Null;
return MIRType::Null;
case JSVAL_TYPE_OBJECT:
return MIRType_Object;
return MIRType::Object;
case JSVAL_TYPE_UNKNOWN:
return MIRType_Value;
return MIRType::Value;
default:
MOZ_CRASH("unexpected jsval type");
}
@ -458,29 +458,29 @@ static inline JSValueType
ValueTypeFromMIRType(MIRType type)
{
switch (type) {
case MIRType_Undefined:
case MIRType::Undefined:
return JSVAL_TYPE_UNDEFINED;
case MIRType_Null:
case MIRType::Null:
return JSVAL_TYPE_NULL;
case MIRType_Boolean:
case MIRType::Boolean:
return JSVAL_TYPE_BOOLEAN;
case MIRType_Int32:
case MIRType::Int32:
return JSVAL_TYPE_INT32;
case MIRType_Float32: // Fall through, there's no JSVAL for Float32
case MIRType_Double:
case MIRType::Float32: // Fall through, there's no JSVAL for Float32
case MIRType::Double:
return JSVAL_TYPE_DOUBLE;
case MIRType_String:
case MIRType::String:
return JSVAL_TYPE_STRING;
case MIRType_Symbol:
case MIRType::Symbol:
return JSVAL_TYPE_SYMBOL;
case MIRType_MagicOptimizedArguments:
case MIRType_MagicOptimizedOut:
case MIRType_MagicHole:
case MIRType_MagicIsConstructing:
case MIRType_MagicUninitializedLexical:
case MIRType::MagicOptimizedArguments:
case MIRType::MagicOptimizedOut:
case MIRType::MagicHole:
case MIRType::MagicIsConstructing:
case MIRType::MagicUninitializedLexical:
return JSVAL_TYPE_MAGIC;
default:
MOZ_ASSERT(type == MIRType_Object);
MOZ_ASSERT(type == MIRType::Object);
return JSVAL_TYPE_OBJECT;
}
}
@ -495,61 +495,61 @@ static inline const char*
StringFromMIRType(MIRType type)
{
switch (type) {
case MIRType_Undefined:
case MIRType::Undefined:
return "Undefined";
case MIRType_Null:
case MIRType::Null:
return "Null";
case MIRType_Boolean:
case MIRType::Boolean:
return "Bool";
case MIRType_Int32:
case MIRType::Int32:
return "Int32";
case MIRType_Int64:
case MIRType::Int64:
return "Int64";
case MIRType_Double:
case MIRType::Double:
return "Double";
case MIRType_Float32:
case MIRType::Float32:
return "Float32";
case MIRType_String:
case MIRType::String:
return "String";
case MIRType_Symbol:
case MIRType::Symbol:
return "Symbol";
case MIRType_Object:
case MIRType::Object:
return "Object";
case MIRType_MagicOptimizedArguments:
case MIRType::MagicOptimizedArguments:
return "MagicOptimizedArguments";
case MIRType_MagicOptimizedOut:
case MIRType::MagicOptimizedOut:
return "MagicOptimizedOut";
case MIRType_MagicHole:
case MIRType::MagicHole:
return "MagicHole";
case MIRType_MagicIsConstructing:
case MIRType::MagicIsConstructing:
return "MagicIsConstructing";
case MIRType_MagicUninitializedLexical:
case MIRType::MagicUninitializedLexical:
return "MagicUninitializedLexical";
case MIRType_Value:
case MIRType::Value:
return "Value";
case MIRType_SinCosDouble:
case MIRType::SinCosDouble:
return "SinCosDouble";
case MIRType_ObjectOrNull:
case MIRType::ObjectOrNull:
return "ObjectOrNull";
case MIRType_None:
case MIRType::None:
return "None";
case MIRType_Slots:
case MIRType::Slots:
return "Slots";
case MIRType_Elements:
case MIRType::Elements:
return "Elements";
case MIRType_Pointer:
case MIRType::Pointer:
return "Pointer";
case MIRType_Shape:
case MIRType::Shape:
return "Shape";
case MIRType_ObjectGroup:
case MIRType::ObjectGroup:
return "ObjectGroup";
case MIRType_Float32x4:
case MIRType::Float32x4:
return "Float32x4";
case MIRType_Int32x4:
case MIRType::Int32x4:
return "Int32x4";
case MIRType_Bool32x4:
case MIRType::Bool32x4:
return "Bool32x4";
case MIRType_Doublex2:
case MIRType::Doublex2:
return "Doublex2";
}
MOZ_CRASH("Unknown MIRType.");
@ -558,70 +558,70 @@ StringFromMIRType(MIRType type)
static inline bool
IsNumberType(MIRType type)
{
return type == MIRType_Int32 ||
type == MIRType_Double ||
type == MIRType_Float32 ||
type == MIRType_Int64;
return type == MIRType::Int32 ||
type == MIRType::Double ||
type == MIRType::Float32 ||
type == MIRType::Int64;
}
static inline bool
IsTypeRepresentableAsDouble(MIRType type)
{
return type == MIRType_Int32 ||
type == MIRType_Double ||
type == MIRType_Float32;
return type == MIRType::Int32 ||
type == MIRType::Double ||
type == MIRType::Float32;
}
static inline bool
IsFloatType(MIRType type)
{
return type == MIRType_Int32 || type == MIRType_Float32;
return type == MIRType::Int32 || type == MIRType::Float32;
}
static inline bool
IsFloatingPointType(MIRType type)
{
return type == MIRType_Double || type == MIRType_Float32;
return type == MIRType::Double || type == MIRType::Float32;
}
static inline bool
IsNullOrUndefined(MIRType type)
{
return type == MIRType_Null || type == MIRType_Undefined;
return type == MIRType::Null || type == MIRType::Undefined;
}
static inline bool
IsSimdType(MIRType type)
{
return type == MIRType_Int32x4 || type == MIRType_Float32x4 || type == MIRType_Bool32x4;
return type == MIRType::Int32x4 || type == MIRType::Float32x4 || type == MIRType::Bool32x4;
}
static inline bool
IsFloatingPointSimdType(MIRType type)
{
return type == MIRType_Float32x4;
return type == MIRType::Float32x4;
}
static inline bool
IsIntegerSimdType(MIRType type)
{
return type == MIRType_Int32x4;
return type == MIRType::Int32x4;
}
static inline bool
IsBooleanSimdType(MIRType type)
{
return type == MIRType_Bool32x4;
return type == MIRType::Bool32x4;
}
static inline bool
IsMagicType(MIRType type)
{
return type == MIRType_MagicHole ||
type == MIRType_MagicOptimizedOut ||
type == MIRType_MagicIsConstructing ||
type == MIRType_MagicOptimizedArguments ||
type == MIRType_MagicUninitializedLexical;
return type == MIRType::MagicHole ||
type == MIRType::MagicOptimizedOut ||
type == MIRType::MagicIsConstructing ||
type == MIRType::MagicOptimizedArguments ||
type == MIRType::MagicUninitializedLexical;
}
// Returns the number of vector elements (hereby called "length") for a given
@ -630,7 +630,7 @@ static inline unsigned
SimdTypeToLength(MIRType type)
{
MOZ_ASSERT(IsSimdType(type));
return 1 << ((type >> VECTOR_SCALE_SHIFT) & VECTOR_SCALE_MASK);
return 1 << ((unsigned(type) >> VECTOR_SCALE_SHIFT) & VECTOR_SCALE_MASK);
}
static inline MIRType
@ -644,15 +644,15 @@ ScalarTypeToMIRType(Scalar::Type type)
case Scalar::Int32:
case Scalar::Uint32:
case Scalar::Uint8Clamped:
return MIRType_Int32;
return MIRType::Int32;
case Scalar::Float32:
return MIRType_Float32;
return MIRType::Float32;
case Scalar::Float64:
return MIRType_Double;
return MIRType::Double;
case Scalar::Float32x4:
return MIRType_Float32x4;
return MIRType::Float32x4;
case Scalar::Int32x4:
return MIRType_Int32x4;
return MIRType::Int32x4;
case Scalar::MaxTypedArrayViewType:
break;
}
@ -688,9 +688,9 @@ static inline MIRType
SimdTypeToLaneType(MIRType type)
{
MOZ_ASSERT(IsSimdType(type));
static_assert(MIRType_Last <= ELEMENT_TYPE_MASK,
static_assert(unsigned(MIRType::Last) <= ELEMENT_TYPE_MASK,
"ELEMENT_TYPE_MASK should be larger than the last MIRType");
return MIRType((type >> ELEMENT_TYPE_SHIFT) & ELEMENT_TYPE_MASK);
return MIRType((unsigned(type) >> ELEMENT_TYPE_SHIFT) & ELEMENT_TYPE_MASK);
}
// Get the type expected when inserting a lane into a SIMD type.
@ -703,7 +703,7 @@ SimdTypeToLaneArgumentType(MIRType type)
// Boolean lanes should be pre-converted to an Int32 with the values 0 or -1.
// All other lane types are inserted directly.
return laneType == MIRType_Boolean ? MIRType_Int32 : laneType;
return laneType == MIRType::Boolean ? MIRType::Int32 : laneType;
}
// Indicates a lane in a SIMD register: X for the first lane, Y for the second,
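
Since the SIMD members above pack a scalar lane type together with a vector scale (e.g. Float32x4 = Float32 | (2 << VECTOR_SCALE_SHIFT)), and an enum class no longer converts to an integer implicitly, helpers such as SimdTypeToLength and SimdTypeToLaneType now need the explicit unsigned(type) casts seen in this patch. The self-contained sketch below shows the packing and unpacking with made-up shift and mask constants; they are assumptions for illustration and do not claim to match the real VECTOR_SCALE_SHIFT or ELEMENT_TYPE_MASK values.

#include <cassert>
#include <cstdio>

// Hypothetical field layout, for illustration only: the low bits hold the
// scalar lane type, the bits above VECTOR_SCALE_SHIFT hold log2(lane count).
constexpr unsigned ELEMENT_TYPE_SHIFT = 0;
constexpr unsigned ELEMENT_TYPE_MASK  = 0x1f;
constexpr unsigned VECTOR_SCALE_SHIFT = 5;
constexpr unsigned VECTOR_SCALE_MASK  = 0x3;

enum class MiniMIRType : unsigned {
    Boolean = 2,
    Int32   = 3,
    Double  = 5,
    Float32 = 6,
    // SIMD types pack a lane type plus a scale of 2 (i.e. 1 << 2 = 4 lanes).
    Int32x4   = Int32   | (2u << VECTOR_SCALE_SHIFT),
    Float32x4 = Float32 | (2u << VECTOR_SCALE_SHIFT),
};

// With a plain enum these conversions were implicit; with an enum class they
// must be spelled out, which is what the unsigned(type) casts in the patch do.
unsigned SimdTypeToLength(MiniMIRType type) {
    return 1u << ((unsigned(type) >> VECTOR_SCALE_SHIFT) & VECTOR_SCALE_MASK);
}

MiniMIRType SimdTypeToLaneType(MiniMIRType type) {
    return MiniMIRType((unsigned(type) >> ELEMENT_TYPE_SHIFT) & ELEMENT_TYPE_MASK);
}

int main() {
    assert(SimdTypeToLength(MiniMIRType::Float32x4) == 4);
    assert(SimdTypeToLaneType(MiniMIRType::Int32x4) == MiniMIRType::Int32);
    std::printf("Float32x4 has %u lanes\n", SimdTypeToLength(MiniMIRType::Float32x4));
    return 0;
}
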


@ -234,7 +234,7 @@ JSONSpewer::spewMDef(MDefinition* def)
if (def->isAdd() || def->isSub() || def->isMod() || def->isMul() || def->isDiv())
isTruncated = static_cast<MBinaryArithInstruction*>(def)->isTruncated();
if (def->type() != MIRType_None && def->range()) {
if (def->type() != MIRType::None && def->range()) {
beginStringProperty("type");
def->range()->dump(out_);
out_.printf(" : %s%s", StringFromMIRType(def->type()), (isTruncated ? " (t)" : ""));


@ -318,11 +318,11 @@ class JitRuntime
JitCode* preBarrier(MIRType type) const {
switch (type) {
case MIRType_Value: return valuePreBarrier_;
case MIRType_String: return stringPreBarrier_;
case MIRType_Object: return objectPreBarrier_;
case MIRType_Shape: return shapePreBarrier_;
case MIRType_ObjectGroup: return objectGroupPreBarrier_;
case MIRType::Value: return valuePreBarrier_;
case MIRType::String: return stringPreBarrier_;
case MIRType::Object: return objectPreBarrier_;
case MIRType::Shape: return shapePreBarrier_;
case MIRType::ObjectGroup: return objectGroupPreBarrier_;
default: MOZ_CRASH();
}
}


@ -1682,7 +1682,7 @@ SnapshotIterator::fromStack(int32_t offset) const
static Value
FromObjectPayload(uintptr_t payload)
{
// Note: Both MIRType_Object and MIRType_ObjectOrNull are encoded in
// Note: Both MIRType::Object and MIRType::ObjectOrNull are encoded in
// snapshots using JSVAL_TYPE_OBJECT.
return ObjectOrNullValue(reinterpret_cast<JSObject*>(payload));
}


@ -89,7 +89,7 @@ LBlock::init(TempAllocator& alloc)
size_t numLPhis = 0;
for (MPhiIterator i(block_->phisBegin()), e(block_->phisEnd()); i != e; ++i) {
MPhi* phi = *i;
numLPhis += (phi->type() == MIRType_Value) ? BOX_PIECES : 1;
numLPhis += (phi->type() == MIRType::Value) ? BOX_PIECES : 1;
}
// Allocate space for the LPhis.
@ -105,7 +105,7 @@ LBlock::init(TempAllocator& alloc)
MPhi* phi = *i;
MOZ_ASSERT(phi->numOperands() == numPreds);
int numPhis = (phi->type() == MIRType_Value) ? BOX_PIECES : 1;
int numPhis = (phi->type() == MIRType::Value) ? BOX_PIECES : 1;
for (int i = 0; i < numPhis; i++) {
LAllocation* inputs = alloc.allocateArray<LAllocation>(numPreds);
if (!inputs)


@ -588,39 +588,39 @@ class LDefinition
static inline Type TypeFrom(MIRType type) {
switch (type) {
case MIRType_Boolean:
case MIRType_Int32:
case MIRType::Boolean:
case MIRType::Int32:
// The stack slot allocator doesn't currently support allocating
// 1-byte slots, so for now we lower MIRType_Boolean into INT32.
// 1-byte slots, so for now we lower MIRType::Boolean into INT32.
static_assert(sizeof(bool) <= sizeof(int32_t), "bool doesn't fit in an int32 slot");
return LDefinition::INT32;
case MIRType_String:
case MIRType_Symbol:
case MIRType_Object:
case MIRType_ObjectOrNull:
case MIRType::String:
case MIRType::Symbol:
case MIRType::Object:
case MIRType::ObjectOrNull:
return LDefinition::OBJECT;
case MIRType_Double:
case MIRType::Double:
return LDefinition::DOUBLE;
case MIRType_Float32:
case MIRType::Float32:
return LDefinition::FLOAT32;
#if defined(JS_PUNBOX64)
case MIRType_Value:
case MIRType::Value:
return LDefinition::BOX;
#endif
case MIRType_SinCosDouble:
case MIRType::SinCosDouble:
return LDefinition::SINCOS;
case MIRType_Slots:
case MIRType_Elements:
case MIRType::Slots:
case MIRType::Elements:
return LDefinition::SLOTS;
case MIRType_Pointer:
case MIRType::Pointer:
#if JS_BITS_PER_WORD == 64
case MIRType_Int64:
case MIRType::Int64:
#endif
return LDefinition::GENERAL;
case MIRType_Bool32x4:
case MIRType_Int32x4:
case MIRType::Bool32x4:
case MIRType::Int32x4:
return LDefinition::INT32X4;
case MIRType_Float32x4:
case MIRType::Float32x4:
return LDefinition::FLOAT32X4;
default:
MOZ_CRASH("unexpected type");

Diff for this file not shown because of its large size.

Diff for this file not shown because of its large size.

Diff for this file not shown because of its large size.

Diff for this file not shown because of its large size.


@ -434,7 +434,7 @@ MBasicBlock::NewAsmJS(MIRGraph& graph, const CompileInfo& info, MBasicBlock* pre
for (size_t i = 0; i < nphis; i++) {
MDefinition* predSlot = pred->getSlot(i);
MOZ_ASSERT(predSlot->type() != MIRType_Value);
MOZ_ASSERT(predSlot->type() != MIRType::Value);
MPhi* phi;
if (i < nfree)
@ -686,7 +686,7 @@ MBasicBlock::linkOsrValues(MStart* start)
cloneRp = def->toOsrReturnValue();
} else if (info().hasArguments() && i == info().argsObjSlot()) {
MOZ_ASSERT(def->isConstant() || def->isOsrArgumentsObject());
MOZ_ASSERT_IF(def->isConstant(), def->toConstant()->type() == MIRType_Undefined);
MOZ_ASSERT_IF(def->isConstant(), def->toConstant()->type() == MIRType::Undefined);
if (def->isOsrArgumentsObject())
cloneRp = def->toOsrArgumentsObject();
} else {
@ -696,7 +696,7 @@ MBasicBlock::linkOsrValues(MStart* start)
// A constant Undefined can show up here for an argument slot when
// the function has an arguments object, but the argument in
// question is stored on the scope chain.
MOZ_ASSERT_IF(def->isConstant(), def->toConstant()->type() == MIRType_Undefined);
MOZ_ASSERT_IF(def->isConstant(), def->toConstant()->type() == MIRType::Undefined);
if (def->isOsrValue())
cloneRp = def->toOsrValue();
@ -875,7 +875,7 @@ MBasicBlock::optimizedOutConstant(TempAllocator& alloc)
// If the first instruction is a MConstant(MagicValue(JS_OPTIMIZED_OUT))
// then reuse it.
MInstruction* ins = *begin();
if (ins->type() == MIRType_MagicOptimizedOut)
if (ins->type() == MIRType::MagicOptimizedOut)
return ins->toConstant();
MConstant* constant = MConstant::New(alloc, MagicValue(JS_OPTIMIZED_OUT));
@ -1338,7 +1338,7 @@ MBasicBlock::setBackedgeAsmJS(MBasicBlock* pred)
// Assert that the phi already has the correct type.
MOZ_ASSERT(entryDef->type() == exitDef->type());
MOZ_ASSERT(entryDef->type() != MIRType_Value);
MOZ_ASSERT(entryDef->type() != MIRType::Value);
if (entryDef == exitDef) {
// If the exit def is the same as the entry def, make a redundant


@ -518,17 +518,17 @@ void
MacroAssembler::branchTestMIRType(Condition cond, const Value& val, MIRType type, Label* label)
{
switch (type) {
case MIRType_Null: return branchTestNull(cond, val, label);
case MIRType_Undefined: return branchTestUndefined(cond, val, label);
case MIRType_Boolean: return branchTestBoolean(cond, val, label);
case MIRType_Int32: return branchTestInt32(cond, val, label);
case MIRType_String: return branchTestString(cond, val, label);
case MIRType_Symbol: return branchTestSymbol(cond, val, label);
case MIRType_Object: return branchTestObject(cond, val, label);
case MIRType_Double: return branchTestDouble(cond, val, label);
case MIRType_MagicOptimizedArguments: // Fall through.
case MIRType_MagicIsConstructing:
case MIRType_MagicHole: return branchTestMagic(cond, val, label);
case MIRType::Null: return branchTestNull(cond, val, label);
case MIRType::Undefined: return branchTestUndefined(cond, val, label);
case MIRType::Boolean: return branchTestBoolean(cond, val, label);
case MIRType::Int32: return branchTestInt32(cond, val, label);
case MIRType::String: return branchTestString(cond, val, label);
case MIRType::Symbol: return branchTestSymbol(cond, val, label);
case MIRType::Object: return branchTestObject(cond, val, label);
case MIRType::Double: return branchTestDouble(cond, val, label);
case MIRType::MagicOptimizedArguments: // Fall through.
case MIRType::MagicIsConstructing:
case MIRType::MagicHole: return branchTestMagic(cond, val, label);
default:
MOZ_CRASH("Bad MIRType");
}


@ -327,7 +327,7 @@ MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegi
// Bail out if the value doesn't fit into a signed int32 value. This
// is what allows MLoadUnboxedScalar to have a type() of
// MIRType_Int32 for UInt32 array loads.
// MIRType::Int32 for UInt32 array loads.
branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
}
break;
@ -453,7 +453,7 @@ MacroAssembler::loadUnboxedProperty(T address, JSValueType type, TypedOrValueReg
switch (type) {
case JSVAL_TYPE_INT32: {
// Handle loading an int32 into a double reg.
if (output.type() == MIRType_Double) {
if (output.type() == MIRType::Double) {
convertInt32ToDouble(address, output.typedReg().fpu());
break;
}
@ -568,7 +568,7 @@ MacroAssembler::storeUnboxedProperty(T address, JSValueType type,
else
StoreUnboxedFailure(*this, failure);
} else if (value.reg().hasTyped()) {
if (value.reg().type() == MIRType_Boolean)
if (value.reg().type() == MIRType::Boolean)
store8(value.reg().typedReg().gpr(), address);
else
StoreUnboxedFailure(*this, failure);
@ -586,7 +586,7 @@ MacroAssembler::storeUnboxedProperty(T address, JSValueType type,
else
StoreUnboxedFailure(*this, failure);
} else if (value.reg().hasTyped()) {
if (value.reg().type() == MIRType_Int32)
if (value.reg().type() == MIRType::Int32)
store32(value.reg().typedReg().gpr(), address);
else
StoreUnboxedFailure(*this, failure);
@ -606,10 +606,10 @@ MacroAssembler::storeUnboxedProperty(T address, JSValueType type,
StoreUnboxedFailure(*this, failure);
}
} else if (value.reg().hasTyped()) {
if (value.reg().type() == MIRType_Int32) {
if (value.reg().type() == MIRType::Int32) {
convertInt32ToDouble(value.reg().typedReg().gpr(), ScratchDoubleReg);
storeDouble(ScratchDoubleReg, address);
} else if (value.reg().type() == MIRType_Double) {
} else if (value.reg().type() == MIRType::Double) {
storeDouble(value.reg().typedReg().fpu(), address);
} else {
StoreUnboxedFailure(*this, failure);
@ -636,8 +636,8 @@ MacroAssembler::storeUnboxedProperty(T address, JSValueType type,
else
StoreUnboxedFailure(*this, failure);
} else if (value.reg().hasTyped()) {
MOZ_ASSERT(value.reg().type() != MIRType_Null);
if (value.reg().type() == MIRType_Object)
MOZ_ASSERT(value.reg().type() != MIRType::Null);
if (value.reg().type() == MIRType::Object)
storePtr(value.reg().typedReg().gpr(), address);
else
StoreUnboxedFailure(*this, failure);
@ -659,7 +659,7 @@ MacroAssembler::storeUnboxedProperty(T address, JSValueType type,
else
StoreUnboxedFailure(*this, failure);
} else if (value.reg().hasTyped()) {
if (value.reg().type() == MIRType_String)
if (value.reg().type() == MIRType::String)
storePtr(value.reg().typedReg().gpr(), address);
else
StoreUnboxedFailure(*this, failure);
@ -1690,11 +1690,11 @@ MacroAssembler::convertValueToFloatingPoint(ValueOperand value, FloatRegister ou
bind(&isDouble);
FloatRegister tmp = output;
if (outputType == MIRType_Float32 && hasMultiAlias())
if (outputType == MIRType::Float32 && hasMultiAlias())
tmp = ScratchDoubleReg;
unboxDouble(value, tmp);
if (outputType == MIRType_Float32)
if (outputType == MIRType::Float32)
convertDoubleToFloat32(tmp, output);
bind(&done);
@ -1761,16 +1761,16 @@ MacroAssembler::convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, Flo
return;
}
bool outputIsDouble = outputType == MIRType_Double;
bool outputIsDouble = outputType == MIRType::Double;
switch (src.type()) {
case MIRType_Null:
case MIRType::Null:
loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
break;
case MIRType_Boolean:
case MIRType_Int32:
case MIRType::Boolean:
case MIRType::Int32:
convertInt32ToFloatingPoint(src.typedReg().gpr(), output, outputType);
break;
case MIRType_Float32:
case MIRType::Float32:
if (outputIsDouble) {
convertFloat32ToDouble(src.typedReg().fpu(), output);
} else {
@ -1778,7 +1778,7 @@ MacroAssembler::convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, Flo
moveFloat32(src.typedReg().fpu(), output);
}
break;
case MIRType_Double:
case MIRType::Double:
if (outputIsDouble) {
if (src.typedReg().fpu() != output)
moveDouble(src.typedReg().fpu(), output);
@ -1786,12 +1786,12 @@ MacroAssembler::convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, Flo
convertDoubleToFloat32(src.typedReg().fpu(), output);
}
break;
case MIRType_Object:
case MIRType_String:
case MIRType_Symbol:
case MIRType::Object:
case MIRType::String:
case MIRType::Symbol:
jump(fail);
break;
case MIRType_Undefined:
case MIRType::Undefined:
loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
break;
default:
@ -1838,10 +1838,10 @@ MacroAssembler::convertValueToInt(ValueOperand value, MDefinition* maybeInput,
Label done, isInt32, isBool, isDouble, isNull, isString;
maybeBranchTestType(MIRType_Int32, maybeInput, tag, &isInt32);
maybeBranchTestType(MIRType::Int32, maybeInput, tag, &isInt32);
if (conversion == IntConversion_Any || conversion == IntConversion_NumbersOrBoolsOnly)
maybeBranchTestType(MIRType_Boolean, maybeInput, tag, &isBool);
maybeBranchTestType(MIRType_Double, maybeInput, tag, &isDouble);
maybeBranchTestType(MIRType::Boolean, maybeInput, tag, &isBool);
maybeBranchTestType(MIRType::Double, maybeInput, tag, &isDouble);
if (conversion == IntConversion_Any) {
// If we are not truncating, we fail for anything that's not
@ -1854,10 +1854,10 @@ MacroAssembler::convertValueToInt(ValueOperand value, MDefinition* maybeInput,
case IntConversion_Truncate:
case IntConversion_ClampToUint8:
maybeBranchTestType(MIRType_Null, maybeInput, tag, &isNull);
maybeBranchTestType(MIRType::Null, maybeInput, tag, &isNull);
if (handleStrings)
maybeBranchTestType(MIRType_String, maybeInput, tag, &isString);
maybeBranchTestType(MIRType_Object, maybeInput, tag, fail);
maybeBranchTestType(MIRType::String, maybeInput, tag, &isString);
maybeBranchTestType(MIRType::Object, maybeInput, tag, fail);
branchTestUndefined(Assembler::NotEqual, tag, fail);
break;
}
@ -1985,28 +1985,28 @@ MacroAssembler::convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister
}
switch (src.type()) {
case MIRType_Undefined:
case MIRType_Null:
case MIRType::Undefined:
case MIRType::Null:
move32(Imm32(0), output);
break;
case MIRType_Boolean:
case MIRType_Int32:
case MIRType::Boolean:
case MIRType::Int32:
if (src.typedReg().gpr() != output)
move32(src.typedReg().gpr(), output);
if (src.type() == MIRType_Int32 && behavior == IntConversion_ClampToUint8)
if (src.type() == MIRType::Int32 && behavior == IntConversion_ClampToUint8)
clampIntToUint8(output);
break;
case MIRType_Double:
case MIRType::Double:
convertDoubleToInt(src.typedReg().fpu(), output, temp, nullptr, fail, behavior);
break;
case MIRType_Float32:
case MIRType::Float32:
// Conversion to Double simplifies implementation at the expense of performance.
convertFloat32ToDouble(src.typedReg().fpu(), temp);
convertDoubleToInt(temp, output, temp, nullptr, fail, behavior);
break;
case MIRType_String:
case MIRType_Symbol:
case MIRType_Object:
case MIRType::String:
case MIRType::Symbol:
case MIRType::Object:
jump(fail);
break;
default:
@ -2256,7 +2256,7 @@ MacroAssembler::Push(TypedOrValueRegister v)
Push(v.valueReg());
} else if (IsFloatingPointType(v.type())) {
FloatRegister reg = v.typedReg().fpu();
if (v.type() == MIRType_Float32) {
if (v.type() == MIRType::Float32) {
convertFloat32ToDouble(reg, ScratchDoubleReg);
reg = ScratchDoubleReg;
}
@ -2427,13 +2427,13 @@ MacroAssembler::passABIArg(const MoveOperand& from, MoveOp::Type type)
ABIArg arg;
switch (type) {
case MoveOp::FLOAT32:
arg = abiArgs_.next(MIRType_Float32);
arg = abiArgs_.next(MIRType::Float32);
break;
case MoveOp::DOUBLE:
arg = abiArgs_.next(MIRType_Double);
arg = abiArgs_.next(MIRType::Double);
break;
case MoveOp::GENERAL:
arg = abiArgs_.next(MIRType_Pointer);
arg = abiArgs_.next(MIRType::Pointer);
break;
default:
MOZ_CRASH("Unexpected argument type");
@ -2520,25 +2520,25 @@ MacroAssembler::maybeBranchTestType(MIRType type, MDefinition* maybeDef, Registe
{
if (!maybeDef || maybeDef->mightBeType(type)) {
switch (type) {
case MIRType_Null:
case MIRType::Null:
branchTestNull(Equal, tag, label);
break;
case MIRType_Boolean:
case MIRType::Boolean:
branchTestBoolean(Equal, tag, label);
break;
case MIRType_Int32:
case MIRType::Int32:
branchTestInt32(Equal, tag, label);
break;
case MIRType_Double:
case MIRType::Double:
branchTestDouble(Equal, tag, label);
break;
case MIRType_String:
case MIRType::String:
branchTestString(Equal, tag, label);
break;
case MIRType_Symbol:
case MIRType::Symbol:
branchTestSymbol(Equal, tag, label);
break;
case MIRType_Object:
case MIRType::Object:
branchTestObject(Equal, tag, label);
break;
default:
@ -2553,20 +2553,20 @@ void
MacroAssembler::BranchType::emit(MacroAssembler& masm)
{
MOZ_ASSERT(isInitialized());
MIRType mirType = MIRType_None;
MIRType mirType = MIRType::None;
if (type_.isPrimitive()) {
if (type_.isMagicArguments())
mirType = MIRType_MagicOptimizedArguments;
mirType = MIRType::MagicOptimizedArguments;
else
mirType = MIRTypeFromValueType(type_.primitive());
} else if (type_.isAnyObject()) {
mirType = MIRType_Object;
mirType = MIRType::Object;
} else {
MOZ_CRASH("Unknown conversion to mirtype");
}
if (mirType == MIRType_Double)
if (mirType == MIRType::Double)
masm.branchTestNumber(cond(), reg(), jump());
else
masm.branchTestMIRType(cond(), reg(), mirType, jump());


@ -1167,7 +1167,7 @@ class MacroAssembler : public MacroAssemblerSpecific
storeValue(src.valueReg(), dest);
} else if (IsFloatingPointType(src.type())) {
FloatRegister reg = src.typedReg().fpu();
if (src.type() == MIRType_Float32) {
if (src.type() == MIRType::Float32) {
convertFloat32ToDouble(reg, ScratchDoubleReg);
reg = ScratchDoubleReg;
}
@ -1247,7 +1247,7 @@ class MacroAssembler : public MacroAssemblerSpecific
void callPreBarrier(const T& address, MIRType type) {
Label done;
if (type == MIRType_Value)
if (type == MIRType::Value)
branchTestGCThing(Assembler::NotEqual, address, &done);
Push(PreBarrierReg);
@ -1349,7 +1349,7 @@ class MacroAssembler : public MacroAssemblerSpecific
Register extractObject(const TypedOrValueRegister& reg, Register scratch) {
if (reg.hasValue())
return extractObject(reg.valueReg(), scratch);
MOZ_ASSERT(reg.type() == MIRType_Object);
MOZ_ASSERT(reg.type() == MIRType::Object);
return reg.typedReg().gpr();
}
@ -1540,7 +1540,7 @@ class MacroAssembler : public MacroAssemblerSpecific
#define DISPATCH_FLOATING_POINT_OP(method, type, arg1d, arg1f, arg2) \
MOZ_ASSERT(IsFloatingPointType(type)); \
if (type == MIRType_Double) \
if (type == MIRType::Double) \
method##Double(arg1d, arg2); \
else \
method##Float32(arg1f, arg2); \
@ -1572,33 +1572,33 @@ class MacroAssembler : public MacroAssemblerSpecific
void convertInt32ValueToDouble(const Address& address, Register scratch, Label* done);
void convertValueToDouble(ValueOperand value, FloatRegister output, Label* fail) {
convertValueToFloatingPoint(value, output, fail, MIRType_Double);
convertValueToFloatingPoint(value, output, fail, MIRType::Double);
}
bool convertValueToDouble(JSContext* cx, const Value& v, FloatRegister output, Label* fail) {
return convertValueToFloatingPoint(cx, v, output, fail, MIRType_Double);
return convertValueToFloatingPoint(cx, v, output, fail, MIRType::Double);
}
bool convertConstantOrRegisterToDouble(JSContext* cx, ConstantOrRegister src,
FloatRegister output, Label* fail)
{
return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType_Double);
return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType::Double);
}
void convertTypedOrValueToDouble(TypedOrValueRegister src, FloatRegister output, Label* fail) {
convertTypedOrValueToFloatingPoint(src, output, fail, MIRType_Double);
convertTypedOrValueToFloatingPoint(src, output, fail, MIRType::Double);
}
void convertValueToFloat(ValueOperand value, FloatRegister output, Label* fail) {
convertValueToFloatingPoint(value, output, fail, MIRType_Float32);
convertValueToFloatingPoint(value, output, fail, MIRType::Float32);
}
bool convertValueToFloat(JSContext* cx, const Value& v, FloatRegister output, Label* fail) {
return convertValueToFloatingPoint(cx, v, output, fail, MIRType_Float32);
return convertValueToFloatingPoint(cx, v, output, fail, MIRType::Float32);
}
bool convertConstantOrRegisterToFloat(JSContext* cx, ConstantOrRegister src,
FloatRegister output, Label* fail)
{
return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType_Float32);
return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType::Float32);
}
void convertTypedOrValueToFloat(TypedOrValueRegister src, FloatRegister output, Label* fail) {
convertTypedOrValueToFloatingPoint(src, output, fail, MIRType_Float32);
convertTypedOrValueToFloatingPoint(src, output, fail, MIRType::Float32);
}
enum IntConversionBehavior {


@ -1094,7 +1094,7 @@ IonBuilder::trackTypeInfoUnchecked(TrackedTypeSite kind, JSObject* obj)
{
BytecodeSite* site = current->trackedSite();
// OOMs are handled as if optimization tracking were turned off.
OptimizationTypeInfo typeInfo(alloc(), kind, MIRType_Object);
OptimizationTypeInfo typeInfo(alloc(), kind, MIRType::Object);
if (!typeInfo.trackType(TypeSet::ObjectType(obj)))
return;
if (!site->optimizations()->trackTypeInfo(mozilla::Move(typeInfo)))


@ -113,7 +113,7 @@ static inline void
SpewRange(MDefinition* def)
{
#ifdef JS_JITSPEW
if (JitSpewEnabled(JitSpew_Range) && def->type() != MIRType_None && def->range()) {
if (JitSpewEnabled(JitSpew_Range) && def->type() != MIRType::None && def->range()) {
JitSpewHeader(JitSpew_Range);
Fprinter& out = JitSpewPrinter();
def->printName(out);
@ -205,7 +205,7 @@ RangeAnalysis::addBetaNodes()
} else if (rightConst && rightConst->isTypeRepresentableAsDouble()) {
bound = rightConst->numberToDouble();
val = left;
} else if (left->type() == MIRType_Int32 && right->type() == MIRType_Int32) {
} else if (left->type() == MIRType::Int32 && right->type() == MIRType::Int32) {
MDefinition* smaller = nullptr;
MDefinition* greater = nullptr;
if (jsop == JSOP_LT) {
@ -244,7 +244,7 @@ RangeAnalysis::addBetaNodes()
break;
case JSOP_LT:
// For integers, if x < c, the upper bound of x is c-1.
if (val->type() == MIRType_Int32) {
if (val->type() == MIRType::Int32) {
int32_t intbound;
if (NumberEqualsInt32(bound, &intbound) && SafeSub(intbound, 1, &intbound))
bound = intbound;
@ -260,7 +260,7 @@ RangeAnalysis::addBetaNodes()
break;
case JSOP_GT:
// For integers, if x > c, the lower bound of x is c+1.
if (val->type() == MIRType_Int32) {
if (val->type() == MIRType::Int32) {
int32_t intbound;
if (NumberEqualsInt32(bound, &intbound) && SafeAdd(intbound, 1, &intbound))
bound = intbound;
@ -594,17 +594,17 @@ Range::Range(const MDefinition* def)
// and truncation can increase range again. So doing wrapAround to
// mimick a possible truncation.
switch (def->type()) {
case MIRType_Int32:
case MIRType::Int32:
// MToInt32 cannot truncate. So we can safely clamp.
if (def->isToInt32())
clampToInt32();
else
wrapAroundToInt32();
break;
case MIRType_Boolean:
case MIRType::Boolean:
wrapAroundToBoolean();
break;
case MIRType_None:
case MIRType::None:
MOZ_CRASH("Asking for the range of an instruction with no value");
default:
break;
@ -614,13 +614,13 @@ Range::Range(const MDefinition* def)
// because we don't care what value the instruction actually produces,
// but what value we might get after we get past the bailouts.
switch (def->type()) {
case MIRType_Int32:
case MIRType::Int32:
setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
break;
case MIRType_Boolean:
case MIRType::Boolean:
setInt32(0, 1);
break;
case MIRType_None:
case MIRType::None:
MOZ_CRASH("Asking for the range of an instruction with no value");
default:
setUnknown();
@ -629,14 +629,14 @@ Range::Range(const MDefinition* def)
}
// As a special case, MUrsh is permitted to claim a result type of
// MIRType_Int32 while actually returning values in [0,UINT32_MAX] without
// MIRType::Int32 while actually returning values in [0,UINT32_MAX] without
// bailouts. If range analysis hasn't ruled out values in
// (INT32_MAX,UINT32_MAX], set the range to be conservatively correct for
// use as either a uint32 or an int32.
if (!hasInt32UpperBound() &&
def->isUrsh() &&
def->toUrsh()->bailoutsDisabled() &&
def->type() != MIRType_Int64)
def->type() != MIRType::Int64)
{
lower_ = INT32_MIN;
}
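
The MUrsh comment above is the usual >>> story: reinterpreting a negative int32 as an unsigned 32-bit value yields something larger than INT32_MAX, so a range that has to stay correct for both the uint32 and the int32 view of the result drops its lower bound to INT32_MIN here (a uint32 above INT32_MAX reads as a negative int32). A tiny standalone illustration, not SpiderMonkey code:

#include <cstdint>
#include <cstdio>

int main() {
    // JS `x >>> 0` reinterprets x as an unsigned 32-bit integer before shifting.
    int32_t x = -1;
    uint32_t ursh = static_cast<uint32_t>(x) >> 0;
    // Prints 4294967295: representable as uint32 but not as int32, hence the
    // conservative range for a MUrsh whose bailouts are disabled.
    std::printf("%u\n", ursh);
    return 0;
}
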
@ -1252,7 +1252,7 @@ Range::update(const Range* other)
void
MPhi::computeRange(TempAllocator& alloc)
{
if (type() != MIRType_Int32 && type() != MIRType_Double)
if (type() != MIRType::Int32 && type() != MIRType::Double)
return;
Range* range = nullptr;
@ -1299,7 +1299,7 @@ MConstant::computeRange(TempAllocator& alloc)
if (isTypeRepresentableAsDouble()) {
double d = numberToDouble();
setRange(Range::NewDoubleSingletonRange(alloc, d));
} else if (type() == MIRType_Boolean) {
} else if (type() == MIRType::Boolean) {
bool b = toBoolean();
setRange(Range::NewInt32Range(alloc, b, b));
}
@ -1321,7 +1321,7 @@ MClampToUint8::computeRange(TempAllocator& alloc)
void
MBitAnd::computeRange(TempAllocator& alloc)
{
if (specialization_ == MIRType_Int64)
if (specialization_ == MIRType::Int64)
return;
Range left(getOperand(0));
@ -1335,7 +1335,7 @@ MBitAnd::computeRange(TempAllocator& alloc)
void
MBitOr::computeRange(TempAllocator& alloc)
{
if (specialization_ == MIRType_Int64)
if (specialization_ == MIRType::Int64)
return;
Range left(getOperand(0));
@ -1349,7 +1349,7 @@ MBitOr::computeRange(TempAllocator& alloc)
void
MBitXor::computeRange(TempAllocator& alloc)
{
if (specialization_ == MIRType_Int64)
if (specialization_ == MIRType::Int64)
return;
Range left(getOperand(0));
@ -1372,7 +1372,7 @@ MBitNot::computeRange(TempAllocator& alloc)
void
MLsh::computeRange(TempAllocator& alloc)
{
if (specialization_ == MIRType_Int64)
if (specialization_ == MIRType::Int64)
return;
Range left(getOperand(0));
@ -1380,7 +1380,7 @@ MLsh::computeRange(TempAllocator& alloc)
left.wrapAroundToInt32();
MConstant* rhsConst = getOperand(1)->maybeConstantValue();
if (rhsConst && rhsConst->type() == MIRType_Int32) {
if (rhsConst && rhsConst->type() == MIRType::Int32) {
int32_t c = rhsConst->toInt32();
setRange(Range::lsh(alloc, &left, c));
return;
@ -1393,7 +1393,7 @@ MLsh::computeRange(TempAllocator& alloc)
void
MRsh::computeRange(TempAllocator& alloc)
{
if (specialization_ == MIRType_Int64)
if (specialization_ == MIRType::Int64)
return;
Range left(getOperand(0));
@ -1401,7 +1401,7 @@ MRsh::computeRange(TempAllocator& alloc)
left.wrapAroundToInt32();
MConstant* rhsConst = getOperand(1)->maybeConstantValue();
if (rhsConst && rhsConst->type() == MIRType_Int32) {
if (rhsConst && rhsConst->type() == MIRType::Int32) {
int32_t c = rhsConst->toInt32();
setRange(Range::rsh(alloc, &left, c));
return;
@ -1414,7 +1414,7 @@ MRsh::computeRange(TempAllocator& alloc)
void
MUrsh::computeRange(TempAllocator& alloc)
{
if (specialization_ == MIRType_Int64)
if (specialization_ == MIRType::Int64)
return;
Range left(getOperand(0));
@ -1429,7 +1429,7 @@ MUrsh::computeRange(TempAllocator& alloc)
right.wrapAroundToShiftCount();
MConstant* rhsConst = getOperand(1)->maybeConstantValue();
if (rhsConst && rhsConst->type() == MIRType_Int32) {
if (rhsConst && rhsConst->type() == MIRType::Int32) {
int32_t c = rhsConst->toInt32();
setRange(Range::ursh(alloc, &left, c));
} else {
@ -1442,7 +1442,7 @@ MUrsh::computeRange(TempAllocator& alloc)
void
MAbs::computeRange(TempAllocator& alloc)
{
if (specialization_ != MIRType_Int32 && specialization_ != MIRType_Double)
if (specialization_ != MIRType::Int32 && specialization_ != MIRType::Double)
return;
Range other(getOperand(0));
@ -1487,7 +1487,7 @@ MPopcnt::computeRange(TempAllocator& alloc)
void
MMinMax::computeRange(TempAllocator& alloc)
{
if (specialization_ != MIRType_Int32 && specialization_ != MIRType_Double)
if (specialization_ != MIRType::Int32 && specialization_ != MIRType::Double)
return;
Range left(getOperand(0));
@ -1498,7 +1498,7 @@ MMinMax::computeRange(TempAllocator& alloc)
void
MAdd::computeRange(TempAllocator& alloc)
{
if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
return;
Range left(getOperand(0));
Range right(getOperand(1));
@ -1511,7 +1511,7 @@ MAdd::computeRange(TempAllocator& alloc)
void
MSub::computeRange(TempAllocator& alloc)
{
if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
return;
Range left(getOperand(0));
Range right(getOperand(1));
@ -1524,7 +1524,7 @@ MSub::computeRange(TempAllocator& alloc)
void
MMul::computeRange(TempAllocator& alloc)
{
if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
return;
Range left(getOperand(0));
Range right(getOperand(1));
@ -1542,7 +1542,7 @@ MMul::computeRange(TempAllocator& alloc)
void
MMod::computeRange(TempAllocator& alloc)
{
if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
return;
Range lhs(getOperand(0));
Range rhs(getOperand(1));
@ -1558,7 +1558,7 @@ MMod::computeRange(TempAllocator& alloc)
// If both operands are non-negative integers, we can optimize this to an
// unsigned mod.
if (specialization() == MIRType_Int32 && lhs.lower() >= 0 && rhs.lower() > 0 &&
if (specialization() == MIRType::Int32 && lhs.lower() >= 0 && rhs.lower() > 0 &&
!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart())
{
unsigned_ = true;
@ -1637,7 +1637,7 @@ MMod::computeRange(TempAllocator& alloc)
void
MDiv::computeRange(TempAllocator& alloc)
{
if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
return;
Range lhs(getOperand(0));
Range rhs(getOperand(1));
@ -2335,8 +2335,8 @@ RangeAnalysis::addRangeAssertions()
// Perform range checking for all numeric and numeric-like types.
if (!IsNumberType(ins->type()) &&
ins->type() != MIRType_Boolean &&
ins->type() != MIRType_Value)
ins->type() != MIRType::Boolean &&
ins->type() != MIRType::Value)
{
continue;
}
@ -2350,7 +2350,7 @@ RangeAnalysis::addRangeAssertions()
Range r(ins);
// Don't insert assertions if there's nothing interesting to assert.
if (r.isUnknown() || (ins->type() == MIRType_Int32 && r.isUnknownInt32()))
if (r.isUnknown() || (ins->type() == MIRType::Int32 && r.isUnknownInt32()))
continue;
// Don't add a use to an instruction that is recovered on bailout.
@ -2461,7 +2461,7 @@ MConstant::truncate()
int32_t res = ToInt32(numberToDouble());
payload_.asBits = 0;
payload_.i32 = res;
setResultType(MIRType_Int32);
setResultType(MIRType::Int32);
if (range())
range()->setInt32(res, res);
}
@ -2469,7 +2469,7 @@ MConstant::truncate()
bool
MPhi::needTruncation(TruncateKind kind)
{
if (type() == MIRType_Double || type() == MIRType_Int32) {
if (type() == MIRType::Double || type() == MIRType::Int32) {
truncateKind_ = kind;
return true;
}
@ -2480,7 +2480,7 @@ MPhi::needTruncation(TruncateKind kind)
void
MPhi::truncate()
{
setResultType(MIRType_Int32);
setResultType(MIRType::Int32);
if (truncateKind_ >= IndirectTruncate && range())
range()->wrapAroundToInt32();
}
@ -2491,15 +2491,15 @@ MAdd::needTruncation(TruncateKind kind)
// Remember analysis, needed for fallible checks.
setTruncateKind(kind);
return type() == MIRType_Double || type() == MIRType_Int32;
return type() == MIRType::Double || type() == MIRType::Int32;
}
void
MAdd::truncate()
{
MOZ_ASSERT(needTruncation(truncateKind()));
specialization_ = MIRType_Int32;
setResultType(MIRType_Int32);
specialization_ = MIRType::Int32;
setResultType(MIRType::Int32);
if (truncateKind() >= IndirectTruncate && range())
range()->wrapAroundToInt32();
}
@ -2510,15 +2510,15 @@ MSub::needTruncation(TruncateKind kind)
// Remember analysis, needed for fallible checks.
setTruncateKind(kind);
return type() == MIRType_Double || type() == MIRType_Int32;
return type() == MIRType::Double || type() == MIRType::Int32;
}
void
MSub::truncate()
{
MOZ_ASSERT(needTruncation(truncateKind()));
specialization_ = MIRType_Int32;
setResultType(MIRType_Int32);
specialization_ = MIRType::Int32;
setResultType(MIRType::Int32);
if (truncateKind() >= IndirectTruncate && range())
range()->wrapAroundToInt32();
}
@ -2529,15 +2529,15 @@ MMul::needTruncation(TruncateKind kind)
// Remember analysis, needed for fallible checks.
setTruncateKind(kind);
return type() == MIRType_Double || type() == MIRType_Int32;
return type() == MIRType::Double || type() == MIRType::Int32;
}
void
MMul::truncate()
{
MOZ_ASSERT(needTruncation(truncateKind()));
specialization_ = MIRType_Int32;
setResultType(MIRType_Int32);
specialization_ = MIRType::Int32;
setResultType(MIRType::Int32);
if (truncateKind() >= IndirectTruncate) {
setCanBeNegativeZero(false);
if (range())
@ -2551,15 +2551,15 @@ MDiv::needTruncation(TruncateKind kind)
// Remember analysis, needed for fallible checks.
setTruncateKind(kind);
return type() == MIRType_Double || type() == MIRType_Int32;
return type() == MIRType::Double || type() == MIRType::Int32;
}
void
MDiv::truncate()
{
MOZ_ASSERT(needTruncation(truncateKind()));
specialization_ = MIRType_Int32;
setResultType(MIRType_Int32);
specialization_ = MIRType::Int32;
setResultType(MIRType::Int32);
// Divisions where the lhs and rhs are unsigned and the result is
// truncated can be lowered more efficiently.
@ -2575,7 +2575,7 @@ MMod::needTruncation(TruncateKind kind)
// Remember analysis, needed for fallible checks.
setTruncateKind(kind);
return type() == MIRType_Double || type() == MIRType_Int32;
return type() == MIRType::Double || type() == MIRType::Int32;
}
void
@ -2583,8 +2583,8 @@ MMod::truncate()
{
// As for division, handle unsigned modulus with a truncated result.
MOZ_ASSERT(needTruncation(truncateKind()));
specialization_ = MIRType_Int32;
setResultType(MIRType_Int32);
specialization_ = MIRType::Int32;
setResultType(MIRType::Int32);
if (unsignedOperands()) {
replaceWithUnsignedOperands();
@ -2595,7 +2595,7 @@ MMod::truncate()
bool
MToDouble::needTruncation(TruncateKind kind)
{
MOZ_ASSERT(type() == MIRType_Double);
MOZ_ASSERT(type() == MIRType::Double);
setTruncateKind(kind);
return true;
@ -2608,7 +2608,7 @@ MToDouble::truncate()
// We use the return type to flag that this MToDouble should be replaced by
// a MTruncateToInt32 when modifying the graph.
setResultType(MIRType_Int32);
setResultType(MIRType::Int32);
if (truncateKind() >= IndirectTruncate) {
if (range())
range()->wrapAroundToInt32();
@ -2631,7 +2631,7 @@ bool
MLimitedTruncate::needTruncation(TruncateKind kind)
{
setTruncateKind(kind);
setResultType(MIRType_Int32);
setResultType(MIRType::Int32);
if (kind >= IndirectTruncate && range())
range()->wrapAroundToInt32();
return false;
@ -2781,7 +2781,7 @@ TruncateTest(TempAllocator& alloc, MTest* test)
// If all possible inputs to the test are either int32 or boolean,
// convert those inputs to int32 so that an int32 test can be performed.
if (test->input()->type() != MIRType_Value)
if (test->input()->type() != MIRType::Value)
return true;
if (!test->input()->isPhi() || !test->input()->hasOneDefUse() || test->input()->isImplicitlyUsed())
@ -2793,24 +2793,24 @@ TruncateTest(TempAllocator& alloc, MTest* test)
if (!def->isBox())
return true;
MDefinition* inner = def->getOperand(0);
if (inner->type() != MIRType_Boolean && inner->type() != MIRType_Int32)
if (inner->type() != MIRType::Boolean && inner->type() != MIRType::Int32)
return true;
}
for (size_t i = 0; i < phi->numOperands(); i++) {
MDefinition* inner = phi->getOperand(i)->getOperand(0);
if (inner->type() != MIRType_Int32) {
if (inner->type() != MIRType::Int32) {
if (!alloc.ensureBallast())
return false;
MBasicBlock* block = inner->block();
inner = MToInt32::New(alloc, inner);
block->insertBefore(block->lastIns(), inner->toInstruction());
}
MOZ_ASSERT(inner->type() == MIRType_Int32);
MOZ_ASSERT(inner->type() == MIRType::Int32);
phi->replaceOperand(i, inner);
}
phi->setResultType(MIRType_Int32);
phi->setResultType(MIRType::Int32);
return true;
}
@ -2967,7 +2967,7 @@ ComputeTruncateKind(MDefinition* candidate, bool* shouldClone)
// Special case integer division and modulo: a/b can be infinite, and a%b
// can be NaN but cannot actually have rounding errors induced by truncation.
if ((candidate->isDiv() || candidate->isMod()) &&
static_cast<const MBinaryArithInstruction *>(candidate)->specialization() == MIRType_Int32)
static_cast<const MBinaryArithInstruction *>(candidate)->specialization() == MIRType::Int32)
{
canHaveRoundingErrors = false;
}
@ -2986,7 +2986,7 @@ RemoveTruncatesOnOutput(MDefinition* truncated)
if (truncated->isCompare())
return;
MOZ_ASSERT(truncated->type() == MIRType_Int32);
MOZ_ASSERT(truncated->type() == MIRType::Int32);
MOZ_ASSERT(Range(truncated).isInt32());
for (MUseDefIterator use(truncated); use; use++) {
@ -3008,10 +3008,10 @@ AdjustTruncatedInputs(TempAllocator& alloc, MDefinition* truncated)
continue;
MDefinition* input = truncated->getOperand(i);
if (input->type() == MIRType_Int32)
if (input->type() == MIRType::Int32)
continue;
if (input->isToDouble() && input->getOperand(0)->type() == MIRType_Int32) {
if (input->isToDouble() && input->getOperand(0)->type() == MIRType::Int32) {
truncated->replaceOperand(i, input->getOperand(0));
} else {
MInstruction* op;
@ -3065,7 +3065,7 @@ RangeAnalysis::truncate()
if (iter->isRecoveredOnBailout())
continue;
if (iter->type() == MIRType_None) {
if (iter->type() == MIRType::None) {
if (iter->isTest()) {
if (!TruncateTest(alloc(), iter->toTest()))
return false;
@ -3391,7 +3391,7 @@ MPowHalf::collectRangeInfoPreTrunc()
void
MUrsh::collectRangeInfoPreTrunc()
{
if (specialization_ == MIRType_Int64)
if (specialization_ == MIRType::Int64)
return;
Range lhsRange(lhs()), rhsRange(rhs());
@ -3431,13 +3431,13 @@ MBinaryBitwiseInstruction::collectRangeInfoPreTrunc()
Range lhsRange(lhs());
Range rhsRange(rhs());
if (lhs()->isConstant() && lhs()->type() == MIRType_Int32 &&
if (lhs()->isConstant() && lhs()->type() == MIRType::Int32 &&
DoesMaskMatchRange(lhs()->toConstant()->toInt32(), rhsRange))
{
maskMatchesRightRange = true;
}
if (rhs()->isConstant() && rhs()->type() == MIRType_Int32 &&
if (rhs()->isConstant() && rhs()->type() == MIRType::Int32 &&
DoesMaskMatchRange(rhs()->toConstant()->toInt32(), lhsRange))
{
maskMatchesLeftRange = true;


@ -337,7 +337,7 @@ MAdd::writeRecoverData(CompactBufferWriter& writer) const
{
MOZ_ASSERT(canRecoverOnBailout());
writer.writeUnsigned(uint32_t(RInstruction::Recover_Add));
writer.writeByte(specialization_ == MIRType_Float32);
writer.writeByte(specialization_ == MIRType::Float32);
return true;
}
@ -357,7 +357,7 @@ RAdd::recover(JSContext* cx, SnapshotIterator& iter) const
if (!js::AddValues(cx, &lhs, &rhs, &result))
return false;
// MIRType_Float32 is a specialization embedding the fact that the result is
// MIRType::Float32 is a specialization embedding the fact that the result is
// rounded to a Float32.
if (isFloatOperation_ && !RoundFloat32(cx, result, &result))
return false;
@ -371,7 +371,7 @@ MSub::writeRecoverData(CompactBufferWriter& writer) const
{
MOZ_ASSERT(canRecoverOnBailout());
writer.writeUnsigned(uint32_t(RInstruction::Recover_Sub));
writer.writeByte(specialization_ == MIRType_Float32);
writer.writeByte(specialization_ == MIRType::Float32);
return true;
}
@ -391,7 +391,7 @@ RSub::recover(JSContext* cx, SnapshotIterator& iter) const
if (!js::SubValues(cx, &lhs, &rhs, &result))
return false;
// MIRType_Float32 is a specialization embedding the fact that the result is
// MIRType::Float32 is a specialization embedding the fact that the result is
// rounded to a Float32.
if (isFloatOperation_ && !RoundFloat32(cx, result, &result))
return false;
@ -405,7 +405,7 @@ MMul::writeRecoverData(CompactBufferWriter& writer) const
{
MOZ_ASSERT(canRecoverOnBailout());
writer.writeUnsigned(uint32_t(RInstruction::Recover_Mul));
writer.writeByte(specialization_ == MIRType_Float32);
writer.writeByte(specialization_ == MIRType::Float32);
MOZ_ASSERT(Mode(uint8_t(mode_)) == mode_);
writer.writeByte(uint8_t(mode_));
return true;
@ -428,7 +428,7 @@ RMul::recover(JSContext* cx, SnapshotIterator& iter) const
if (!js::MulValues(cx, &lhs, &rhs, &result))
return false;
// MIRType_Float32 is a specialization embedding the fact that the
// MIRType::Float32 is a specialization embedding the fact that the
// result is rounded to a Float32.
if (isFloatOperation_ && !RoundFloat32(cx, result, &result))
return false;
@ -447,7 +447,7 @@ MDiv::writeRecoverData(CompactBufferWriter& writer) const
{
MOZ_ASSERT(canRecoverOnBailout());
writer.writeUnsigned(uint32_t(RInstruction::Recover_Div));
writer.writeByte(specialization_ == MIRType_Float32);
writer.writeByte(specialization_ == MIRType::Float32);
return true;
}
@ -466,7 +466,7 @@ RDiv::recover(JSContext* cx, SnapshotIterator& iter) const
if (!js::DivValues(cx, &lhs, &rhs, &result))
return false;
// MIRType_Float32 is a specialization embedding the fact that the result is
// MIRType::Float32 is a specialization embedding the fact that the result is
// rounded to a Float32.
if (isFloatOperation_ && !RoundFloat32(cx, result, &result))
return false;
@ -830,7 +830,7 @@ MSqrt::writeRecoverData(CompactBufferWriter& writer) const
{
MOZ_ASSERT(canRecoverOnBailout());
writer.writeUnsigned(uint32_t(RInstruction::Recover_Sqrt));
writer.writeByte(type() == MIRType_Float32);
writer.writeByte(type() == MIRType::Float32);
return true;
}
@ -849,7 +849,7 @@ RSqrt::recover(JSContext* cx, SnapshotIterator& iter) const
if (!math_sqrt_handle(cx, num, &result))
return false;
// MIRType_Float32 is a specialization embedding the fact that the result is
// MIRType::Float32 is a specialization embedding the fact that the result is
// rounded to a Float32.
if (isFloatOperation_ && !RoundFloat32(cx, result, &result))
return false;


@ -196,7 +196,7 @@ class TypedOrValueRegister
public:
TypedOrValueRegister()
: type_(MIRType_None)
: type_(MIRType::None)
{}
TypedOrValueRegister(MIRType type, AnyRegister reg)
@ -206,7 +206,7 @@ class TypedOrValueRegister
}
MOZ_IMPLICIT TypedOrValueRegister(ValueOperand value)
: type_(MIRType_Value)
: type_(MIRType::Value)
{
dataValue() = value;
}
@ -216,11 +216,11 @@ class TypedOrValueRegister
}
bool hasTyped() const {
return type() != MIRType_None && type() != MIRType_Value;
return type() != MIRType::None && type() != MIRType::Value;
}
bool hasValue() const {
return type() == MIRType_Value;
return type() == MIRType::Value;
}
AnyRegister typedReg() const {


@ -142,7 +142,7 @@ IsLambdaEscaped(MLambda* lambda, JSObject* obj)
static bool
IsObjectEscaped(MInstruction* ins, JSObject* objDefault)
{
MOZ_ASSERT(ins->type() == MIRType_Object);
MOZ_ASSERT(ins->type() == MIRType::Object);
MOZ_ASSERT(ins->isNewObject() || ins->isGuardShape() || ins->isCreateThisWithTemplate() ||
ins->isNewCallObject() || ins->isFunctionEnvironment());
@ -781,7 +781,7 @@ IndexOf(MDefinition* ins, int32_t* res)
if (indexDef->isToInt32())
indexDef = indexDef->toToInt32()->getOperand(0);
MConstant* indexDefConst = indexDef->maybeConstantValue();
if (!indexDefConst || indexDefConst->type() != MIRType_Int32)
if (!indexDefConst || indexDefConst->type() != MIRType::Int32)
return false;
*res = indexDefConst->toInt32();
return true;
@ -796,7 +796,7 @@ IsElementEscaped(MElements* def, uint32_t arraySize)
JitSpewIndent spewIndent(JitSpew_Escape);
for (MUseIterator i(def->usesBegin()); i != def->usesEnd(); i++) {
// The MIRType_Elements cannot be captured in a resume point as
// The MIRType::Elements cannot be captured in a resume point as
// it does not represent a value allocation.
MDefinition* access = (*i)->consumer()->toDefinition();
@ -858,7 +858,7 @@ IsElementEscaped(MElements* def, uint32_t arraySize)
}
// We are not yet encoding magic hole constants in resume points.
if (access->toStoreElement()->value()->type() == MIRType_MagicHole) {
if (access->toStoreElement()->value()->type() == MIRType::MagicHole) {
JitSpewDef(JitSpew_Escape, "has a store element with a magic-hole constant\n", access);
return true;
}
@ -894,7 +894,7 @@ IsElementEscaped(MElements* def, uint32_t arraySize)
static bool
IsArrayEscaped(MInstruction* ins)
{
MOZ_ASSERT(ins->type() == MIRType_Object);
MOZ_ASSERT(ins->type() == MIRType::Object);
MOZ_ASSERT(ins->isNewArray());
uint32_t length = ins->toNewArray()->length();


@ -21,7 +21,7 @@ static void
EnsureOperandNotFloat32(TempAllocator& alloc, MInstruction* def, unsigned op)
{
MDefinition* in = def->getOperand(op);
if (in->type() == MIRType_Float32) {
if (in->type() == MIRType::Float32) {
MToDouble* replace = MToDouble::New(alloc, in);
def->block()->insertBefore(def, replace);
if (def->isRecoveredOnBailout())
@ -35,7 +35,7 @@ js::jit::AlwaysBoxAt(TempAllocator& alloc, MInstruction* at, MDefinition* operan
{
MDefinition* boxedOperand = operand;
// Replace Float32 by double
if (operand->type() == MIRType_Float32) {
if (operand->type() == MIRType::Float32) {
MInstruction* replace = MToDouble::New(alloc, operand);
at->block()->insertBefore(at, replace);
boxedOperand = replace;
@ -58,7 +58,7 @@ BoxInputsPolicy::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
{
for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
MDefinition* in = ins->getOperand(i);
if (in->type() == MIRType_Value)
if (in->type() == MIRType::Value)
continue;
ins->replaceOperand(i, BoxAt(alloc, ins, in));
}
@ -69,10 +69,10 @@ bool
ArithPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
{
MIRType specialization = ins->typePolicySpecialization();
if (specialization == MIRType_None)
if (specialization == MIRType::None)
return BoxInputsPolicy::staticAdjustInputs(alloc, ins);
MOZ_ASSERT(ins->type() == MIRType_Double || ins->type() == MIRType_Int32 || ins->type() == MIRType_Float32);
MOZ_ASSERT(ins->type() == MIRType::Double || ins->type() == MIRType::Int32 || ins->type() == MIRType::Float32);
for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
MDefinition* in = ins->getOperand(i);
@ -81,9 +81,9 @@ ArithPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
MInstruction* replace;
if (ins->type() == MIRType_Double)
if (ins->type() == MIRType::Double)
replace = MToDouble::New(alloc, in);
else if (ins->type() == MIRType_Float32)
else if (ins->type() == MIRType::Float32)
replace = MToFloat32::New(alloc, in);
else
replace = MToInt32::New(alloc, in);
@ -103,7 +103,7 @@ AllDoublePolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
{
for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
MDefinition* in = ins->getOperand(i);
if (in->type() == MIRType_Double)
if (in->type() == MIRType::Double)
continue;
if (!alloc.ensureBallast())
@ -129,7 +129,7 @@ ComparePolicy::adjustInputs(TempAllocator& alloc, MInstruction* def)
// Convert Float32 operands to doubles
for (size_t i = 0; i < 2; i++) {
MDefinition* in = def->getOperand(i);
if (in->type() == MIRType_Float32) {
if (in->type() == MIRType::Float32) {
MInstruction* replace = MToDouble::New(alloc, in);
def->block()->insertBefore(def, replace);
def->replaceOperand(i, replace);
@ -148,7 +148,7 @@ ComparePolicy::adjustInputs(TempAllocator& alloc, MInstruction* def)
// This matches other comparisons of the form bool === bool and
// generated code of Compare_Int32 is more efficient.
if (compare->compareType() == MCompare::Compare_Boolean &&
def->getOperand(0)->type() == MIRType_Boolean)
def->getOperand(0)->type() == MIRType::Boolean)
{
compare->setCompareType(MCompare::Compare_Int32MaybeCoerceBoth);
}
@ -158,23 +158,23 @@ ComparePolicy::adjustInputs(TempAllocator& alloc, MInstruction* def)
if (compare->compareType() == MCompare::Compare_Boolean) {
// Unbox rhs that is definitely Boolean
MDefinition* rhs = def->getOperand(1);
if (rhs->type() != MIRType_Boolean) {
MInstruction* unbox = MUnbox::New(alloc, rhs, MIRType_Boolean, MUnbox::Infallible);
if (rhs->type() != MIRType::Boolean) {
MInstruction* unbox = MUnbox::New(alloc, rhs, MIRType::Boolean, MUnbox::Infallible);
def->block()->insertBefore(def, unbox);
def->replaceOperand(1, unbox);
if (!unbox->typePolicy()->adjustInputs(alloc, unbox))
return false;
}
MOZ_ASSERT(def->getOperand(0)->type() != MIRType_Boolean);
MOZ_ASSERT(def->getOperand(1)->type() == MIRType_Boolean);
MOZ_ASSERT(def->getOperand(0)->type() != MIRType::Boolean);
MOZ_ASSERT(def->getOperand(1)->type() == MIRType::Boolean);
return true;
}
// Compare_StrictString specialization is done for "Anything === String"
// If the LHS is string, we set the specialization to Compare_String.
if (compare->compareType() == MCompare::Compare_StrictString &&
def->getOperand(0)->type() == MIRType_String)
def->getOperand(0)->type() == MIRType::String)
{
compare->setCompareType(MCompare::Compare_String);
}
@ -184,16 +184,16 @@ ComparePolicy::adjustInputs(TempAllocator& alloc, MInstruction* def)
if (compare->compareType() == MCompare::Compare_StrictString) {
// Unbox rhs that is definitely String
MDefinition* rhs = def->getOperand(1);
if (rhs->type() != MIRType_String) {
MInstruction* unbox = MUnbox::New(alloc, rhs, MIRType_String, MUnbox::Infallible);
if (rhs->type() != MIRType::String) {
MInstruction* unbox = MUnbox::New(alloc, rhs, MIRType::String, MUnbox::Infallible);
def->block()->insertBefore(def, unbox);
def->replaceOperand(1, unbox);
if (!unbox->typePolicy()->adjustInputs(alloc, unbox))
return false;
}
MOZ_ASSERT(def->getOperand(0)->type() != MIRType_String);
MOZ_ASSERT(def->getOperand(1)->type() == MIRType_String);
MOZ_ASSERT(def->getOperand(0)->type() != MIRType::String);
MOZ_ASSERT(def->getOperand(1)->type() == MIRType::String);
return true;
}
@ -206,8 +206,8 @@ ComparePolicy::adjustInputs(TempAllocator& alloc, MInstruction* def)
// Convert all inputs to the right input type
MIRType type = compare->inputType();
MOZ_ASSERT(type == MIRType_Int32 || type == MIRType_Double ||
type == MIRType_Object || type == MIRType_String || type == MIRType_Float32);
MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Double ||
type == MIRType::Object || type == MIRType::String || type == MIRType::Float32);
for (size_t i = 0; i < 2; i++) {
MDefinition* in = def->getOperand(i);
if (in->type() == type)
@ -216,7 +216,7 @@ ComparePolicy::adjustInputs(TempAllocator& alloc, MInstruction* def)
MInstruction* replace;
switch (type) {
case MIRType_Double: {
case MIRType::Double: {
MToFPInstruction::ConversionKind convert = MToFPInstruction::NumbersOnly;
if (compare->compareType() == MCompare::Compare_DoubleMaybeCoerceLHS && i == 0)
convert = MToFPInstruction::NonNullNonStringPrimitives;
@ -225,7 +225,7 @@ ComparePolicy::adjustInputs(TempAllocator& alloc, MInstruction* def)
replace = MToDouble::New(alloc, in, convert);
break;
}
case MIRType_Float32: {
case MIRType::Float32: {
MToFPInstruction::ConversionKind convert = MToFPInstruction::NumbersOnly;
if (compare->compareType() == MCompare::Compare_DoubleMaybeCoerceLHS && i == 0)
convert = MToFPInstruction::NonNullNonStringPrimitives;
@ -234,7 +234,7 @@ ComparePolicy::adjustInputs(TempAllocator& alloc, MInstruction* def)
replace = MToFloat32::New(alloc, in, convert);
break;
}
case MIRType_Int32: {
case MIRType::Int32: {
MacroAssembler::IntConversionInputKind convert = MacroAssembler::IntConversion_NumbersOnly;
if (compare->compareType() == MCompare::Compare_Int32MaybeCoerceBoth ||
(compare->compareType() == MCompare::Compare_Int32MaybeCoerceLHS && i == 0) ||
@ -245,11 +245,11 @@ ComparePolicy::adjustInputs(TempAllocator& alloc, MInstruction* def)
replace = MToInt32::New(alloc, in, convert);
break;
}
case MIRType_Object:
replace = MUnbox::New(alloc, in, MIRType_Object, MUnbox::Infallible);
case MIRType::Object:
replace = MUnbox::New(alloc, in, MIRType::Object, MUnbox::Infallible);
break;
case MIRType_String:
replace = MUnbox::New(alloc, in, MIRType_String, MUnbox::Infallible);
case MIRType::String:
replace = MUnbox::New(alloc, in, MIRType::String, MUnbox::Infallible);
break;
default:
MOZ_CRASH("Unknown compare specialization");
@ -277,16 +277,16 @@ TypeBarrierPolicy::adjustInputs(TempAllocator& alloc, MInstruction* def)
return true;
// Output is a value, currently box the input.
if (outputType == MIRType_Value) {
if (outputType == MIRType::Value) {
// XXX: Possible optimization: decrease resultTypeSet to only include
// the inputType. This will remove the need for boxing.
MOZ_ASSERT(inputType != MIRType_Value);
MOZ_ASSERT(inputType != MIRType::Value);
ins->replaceOperand(0, BoxAt(alloc, ins, ins->getOperand(0)));
return true;
}
// Box input if needed.
if (inputType != MIRType_Value) {
if (inputType != MIRType::Value) {
MOZ_ASSERT(ins->alwaysBails());
ins->replaceOperand(0, BoxAt(alloc, ins, ins->getOperand(0)));
}
@ -295,9 +295,9 @@ TypeBarrierPolicy::adjustInputs(TempAllocator& alloc, MInstruction* def)
// also a value.
// Note: Using setResultType shouldn't be done in TypePolicies;
// here it is fine, since the type barrier has no uses.
if (IsNullOrUndefined(outputType) || outputType == MIRType_MagicOptimizedArguments) {
if (IsNullOrUndefined(outputType) || outputType == MIRType::MagicOptimizedArguments) {
MOZ_ASSERT(!ins->hasDefUses());
ins->setResultType(MIRType_Value);
ins->setResultType(MIRType::Value);
return true;
}
@ -325,18 +325,18 @@ TestPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
{
MDefinition* op = ins->getOperand(0);
switch (op->type()) {
case MIRType_Value:
case MIRType_Null:
case MIRType_Undefined:
case MIRType_Boolean:
case MIRType_Int32:
case MIRType_Double:
case MIRType_Float32:
case MIRType_Symbol:
case MIRType_Object:
case MIRType::Value:
case MIRType::Null:
case MIRType::Undefined:
case MIRType::Boolean:
case MIRType::Int32:
case MIRType::Double:
case MIRType::Float32:
case MIRType::Symbol:
case MIRType::Object:
break;
case MIRType_String:
case MIRType::String:
{
MStringLength* length = MStringLength::New(alloc, op);
ins->block()->insertBefore(ins, length);
@ -355,16 +355,16 @@ bool
BitwisePolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
{
MIRType specialization = ins->typePolicySpecialization();
if (specialization == MIRType_None)
if (specialization == MIRType::None)
return BoxInputsPolicy::staticAdjustInputs(alloc, ins);
MOZ_ASSERT(ins->type() == specialization);
MOZ_ASSERT(specialization == MIRType_Int32 || specialization == MIRType_Double);
MOZ_ASSERT(specialization == MIRType::Int32 || specialization == MIRType::Double);
// This policy works for both unary and binary bitwise operations.
for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
MDefinition* in = ins->getOperand(i);
if (in->type() == MIRType_Int32)
if (in->type() == MIRType::Int32)
continue;
MInstruction* replace = MTruncateToInt32::New(alloc, in);
@ -382,14 +382,14 @@ bool
PowPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
{
MIRType specialization = ins->typePolicySpecialization();
MOZ_ASSERT(specialization == MIRType_Int32 || specialization == MIRType_Double);
MOZ_ASSERT(specialization == MIRType::Int32 || specialization == MIRType::Double);
// Input must be a double.
if (!DoublePolicy<0>::staticAdjustInputs(alloc, ins))
return false;
// Power may be an int32 or a double. Integers receive a faster path.
if (specialization == MIRType_Double)
if (specialization == MIRType::Double)
return DoublePolicy<1>::staticAdjustInputs(alloc, ins);
return IntPolicy<1>::staticAdjustInputs(alloc, ins);
}
@ -399,10 +399,10 @@ bool
StringPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
{
MDefinition* in = ins->getOperand(Op);
if (in->type() == MIRType_String)
if (in->type() == MIRType::String)
return true;
MUnbox* replace = MUnbox::New(alloc, in, MIRType_String, MUnbox::Fallible);
MUnbox* replace = MUnbox::New(alloc, in, MIRType::String, MUnbox::Fallible);
ins->block()->insertBefore(ins, replace);
ins->replaceOperand(Op, replace);
@ -418,7 +418,7 @@ bool
ConvertToStringPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
{
MDefinition* in = ins->getOperand(Op);
if (in->type() == MIRType_String)
if (in->type() == MIRType::String)
return true;
MToString* replace = MToString::New(alloc, in);
@ -440,10 +440,10 @@ bool
BooleanPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def)
{
MDefinition* in = def->getOperand(Op);
if (in->type() == MIRType_Boolean)
if (in->type() == MIRType::Boolean)
return true;
MUnbox* replace = MUnbox::New(alloc, in, MIRType_Boolean, MUnbox::Fallible);
MUnbox* replace = MUnbox::New(alloc, in, MIRType::Boolean, MUnbox::Fallible);
def->block()->insertBefore(def, replace);
def->replaceOperand(Op, replace);
@ -457,10 +457,10 @@ bool
IntPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def)
{
MDefinition* in = def->getOperand(Op);
if (in->type() == MIRType_Int32)
if (in->type() == MIRType::Int32)
return true;
MUnbox* replace = MUnbox::New(alloc, in, MIRType_Int32, MUnbox::Fallible);
MUnbox* replace = MUnbox::New(alloc, in, MIRType::Int32, MUnbox::Fallible);
def->block()->insertBefore(def, replace);
def->replaceOperand(Op, replace);
@ -477,7 +477,7 @@ bool
ConvertToInt32Policy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def)
{
MDefinition* in = def->getOperand(Op);
if (in->type() == MIRType_Int32)
if (in->type() == MIRType::Int32)
return true;
MToInt32* replace = MToInt32::New(alloc, in);
@ -494,7 +494,7 @@ bool
TruncateToInt32Policy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def)
{
MDefinition* in = def->getOperand(Op);
if (in->type() == MIRType_Int32)
if (in->type() == MIRType::Int32)
return true;
MTruncateToInt32* replace = MTruncateToInt32::New(alloc, in);
@ -512,7 +512,7 @@ bool
DoublePolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def)
{
MDefinition* in = def->getOperand(Op);
if (in->type() == MIRType_Double || in->type() == MIRType_SinCosDouble)
if (in->type() == MIRType::Double || in->type() == MIRType::SinCosDouble)
return true;
MToDouble* replace = MToDouble::New(alloc, in);
@ -530,7 +530,7 @@ bool
Float32Policy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def)
{
MDefinition* in = def->getOperand(Op);
if (in->type() == MIRType_Float32)
if (in->type() == MIRType::Float32)
return true;
MToFloat32* replace = MToFloat32::New(alloc, in);
@ -549,7 +549,7 @@ bool
FloatingPointPolicy<Op>::adjustInputs(TempAllocator& alloc, MInstruction* def)
{
MIRType policyType = def->typePolicySpecialization();
if (policyType == MIRType_Double)
if (policyType == MIRType::Double)
return DoublePolicy<Op>::staticAdjustInputs(alloc, def);
return Float32Policy<Op>::staticAdjustInputs(alloc, def);
}
@ -592,9 +592,9 @@ SimdScalarPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins
// A vector with boolean lanes requires Int32 inputs that have already been
// converted to 0/-1.
// We can't insert a MIRType_Boolean lane directly - it requires conversion.
if (laneType == MIRType_Boolean) {
MOZ_ASSERT(in->type() == MIRType_Int32, "Boolean SIMD vector requires Int32 lanes.");
// We can't insert a MIRType::Boolean lane directly - it requires conversion.
if (laneType == MIRType::Boolean) {
MOZ_ASSERT(in->type() == MIRType::Int32, "Boolean SIMD vector requires Int32 lanes.");
return true;
}
@ -602,10 +602,10 @@ SimdScalarPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins
return true;
MInstruction* replace;
if (laneType == MIRType_Int32) {
if (laneType == MIRType::Int32) {
replace = MTruncateToInt32::New(alloc, in);
} else {
MOZ_ASSERT(laneType == MIRType_Float32);
MOZ_ASSERT(laneType == MIRType::Float32);
replace = MToFloat32::New(alloc, in);
}
@ -625,7 +625,7 @@ bool
BoxPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
{
MDefinition* in = ins->getOperand(Op);
if (in->type() == MIRType_Value)
if (in->type() == MIRType::Value)
return true;
ins->replaceOperand(Op, BoxAt(alloc, ins, in));
@ -646,11 +646,11 @@ BoxExceptPolicy<Op, Type>::staticAdjustInputs(TempAllocator& alloc, MInstruction
return BoxPolicy<Op>::staticAdjustInputs(alloc, ins);
}
template bool BoxExceptPolicy<0, MIRType_String>::staticAdjustInputs(TempAllocator& alloc,
template bool BoxExceptPolicy<0, MIRType::String>::staticAdjustInputs(TempAllocator& alloc,
MInstruction* ins);
template bool BoxExceptPolicy<1, MIRType_String>::staticAdjustInputs(TempAllocator& alloc,
template bool BoxExceptPolicy<1, MIRType::String>::staticAdjustInputs(TempAllocator& alloc,
MInstruction* ins);
template bool BoxExceptPolicy<2, MIRType_String>::staticAdjustInputs(TempAllocator& alloc,
template bool BoxExceptPolicy<2, MIRType::String>::staticAdjustInputs(TempAllocator& alloc,
MInstruction* ins);
template <unsigned Op>
@ -659,9 +659,9 @@ CacheIdPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
{
MDefinition* in = ins->getOperand(Op);
switch (in->type()) {
case MIRType_Int32:
case MIRType_String:
case MIRType_Symbol:
case MIRType::Int32:
case MIRType::String:
case MIRType::Symbol:
return true;
default:
return BoxPolicy<Op>::staticAdjustInputs(alloc, ins);
@ -683,28 +683,28 @@ ToDoublePolicy::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
conversion = ins->toToFloat32()->conversion();
switch (in->type()) {
case MIRType_Int32:
case MIRType_Float32:
case MIRType_Double:
case MIRType_Value:
case MIRType::Int32:
case MIRType::Float32:
case MIRType::Double:
case MIRType::Value:
// No need for boxing for these types.
return true;
case MIRType_Null:
case MIRType::Null:
// No need for boxing, when we will convert.
if (conversion == MToFPInstruction::NonStringPrimitives)
return true;
break;
case MIRType_Undefined:
case MIRType_Boolean:
case MIRType::Undefined:
case MIRType::Boolean:
// No need for boxing, when we will convert.
if (conversion == MToFPInstruction::NonStringPrimitives)
return true;
if (conversion == MToFPInstruction::NonNullNonStringPrimitives)
return true;
break;
case MIRType_Object:
case MIRType_String:
case MIRType_Symbol:
case MIRType::Object:
case MIRType::String:
case MIRType::Symbol:
// Objects might be effectful. Symbols give TypeError.
break;
default:
@ -727,32 +727,32 @@ ToInt32Policy::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
MDefinition* in = ins->getOperand(0);
switch (in->type()) {
case MIRType_Int32:
case MIRType_Float32:
case MIRType_Double:
case MIRType_Value:
case MIRType::Int32:
case MIRType::Float32:
case MIRType::Double:
case MIRType::Value:
// No need for boxing for these types.
return true;
case MIRType_Undefined:
case MIRType::Undefined:
// No need for boxing when truncating.
if (ins->isTruncateToInt32())
return true;
break;
case MIRType_Null:
case MIRType::Null:
// No need for boxing, when we will convert.
if (conversion == MacroAssembler::IntConversion_Any)
return true;
break;
case MIRType_Boolean:
case MIRType::Boolean:
// No need for boxing, when we will convert.
if (conversion == MacroAssembler::IntConversion_Any)
return true;
if (conversion == MacroAssembler::IntConversion_NumbersOrBoolsOnly)
return true;
break;
case MIRType_Object:
case MIRType_String:
case MIRType_Symbol:
case MIRType::Object:
case MIRType::String:
case MIRType::Symbol:
// Objects might be effectful. Symbols give TypeError.
break;
default:
@ -770,7 +770,7 @@ ToStringPolicy::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
MOZ_ASSERT(ins->isToString());
MIRType type = ins->getOperand(0)->type();
if (type == MIRType_Object || type == MIRType_Symbol) {
if (type == MIRType::Object || type == MIRType::Symbol) {
ins->replaceOperand(0, BoxAt(alloc, ins, ins->getOperand(0)));
return true;
}
@ -786,13 +786,13 @@ bool
ObjectPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
{
MDefinition* in = ins->getOperand(Op);
if (in->type() == MIRType_Object || in->type() == MIRType_Slots ||
in->type() == MIRType_Elements)
if (in->type() == MIRType::Object || in->type() == MIRType::Slots ||
in->type() == MIRType::Elements)
{
return true;
}
MUnbox* replace = MUnbox::New(alloc, in, MIRType_Object, MUnbox::Fallible);
MUnbox* replace = MUnbox::New(alloc, in, MIRType::Object, MUnbox::Fallible);
ins->block()->insertBefore(ins, replace);
ins->replaceOperand(Op, replace);
@ -847,7 +847,7 @@ SimdShufflePolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
// Next inputs are the lanes, which need to be int32
for (unsigned i = 0; i < s->numLanes(); i++) {
MDefinition* in = ins->getOperand(s->numVectors() + i);
if (in->type() == MIRType_Int32)
if (in->type() == MIRType::Int32)
continue;
MInstruction* replace = MToInt32::New(alloc, in, MacroAssembler::IntConversion_NumbersOnly);
@ -864,7 +864,7 @@ bool
SimdSelectPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
{
// First input is the mask, which has to be a bool32x4.
MOZ_ASSERT(ins->getOperand(0)->type() == MIRType_Bool32x4);
MOZ_ASSERT(ins->getOperand(0)->type() == MIRType::Bool32x4);
// Next inputs are the two vectors of a particular type.
for (unsigned i = 1; i < 3; i++)
@ -879,8 +879,8 @@ CallPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
MCall* call = ins->toCall();
MDefinition* func = call->getFunction();
if (func->type() != MIRType_Object) {
MInstruction* unbox = MUnbox::New(alloc, func, MIRType_Object, MUnbox::Fallible);
if (func->type() != MIRType::Object) {
MInstruction* unbox = MUnbox::New(alloc, func, MIRType::Object, MUnbox::Fallible);
call->block()->insertBefore(call, unbox);
call->replaceFunction(unbox);
@ -906,7 +906,7 @@ CallSetElementPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
// Box the index and value operands.
for (size_t i = 1, e = ins->numOperands(); i < e; i++) {
MDefinition* in = ins->getOperand(i);
if (in->type() == MIRType_Value)
if (in->type() == MIRType::Value)
continue;
ins->replaceOperand(i, BoxAt(alloc, ins, in));
}
@ -917,7 +917,7 @@ bool
InstanceOfPolicy::adjustInputs(TempAllocator& alloc, MInstruction* def)
{
// Box first operand if it isn't object
if (def->getOperand(0)->type() != MIRType_Object)
if (def->getOperand(0)->type() != MIRType::Object)
BoxPolicy<0>::staticAdjustInputs(alloc, def);
return true;
@ -939,25 +939,25 @@ StoreUnboxedScalarPolicy::adjustValueInput(TempAllocator& alloc, MInstruction* i
// First, ensure the value is int32, boolean, double or Value.
// The conversion is based on TypedArrayObjectTemplate::setElementTail.
switch (value->type()) {
case MIRType_Int32:
case MIRType_Double:
case MIRType_Float32:
case MIRType_Boolean:
case MIRType_Value:
case MIRType::Int32:
case MIRType::Double:
case MIRType::Float32:
case MIRType::Boolean:
case MIRType::Value:
break;
case MIRType_Null:
case MIRType::Null:
value->setImplicitlyUsedUnchecked();
value = MConstant::New(alloc, Int32Value(0));
ins->block()->insertBefore(ins, value->toInstruction());
break;
case MIRType_Undefined:
case MIRType::Undefined:
value->setImplicitlyUsedUnchecked();
value = MConstant::New(alloc, DoubleNaNValue());
ins->block()->insertBefore(ins, value->toInstruction());
break;
case MIRType_Object:
case MIRType_String:
case MIRType_Symbol:
case MIRType::Object:
case MIRType::String:
case MIRType::Symbol:
value = BoxAt(alloc, ins, value);
break;
default:
@ -969,11 +969,11 @@ StoreUnboxedScalarPolicy::adjustValueInput(TempAllocator& alloc, MInstruction* i
curValue = value;
}
MOZ_ASSERT(value->type() == MIRType_Int32 ||
value->type() == MIRType_Boolean ||
value->type() == MIRType_Double ||
value->type() == MIRType_Float32 ||
value->type() == MIRType_Value);
MOZ_ASSERT(value->type() == MIRType::Int32 ||
value->type() == MIRType::Boolean ||
value->type() == MIRType::Double ||
value->type() == MIRType::Float32 ||
value->type() == MIRType::Value);
switch (writeType) {
case Scalar::Int8:
@ -982,23 +982,23 @@ StoreUnboxedScalarPolicy::adjustValueInput(TempAllocator& alloc, MInstruction* i
case Scalar::Uint16:
case Scalar::Int32:
case Scalar::Uint32:
if (value->type() != MIRType_Int32) {
if (value->type() != MIRType::Int32) {
value = MTruncateToInt32::New(alloc, value);
ins->block()->insertBefore(ins, value->toInstruction());
}
break;
case Scalar::Uint8Clamped:
// IonBuilder should have inserted ClampToUint8.
MOZ_ASSERT(value->type() == MIRType_Int32);
MOZ_ASSERT(value->type() == MIRType::Int32);
break;
case Scalar::Float32:
if (value->type() != MIRType_Float32) {
if (value->type() != MIRType::Float32) {
value = MToFloat32::New(alloc, value);
ins->block()->insertBefore(ins, value->toInstruction());
}
break;
case Scalar::Float64:
if (value->type() != MIRType_Double) {
if (value->type() != MIRType::Double) {
value = MToDouble::New(alloc, value);
ins->block()->insertBefore(ins, value->toInstruction());
}
@ -1020,7 +1020,7 @@ StoreUnboxedScalarPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
MStoreUnboxedScalar* store = ins->toStoreUnboxedScalar();
MOZ_ASSERT(IsValidElementsType(store->elements(), store->offsetAdjustment()));
MOZ_ASSERT(store->index()->type() == MIRType_Int32);
MOZ_ASSERT(store->index()->type() == MIRType::Int32);
return adjustValueInput(alloc, store, store->writeType(), store->value(), 2);
}
@ -1029,9 +1029,9 @@ bool
StoreTypedArrayHolePolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
{
MStoreTypedArrayElementHole* store = ins->toStoreTypedArrayElementHole();
MOZ_ASSERT(store->elements()->type() == MIRType_Elements);
MOZ_ASSERT(store->index()->type() == MIRType_Int32);
MOZ_ASSERT(store->length()->type() == MIRType_Int32);
MOZ_ASSERT(store->elements()->type() == MIRType::Elements);
MOZ_ASSERT(store->index()->type() == MIRType::Int32);
MOZ_ASSERT(store->length()->type() == MIRType::Int32);
return StoreUnboxedScalarPolicy::adjustValueInput(alloc, ins, store->arrayType(), store->value(), 3);
}
@ -1056,14 +1056,14 @@ StoreUnboxedObjectOrNullPolicy::adjustInputs(TempAllocator& alloc, MInstruction*
// and whatever its new value is, unless the value is definitely null.
MStoreUnboxedObjectOrNull* store = ins->toStoreUnboxedObjectOrNull();
MOZ_ASSERT(store->typedObj()->type() == MIRType_Object);
MOZ_ASSERT(store->typedObj()->type() == MIRType::Object);
MDefinition* value = store->value();
if (value->type() == MIRType_Object ||
value->type() == MIRType_Null ||
value->type() == MIRType_ObjectOrNull)
if (value->type() == MIRType::Object ||
value->type() == MIRType::Null ||
value->type() == MIRType::ObjectOrNull)
{
if (value->type() != MIRType_Null) {
if (value->type() != MIRType::Null) {
MInstruction* barrier = MPostWriteBarrier::New(alloc, store->typedObj(), value);
store->block()->insertBefore(store, barrier);
}
@ -1089,9 +1089,9 @@ ClampPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
MDefinition* in = ins->toClampToUint8()->input();
switch (in->type()) {
case MIRType_Int32:
case MIRType_Double:
case MIRType_Value:
case MIRType::Int32:
case MIRType::Double:
case MIRType::Value:
break;
default:
ins->replaceOperand(0, BoxAt(alloc, ins, in));
@ -1109,14 +1109,14 @@ FilterTypeSetPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
MIRType outputType = ins->type();
// Special case when output is a Float32, but input isn't.
if (outputType == MIRType_Float32 && inputType != MIRType_Float32) {
if (outputType == MIRType::Float32 && inputType != MIRType::Float32) {
// Create a MToFloat32 to add between the MFilterTypeSet and
// its uses.
MInstruction* replace = MToFloat32::New(alloc, ins);
ins->justReplaceAllUsesWithExcept(replace);
ins->block()->insertAfter(ins, replace);
// Reset the type to not MIRType_Float32
// Reset the type to not MIRType::Float32
// Note: setResultType shouldn't happen in TypePolicies;
// here it is fine, since there is just one use we just
// added ourselves. And the resulting type after MToFloat32
@ -1137,8 +1137,8 @@ FilterTypeSetPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
return true;
// Output is a value, box the input.
if (outputType == MIRType_Value) {
MOZ_ASSERT(inputType != MIRType_Value);
if (outputType == MIRType::Value) {
MOZ_ASSERT(inputType != MIRType::Value);
ins->replaceOperand(0, BoxAt(alloc, ins, ins->getOperand(0)));
return true;
}
@ -1146,7 +1146,7 @@ FilterTypeSetPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
// The outputType should be a subset of the inputType else we are in code
// that has never executed yet. Bail to see the new type (if that hasn't
// happened yet).
if (inputType != MIRType_Value) {
if (inputType != MIRType::Value) {
MBail* bail = MBail::New(alloc);
ins->block()->insertBefore(ins, bail);
bail->setDependency(ins->dependency());
@ -1158,9 +1158,9 @@ FilterTypeSetPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
// also a value.
// Note: Using setResultType shouldn't be done in TypePolicies;
// here it is fine, since the type barrier has no uses.
if (IsNullOrUndefined(outputType) || outputType == MIRType_MagicOptimizedArguments) {
if (IsNullOrUndefined(outputType) || outputType == MIRType::MagicOptimizedArguments) {
MOZ_ASSERT(!ins->hasDefUses());
ins->setResultType(MIRType_Value);
ins->setResultType(MIRType::Value);
return true;
}
@ -1206,7 +1206,7 @@ FilterTypeSetPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
_(TypeBarrierPolicy)
#define TEMPLATE_TYPE_POLICY_LIST(_) \
_(BoxExceptPolicy<0, MIRType_String>) \
_(BoxExceptPolicy<0, MIRType::String>) \
_(BoxPolicy<0>) \
_(ConvertToInt32Policy<0>) \
_(ConvertToStringPolicy<0>) \


@ -716,15 +716,15 @@ inline void*
IonMarkFunction(MIRType type)
{
switch (type) {
case MIRType_Value:
case MIRType::Value:
return JS_FUNC_TO_DATA_PTR(void*, MarkValueFromIon);
case MIRType_String:
case MIRType::String:
return JS_FUNC_TO_DATA_PTR(void*, MarkStringFromIon);
case MIRType_Object:
case MIRType::Object:
return JS_FUNC_TO_DATA_PTR(void*, MarkObjectFromIon);
case MIRType_Shape:
case MIRType::Shape:
return JS_FUNC_TO_DATA_PTR(void*, MarkShapeFromIon);
case MIRType_ObjectGroup:
case MIRType::ObjectGroup:
return JS_FUNC_TO_DATA_PTR(void*, MarkObjectGroupFromIon);
default: MOZ_CRASH();
}


@ -47,8 +47,8 @@ ABIArg
ABIArgGenerator::softNext(MIRType type)
{
switch (type) {
case MIRType_Int32:
case MIRType_Pointer:
case MIRType::Int32:
case MIRType::Pointer:
if (intRegIndex_ == NumIntArgRegs) {
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint32_t);
@ -57,7 +57,7 @@ ABIArgGenerator::softNext(MIRType type)
current_ = ABIArg(Register::FromCode(intRegIndex_));
intRegIndex_++;
break;
case MIRType_Float32:
case MIRType::Float32:
if (intRegIndex_ == NumIntArgRegs) {
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint32_t);
@ -66,7 +66,7 @@ ABIArgGenerator::softNext(MIRType type)
current_ = ABIArg(Register::FromCode(intRegIndex_));
intRegIndex_++;
break;
case MIRType_Double:
case MIRType::Double:
// Make sure to use an even register index. Increase to next even number
// when odd.
intRegIndex_ = (intRegIndex_ + 1) & ~1;
@ -92,8 +92,8 @@ ABIArg
ABIArgGenerator::hardNext(MIRType type)
{
switch (type) {
case MIRType_Int32:
case MIRType_Pointer:
case MIRType::Int32:
case MIRType::Pointer:
if (intRegIndex_ == NumIntArgRegs) {
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint32_t);
@ -102,7 +102,7 @@ ABIArgGenerator::hardNext(MIRType type)
current_ = ABIArg(Register::FromCode(intRegIndex_));
intRegIndex_++;
break;
case MIRType_Float32:
case MIRType::Float32:
if (floatRegIndex_ == NumFloatArgRegs) {
static const int align = sizeof(double) - 1;
stackOffset_ = (stackOffset_ + align) & ~align;
@ -113,7 +113,7 @@ ABIArgGenerator::hardNext(MIRType type)
current_ = ABIArg(VFPRegister(floatRegIndex_, VFPRegister::Single));
floatRegIndex_++;
break;
case MIRType_Double:
case MIRType::Double:
// Double registers are composed of 2 float registers, thus we have to
// skip any float register which cannot be used in a pair of float
// registers in which a double value can be stored.


@ -1359,7 +1359,7 @@ CodeGeneratorARM::visitBoxFloatingPoint(LBoxFloatingPoint* box)
const LAllocation* in = box->getOperand(0);
FloatRegister reg = ToFloatRegister(in);
if (box->type() == MIRType_Float32) {
if (box->type() == MIRType::Float32) {
ScratchFloat32Scope scratch(masm);
masm.convertFloat32ToDouble(reg, scratch);
masm.ma_vxfer(VFPRegister(scratch), ToRegister(payload), ToRegister(type));
@ -2118,7 +2118,7 @@ CodeGeneratorARM::visitAsmSelect(LAsmSelect* ins)
Register cond = ToRegister(ins->condExpr());
masm.ma_cmp(cond, Imm32(0));
if (mirType == MIRType_Int32) {
if (mirType == MIRType::Int32) {
Register falseExpr = ToRegister(ins->falseExpr());
Register out = ToRegister(ins->output());
MOZ_ASSERT(ToRegister(ins->trueExpr()) == out, "true expr input is reused for output");
@ -2131,9 +2131,9 @@ CodeGeneratorARM::visitAsmSelect(LAsmSelect* ins)
FloatRegister falseExpr = ToFloatRegister(ins->falseExpr());
if (mirType == MIRType_Double)
if (mirType == MIRType::Double)
masm.moveDouble(falseExpr, out, Assembler::Zero);
else if (mirType == MIRType_Float32)
else if (mirType == MIRType::Float32)
masm.moveFloat32(falseExpr, out, Assembler::Zero);
else
MOZ_CRASH("unhandled type in visitAsmSelect!");
@ -2149,16 +2149,16 @@ CodeGeneratorARM::visitAsmReinterpret(LAsmReinterpret* lir)
DebugOnly<MIRType> from = ins->input()->type();
switch (to) {
case MIRType_Int32:
MOZ_ASSERT(from == MIRType_Float32);
case MIRType::Int32:
MOZ_ASSERT(from == MIRType::Float32);
masm.ma_vxfer(ToFloatRegister(lir->input()), ToRegister(lir->output()));
break;
case MIRType_Float32:
MOZ_ASSERT(from == MIRType_Int32);
case MIRType::Float32:
MOZ_ASSERT(from == MIRType::Int32);
masm.ma_vxfer(ToRegister(lir->input()), ToFloatRegister(lir->output()));
break;
case MIRType_Double:
case MIRType_Int64:
case MIRType::Double:
case MIRType::Int64:
MOZ_CRASH("not handled by this LIR opcode");
default:
MOZ_CRASH("unexpected AsmReinterpret");
@ -2198,10 +2198,10 @@ CodeGeneratorARM::visitAsmJSCall(LAsmJSCall* ins)
emitAsmJSCall(ins);
switch (mir->type()) {
case MIRType_Double:
case MIRType::Double:
masm.ma_vxfer(r0, r1, d0);
break;
case MIRType_Float32:
case MIRType::Float32:
masm.as_vxfer(r0, InvalidReg, VFPRegister(d0).singleOverlay(), Assembler::CoreToFloat);
break;
default:
@ -2723,9 +2723,9 @@ CodeGeneratorARM::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar* ins)
{
const MAsmJSLoadGlobalVar* mir = ins->mir();
unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
if (mir->type() == MIRType_Int32) {
if (mir->type() == MIRType::Int32) {
masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr), ToRegister(ins->output()));
} else if (mir->type() == MIRType_Float32) {
} else if (mir->type() == MIRType::Float32) {
VFPRegister vd(ToFloatRegister(ins->output()));
masm.ma_vldr(Address(GlobalReg, addr), vd.singleOverlay());
} else {
@ -2742,9 +2742,9 @@ CodeGeneratorARM::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar* ins)
MOZ_ASSERT(IsNumberType(type));
unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
if (type == MIRType_Int32) {
if (type == MIRType::Int32) {
masm.ma_dtr(IsStore, GlobalReg, Imm32(addr), ToRegister(ins->value()));
} else if (type == MIRType_Float32) {
} else if (type == MIRType::Float32) {
VFPRegister vd(ToFloatRegister(ins->value()));
masm.ma_vstr(vd.singleOverlay(), Address(GlobalReg, addr));
} else {


@ -20,7 +20,7 @@ using mozilla::FloorLog2;
LBoxAllocation
LIRGeneratorARM::useBoxFixed(MDefinition* mir, Register reg1, Register reg2, bool useAtStart)
{
MOZ_ASSERT(mir->type() == MIRType_Value);
MOZ_ASSERT(mir->type() == MIRType::Value);
MOZ_ASSERT(reg1 != reg2);
ensureDefined(mir);
@ -91,7 +91,7 @@ LIRGeneratorARM::visitUnbox(MUnbox* unbox)
{
MDefinition* inner = unbox->getOperand(0);
if (inner->type() == MIRType_ObjectOrNull) {
if (inner->type() == MIRType::ObjectOrNull) {
LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(inner));
if (unbox->fallible())
assignSnapshot(lir, unbox->bailoutKind());
@ -102,7 +102,7 @@ LIRGeneratorARM::visitUnbox(MUnbox* unbox)
// An unbox on arm reads in a type tag (either in memory or a register) and
// a payload. Unlike most instructions consuming a box, we ask for the type
// second, so that the result can re-use the first input.
MOZ_ASSERT(inner->type() == MIRType_Value);
MOZ_ASSERT(inner->type() == MIRType::Value);
ensureDefined(inner);
@ -134,7 +134,7 @@ void
LIRGeneratorARM::visitReturn(MReturn* ret)
{
MDefinition* opd = ret->getOperand(0);
MOZ_ASSERT(opd->type() == MIRType_Value);
MOZ_ASSERT(opd->type() == MIRType::Value);
LReturn* ins = new(alloc()) LReturn;
ins->setOperand(0, LUse(JSReturnReg_Type));
@ -354,7 +354,7 @@ void
LIRGeneratorARM::visitPowHalf(MPowHalf* ins)
{
MDefinition* input = ins->input();
MOZ_ASSERT(input->type() == MIRType_Double);
MOZ_ASSERT(input->type() == MIRType::Double);
LPowHalfD* lir = new(alloc()) LPowHalfD(useRegisterAtStart(input));
defineReuseInput(lir, ins, 0);
}
@ -376,7 +376,7 @@ LIRGeneratorARM::newLTableSwitchV(MTableSwitch* tableswitch)
void
LIRGeneratorARM::visitGuardShape(MGuardShape* ins)
{
MOZ_ASSERT(ins->obj()->type() == MIRType_Object);
MOZ_ASSERT(ins->obj()->type() == MIRType::Object);
LDefinition tempObj = temp(LDefinition::OBJECT);
LGuardShape* guard = new(alloc()) LGuardShape(useRegister(ins->obj()), tempObj);
@ -388,7 +388,7 @@ LIRGeneratorARM::visitGuardShape(MGuardShape* ins)
void
LIRGeneratorARM::visitGuardObjectGroup(MGuardObjectGroup* ins)
{
MOZ_ASSERT(ins->obj()->type() == MIRType_Object);
MOZ_ASSERT(ins->obj()->type() == MIRType::Object);
LDefinition tempObj = temp(LDefinition::OBJECT);
LGuardObjectGroup* guard = new(alloc()) LGuardObjectGroup(useRegister(ins->obj()), tempObj);
@ -403,8 +403,8 @@ LIRGeneratorARM::lowerUrshD(MUrsh* mir)
MDefinition* lhs = mir->lhs();
MDefinition* rhs = mir->rhs();
MOZ_ASSERT(lhs->type() == MIRType_Int32);
MOZ_ASSERT(rhs->type() == MIRType_Int32);
MOZ_ASSERT(lhs->type() == MIRType::Int32);
MOZ_ASSERT(rhs->type() == MIRType::Int32);
LUrshD* lir = new(alloc()) LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
define(lir, mir);
@ -413,7 +413,7 @@ LIRGeneratorARM::lowerUrshD(MUrsh* mir)
void
LIRGeneratorARM::visitAsmSelect(MAsmSelect* ins)
{
MOZ_ASSERT(ins->type() != MIRType_Int64);
MOZ_ASSERT(ins->type() != MIRType::Int64);
auto* lir = new(alloc()) LAsmSelect(useRegisterAtStart(ins->trueExpr()),
useRegister(ins->falseExpr()),
@ -426,12 +426,12 @@ LIRGeneratorARM::visitAsmSelect(MAsmSelect* ins)
void
LIRGeneratorARM::visitAsmJSNeg(MAsmJSNeg* ins)
{
if (ins->type() == MIRType_Int32) {
if (ins->type() == MIRType::Int32) {
define(new(alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
} else if (ins->type() == MIRType_Float32) {
} else if (ins->type() == MIRType::Float32) {
define(new(alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
} else {
MOZ_ASSERT(ins->type() == MIRType_Double);
MOZ_ASSERT(ins->type() == MIRType::Double);
define(new(alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
}
}
@ -483,7 +483,7 @@ LIRGeneratorARM::lowerUMod(MMod* mod)
void
LIRGeneratorARM::visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins)
{
MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
LAsmJSUInt32ToDouble* lir = new(alloc()) LAsmJSUInt32ToDouble(useRegisterAtStart(ins->input()));
define(lir, ins);
}
@ -491,7 +491,7 @@ LIRGeneratorARM::visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins)
void
LIRGeneratorARM::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins)
{
MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()));
define(lir, ins);
}
@ -502,7 +502,7 @@ LIRGeneratorARM::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
MOZ_ASSERT(ins->offset() == 0);
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
LAllocation baseAlloc;
// For the ARM it is best to keep the 'base' in a register if a bounds check is needed.
@ -523,7 +523,7 @@ LIRGeneratorARM::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
MOZ_ASSERT(ins->offset() == 0);
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
LAllocation baseAlloc;
if (base->isConstant() && !ins->needsBoundsCheck()) {
@ -546,7 +546,7 @@ void
LIRGeneratorARM::lowerTruncateDToInt32(MTruncateToInt32* ins)
{
MDefinition* opd = ins->input();
MOZ_ASSERT(opd->type() == MIRType_Double);
MOZ_ASSERT(opd->type() == MIRType::Double);
define(new(alloc()) LTruncateDToInt32(useRegister(opd), LDefinition::BogusTemp()), ins);
}
@ -555,7 +555,7 @@ void
LIRGeneratorARM::lowerTruncateFToInt32(MTruncateToInt32* ins)
{
MDefinition* opd = ins->input();
MOZ_ASSERT(opd->type() == MIRType_Float32);
MOZ_ASSERT(opd->type() == MIRType::Float32);
define(new(alloc()) LTruncateFToInt32(useRegister(opd), LDefinition::BogusTemp()), ins);
}
@ -596,8 +596,8 @@ LIRGeneratorARM::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayE
MOZ_ASSERT(HasLDSTREXBHD());
MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
const LUse elements = useRegister(ins->elements());
const LAllocation index = useRegisterOrConstant(ins->index());
@ -608,7 +608,7 @@ LIRGeneratorARM::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayE
const LAllocation value = useRegister(ins->value());
LDefinition tempDef = LDefinition::BogusTemp();
if (ins->arrayType() == Scalar::Uint32) {
MOZ_ASSERT(ins->type() == MIRType_Double);
MOZ_ASSERT(ins->type() == MIRType::Double);
tempDef = temp();
}
@ -625,8 +625,8 @@ LIRGeneratorARM::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop
MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
const LUse elements = useRegister(ins->elements());
const LAllocation index = useRegisterOrConstant(ins->index());
@ -666,8 +666,8 @@ LIRGeneratorARM::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArra
MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
const LUse elements = useRegister(ins->elements());
const LAllocation index = useRegisterOrConstant(ins->index());
@ -698,7 +698,7 @@ LIRGeneratorARM::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
MOZ_ASSERT(ins->offset() == 0);
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
if (byteSize(ins->accessType()) != 4 && !HasLDSTREXBHD()) {
LAsmJSCompareExchangeCallout* lir =
@ -720,7 +720,7 @@ LIRGeneratorARM::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
void
LIRGeneratorARM::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
{
MOZ_ASSERT(ins->base()->type() == MIRType_Int32);
MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
MOZ_ASSERT(ins->accessType() < Scalar::Float32);
MOZ_ASSERT(ins->offset() == 0);
@ -743,7 +743,7 @@ LIRGeneratorARM::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
MOZ_ASSERT(ins->offset() == 0);
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
if (byteSize(ins->accessType()) != 4 && !HasLDSTREXBHD()) {
LAsmJSAtomicBinopCallout* lir =

View file

@ -3073,7 +3073,7 @@ void
MacroAssemblerARMCompat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType,
const T& dest, MIRType slotType)
{
if (valueType == MIRType_Double) {
if (valueType == MIRType::Double) {
storeDouble(value.reg().typedReg().fpu(), dest);
return;
}

View file

@ -32,8 +32,8 @@ ABIArg
ABIArgGenerator::next(MIRType type)
{
switch (type) {
case MIRType_Int32:
case MIRType_Pointer:
case MIRType::Int32:
case MIRType::Pointer:
if (intRegIndex_ == NumIntArgRegs) {
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uintptr_t);
@ -43,15 +43,15 @@ ABIArgGenerator::next(MIRType type)
intRegIndex_++;
break;
case MIRType_Float32:
case MIRType_Double:
case MIRType::Float32:
case MIRType::Double:
if (floatRegIndex_ == NumFloatArgRegs) {
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(double);
break;
}
current_ = ABIArg(FloatRegister(floatRegIndex_,
type == MIRType_Double ? FloatRegisters::Double
type == MIRType::Double ? FloatRegisters::Double
: FloatRegisters::Single));
floatRegIndex_++;
break;

View file

@ -34,7 +34,7 @@ class LIRGeneratorARM64 : public LIRGeneratorShared
bool needTempForPostBarrier() { return true; }
// ARM64 has a scratch register, so no need for another temp for dispatch ICs.
LDefinition tempForDispatchCache(MIRType outputType = MIRType_None) {
LDefinition tempForDispatchCache(MIRType outputType = MIRType::None) {
return LDefinition::BogusTemp();
}

View file

@ -286,17 +286,17 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
template <typename T>
void storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest, MIRType slotType) {
if (valueType == MIRType_Double) {
if (valueType == MIRType::Double) {
storeDouble(value.reg().typedReg().fpu(), dest);
return;
}
// For known integers and booleans, we can just store the unboxed value if
// the slot has the same type.
if ((valueType == MIRType_Int32 || valueType == MIRType_Boolean) && slotType == valueType) {
if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) && slotType == valueType) {
if (value.constant()) {
Value val = value.value();
if (valueType == MIRType_Int32)
if (valueType == MIRType::Int32)
store32(Imm32(val.toInt32()), dest);
else
store32(Imm32(val.toBoolean() ? 1 : 0), dest);
@ -1842,7 +1842,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
MOZ_ASSERT(scratch64.asUnsized() != address.base);
Ldr(scratch64, toMemOperand(address));
int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64));
} else if (type == MIRType_Int32 || type == MIRType_Boolean) {
} else if (type == MIRType::Int32 || type == MIRType::Boolean) {
load32(address, dest.gpr());
} else {
loadPtr(address, dest.gpr());
@ -1858,7 +1858,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
MOZ_ASSERT(scratch64.asUnsized() != address.index);
doBaseIndex(scratch64, address, vixl::LDR_x);
int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64));
} else if (type == MIRType_Int32 || type == MIRType_Boolean) {
} else if (type == MIRType::Int32 || type == MIRType::Boolean) {
load32(address, dest.gpr());
} else {
loadPtr(address, dest.gpr());

View file

@ -1914,7 +1914,7 @@ CodeGeneratorMIPSShared::visitAsmSelect(LAsmSelect* ins)
Register cond = ToRegister(ins->condExpr());
const LAllocation* falseExpr = ins->falseExpr();
if (mirType == MIRType_Int32) {
if (mirType == MIRType::Int32) {
Register out = ToRegister(ins->output());
MOZ_ASSERT(ToRegister(ins->trueExpr()) == out, "true expr input is reused for output");
masm.as_movz(out, ToRegister(falseExpr), cond);
@ -1925,9 +1925,9 @@ CodeGeneratorMIPSShared::visitAsmSelect(LAsmSelect* ins)
MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out, "true expr input is reused for output");
if (falseExpr->isFloatReg()) {
if (mirType == MIRType_Float32)
if (mirType == MIRType::Float32)
masm.as_movz(Assembler::SingleFloat, out, ToFloatRegister(falseExpr), cond);
else if (mirType == MIRType_Double)
else if (mirType == MIRType::Double)
masm.as_movz(Assembler::DoubleFloat, out, ToFloatRegister(falseExpr), cond);
else
MOZ_CRASH("unhandled type in visitAsmSelect!");
@ -1935,9 +1935,9 @@ CodeGeneratorMIPSShared::visitAsmSelect(LAsmSelect* ins)
Label done;
masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
if (mirType == MIRType_Float32)
if (mirType == MIRType::Float32)
masm.loadFloat32(ToAddress(falseExpr), out);
else if (mirType == MIRType_Double)
else if (mirType == MIRType::Double)
masm.loadDouble(ToAddress(falseExpr), out);
else
MOZ_CRASH("unhandled type in visitAsmSelect!");
@ -1956,16 +1956,16 @@ CodeGeneratorMIPSShared::visitAsmReinterpret(LAsmReinterpret* lir)
DebugOnly<MIRType> from = ins->input()->type();
switch (to) {
case MIRType_Int32:
MOZ_ASSERT(from == MIRType_Float32);
case MIRType::Int32:
MOZ_ASSERT(from == MIRType::Float32);
masm.as_mfc1(ToRegister(lir->output()), ToFloatRegister(lir->input()));
break;
case MIRType_Float32:
MOZ_ASSERT(from == MIRType_Int32);
case MIRType::Float32:
MOZ_ASSERT(from == MIRType::Int32);
masm.as_mtc1(ToRegister(lir->input()), ToFloatRegister(lir->output()));
break;
case MIRType_Double:
case MIRType_Int64:
case MIRType::Double:
case MIRType::Int64:
MOZ_CRASH("not handled by this LIR opcode");
default:
MOZ_CRASH("unexpected AsmReinterpret");
@ -2028,9 +2028,9 @@ CodeGeneratorMIPSShared::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar* ins)
{
const MAsmJSLoadGlobalVar* mir = ins->mir();
unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
if (mir->type() == MIRType_Int32)
if (mir->type() == MIRType::Int32)
masm.load32(Address(GlobalReg, addr), ToRegister(ins->output()));
else if (mir->type() == MIRType_Float32)
else if (mir->type() == MIRType::Float32)
masm.loadFloat32(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
else
masm.loadDouble(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
@ -2043,9 +2043,9 @@ CodeGeneratorMIPSShared::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar* ins)
MOZ_ASSERT(IsNumberType(mir->value()->type()));
unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
if (mir->value()->type() == MIRType_Int32)
if (mir->value()->type() == MIRType::Int32)
masm.store32(ToRegister(ins->value()), Address(GlobalReg, addr));
else if (mir->value()->type() == MIRType_Float32)
else if (mir->value()->type() == MIRType::Float32)
masm.storeFloat32(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
else
masm.storeDouble(ToFloatRegister(ins->value()), Address(GlobalReg, addr));

View file

@ -204,7 +204,7 @@ void
LIRGeneratorMIPSShared::visitPowHalf(MPowHalf* ins)
{
MDefinition* input = ins->input();
MOZ_ASSERT(input->type() == MIRType_Double);
MOZ_ASSERT(input->type() == MIRType::Double);
LPowHalfD* lir = new(alloc()) LPowHalfD(useRegisterAtStart(input));
defineReuseInput(lir, ins, 0);
}
@ -226,7 +226,7 @@ LIRGeneratorMIPSShared::newLTableSwitchV(MTableSwitch* tableswitch)
void
LIRGeneratorMIPSShared::visitGuardShape(MGuardShape* ins)
{
MOZ_ASSERT(ins->obj()->type() == MIRType_Object);
MOZ_ASSERT(ins->obj()->type() == MIRType::Object);
LDefinition tempObj = temp(LDefinition::OBJECT);
LGuardShape* guard = new(alloc()) LGuardShape(useRegister(ins->obj()), tempObj);
@ -238,7 +238,7 @@ LIRGeneratorMIPSShared::visitGuardShape(MGuardShape* ins)
void
LIRGeneratorMIPSShared::visitGuardObjectGroup(MGuardObjectGroup* ins)
{
MOZ_ASSERT(ins->obj()->type() == MIRType_Object);
MOZ_ASSERT(ins->obj()->type() == MIRType::Object);
LDefinition tempObj = temp(LDefinition::OBJECT);
LGuardObjectGroup* guard = new(alloc()) LGuardObjectGroup(useRegister(ins->obj()), tempObj);
@ -253,8 +253,8 @@ LIRGeneratorMIPSShared::lowerUrshD(MUrsh* mir)
MDefinition* lhs = mir->lhs();
MDefinition* rhs = mir->rhs();
MOZ_ASSERT(lhs->type() == MIRType_Int32);
MOZ_ASSERT(rhs->type() == MIRType_Int32);
MOZ_ASSERT(lhs->type() == MIRType::Int32);
MOZ_ASSERT(rhs->type() == MIRType::Int32);
LUrshD* lir = new(alloc()) LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
define(lir, mir);
@ -263,12 +263,12 @@ LIRGeneratorMIPSShared::lowerUrshD(MUrsh* mir)
void
LIRGeneratorMIPSShared::visitAsmJSNeg(MAsmJSNeg* ins)
{
if (ins->type() == MIRType_Int32) {
if (ins->type() == MIRType::Int32) {
define(new(alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
} else if (ins->type() == MIRType_Float32) {
} else if (ins->type() == MIRType::Float32) {
define(new(alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
} else {
MOZ_ASSERT(ins->type() == MIRType_Double);
MOZ_ASSERT(ins->type() == MIRType::Double);
define(new(alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
}
}
@ -276,7 +276,7 @@ LIRGeneratorMIPSShared::visitAsmJSNeg(MAsmJSNeg* ins)
void
LIRGeneratorMIPSShared::visitAsmSelect(MAsmSelect* ins)
{
if (ins->type() == MIRType_Int64) {
if (ins->type() == MIRType::Int64) {
auto* lir = new(alloc()) LAsmSelectI64(useInt64RegisterAtStart(ins->trueExpr()),
useInt64(ins->falseExpr()),
useRegister(ins->condExpr())
@ -327,7 +327,7 @@ LIRGeneratorMIPSShared::lowerUMod(MMod* mod)
void
LIRGeneratorMIPSShared::visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins)
{
MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
LAsmJSUInt32ToDouble* lir = new(alloc()) LAsmJSUInt32ToDouble(useRegisterAtStart(ins->input()));
define(lir, ins);
}
@ -335,7 +335,7 @@ LIRGeneratorMIPSShared::visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins)
void
LIRGeneratorMIPSShared::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins)
{
MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()));
define(lir, ins);
}
@ -346,7 +346,7 @@ LIRGeneratorMIPSShared::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
MOZ_ASSERT(ins->offset() == 0);
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
LAllocation baseAlloc;
// For MIPS it is best to keep the 'base' in a register if a bounds check
@ -367,7 +367,7 @@ LIRGeneratorMIPSShared::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
MOZ_ASSERT(ins->offset() == 0);
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
LAllocation baseAlloc;
if (base->isConstant() && !ins->needsBoundsCheck()) {
@ -434,8 +434,8 @@ LIRGeneratorMIPSShared::visitCompareExchangeTypedArrayElement(MCompareExchangeTy
MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
const LUse elements = useRegister(ins->elements());
const LAllocation index = useRegisterOrConstant(ins->index());
@ -462,8 +462,8 @@ LIRGeneratorMIPSShared::visitAtomicExchangeTypedArrayElement(MAtomicExchangeType
{
MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
const LUse elements = useRegister(ins->elements());
const LAllocation index = useRegisterOrConstant(ins->index());
@ -474,7 +474,7 @@ LIRGeneratorMIPSShared::visitAtomicExchangeTypedArrayElement(MAtomicExchangeType
const LAllocation value = useRegister(ins->value());
LDefinition uint32Temp = LDefinition::BogusTemp();
if (ins->arrayType() == Scalar::Uint32) {
MOZ_ASSERT(ins->type() == MIRType_Double);
MOZ_ASSERT(ins->type() == MIRType::Double);
uint32Temp = temp();
}
@ -493,7 +493,7 @@ LIRGeneratorMIPSShared::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap*
MOZ_ASSERT(ins->offset() == 0);
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
LAsmJSCompareExchangeHeap* lir =
new(alloc()) LAsmJSCompareExchangeHeap(useRegister(base),
@ -509,7 +509,7 @@ LIRGeneratorMIPSShared::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap*
void
LIRGeneratorMIPSShared::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
{
MOZ_ASSERT(ins->base()->type() == MIRType_Int32);
MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
MOZ_ASSERT(ins->offset() == 0);
const LAllocation base = useRegister(ins->base());
@ -534,7 +534,7 @@ LIRGeneratorMIPSShared::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
MOZ_ASSERT(ins->offset() == 0);
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
if (!ins->hasUses()) {
LAsmJSAtomicBinopHeapForEffect* lir =
@ -567,8 +567,8 @@ LIRGeneratorMIPSShared::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayEleme
MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
const LUse elements = useRegister(ins->elements());
const LAllocation index = useRegisterOrConstant(ins->index());

View file

@ -25,15 +25,15 @@ ABIArgGenerator::next(MIRType type)
{
Register destReg;
switch (type) {
case MIRType_Int32:
case MIRType_Pointer:
case MIRType::Int32:
case MIRType::Pointer:
if (GetIntArgReg(usedArgSlots_, &destReg))
current_ = ABIArg(destReg);
else
current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
usedArgSlots_++;
break;
case MIRType_Float32:
case MIRType::Float32:
if (!usedArgSlots_) {
current_ = ABIArg(f12.asSingle());
firstArgFloatSize_ = 1;
@ -48,7 +48,7 @@ ABIArgGenerator::next(MIRType type)
}
usedArgSlots_++;
break;
case MIRType_Double:
case MIRType::Double:
if (!usedArgSlots_) {
current_ = ABIArg(f12);
usedArgSlots_ = 2;

View file

@ -181,7 +181,7 @@ CodeGeneratorMIPS::visitBoxFloatingPoint(LBoxFloatingPoint* box)
const LAllocation* in = box->getOperand(0);
FloatRegister reg = ToFloatRegister(in);
if (box->type() == MIRType_Float32) {
if (box->type() == MIRType::Float32) {
masm.convertFloat32ToDouble(reg, ScratchDoubleReg);
reg = ScratchDoubleReg;
}

View file

@ -18,7 +18,7 @@ using namespace js::jit;
LBoxAllocation
LIRGeneratorMIPS::useBoxFixed(MDefinition* mir, Register reg1, Register reg2, bool useAtStart)
{
MOZ_ASSERT(mir->type() == MIRType_Value);
MOZ_ASSERT(mir->type() == MIRType::Value);
MOZ_ASSERT(reg1 != reg2);
ensureDefined(mir);
@ -71,7 +71,7 @@ LIRGeneratorMIPS::visitUnbox(MUnbox* unbox)
{
MDefinition* inner = unbox->getOperand(0);
if (inner->type() == MIRType_ObjectOrNull) {
if (inner->type() == MIRType::ObjectOrNull) {
LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(inner));
if (unbox->fallible())
assignSnapshot(lir, unbox->bailoutKind());
@ -82,7 +82,7 @@ LIRGeneratorMIPS::visitUnbox(MUnbox* unbox)
// An unbox on mips reads in a type tag (either in memory or a register) and
// a payload. Unlike most instructions consuming a box, we ask for the type
// second, so that the result can re-use the first input.
MOZ_ASSERT(inner->type() == MIRType_Value);
MOZ_ASSERT(inner->type() == MIRType::Value);
ensureDefined(inner);
@ -115,7 +115,7 @@ void
LIRGeneratorMIPS::visitReturn(MReturn* ret)
{
MDefinition* opd = ret->getOperand(0);
MOZ_ASSERT(opd->type() == MIRType_Value);
MOZ_ASSERT(opd->type() == MIRType::Value);
LReturn* ins = new(alloc()) LReturn;
ins->setOperand(0, LUse(JSReturnReg_Type));
@ -158,7 +158,7 @@ void
LIRGeneratorMIPS::lowerTruncateDToInt32(MTruncateToInt32* ins)
{
MDefinition* opd = ins->input();
MOZ_ASSERT(opd->type() == MIRType_Double);
MOZ_ASSERT(opd->type() == MIRType::Double);
define(new(alloc()) LTruncateDToInt32(useRegister(opd), LDefinition::BogusTemp()), ins);
}
@ -167,7 +167,7 @@ void
LIRGeneratorMIPS::lowerTruncateFToInt32(MTruncateToInt32* ins)
{
MDefinition* opd = ins->input();
MOZ_ASSERT(opd->type() == MIRType_Float32);
MOZ_ASSERT(opd->type() == MIRType::Float32);
define(new(alloc()) LTruncateFToInt32(useRegister(opd), LDefinition::BogusTemp()), ins);
}

View file

@ -1359,7 +1359,7 @@ void
MacroAssemblerMIPSCompat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest,
MIRType slotType)
{
if (valueType == MIRType_Double) {
if (valueType == MIRType::Double) {
storeDouble(value.reg().typedReg().fpu(), dest);
return;
}

View file

@ -23,8 +23,8 @@ ABIArg
ABIArgGenerator::next(MIRType type)
{
switch (type) {
case MIRType_Int32:
case MIRType_Pointer: {
case MIRType::Int32:
case MIRType::Pointer: {
Register destReg;
if (GetIntArgReg(usedArgSlots_, &destReg))
current_ = ABIArg(destReg);
@ -33,13 +33,13 @@ ABIArgGenerator::next(MIRType type)
usedArgSlots_++;
break;
}
case MIRType_Float32:
case MIRType_Double: {
case MIRType::Float32:
case MIRType::Double: {
FloatRegister destFReg;
FloatRegister::ContentType contentType;
if (!usedArgSlots_)
firstArgFloat = true;
contentType = (type == MIRType_Double) ?
contentType = (type == MIRType::Double) ?
FloatRegisters::Double : FloatRegisters::Single;
if (GetFloatArgReg(usedArgSlots_, &destFReg))
current_ = ABIArg(FloatRegister(destFReg.id(), contentType));

View file

@ -155,7 +155,7 @@ CodeGeneratorMIPS64::visitBox(LBox* box)
if (IsFloatingPointType(box->type())) {
FloatRegister reg = ToFloatRegister(in);
if (box->type() == MIRType_Float32) {
if (box->type() == MIRType::Float32) {
masm.convertFloat32ToDouble(reg, ScratchDoubleReg);
reg = ScratchDoubleReg;
}
@ -182,19 +182,19 @@ CodeGeneratorMIPS64::visitUnbox(LUnbox* unbox)
if (input->isRegister()) {
Register inputReg = ToRegister(input);
switch (mir->type()) {
case MIRType_Int32:
case MIRType::Int32:
masm.unboxInt32(inputReg, result);
break;
case MIRType_Boolean:
case MIRType::Boolean:
masm.unboxBoolean(inputReg, result);
break;
case MIRType_Object:
case MIRType::Object:
masm.unboxObject(inputReg, result);
break;
case MIRType_String:
case MIRType::String:
masm.unboxString(inputReg, result);
break;
case MIRType_Symbol:
case MIRType::Symbol:
masm.unboxSymbol(inputReg, result);
break;
default:
@ -205,19 +205,19 @@ CodeGeneratorMIPS64::visitUnbox(LUnbox* unbox)
Address inputAddr = ToAddress(input);
switch (mir->type()) {
case MIRType_Int32:
case MIRType::Int32:
masm.unboxInt32(inputAddr, result);
break;
case MIRType_Boolean:
case MIRType::Boolean:
masm.unboxBoolean(inputAddr, result);
break;
case MIRType_Object:
case MIRType::Object:
masm.unboxObject(inputAddr, result);
break;
case MIRType_String:
case MIRType::String:
masm.unboxString(inputAddr, result);
break;
case MIRType_Symbol:
case MIRType::Symbol:
masm.unboxSymbol(inputAddr, result);
break;
default:
@ -306,7 +306,7 @@ CodeGeneratorMIPS64::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir)
void
CodeGeneratorMIPS64::visitAsmSelectI64(LAsmSelectI64* lir)
{
MOZ_ASSERT(lir->mir()->type() == MIRType_Int64);
MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
Register cond = ToRegister(lir->condExpr());
const LAllocation* falseExpr = lir->falseExpr();
@ -327,16 +327,16 @@ CodeGeneratorMIPS64::visitAsmSelectI64(LAsmSelectI64* lir)
void
CodeGeneratorMIPS64::visitAsmReinterpretFromI64(LAsmReinterpretFromI64* lir)
{
MOZ_ASSERT(lir->mir()->type() == MIRType_Double);
MOZ_ASSERT(lir->mir()->input()->type() == MIRType_Int64);
MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
masm.as_dmtc1(ToRegister(lir->input()), ToFloatRegister(lir->output()));
}
void
CodeGeneratorMIPS64::visitAsmReinterpretToI64(LAsmReinterpretToI64* lir)
{
MOZ_ASSERT(lir->mir()->type() == MIRType_Int64);
MOZ_ASSERT(lir->mir()->input()->type() == MIRType_Double);
MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
masm.as_dmfc1(ToRegister(lir->output()), ToFloatRegister(lir->input()));
}

View file

@ -18,7 +18,7 @@ using namespace js::jit;
LBoxAllocation
LIRGeneratorMIPS64::useBoxFixed(MDefinition* mir, Register reg1, Register reg2, bool useAtStart)
{
MOZ_ASSERT(mir->type() == MIRType_Value);
MOZ_ASSERT(mir->type() == MIRType::Value);
ensureDefined(mir);
return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
@ -48,7 +48,7 @@ LIRGeneratorMIPS64::visitUnbox(MUnbox* unbox)
{
MDefinition* box = unbox->getOperand(0);
if (box->type() == MIRType_ObjectOrNull) {
if (box->type() == MIRType::ObjectOrNull) {
LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(box));
if (unbox->fallible())
assignSnapshot(lir, unbox->bailoutKind());
@ -56,7 +56,7 @@ LIRGeneratorMIPS64::visitUnbox(MUnbox* unbox)
return;
}
MOZ_ASSERT(box->type() == MIRType_Value);
MOZ_ASSERT(box->type() == MIRType::Value);
LUnbox* lir;
if (IsFloatingPointType(unbox->type())) {
@ -79,7 +79,7 @@ void
LIRGeneratorMIPS64::visitReturn(MReturn* ret)
{
MDefinition* opd = ret->getOperand(0);
MOZ_ASSERT(opd->type() == MIRType_Value);
MOZ_ASSERT(opd->type() == MIRType::Value);
LReturn* ins = new(alloc()) LReturn;
ins->setOperand(0, useFixed(opd, JSReturnReg));
@ -103,7 +103,7 @@ void
LIRGeneratorMIPS64::lowerTruncateDToInt32(MTruncateToInt32* ins)
{
MDefinition* opd = ins->input();
MOZ_ASSERT(opd->type() == MIRType_Double);
MOZ_ASSERT(opd->type() == MIRType::Double);
define(new(alloc())
LTruncateDToInt32(useRegister(opd), tempDouble()), ins);
@ -113,7 +113,7 @@ void
LIRGeneratorMIPS64::lowerTruncateFToInt32(MTruncateToInt32* ins)
{
MDefinition* opd = ins->input();
MOZ_ASSERT(opd->type() == MIRType_Float32);
MOZ_ASSERT(opd->type() == MIRType::Float32);
define(new(alloc())
LTruncateFToInt32(useRegister(opd), tempFloat32()), ins);

View file

@ -1530,17 +1530,17 @@ void
MacroAssemblerMIPS64Compat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest,
MIRType slotType)
{
if (valueType == MIRType_Double) {
if (valueType == MIRType::Double) {
storeDouble(value.reg().typedReg().fpu(), dest);
return;
}
// For known integers and booleans, we can just store the unboxed value if
// the slot has the same type.
if ((valueType == MIRType_Int32 || valueType == MIRType_Boolean) && slotType == valueType) {
if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) && slotType == valueType) {
if (value.constant()) {
Value val = value.value();
if (valueType == MIRType_Int32)
if (valueType == MIRType::Int32)
store32(Imm32(val.toInt32()), dest);
else
store32(Imm32(val.toBoolean() ? 1 : 0), dest);

View file

@ -426,9 +426,9 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
void loadUnboxedValue(const T& address, MIRType type, AnyRegister dest) {
if (dest.isFloat())
loadInt32OrDouble(address, dest.fpu());
else if (type == MIRType_Int32)
else if (type == MIRType::Int32)
unboxInt32(address, dest.gpr());
else if (type == MIRType_Boolean)
else if (type == MIRType::Boolean)
unboxBoolean(address, dest.gpr());
else
unboxNonDouble(address, dest.gpr());

View file

@ -386,14 +386,14 @@ CodeGeneratorShared::encodeAllocation(LSnapshot* snapshot, MDefinition* mir,
mir = mir->toBox()->getOperand(0);
MIRType type =
mir->isRecoveredOnBailout() ? MIRType_None :
mir->isUnused() ? MIRType_MagicOptimizedOut :
mir->isRecoveredOnBailout() ? MIRType::None :
mir->isUnused() ? MIRType::MagicOptimizedOut :
mir->type();
RValueAllocation alloc;
switch (type) {
case MIRType_None:
case MIRType::None:
{
MOZ_ASSERT(mir->isRecoveredOnBailout());
uint32_t index = 0;
@ -422,19 +422,19 @@ CodeGeneratorShared::encodeAllocation(LSnapshot* snapshot, MDefinition* mir,
alloc = RValueAllocation::RecoverInstruction(index);
break;
}
case MIRType_Undefined:
case MIRType::Undefined:
alloc = RValueAllocation::Undefined();
break;
case MIRType_Null:
case MIRType::Null:
alloc = RValueAllocation::Null();
break;
case MIRType_Int32:
case MIRType_String:
case MIRType_Symbol:
case MIRType_Object:
case MIRType_ObjectOrNull:
case MIRType_Boolean:
case MIRType_Double:
case MIRType::Int32:
case MIRType::String:
case MIRType::Symbol:
case MIRType::Object:
case MIRType::ObjectOrNull:
case MIRType::Boolean:
case MIRType::Double:
{
LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
if (payload->isConstant()) {
@ -446,7 +446,7 @@ CodeGeneratorShared::encodeAllocation(LSnapshot* snapshot, MDefinition* mir,
}
JSValueType valueType =
(type == MIRType_ObjectOrNull) ? JSVAL_TYPE_OBJECT : ValueTypeFromMIRType(type);
(type == MIRType::ObjectOrNull) ? JSVAL_TYPE_OBJECT : ValueTypeFromMIRType(type);
MOZ_ASSERT(payload->isMemory() || payload->isRegister());
if (payload->isMemory())
@ -457,10 +457,10 @@ CodeGeneratorShared::encodeAllocation(LSnapshot* snapshot, MDefinition* mir,
alloc = RValueAllocation::Double(ToFloatRegister(payload));
break;
}
case MIRType_Float32:
case MIRType_Bool32x4:
case MIRType_Int32x4:
case MIRType_Float32x4:
case MIRType::Float32:
case MIRType::Bool32x4:
case MIRType::Int32x4:
case MIRType::Float32x4:
{
LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
if (payload->isConstant()) {
@ -478,14 +478,14 @@ CodeGeneratorShared::encodeAllocation(LSnapshot* snapshot, MDefinition* mir,
alloc = RValueAllocation::AnyFloat(ToStackIndex(payload));
break;
}
case MIRType_MagicOptimizedArguments:
case MIRType_MagicOptimizedOut:
case MIRType_MagicUninitializedLexical:
case MIRType::MagicOptimizedArguments:
case MIRType::MagicOptimizedOut:
case MIRType::MagicUninitializedLexical:
{
uint32_t index;
Value v = MagicValue(type == MIRType_MagicOptimizedArguments
Value v = MagicValue(type == MIRType::MagicOptimizedArguments
? JS_OPTIMIZED_ARGUMENTS
: (type == MIRType_MagicOptimizedOut
: (type == MIRType::MagicOptimizedOut
? JS_OPTIMIZED_OUT
: JS_UNINITIALIZED_LEXICAL));
masm.propagateOOM(graph.addConstantToPool(v, &index));
@ -494,7 +494,7 @@ CodeGeneratorShared::encodeAllocation(LSnapshot* snapshot, MDefinition* mir,
}
default:
{
MOZ_ASSERT(mir->type() == MIRType_Value);
MOZ_ASSERT(mir->type() == MIRType::Value);
LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
#ifdef JS_NUNBOX32
LAllocation* type = snapshot->typeOfSlot(*allocIndex);
@ -1535,17 +1535,17 @@ CodeGeneratorShared::emitPreBarrier(Register base, const LAllocation* index, int
{
if (index->isConstant()) {
Address address(base, ToInt32(index) * sizeof(Value) + offsetAdjustment);
masm.patchableCallPreBarrier(address, MIRType_Value);
masm.patchableCallPreBarrier(address, MIRType::Value);
} else {
BaseIndex address(base, ToRegister(index), TimesEight, offsetAdjustment);
masm.patchableCallPreBarrier(address, MIRType_Value);
masm.patchableCallPreBarrier(address, MIRType::Value);
}
}
void
CodeGeneratorShared::emitPreBarrier(Address address)
{
masm.patchableCallPreBarrier(address, MIRType_Value);
masm.patchableCallPreBarrier(address, MIRType::Value);
}
Label*

View file

@ -28,8 +28,8 @@ LIRGeneratorShared::use(MDefinition* mir, LUse policy)
{
// It is illegal to call use() on an instruction with two defs.
#if BOX_PIECES > 1
MOZ_ASSERT(mir->type() != MIRType_Value);
MOZ_ASSERT(mir->type() != MIRType_Int64);
MOZ_ASSERT(mir->type() != MIRType::Value);
MOZ_ASSERT(mir->type() != MIRType::Int64);
#endif
ensureDefined(mir);
policy.setVirtualRegister(mir->virtualRegister());
@ -151,7 +151,7 @@ LIRGeneratorShared::defineBox(LInstructionHelper<BOX_PIECES, Ops, Temps>* lir, M
{
// Call instructions should use defineReturn.
MOZ_ASSERT(!lir->isCall());
MOZ_ASSERT(mir->type() == MIRType_Value);
MOZ_ASSERT(mir->type() == MIRType::Value);
uint32_t vreg = getVirtualRegister();
@ -174,7 +174,7 @@ LIRGeneratorShared::defineInt64(LInstructionHelper<INT64_PIECES, Ops, Temps>* li
{
// Call instructions should use defineReturn.
MOZ_ASSERT(!lir->isCall());
MOZ_ASSERT(mir->type() == MIRType_Int64);
MOZ_ASSERT(mir->type() == MIRType::Int64);
uint32_t vreg = getVirtualRegister();
@ -197,7 +197,7 @@ LIRGeneratorShared::defineSharedStubReturn(LInstruction* lir, MDefinition* mir)
lir->setMir(mir);
MOZ_ASSERT(lir->isBinarySharedStub() || lir->isUnarySharedStub() || lir->isNullarySharedStub());
MOZ_ASSERT(mir->type() == MIRType_Value);
MOZ_ASSERT(mir->type() == MIRType::Value);
uint32_t vreg = getVirtualRegister();
@ -225,7 +225,7 @@ LIRGeneratorShared::defineReturn(LInstruction* lir, MDefinition* mir)
uint32_t vreg = getVirtualRegister();
switch (mir->type()) {
case MIRType_Value:
case MIRType::Value:
#if defined(JS_NUNBOX32)
lir->setDef(TYPE_INDEX, LDefinition(vreg + VREG_TYPE_OFFSET, LDefinition::TYPE,
LGeneralReg(JSReturnReg_Type)));
@ -236,17 +236,17 @@ LIRGeneratorShared::defineReturn(LInstruction* lir, MDefinition* mir)
lir->setDef(0, LDefinition(vreg, LDefinition::BOX, LGeneralReg(JSReturnReg)));
#endif
break;
case MIRType_Float32:
case MIRType::Float32:
lir->setDef(0, LDefinition(vreg, LDefinition::FLOAT32, LFloatReg(ReturnFloat32Reg)));
break;
case MIRType_Double:
case MIRType::Double:
lir->setDef(0, LDefinition(vreg, LDefinition::DOUBLE, LFloatReg(ReturnDoubleReg)));
break;
case MIRType_Bool32x4:
case MIRType_Int32x4:
case MIRType::Bool32x4:
case MIRType::Int32x4:
lir->setDef(0, LDefinition(vreg, LDefinition::INT32X4, LFloatReg(ReturnSimd128Reg)));
break;
case MIRType_Float32x4:
case MIRType::Float32x4:
lir->setDef(0, LDefinition(vreg, LDefinition::FLOAT32X4, LFloatReg(ReturnSimd128Reg)));
break;
default:
@ -302,8 +302,8 @@ IsCompatibleLIRCoercion(MIRType to, MIRType from)
{
if (to == from)
return true;
if ((to == MIRType_Int32 || to == MIRType_Boolean) &&
(from == MIRType_Int32 || from == MIRType_Boolean)) {
if ((to == MIRType::Int32 || to == MIRType::Boolean) &&
(from == MIRType::Int32 || from == MIRType::Boolean)) {
return true;
}
// SIMD types can be coerced with from*Bits operators.
@ -318,7 +318,7 @@ void
LIRGeneratorShared::redefine(MDefinition* def, MDefinition* as, MMathFunction::Function func)
{
MOZ_ASSERT(def->isMathFunction());
MOZ_ASSERT(def->type() == MIRType_Double && as->type() == MIRType_SinCosDouble);
MOZ_ASSERT(def->type() == MIRType::Double && as->type() == MIRType::SinCosDouble);
MOZ_ASSERT(MMathFunction::Sin == func || MMathFunction::Cos == func);
ensureDefined(as);
@ -347,12 +347,12 @@ LIRGeneratorShared::redefine(MDefinition* def, MDefinition* as)
if (as->isEmittedAtUses() &&
(def->type() == as->type() ||
(as->isConstant() &&
(def->type() == MIRType_Int32 || def->type() == MIRType_Boolean) &&
(as->type() == MIRType_Int32 || as->type() == MIRType_Boolean))))
(def->type() == MIRType::Int32 || def->type() == MIRType::Boolean) &&
(as->type() == MIRType::Int32 || as->type() == MIRType::Boolean))))
{
MInstruction* replacement;
if (def->type() != as->type()) {
if (as->type() == MIRType_Int32)
if (as->type() == MIRType::Int32)
replacement = MConstant::New(alloc(), BooleanValue(as->toConstant()->toInt32()));
else
replacement = MConstant::New(alloc(), Int32Value(as->toConstant()->toBoolean()));
@ -372,15 +372,15 @@ LIRGeneratorShared::redefine(MDefinition* def, MDefinition* as)
!def->resultTypeSet()->equals(as->resultTypeSet()))
{
switch (def->type()) {
case MIRType_Object:
case MIRType_ObjectOrNull:
case MIRType_String:
case MIRType_Symbol: {
case MIRType::Object:
case MIRType::ObjectOrNull:
case MIRType::String:
case MIRType::Symbol: {
LAssertResultT* check = new(alloc()) LAssertResultT(useRegister(def));
add(check, def->toInstruction());
break;
}
case MIRType_Value: {
case MIRType::Value: {
LAssertResultV* check = new(alloc()) LAssertResultV(useBox(def));
add(check, def->toInstruction());
break;
@ -469,7 +469,7 @@ LIRGeneratorShared::useRegisterOrZeroAtStart(MDefinition* mir)
LAllocation
LIRGeneratorShared::useRegisterOrNonDoubleConstant(MDefinition* mir)
{
if (mir->isConstant() && mir->type() != MIRType_Double && mir->type() != MIRType_Float32)
if (mir->isConstant() && mir->type() != MIRType::Double && mir->type() != MIRType::Float32)
return LAllocation(mir->toConstant());
return useRegister(mir);
}
@ -622,7 +622,7 @@ VirtualRegisterOfPayload(MDefinition* mir)
{
if (mir->isBox()) {
MDefinition* inner = mir->toBox()->getOperand(0);
if (!inner->isConstant() && inner->type() != MIRType_Double && inner->type() != MIRType_Float32)
if (!inner->isConstant() && inner->type() != MIRType::Double && inner->type() != MIRType::Float32)
return inner->virtualRegister();
}
if (mir->isTypeBarrier())
@ -635,7 +635,7 @@ VirtualRegisterOfPayload(MDefinition* mir)
LUse
LIRGeneratorShared::useType(MDefinition* mir, LUse::Policy policy)
{
MOZ_ASSERT(mir->type() == MIRType_Value);
MOZ_ASSERT(mir->type() == MIRType::Value);
return LUse(mir->virtualRegister() + VREG_TYPE_OFFSET, policy);
}
@ -643,7 +643,7 @@ LIRGeneratorShared::useType(MDefinition* mir, LUse::Policy policy)
LUse
LIRGeneratorShared::usePayload(MDefinition* mir, LUse::Policy policy)
{
MOZ_ASSERT(mir->type() == MIRType_Value);
MOZ_ASSERT(mir->type() == MIRType::Value);
return LUse(VirtualRegisterOfPayload(mir), policy);
}
@ -651,7 +651,7 @@ LIRGeneratorShared::usePayload(MDefinition* mir, LUse::Policy policy)
LUse
LIRGeneratorShared::usePayloadAtStart(MDefinition* mir, LUse::Policy policy)
{
MOZ_ASSERT(mir->type() == MIRType_Value);
MOZ_ASSERT(mir->type() == MIRType::Value);
return LUse(VirtualRegisterOfPayload(mir), policy, true);
}
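useType and usePayload only come into play on NUNBOX32 configurations, where a boxed Value is split across two 32-bit virtual registers (the payload, plus a type tag at VREG_TYPE_OFFSET). A sketch of the layout being assumed, for illustration only — field order matches a little-endian NUNBOX32 target and is not the engine's actual definition:

#include <cstdint>

// A MIRType::Value definition on 32-bit targets owns two consecutive virtual
// registers, one per 32-bit half of the boxed value.
struct Nunbox32Layout {
    uint32_t payload;  // low word: int32 / boolean / pointer payload
    uint32_t tag;      // high word: JSVAL_TAG_* discriminant
};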
@ -674,14 +674,14 @@ LIRGeneratorShared::fillBoxUses(LInstruction* lir, size_t n, MDefinition* mir)
LUse
LIRGeneratorShared::useRegisterForTypedLoad(MDefinition* mir, MIRType type)
{
MOZ_ASSERT(type != MIRType_Value && type != MIRType_None);
MOZ_ASSERT(mir->type() == MIRType_Object || mir->type() == MIRType_Slots);
MOZ_ASSERT(type != MIRType::Value && type != MIRType::None);
MOZ_ASSERT(mir->type() == MIRType::Object || mir->type() == MIRType::Slots);
#ifdef JS_PUNBOX64
// On x64, masm.loadUnboxedValue emits slightly less efficient code when
// the input and output use the same register and we're not loading an
// int32/bool/double, so we just call useRegister in this case.
if (type != MIRType_Int32 && type != MIRType_Boolean && type != MIRType_Double)
if (type != MIRType::Int32 && type != MIRType::Boolean && type != MIRType::Double)
return useRegister(mir);
#endif
@ -691,7 +691,7 @@ LIRGeneratorShared::useRegisterForTypedLoad(MDefinition* mir, MIRType type)
LBoxAllocation
LIRGeneratorShared::useBox(MDefinition* mir, LUse::Policy policy, bool useAtStart)
{
MOZ_ASSERT(mir->type() == MIRType_Value);
MOZ_ASSERT(mir->type() == MIRType::Value);
ensureDefined(mir);
@ -706,7 +706,7 @@ LIRGeneratorShared::useBox(MDefinition* mir, LUse::Policy policy, bool useAtStar
LBoxAllocation
LIRGeneratorShared::useBoxOrTypedOrConstant(MDefinition* mir, bool useConstant)
{
if (mir->type() == MIRType_Value)
if (mir->type() == MIRType::Value)
return useBox(mir);
@ -728,7 +728,7 @@ LIRGeneratorShared::useBoxOrTypedOrConstant(MDefinition* mir, bool useConstant)
LInt64Allocation
LIRGeneratorShared::useInt64(MDefinition* mir, LUse::Policy policy, bool useAtStart)
{
MOZ_ASSERT(mir->type() == MIRType_Int64);
MOZ_ASSERT(mir->type() == MIRType::Int64);
ensureDefined(mir);

View file

@ -80,28 +80,28 @@ LIRGeneratorShared::visitConstant(MConstant* ins)
}
switch (ins->type()) {
case MIRType_Double:
case MIRType::Double:
define(new(alloc()) LDouble(ins->toDouble()), ins);
break;
case MIRType_Float32:
case MIRType::Float32:
define(new(alloc()) LFloat32(ins->toFloat32()), ins);
break;
case MIRType_Boolean:
case MIRType::Boolean:
define(new(alloc()) LInteger(ins->toBoolean()), ins);
break;
case MIRType_Int32:
case MIRType::Int32:
define(new(alloc()) LInteger(ins->toInt32()), ins);
break;
case MIRType_Int64:
case MIRType::Int64:
defineInt64(new(alloc()) LInteger64(ins->toInt64()), ins);
break;
case MIRType_String:
case MIRType::String:
define(new(alloc()) LPointer(ins->toString()), ins);
break;
case MIRType_Symbol:
case MIRType::Symbol:
define(new(alloc()) LPointer(ins->toSymbol()), ins);
break;
case MIRType_Object:
case MIRType::Object:
define(new(alloc()) LPointer(&ins->toObject()), ins);
break;
default:
@ -154,7 +154,7 @@ LRecoverInfo::OperandIter::canOptimizeOutIfUnused()
// We check ins->type() in addition to ins->isUnused() because
// EliminateDeadResumePointOperands may replace nodes with the constant
// MagicValue(JS_OPTIMIZED_OUT).
if ((ins->isUnused() || ins->type() == MIRType_MagicOptimizedOut) &&
if ((ins->isUnused() || ins->type() == MIRType::MagicOptimizedOut) &&
(*it_)->isResumePoint())
{
return !(*it_)->toResumePoint()->isObservableOperand(op_);
@ -209,7 +209,7 @@ LIRGeneratorShared::buildSnapshot(LInstruction* ins, MResumePoint* rp, BailoutKi
if (ins->isConstant() || ins->isUnused()) {
*type = LAllocation();
*payload = LAllocation();
} else if (ins->type() != MIRType_Value) {
} else if (ins->type() != MIRType::Value) {
*type = LAllocation();
*payload = use(ins, LUse(LUse::KEEPALIVE));
} else {

View file

@ -44,20 +44,20 @@ ABIArgGenerator::next(MIRType type)
return current_;
}
switch (type) {
case MIRType_Int32:
case MIRType_Int64:
case MIRType_Pointer:
case MIRType::Int32:
case MIRType::Int64:
case MIRType::Pointer:
current_ = ABIArg(IntArgRegs[regIndex_++]);
break;
case MIRType_Float32:
case MIRType::Float32:
current_ = ABIArg(FloatArgRegs[regIndex_++].asSingle());
break;
case MIRType_Double:
case MIRType::Double:
current_ = ABIArg(FloatArgRegs[regIndex_++]);
break;
case MIRType_Bool32x4:
case MIRType_Int32x4:
case MIRType_Float32x4:
case MIRType::Bool32x4:
case MIRType::Int32x4:
case MIRType::Float32x4:
// On Win64, >64 bit args need to be passed by reference, but asm.js
// doesn't allow passing SIMD values to FFIs. The only way to reach
// here is asm to asm calls, so we can break the ABI here.
@ -69,9 +69,9 @@ ABIArgGenerator::next(MIRType type)
return current_;
#else
switch (type) {
case MIRType_Int32:
case MIRType_Int64:
case MIRType_Pointer:
case MIRType::Int32:
case MIRType::Int64:
case MIRType::Pointer:
if (intRegIndex_ == NumIntArgRegs) {
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint64_t);
@ -79,21 +79,21 @@ ABIArgGenerator::next(MIRType type)
}
current_ = ABIArg(IntArgRegs[intRegIndex_++]);
break;
case MIRType_Double:
case MIRType_Float32:
case MIRType::Double:
case MIRType::Float32:
if (floatRegIndex_ == NumFloatArgRegs) {
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint64_t);
break;
}
if (type == MIRType_Float32)
if (type == MIRType::Float32)
current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asSingle());
else
current_ = ABIArg(FloatArgRegs[floatRegIndex_++]);
break;
case MIRType_Bool32x4:
case MIRType_Int32x4:
case MIRType_Float32x4:
case MIRType::Bool32x4:
case MIRType::Int32x4:
case MIRType::Float32x4:
if (floatRegIndex_ == NumFloatArgRegs) {
stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
current_ = ABIArg(stackOffset_);
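The two switches above differ mainly in how they count: the Win64 branch advances a single shared positional index for both integer and floating-point arguments, while the SysV branch keeps independent counters. A toy model of that difference, with register names following the respective ABIs — an illustration, not the jit's data structures:

#include <cstdio>

int main()
{
    const char* win64Int[] = { "rcx", "rdx", "r8", "r9" };
    const char* win64Flt[] = { "xmm0", "xmm1", "xmm2", "xmm3" };
    const char* sysvInt[]  = { "rdi", "rsi", "rdx", "rcx", "r8", "r9" };
    const char* sysvFlt[]  = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" };

    // Example signature: f(int32, double, int32)
    bool isFloat[] = { false, true, false };

    unsigned shared = 0;              // Win64: one counter for everything
    unsigned intIdx = 0, fltIdx = 0;  // SysV: split counters
    for (bool f : isFloat) {
        const char* w = f ? win64Flt[shared] : win64Int[shared];
        shared++;
        const char* s = f ? sysvFlt[fltIdx++] : sysvInt[intIdx++];
        printf("win64: %-5s  sysv: %s\n", w, s);
    }
    return 0;
}

For this signature the model prints rcx/xmm1/r8 for Win64 versus rdi/xmm0/rsi for SysV, which is exactly the behaviour the shared versus split indices encode.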

View file

@ -76,7 +76,7 @@ CodeGeneratorX64::visitBox(LBox* box)
if (IsFloatingPointType(box->type())) {
ScratchDoubleScope scratch(masm);
FloatRegister reg = ToFloatRegister(in);
if (box->type() == MIRType_Float32) {
if (box->type() == MIRType::Float32) {
masm.convertFloat32ToDouble(reg, scratch);
reg = scratch;
}
@ -95,19 +95,19 @@ CodeGeneratorX64::visitUnbox(LUnbox* unbox)
const ValueOperand value = ToValue(unbox, LUnbox::Input);
Assembler::Condition cond;
switch (mir->type()) {
case MIRType_Int32:
case MIRType::Int32:
cond = masm.testInt32(Assembler::NotEqual, value);
break;
case MIRType_Boolean:
case MIRType::Boolean:
cond = masm.testBoolean(Assembler::NotEqual, value);
break;
case MIRType_Object:
case MIRType::Object:
cond = masm.testObject(Assembler::NotEqual, value);
break;
case MIRType_String:
case MIRType::String:
cond = masm.testString(Assembler::NotEqual, value);
break;
case MIRType_Symbol:
case MIRType::Symbol:
cond = masm.testSymbol(Assembler::NotEqual, value);
break;
default:
@ -119,19 +119,19 @@ CodeGeneratorX64::visitUnbox(LUnbox* unbox)
Operand input = ToOperand(unbox->getOperand(LUnbox::Input));
Register result = ToRegister(unbox->output());
switch (mir->type()) {
case MIRType_Int32:
case MIRType::Int32:
masm.unboxInt32(input, result);
break;
case MIRType_Boolean:
case MIRType::Boolean:
masm.unboxBoolean(input, result);
break;
case MIRType_Object:
case MIRType::Object:
masm.unboxObject(input, result);
break;
case MIRType_String:
case MIRType::String:
masm.unboxString(input, result);
break;
case MIRType_Symbol:
case MIRType::Symbol:
masm.unboxSymbol(input, result);
break;
default:
@ -480,7 +480,7 @@ CodeGeneratorX64::visitUDivOrMod64(LUDivOrMod64* lir)
void
CodeGeneratorX64::visitAsmSelectI64(LAsmSelectI64* lir)
{
MOZ_ASSERT(lir->mir()->type() == MIRType_Int64);
MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
Register cond = ToRegister(lir->condExpr());
Operand falseExpr = ToOperand(lir->falseExpr());
@ -495,16 +495,16 @@ CodeGeneratorX64::visitAsmSelectI64(LAsmSelectI64* lir)
void
CodeGeneratorX64::visitAsmReinterpretFromI64(LAsmReinterpretFromI64* lir)
{
MOZ_ASSERT(lir->mir()->type() == MIRType_Double);
MOZ_ASSERT(lir->mir()->input()->type() == MIRType_Int64);
MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
masm.vmovq(ToRegister(lir->input()), ToFloatRegister(lir->output()));
}
void
CodeGeneratorX64::visitAsmReinterpretToI64(LAsmReinterpretToI64* lir)
{
MOZ_ASSERT(lir->mir()->type() == MIRType_Int64);
MOZ_ASSERT(lir->mir()->input()->type() == MIRType_Double);
MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
masm.vmovq(ToFloatRegister(lir->input()), ToRegister(lir->output()));
}
@ -1007,22 +1007,22 @@ CodeGeneratorX64::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar* ins)
CodeOffset label;
switch (type) {
case MIRType_Int32:
case MIRType::Int32:
label = masm.loadRipRelativeInt32(ToRegister(ins->output()));
break;
case MIRType_Float32:
case MIRType::Float32:
label = masm.loadRipRelativeFloat32(ToFloatRegister(ins->output()));
break;
case MIRType_Double:
case MIRType::Double:
label = masm.loadRipRelativeDouble(ToFloatRegister(ins->output()));
break;
// Aligned access: code is aligned on PageSize + there is padding
// before the global data section.
case MIRType_Int32x4:
case MIRType_Bool32x4:
case MIRType::Int32x4:
case MIRType::Bool32x4:
label = masm.loadRipRelativeInt32x4(ToFloatRegister(ins->output()));
break;
case MIRType_Float32x4:
case MIRType::Float32x4:
label = masm.loadRipRelativeFloat32x4(ToFloatRegister(ins->output()));
break;
default:
@ -1042,22 +1042,22 @@ CodeGeneratorX64::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar* ins)
CodeOffset label;
switch (type) {
case MIRType_Int32:
case MIRType::Int32:
label = masm.storeRipRelativeInt32(ToRegister(ins->value()));
break;
case MIRType_Float32:
case MIRType::Float32:
label = masm.storeRipRelativeFloat32(ToFloatRegister(ins->value()));
break;
case MIRType_Double:
case MIRType::Double:
label = masm.storeRipRelativeDouble(ToFloatRegister(ins->value()));
break;
// Aligned access: code is aligned on PageSize + there is padding
// before the global data section.
case MIRType_Int32x4:
case MIRType_Bool32x4:
case MIRType::Int32x4:
case MIRType::Bool32x4:
label = masm.storeRipRelativeInt32x4(ToFloatRegister(ins->value()));
break;
case MIRType_Float32x4:
case MIRType::Float32x4:
label = masm.storeRipRelativeFloat32x4(ToFloatRegister(ins->value()));
break;
default:
@ -1164,7 +1164,7 @@ CodeGeneratorX64::visitTruncateToInt64(LTruncateToInt64* lir)
// If the input < INT64_MAX, vcvttsd2sq will do the right thing, so
// we use it directly. Else, we subtract INT64_MAX, convert to int64,
// and then add INT64_MAX to the result.
if (inputType == MIRType_Double) {
if (inputType == MIRType::Double) {
Label isLarge;
masm.loadConstantDouble(double(0x8000000000000000), ScratchDoubleReg);
masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, ScratchDoubleReg, &isLarge);
@ -1180,7 +1180,7 @@ CodeGeneratorX64::visitTruncateToInt64(LTruncateToInt64* lir)
masm.or64(Imm64(0x8000000000000000), Register64(output));
masm.jump(&done);
} else {
MOZ_ASSERT(inputType == MIRType_Float32);
MOZ_ASSERT(inputType == MIRType::Float32);
Label isLarge;
masm.loadConstantFloat32(float(0x8000000000000000), ScratchDoubleReg);
@ -1198,13 +1198,13 @@ CodeGeneratorX64::visitTruncateToInt64(LTruncateToInt64* lir)
masm.jump(&done);
}
} else {
if (inputType == MIRType_Double) {
if (inputType == MIRType::Double) {
masm.vcvttsd2sq(input, output);
masm.cmpq(Imm32(1), output);
masm.j(Assembler::Overflow, &trap);
masm.jump(&done);
} else {
MOZ_ASSERT(inputType == MIRType_Float32);
MOZ_ASSERT(inputType == MIRType::Float32);
masm.vcvttss2sq(input, output);
masm.cmpq(Imm32(1), output);
masm.j(Assembler::Overflow, &trap);
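The comment at the top of visitTruncateToInt64 phrases the unsigned path in terms of INT64_MAX, but the constant loaded and OR'd back in is 2^63, and the shape of the trick is easier to see as scalar C++. A sketch only, ignoring the overflow/trap checks the generated code performs:

#include <cstdint>

// Scalar equivalent of the double -> uint64 truncation above: values below 2^63
// can use the signed conversion directly; larger values are shifted down into
// the signed range, converted, and have their top bit restored with an OR.
static uint64_t
TruncateDoubleToUint64(double d)
{
    const double two63 = 9223372036854775808.0;  // 2^63
    if (d < two63)
        return static_cast<uint64_t>(static_cast<int64_t>(d));
    return static_cast<uint64_t>(static_cast<int64_t>(d - two63)) | (uint64_t(1) << 63);
}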
@ -1225,10 +1225,10 @@ CodeGeneratorX64::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir)
FloatRegister output = ToFloatRegister(lir->output());
MIRType outputType = lir->mir()->type();
MOZ_ASSERT(outputType == MIRType_Double || outputType == MIRType_Float32);
MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
// Zero the output register to break dependencies, see convertInt32ToDouble.
if (outputType == MIRType_Double)
if (outputType == MIRType::Double)
masm.zeroDouble(output);
else
masm.zeroFloat32(output);
@ -1238,7 +1238,7 @@ CodeGeneratorX64::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir)
// If the input is unsigned, we use cvtsq2sd or vcvtsq2ss directly.
// Else, we divide by 2, convert to double or float, and multiply the
// result by 2.
if (outputType == MIRType_Double) {
if (outputType == MIRType::Double) {
Label isSigned;
masm.branchTestPtr(Assembler::Signed, input, input, &isSigned);
masm.vcvtsq2sd(input, output, output);
@ -1264,7 +1264,7 @@ CodeGeneratorX64::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir)
masm.vaddss(output, output, output);
}
} else {
if (outputType == MIRType_Double)
if (outputType == MIRType::Double)
masm.vcvtsq2sd(input, output, output);
else
masm.vcvtsq2ss(input, output, output);
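visitInt64ToFloatingPoint's unsigned path works the other way around: values with the top bit set are halved into the signed range, converted, and scaled back up. A scalar sketch of that idea follows; it glosses over what happens to the low bit the shift drops, so treat it as an approximation of the generated sequence rather than a bit-exact replacement:

#include <cstdint>

static double
Uint64ToDouble(uint64_t v)
{
    if (static_cast<int64_t>(v) >= 0) {
        // Top bit clear: the signed conversion (vcvtsq2sd) is already correct.
        return static_cast<double>(static_cast<int64_t>(v));
    }
    // Top bit set: halve into the signed range, convert, then double the result,
    // mirroring the shift / convert / add-to-itself sequence above.
    return static_cast<double>(static_cast<int64_t>(v >> 1)) * 2.0;
}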

View file

@ -17,7 +17,7 @@ using namespace js::jit;
LBoxAllocation
LIRGeneratorX64::useBoxFixed(MDefinition* mir, Register reg1, Register, bool useAtStart)
{
MOZ_ASSERT(mir->type() == MIRType_Value);
MOZ_ASSERT(mir->type() == MIRType::Value);
ensureDefined(mir);
return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
@ -71,7 +71,7 @@ LIRGeneratorX64::visitUnbox(MUnbox* unbox)
{
MDefinition* box = unbox->getOperand(0);
if (box->type() == MIRType_ObjectOrNull) {
if (box->type() == MIRType::ObjectOrNull) {
LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(box));
if (unbox->fallible())
assignSnapshot(lir, unbox->bailoutKind());
@ -79,7 +79,7 @@ LIRGeneratorX64::visitUnbox(MUnbox* unbox)
return;
}
MOZ_ASSERT(box->type() == MIRType_Value);
MOZ_ASSERT(box->type() == MIRType::Value);
LUnboxBase* lir;
if (IsFloatingPointType(unbox->type())) {
@ -102,7 +102,7 @@ void
LIRGeneratorX64::visitReturn(MReturn* ret)
{
MDefinition* opd = ret->getOperand(0);
MOZ_ASSERT(opd->type() == MIRType_Value);
MOZ_ASSERT(opd->type() == MIRType::Value);
LReturn* ins = new(alloc()) LReturn;
ins->setOperand(0, useFixed(opd, JSReturnReg));
@ -142,7 +142,7 @@ LIRGeneratorX64::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop
void
LIRGeneratorX64::visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins)
{
MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
LAsmJSUInt32ToDouble* lir = new(alloc()) LAsmJSUInt32ToDouble(useRegisterAtStart(ins->input()));
define(lir, ins);
}
@ -150,7 +150,7 @@ LIRGeneratorX64::visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins)
void
LIRGeneratorX64::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins)
{
MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()));
define(lir, ins);
}
@ -159,7 +159,7 @@ void
LIRGeneratorX64::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
{
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
// For simplicity, require a register if we're going to emit a bounds-check
// branch, so that we don't have special cases for constants.
@ -174,7 +174,7 @@ void
LIRGeneratorX64::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
{
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
// For simplicity, require a register if we're going to emit a bounds-check
// branch, so that we don't have special cases for constants.
@ -209,7 +209,7 @@ void
LIRGeneratorX64::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
{
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
// The output may not be used but will be clobbered regardless, so
// pin the output to eax.
@ -228,7 +228,7 @@ LIRGeneratorX64::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
void
LIRGeneratorX64::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
{
MOZ_ASSERT(ins->base()->type() == MIRType_Int32);
MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
const LAllocation base = useRegister(ins->base());
const LAllocation value = useRegister(ins->value());
@ -246,7 +246,7 @@ void
LIRGeneratorX64::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
{
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
// Case 1: the result of the operation is not used.
//
@ -390,7 +390,7 @@ void
LIRGeneratorX64::visitTruncateToInt64(MTruncateToInt64* ins)
{
MDefinition* opd = ins->input();
MOZ_ASSERT(opd->type() == MIRType_Double || opd->type() == MIRType_Float32);
MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
LDefinition maybeTemp = ins->isUnsigned() ? tempDouble() : LDefinition::BogusTemp();
defineInt64(new(alloc()) LTruncateToInt64(useRegister(opd), maybeTemp), ins);
@ -400,7 +400,7 @@ void
LIRGeneratorX64::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins)
{
MDefinition* opd = ins->input();
MOZ_ASSERT(opd->type() == MIRType_Int64);
MOZ_ASSERT(opd->type() == MIRType::Int64);
MOZ_ASSERT(IsFloatingPointType(ins->type()));
define(new(alloc()) LInt64ToFloatingPoint(useInt64Register(opd)), ins);

View file

@ -230,17 +230,17 @@ void
MacroAssemblerX64::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest,
MIRType slotType)
{
if (valueType == MIRType_Double) {
if (valueType == MIRType::Double) {
storeDouble(value.reg().typedReg().fpu(), dest);
return;
}
// For known integers and booleans, we can just store the unboxed value if
// the slot has the same type.
if ((valueType == MIRType_Int32 || valueType == MIRType_Boolean) && slotType == valueType) {
if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) && slotType == valueType) {
if (value.constant()) {
Value val = value.value();
if (valueType == MIRType_Int32)
if (valueType == MIRType::Int32)
store32(Imm32(val.toInt32()), dest);
else
store32(Imm32(val.toBoolean() ? 1 : 0), dest);

View file

@ -882,7 +882,7 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
void loadUnboxedValue(const T& src, MIRType type, AnyRegister dest) {
if (dest.isFloat())
loadInt32OrDouble(src, dest.fpu());
else if (type == MIRType_Int32 || type == MIRType_Boolean)
else if (type == MIRType::Int32 || type == MIRType::Boolean)
movl(Operand(src), dest.gpr());
else
unboxNonDouble(Operand(src), dest.gpr());


@ -281,17 +281,17 @@ CodeGeneratorX86Shared::visitAsmJSPassStackArg(LAsmJSPassStackArg* ins)
masm.storePtr(ToRegister(ins->arg()), dst);
} else {
switch (mir->input()->type()) {
case MIRType_Double:
case MIRType_Float32:
case MIRType::Double:
case MIRType::Float32:
masm.storeDouble(ToFloatRegister(ins->arg()), dst);
return;
// StackPointer is SIMD-aligned and ABIArgGenerator guarantees
// stack offsets are SIMD-aligned.
case MIRType_Int32x4:
case MIRType_Bool32x4:
case MIRType::Int32x4:
case MIRType::Bool32x4:
masm.storeAlignedInt32x4(ToFloatRegister(ins->arg()), dst);
return;
case MIRType_Float32x4:
case MIRType::Float32x4:
masm.storeAlignedFloat32x4(ToFloatRegister(ins->arg()), dst);
return;
default: break;
@ -311,7 +311,7 @@ CodeGeneratorX86Shared::visitAsmSelect(LAsmSelect* ins)
masm.test32(cond, cond);
if (mirType == MIRType_Int32) {
if (mirType == MIRType::Int32) {
Register out = ToRegister(ins->output());
MOZ_ASSERT(ToRegister(ins->trueExpr()) == out, "true expr input is reused for output");
masm.cmovz(falseExpr, out);
@ -324,12 +324,12 @@ CodeGeneratorX86Shared::visitAsmSelect(LAsmSelect* ins)
Label done;
masm.j(Assembler::NonZero, &done);
if (mirType == MIRType_Float32) {
if (mirType == MIRType::Float32) {
if (falseExpr.kind() == Operand::FPREG)
masm.moveFloat32(ToFloatRegister(ins->falseExpr()), out);
else
masm.loadFloat32(falseExpr, out);
} else if (mirType == MIRType_Double) {
} else if (mirType == MIRType::Double) {
if (falseExpr.kind() == Operand::FPREG)
masm.moveDouble(ToFloatRegister(ins->falseExpr()), out);
else
@ -349,19 +349,21 @@ CodeGeneratorX86Shared::visitAsmReinterpret(LAsmReinterpret* lir)
MAsmReinterpret* ins = lir->mir();
MIRType to = ins->type();
DebugOnly<MIRType> from = ins->input()->type();
#ifdef DEBUG
MIRType from = ins->input()->type();
#endif
switch (to) {
case MIRType_Int32:
MOZ_ASSERT(from == MIRType_Float32);
case MIRType::Int32:
MOZ_ASSERT(from == MIRType::Float32);
masm.vmovd(ToFloatRegister(lir->input()), ToRegister(lir->output()));
break;
case MIRType_Float32:
MOZ_ASSERT(from == MIRType_Int32);
case MIRType::Float32:
MOZ_ASSERT(from == MIRType::Int32);
masm.vmovd(ToRegister(lir->input()), ToFloatRegister(lir->output()));
break;
case MIRType_Double:
case MIRType_Int64:
case MIRType::Double:
case MIRType::Int64:
MOZ_CRASH("not handled by this LIR opcode");
default:
MOZ_CRASH("unexpected AsmReinterpret");
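This hunk trades a DebugOnly<MIRType> local for a plain local wrapped in #ifdef DEBUG, so the variable simply does not exist in release builds. A small sketch of the same pattern, using the standard <cassert> macro (keyed off NDEBUG) in place of MOZ_ASSERT (keyed off DEBUG):

#include <cassert>
#include <cstdint>
#include <cstring>

enum class MIRType { Int32, Float32 };

int32_t reinterpretFloat32ToInt32(float value, MIRType fromType) {
#ifndef NDEBUG
    MIRType from = fromType;                   // debug-only local, read only by the assert
    assert(from == MIRType::Float32);
#else
    (void)fromType;                            // keep release builds warning-free
#endif
    int32_t bits;
    std::memcpy(&bits, &value, sizeof bits);   // bit-level reinterpret, as vmovd does
    return bits;
}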
@ -2583,7 +2585,7 @@ CodeGeneratorX86Shared::visitFloat32x4ToUint32x4(LFloat32x4ToUint32x4* ins)
void
CodeGeneratorX86Shared::visitSimdValueInt32x4(LSimdValueInt32x4* ins)
{
MOZ_ASSERT(ins->mir()->type() == MIRType_Int32x4 || ins->mir()->type() == MIRType_Bool32x4);
MOZ_ASSERT(ins->mir()->type() == MIRType::Int32x4 || ins->mir()->type() == MIRType::Bool32x4);
FloatRegister output = ToFloatRegister(ins->output());
if (AssemblerX86Shared::HasSSE41()) {
@ -2607,7 +2609,7 @@ CodeGeneratorX86Shared::visitSimdValueInt32x4(LSimdValueInt32x4* ins)
void
CodeGeneratorX86Shared::visitSimdValueFloat32x4(LSimdValueFloat32x4* ins)
{
MOZ_ASSERT(ins->mir()->type() == MIRType_Float32x4);
MOZ_ASSERT(ins->mir()->type() == MIRType::Float32x4);
FloatRegister r0 = ToFloatRegister(ins->getOperand(0));
FloatRegister r1 = ToFloatRegister(ins->getOperand(1));
@ -2634,14 +2636,14 @@ CodeGeneratorX86Shared::visitSimdSplatX4(LSimdSplatX4* ins)
JS_STATIC_ASSERT(sizeof(float) == sizeof(int32_t));
switch (mir->type()) {
case MIRType_Int32x4:
case MIRType_Bool32x4: {
case MIRType::Int32x4:
case MIRType::Bool32x4: {
Register r = ToRegister(ins->getOperand(0));
masm.vmovd(r, output);
masm.vpshufd(0, output, output);
break;
}
case MIRType_Float32x4: {
case MIRType::Float32x4: {
FloatRegister r = ToFloatRegister(ins->getOperand(0));
FloatRegister rCopy = masm.reusedInputFloat32x4(r, output);
masm.vshufps(0, rCopy, rCopy, output);
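visitSimdSplatX4 broadcasts a scalar into all four lanes: vmovd plus vpshufd with a zero mask for integers, vshufps with a zero mask for floats. Roughly the same sequences written with SSE intrinsics, as a user-level sketch rather than the codegen itself (x86 with SSE2 assumed):

#include <cstdint>
#include <emmintrin.h>   // SSE2; pulls in the SSE float intrinsics as well

__m128i splatInt32x4(int32_t x) {
    __m128i v = _mm_cvtsi32_si128(x);          // vmovd: scalar into lane 0
    return _mm_shuffle_epi32(v, 0);            // vpshufd $0: copy lane 0 everywhere
}

__m128 splatFloat32x4(float x) {
    __m128 v = _mm_set_ss(x);                  // scalar into lane 0
    return _mm_shuffle_ps(v, v, 0);            // vshufps $0: copy lane 0 everywhere
}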
@ -2662,10 +2664,10 @@ CodeGeneratorX86Shared::visitSimdReinterpretCast(LSimdReinterpretCast* ins)
return;
switch (ins->mir()->type()) {
case MIRType_Int32x4:
case MIRType::Int32x4:
masm.vmovdqa(input, output);
break;
case MIRType_Float32x4:
case MIRType::Float32x4:
masm.vmovaps(input, output);
break;
default:
@ -3532,19 +3534,19 @@ CodeGeneratorX86Shared::visitSimdBinaryBitwiseX4(LSimdBinaryBitwiseX4* ins)
MSimdBinaryBitwise::Operation op = ins->operation();
switch (op) {
case MSimdBinaryBitwise::and_:
if (ins->type() == MIRType_Float32x4)
if (ins->type() == MIRType::Float32x4)
masm.vandps(rhs, lhs, output);
else
masm.vpand(rhs, lhs, output);
return;
case MSimdBinaryBitwise::or_:
if (ins->type() == MIRType_Float32x4)
if (ins->type() == MIRType::Float32x4)
masm.vorps(rhs, lhs, output);
else
masm.vpor(rhs, lhs, output);
return;
case MSimdBinaryBitwise::xor_:
if (ins->type() == MIRType_Float32x4)
if (ins->type() == MIRType::Float32x4)
masm.vxorps(rhs, lhs, output);
else
masm.vpxor(rhs, lhs, output);
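visitSimdBinaryBitwiseX4 picks vandps/vorps/vxorps for Float32x4 operands and vpand/vpor/vpxor otherwise. Both families compute the same bits; the split keeps the value in the SIMD domain that matches its type, which avoids bypass delays on many x86 cores. The AND pair in intrinsic form, as a sketch:

#include <emmintrin.h>

__m128  andFloat32x4(__m128 a, __m128 b)  { return _mm_and_ps(a, b); }     // vandps
__m128i andInt32x4(__m128i a, __m128i b)  { return _mm_and_si128(a, b); }  // vpand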


@ -36,7 +36,7 @@ LIRGeneratorX86Shared::newLTableSwitchV(MTableSwitch* tableswitch)
void
LIRGeneratorX86Shared::visitGuardShape(MGuardShape* ins)
{
MOZ_ASSERT(ins->obj()->type() == MIRType_Object);
MOZ_ASSERT(ins->obj()->type() == MIRType::Object);
LGuardShape* guard = new(alloc()) LGuardShape(useRegisterAtStart(ins->obj()));
assignSnapshot(guard, ins->bailoutKind());
@ -47,7 +47,7 @@ LIRGeneratorX86Shared::visitGuardShape(MGuardShape* ins)
void
LIRGeneratorX86Shared::visitGuardObjectGroup(MGuardObjectGroup* ins)
{
MOZ_ASSERT(ins->obj()->type() == MIRType_Object);
MOZ_ASSERT(ins->obj()->type() == MIRType::Object);
LGuardObjectGroup* guard = new(alloc()) LGuardObjectGroup(useRegisterAtStart(ins->obj()));
assignSnapshot(guard, ins->bailoutKind());
@ -59,7 +59,7 @@ void
LIRGeneratorX86Shared::visitPowHalf(MPowHalf* ins)
{
MDefinition* input = ins->input();
MOZ_ASSERT(input->type() == MIRType_Double);
MOZ_ASSERT(input->type() == MIRType::Double);
LPowHalfD* lir = new(alloc()) LPowHalfD(useRegisterAtStart(input));
define(lir, ins);
}
@ -291,7 +291,7 @@ LIRGeneratorX86Shared::lowerModI(MMod* mod)
void
LIRGeneratorX86Shared::visitAsmSelect(MAsmSelect* ins)
{
if (ins->type() == MIRType_Int64) {
if (ins->type() == MIRType::Int64) {
auto* lir = new(alloc()) LAsmSelectI64(useInt64RegisterAtStart(ins->trueExpr()),
useInt64(ins->falseExpr()),
useRegister(ins->condExpr())
@ -313,13 +313,13 @@ void
LIRGeneratorX86Shared::visitAsmJSNeg(MAsmJSNeg* ins)
{
switch (ins->type()) {
case MIRType_Int32:
case MIRType::Int32:
defineReuseInput(new(alloc()) LNegI(useRegisterAtStart(ins->input())), ins, 0);
break;
case MIRType_Float32:
case MIRType::Float32:
defineReuseInput(new(alloc()) LNegF(useRegisterAtStart(ins->input())), ins, 0);
break;
case MIRType_Double:
case MIRType::Double:
defineReuseInput(new(alloc()) LNegD(useRegisterAtStart(ins->input())), ins, 0);
break;
default:
@ -394,9 +394,9 @@ LIRGeneratorX86Shared::lowerUrshD(MUrsh* mir)
MDefinition* lhs = mir->lhs();
MDefinition* rhs = mir->rhs();
MOZ_ASSERT(lhs->type() == MIRType_Int32);
MOZ_ASSERT(rhs->type() == MIRType_Int32);
MOZ_ASSERT(mir->type() == MIRType_Double);
MOZ_ASSERT(lhs->type() == MIRType::Int32);
MOZ_ASSERT(rhs->type() == MIRType::Int32);
MOZ_ASSERT(mir->type() == MIRType::Double);
#ifdef JS_CODEGEN_X64
MOZ_ASSERT(ecx == rcx);
@ -413,7 +413,7 @@ void
LIRGeneratorX86Shared::lowerTruncateDToInt32(MTruncateToInt32* ins)
{
MDefinition* opd = ins->input();
MOZ_ASSERT(opd->type() == MIRType_Double);
MOZ_ASSERT(opd->type() == MIRType::Double);
LDefinition maybeTemp = Assembler::HasSSE3() ? LDefinition::BogusTemp() : tempDouble();
define(new(alloc()) LTruncateDToInt32(useRegister(opd), maybeTemp), ins);
@ -423,7 +423,7 @@ void
LIRGeneratorX86Shared::lowerTruncateFToInt32(MTruncateToInt32* ins)
{
MDefinition* opd = ins->input();
MOZ_ASSERT(opd->type() == MIRType_Float32);
MOZ_ASSERT(opd->type() == MIRType::Float32);
LDefinition maybeTemp = Assembler::HasSSE3() ? LDefinition::BogusTemp() : tempFloat32();
define(new(alloc()) LTruncateFToInt32(useRegister(opd), maybeTemp), ins);
@ -436,8 +436,8 @@ LIRGeneratorX86Shared::lowerCompareExchangeTypedArrayElement(MCompareExchangeTyp
MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
const LUse elements = useRegister(ins->elements());
const LAllocation index = useRegisterOrConstant(ins->index());
@ -488,8 +488,8 @@ LIRGeneratorX86Shared::lowerAtomicExchangeTypedArrayElement(MAtomicExchangeTyped
{
MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
const LUse elements = useRegister(ins->elements());
const LAllocation index = useRegisterOrConstant(ins->index());
@ -508,7 +508,7 @@ LIRGeneratorX86Shared::lowerAtomicExchangeTypedArrayElement(MAtomicExchangeTyped
LDefinition tempDef = LDefinition::BogusTemp();
if (ins->arrayType() == Scalar::Uint32) {
// This restriction is bug 1077305.
MOZ_ASSERT(ins->type() == MIRType_Double);
MOZ_ASSERT(ins->type() == MIRType::Double);
tempDef = temp();
}
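The assert above ties Uint32 atomic results to MIRType::Double. A likely reason, not stated in the hunk itself: a uint32_t above INT32_MAX has no int32 representation, so the JS-visible number has to be produced as a double.

#include <cstdint>

double uint32ResultAsNumber(uint32_t raw) {
    // 0x80000000 becomes 2147483648.0 rather than wrapping to -2147483648.
    return double(raw);
}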
@ -529,8 +529,8 @@ LIRGeneratorX86Shared::lowerAtomicTypedArrayElementBinop(MAtomicTypedArrayElemen
MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
const LUse elements = useRegister(ins->elements());
const LAllocation index = useRegisterOrConstant(ins->index());
@ -647,7 +647,7 @@ LIRGeneratorX86Shared::visitSimdBinaryArith(MSimdBinaryArith* ins)
if (ins->isCommutative())
ReorderCommutative(&lhs, &rhs, ins);
if (ins->type() == MIRType_Int32x4) {
if (ins->type() == MIRType::Int32x4) {
LSimdBinaryArithIx4* lir = new(alloc()) LSimdBinaryArithIx4();
bool needsTemp = ins->operation() == MSimdBinaryArith::Op_mul && !MacroAssembler::HasSSE41();
lir->setTemp(0, needsTemp ? temp(LDefinition::INT32X4) : LDefinition::BogusTemp());
@ -655,7 +655,7 @@ LIRGeneratorX86Shared::visitSimdBinaryArith(MSimdBinaryArith* ins)
return;
}
MOZ_ASSERT(ins->type() == MIRType_Float32x4, "unknown simd type on binary arith operation");
MOZ_ASSERT(ins->type() == MIRType::Float32x4, "unknown simd type on binary arith operation");
LSimdBinaryArithFx4* lir = new(alloc()) LSimdBinaryArithFx4();
@ -671,7 +671,7 @@ void
LIRGeneratorX86Shared::visitSimdSelect(MSimdSelect* ins)
{
MOZ_ASSERT(IsSimdType(ins->type()));
MOZ_ASSERT(ins->type() == MIRType_Int32x4 || ins->type() == MIRType_Float32x4,
MOZ_ASSERT(ins->type() == MIRType::Int32x4 || ins->type() == MIRType::Float32x4,
"Unknown SIMD kind when doing bitwise operations");
LSimdSelect* lins = new(alloc()) LSimdSelect;
@ -694,11 +694,11 @@ LIRGeneratorX86Shared::visitSimdSplatX4(MSimdSplatX4* ins)
LSimdSplatX4* lir = new(alloc()) LSimdSplatX4(x);
switch (ins->type()) {
case MIRType_Int32x4:
case MIRType_Bool32x4:
case MIRType::Int32x4:
case MIRType::Bool32x4:
define(lir, ins);
break;
case MIRType_Float32x4:
case MIRType::Float32x4:
// (Non-AVX) codegen actually wants the input and the output to be in
// the same register, but we can't currently use defineReuseInput
// because they have different types (scalar vs vector), so a spill slot
@ -714,7 +714,7 @@ void
LIRGeneratorX86Shared::visitSimdValueX4(MSimdValueX4* ins)
{
switch (ins->type()) {
case MIRType_Float32x4: {
case MIRType::Float32x4: {
// Ideally, x would be used at start and reused for the output, however
// register allocation currently doesn't permit us to tie together two
// virtual registers with different types.
@ -726,8 +726,8 @@ LIRGeneratorX86Shared::visitSimdValueX4(MSimdValueX4* ins)
define(new (alloc()) LSimdValueFloat32x4(x, y, z, w, t), ins);
break;
}
case MIRType_Bool32x4:
case MIRType_Int32x4: {
case MIRType::Bool32x4:
case MIRType::Int32x4: {
// No defineReuseInput => useAtStart for everyone.
LAllocation x = useRegisterAtStart(ins->getOperand(0));
LAllocation y = useRegisterAtStart(ins->getOperand(1));


@ -20,19 +20,19 @@ ABIArg
ABIArgGenerator::next(MIRType type)
{
switch (type) {
case MIRType_Int32:
case MIRType_Pointer:
case MIRType::Int32:
case MIRType::Pointer:
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint32_t);
break;
case MIRType_Float32: // Float32 moves are actually double moves
case MIRType_Double:
case MIRType::Float32: // Float32 moves are actually double moves
case MIRType::Double:
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint64_t);
break;
case MIRType_Int32x4:
case MIRType_Float32x4:
case MIRType_Bool32x4:
case MIRType::Int32x4:
case MIRType::Float32x4:
case MIRType::Bool32x4:
// SIMD values aren't passed in or out of C++, so we can make up
// whatever internal ABI we like. visitAsmJSPassArg assumes
// SimdMemoryAlignment.
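The x86 ABIArgGenerator above passes every argument on the stack and just advances a running offset by the slot size of each MIRType (with Float32 taking a full 8-byte slot, per the comment). A self-contained sketch of that bookkeeping; the 16-byte SIMD slot size is an assumption here, not something shown in the hunk:

#include <cstdint>

enum class MIRType { Int32, Pointer, Float32, Double, Int32x4, Float32x4, Bool32x4 };

struct StackArgGenerator {
    uint32_t stackOffset = 0;

    // Returns the offset for this argument, then bumps the running offset by
    // the size its type occupies on the stack.
    uint32_t next(MIRType type) {
        uint32_t offset = stackOffset;
        switch (type) {
          case MIRType::Int32:
          case MIRType::Pointer:
            stackOffset += sizeof(uint32_t);
            break;
          case MIRType::Float32:     // Float32 moves are actually double moves
          case MIRType::Double:
            stackOffset += sizeof(uint64_t);
            break;
          case MIRType::Int32x4:
          case MIRType::Float32x4:
          case MIRType::Bool32x4:
            stackOffset += 16;       // assumed SIMD slot size / alignment
            break;
        }
        return offset;
    }
};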


@ -115,7 +115,7 @@ CodeGeneratorX86::visitBoxFloatingPoint(LBoxFloatingPoint* box)
const ValueOperand out = ToOutValue(box);
FloatRegister reg = ToFloatRegister(in);
if (box->type() == MIRType_Float32) {
if (box->type() == MIRType::Float32) {
masm.convertFloat32ToDouble(reg, ScratchFloat32Reg);
reg = ScratchFloat32Reg;
}
@ -280,7 +280,7 @@ CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic*
{
const MLoadTypedArrayElementStatic* mir = ins->mir();
Scalar::Type accessType = mir->accessType();
MOZ_ASSERT_IF(accessType == Scalar::Float32, mir->type() == MIRType_Float32);
MOZ_ASSERT_IF(accessType == Scalar::Float32, mir->type() == MIRType::Float32);
Register ptr = ToRegister(ins->ptr());
const LDefinition* out = ins->output();
@ -319,14 +319,14 @@ CodeGeneratorX86::visitAsmJSCall(LAsmJSCall* ins)
emitAsmJSCall(ins);
if (IsFloatingPointType(mir->type()) && mir->callee().which() == MAsmJSCall::Callee::Builtin) {
if (mir->type() == MIRType_Float32) {
if (mir->type() == MIRType::Float32) {
masm.reserveStack(sizeof(float));
Operand op(esp, 0);
masm.fstp32(op);
masm.loadFloat32(op, ReturnFloat32Reg);
masm.freeStack(sizeof(float));
} else {
MOZ_ASSERT(mir->type() == MIRType_Double);
MOZ_ASSERT(mir->type() == MIRType::Double);
masm.reserveStack(sizeof(double));
Operand op(esp, 0);
masm.fstp(op);
@ -763,22 +763,22 @@ CodeGeneratorX86::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar* ins)
CodeOffset label;
switch (type) {
case MIRType_Int32:
case MIRType::Int32:
label = masm.movlWithPatch(PatchedAbsoluteAddress(), ToRegister(ins->output()));
break;
case MIRType_Float32:
case MIRType::Float32:
label = masm.vmovssWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
break;
case MIRType_Double:
case MIRType::Double:
label = masm.vmovsdWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
break;
// Aligned access: code is aligned on PageSize + there is padding
// before the global data section.
case MIRType_Int32x4:
case MIRType_Bool32x4:
case MIRType::Int32x4:
case MIRType::Bool32x4:
label = masm.vmovdqaWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
break;
case MIRType_Float32x4:
case MIRType::Float32x4:
label = masm.vmovapsWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
break;
default:
@ -797,22 +797,22 @@ CodeGeneratorX86::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar* ins)
CodeOffset label;
switch (type) {
case MIRType_Int32:
case MIRType::Int32:
label = masm.movlWithPatch(ToRegister(ins->value()), PatchedAbsoluteAddress());
break;
case MIRType_Float32:
case MIRType::Float32:
label = masm.vmovssWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
break;
case MIRType_Double:
case MIRType::Double:
label = masm.vmovsdWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
break;
// Aligned access: code is aligned on PageSize + there is padding
// before the global data section.
case MIRType_Int32x4:
case MIRType_Bool32x4:
case MIRType::Int32x4:
case MIRType::Bool32x4:
label = masm.vmovdqaWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
break;
case MIRType_Float32x4:
case MIRType::Float32x4:
label = masm.vmovapsWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
break;
default:


@ -17,7 +17,7 @@ using namespace js::jit;
LBoxAllocation
LIRGeneratorX86::useBoxFixed(MDefinition* mir, Register reg1, Register reg2, bool useAtStart)
{
MOZ_ASSERT(mir->type() == MIRType_Value);
MOZ_ASSERT(mir->type() == MIRType::Value);
MOZ_ASSERT(reg1 != reg2);
ensureDefined(mir);
@ -88,7 +88,7 @@ LIRGeneratorX86::visitUnbox(MUnbox* unbox)
{
MDefinition* inner = unbox->getOperand(0);
if (inner->type() == MIRType_ObjectOrNull) {
if (inner->type() == MIRType::ObjectOrNull) {
LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(inner));
if (unbox->fallible())
assignSnapshot(lir, unbox->bailoutKind());
@ -99,7 +99,7 @@ LIRGeneratorX86::visitUnbox(MUnbox* unbox)
// An unbox on x86 reads in a type tag (either in memory or a register) and
// a payload. Unlike most instructions consuming a box, we ask for the type
// second, so that the result can re-use the first input.
MOZ_ASSERT(inner->type() == MIRType_Value);
MOZ_ASSERT(inner->type() == MIRType::Value);
ensureDefined(inner);
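The comment above is about the two-word Value layout on 32-bit x86: an unbox reads a type tag and a payload, and the lowering requests the payload first so the output can reuse that register. A toy illustration of the layout and of a fallible unbox; the tag values and field order here are made up, not the real JS::Value encoding:

#include <cstdint>

struct ToyValue {
    uint32_t payload;   // low word
    uint32_t tag;       // high word
};

enum : uint32_t { TOY_TAG_INT32 = 1, TOY_TAG_OBJECT = 2 };

// Fallible unbox: check the tag, then hand back the payload word.
bool unboxInt32(const ToyValue& v, int32_t* out) {
    if (v.tag != TOY_TAG_INT32)
        return false;               // the JIT would bail out via a snapshot here
    *out = int32_t(v.payload);
    return true;
}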
@ -131,7 +131,7 @@ void
LIRGeneratorX86::visitReturn(MReturn* ret)
{
MDefinition* opd = ret->getOperand(0);
MOZ_ASSERT(opd->type() == MIRType_Value);
MOZ_ASSERT(opd->type() == MIRType::Value);
LReturn* ins = new(alloc()) LReturn;
ins->setOperand(0, LUse(JSReturnReg_Type));
@ -190,7 +190,7 @@ LIRGeneratorX86::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop
void
LIRGeneratorX86::visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins)
{
MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
LAsmJSUInt32ToDouble* lir = new(alloc()) LAsmJSUInt32ToDouble(useRegisterAtStart(ins->input()), temp());
define(lir, ins);
}
@ -198,7 +198,7 @@ LIRGeneratorX86::visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins)
void
LIRGeneratorX86::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins)
{
MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()), temp());
define(lir, ins);
}
@ -207,7 +207,7 @@ void
LIRGeneratorX86::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
{
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
// For simplicity, require a register if we're going to emit a bounds-check
// branch, so that we don't have special cases for constants.
@ -222,7 +222,7 @@ void
LIRGeneratorX86::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
{
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
// For simplicity, require a register if we're going to emit a bounds-check
// branch, so that we don't have special cases for constants.
@ -281,7 +281,7 @@ LIRGeneratorX86::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
MOZ_ASSERT(ins->accessType() < Scalar::Float32);
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
bool byteArray = byteSize(ins->accessType()) == 1;
@ -311,7 +311,7 @@ LIRGeneratorX86::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
void
LIRGeneratorX86::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
{
MOZ_ASSERT(ins->base()->type() == MIRType_Int32);
MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
const LAllocation base = useRegister(ins->base());
const LAllocation value = useRegister(ins->value());
@ -332,7 +332,7 @@ LIRGeneratorX86::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
MOZ_ASSERT(ins->accessType() < Scalar::Float32);
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType_Int32);
MOZ_ASSERT(base->type() == MIRType::Int32);
bool byteArray = byteSize(ins->accessType()) == 1;


@ -278,7 +278,7 @@ void
MacroAssemblerX86::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest,
MIRType slotType)
{
if (valueType == MIRType_Double) {
if (valueType == MIRType::Double) {
storeDouble(value.reg().typedReg().fpu(), dest);
return;
}


@ -26,11 +26,11 @@ BEGIN_TEST(testJitDCEinGVN_ins)
// return p
MParameter* p = func.createParameter();
block->add(p);
MMul* mul0 = MMul::New(func.alloc, p, p, MIRType_Double);
MMul* mul0 = MMul::New(func.alloc, p, p, MIRType::Double);
block->add(mul0);
if (!mul0->typePolicy()->adjustInputs(func.alloc, mul0))
return false;
MMul* mul1 = MMul::New(func.alloc, mul0, mul0, MIRType_Double);
MMul* mul1 = MMul::New(func.alloc, mul0, mul0, MIRType::Double);
block->add(mul1);
if (!mul1->typePolicy()->adjustInputs(func.alloc, mul1))
return false;
@ -126,7 +126,7 @@ BEGIN_TEST(testJitDCEinGVN_phi)
// return y
joinBlock->addPhi(x);
joinBlock->addPhi(y);
MMul* z = MMul::New(func.alloc, x, y, MIRType_Double);
MMul* z = MMul::New(func.alloc, x, y, MIRType::Double);
joinBlock->add(z);
MReturn* ret = MReturn::New(func.alloc, y);
joinBlock->end(ret);


@ -26,7 +26,7 @@ BEGIN_TEST(testJitFoldsTo_DivReciprocal)
block->add(p);
MConstant* c = MConstant::New(func.alloc, DoubleValue(4.0));
block->add(c);
MDiv* div = MDiv::New(func.alloc, p, c, MIRType_Double);
MDiv* div = MDiv::New(func.alloc, p, c, MIRType::Double);
block->add(div);
if (!div->typePolicy()->adjustInputs(func.alloc, div))
return false;
@ -57,7 +57,7 @@ BEGIN_TEST(testJitFoldsTo_NoDivReciprocal)
block->add(p);
MConstant* c = MConstant::New(func.alloc, DoubleValue(5.0));
block->add(c);
MDiv* div = MDiv::New(func.alloc, p, c, MIRType_Double);
MDiv* div = MDiv::New(func.alloc, p, c, MIRType::Double);
block->add(div);
if (!div->typePolicy()->adjustInputs(func.alloc, div))
return false;
@ -219,7 +219,7 @@ BEGIN_TEST(testJitFoldsTo_UnsignedDiv)
block->add(c0);
MConstant* c1 = MConstant::New(func.alloc, Int32Value(0xffffffff));
block->add(c1);
MDiv* div = MDiv::NewAsmJS(func.alloc, c0, c1, MIRType_Int32, /*unsignd=*/true);
MDiv* div = MDiv::NewAsmJS(func.alloc, c0, c1, MIRType::Int32, /*unsignd=*/true);
block->add(div);
MReturn* ret = MReturn::New(func.alloc, div);
block->end(ret);
@ -244,7 +244,7 @@ BEGIN_TEST(testJitFoldsTo_UnsignedMod)
block->add(c0);
MConstant* c1 = MConstant::New(func.alloc, Int32Value(0xffffffff));
block->add(c1);
MMod* mod = MMod::NewAsmJS(func.alloc, c0, c1, MIRType_Int32, /*unsignd=*/true);
MMod* mod = MMod::NewAsmJS(func.alloc, c0, c1, MIRType::Int32, /*unsignd=*/true);
block->add(mod);
MReturn* ret = MReturn::New(func.alloc, mod);
block->end(ret);


@ -134,7 +134,7 @@ BEGIN_TEST(testJitRangeAnalysis_MathSignBeta)
// {
// return Math.sign(p + -0);
// }
MAdd* thenAdd = MAdd::NewAsmJS(func.alloc, p, cm0, MIRType_Double);
MAdd* thenAdd = MAdd::NewAsmJS(func.alloc, p, cm0, MIRType::Double);
thenBlock->add(thenAdd);
MMathFunction* thenSign = MMathFunction::New(func.alloc, thenAdd, MMathFunction::Sign, &cache);
thenBlock->add(thenSign);
@ -152,7 +152,7 @@ BEGIN_TEST(testJitRangeAnalysis_MathSignBeta)
// {
// return Math.sign(p + -0);
// }
MAdd* elseThenAdd = MAdd::NewAsmJS(func.alloc, p, cm0, MIRType_Double);
MAdd* elseThenAdd = MAdd::NewAsmJS(func.alloc, p, cm0, MIRType::Double);
elseThenBlock->add(elseThenAdd);
MMathFunction* elseThenSign = MMathFunction::New(func.alloc, elseThenAdd, MMathFunction::Sign, &cache);
elseThenBlock->add(elseThenSign);
@ -164,7 +164,7 @@ BEGIN_TEST(testJitRangeAnalysis_MathSignBeta)
// return Math.sign(p + -0);
// }
// }
MAdd* elseElseAdd = MAdd::NewAsmJS(func.alloc, p, cm0, MIRType_Double);
MAdd* elseElseAdd = MAdd::NewAsmJS(func.alloc, p, cm0, MIRType::Double);
elseElseBlock->add(elseElseAdd);
MMathFunction* elseElseSign = MMathFunction::New(func.alloc, elseElseAdd, MMathFunction::Sign, &cache);
elseElseBlock->add(elseElseSign);
@ -236,7 +236,7 @@ BEGIN_TEST(testJitRangeAnalysis_StrictCompareBeta)
// }
MConstant* cm0 = MConstant::New(func.alloc, DoubleValue(-0.0));
thenBlock->add(cm0);
MAdd* thenAdd = MAdd::NewAsmJS(func.alloc, p, cm0, MIRType_Double);
MAdd* thenAdd = MAdd::NewAsmJS(func.alloc, p, cm0, MIRType::Double);
thenBlock->add(thenAdd);
MReturn* thenRet = MReturn::New(func.alloc, thenAdd);
thenBlock->end(thenRet);


@ -5678,7 +5678,7 @@ static const JSFunctionSpecWithHelp fuzzing_unsafe_functions[] = {
JS_INLINABLE_FN_HELP("assertFloat32", testingFunc_assertFloat32, 2, 0, TestAssertFloat32,
"assertFloat32(value, isFloat32)",
" In IonMonkey only, asserts that value has (resp. hasn't) the MIRType_Float32 if isFloat32 is true (resp. false)."),
" In IonMonkey only, asserts that value has (resp. hasn't) the MIRType::Float32 if isFloat32 is true (resp. false)."),
JS_INLINABLE_FN_HELP("assertRecoveredOnBailout", testingFunc_assertRecoveredOnBailout, 2, 0,
TestAssertRecoveredOnBailout,


@ -340,36 +340,36 @@ TypeSet::mightBeMIRType(jit::MIRType type) const
if (unknown())
return true;
if (type == jit::MIRType_Object)
if (type == jit::MIRType::Object)
return unknownObject() || baseObjectCount() != 0;
switch (type) {
case jit::MIRType_Undefined:
case jit::MIRType::Undefined:
return baseFlags() & TYPE_FLAG_UNDEFINED;
case jit::MIRType_Null:
case jit::MIRType::Null:
return baseFlags() & TYPE_FLAG_NULL;
case jit::MIRType_Boolean:
case jit::MIRType::Boolean:
return baseFlags() & TYPE_FLAG_BOOLEAN;
case jit::MIRType_Int32:
case jit::MIRType::Int32:
return baseFlags() & TYPE_FLAG_INT32;
case jit::MIRType_Float32: // Fall through, there's no JSVAL for Float32.
case jit::MIRType_Double:
case jit::MIRType::Float32: // Fall through, there's no JSVAL for Float32.
case jit::MIRType::Double:
return baseFlags() & TYPE_FLAG_DOUBLE;
case jit::MIRType_String:
case jit::MIRType::String:
return baseFlags() & TYPE_FLAG_STRING;
case jit::MIRType_Symbol:
case jit::MIRType::Symbol:
return baseFlags() & TYPE_FLAG_SYMBOL;
case jit::MIRType_MagicOptimizedArguments:
case jit::MIRType::MagicOptimizedArguments:
return baseFlags() & TYPE_FLAG_LAZYARGS;
case jit::MIRType_MagicHole:
case jit::MIRType_MagicIsConstructing:
case jit::MIRType::MagicHole:
case jit::MIRType::MagicIsConstructing:
// These magic constants do not escape to script and are not observed
// in the type sets.
//
// The reason we can return false here is subtle: if Ion is asking the
// type set if it has seen such a magic constant, then the MIR in
// question is the most generic type, MIRType_Value. A magic constant
// could only be emitted by a MIR of MIRType_Value if that MIR is a
// question is the most generic type, MIRType::Value. A magic constant
// could only be emitted by a MIR of MIRType::Value if that MIR is a
// phi, and we check that different magic constants do not flow to the
// same join point in GuessPhiType.
return false;
@ -1579,25 +1579,25 @@ GetMIRTypeFromTypeFlags(TypeFlags flags)
{
switch (flags) {
case TYPE_FLAG_UNDEFINED:
return jit::MIRType_Undefined;
return jit::MIRType::Undefined;
case TYPE_FLAG_NULL:
return jit::MIRType_Null;
return jit::MIRType::Null;
case TYPE_FLAG_BOOLEAN:
return jit::MIRType_Boolean;
return jit::MIRType::Boolean;
case TYPE_FLAG_INT32:
return jit::MIRType_Int32;
return jit::MIRType::Int32;
case (TYPE_FLAG_INT32 | TYPE_FLAG_DOUBLE):
return jit::MIRType_Double;
return jit::MIRType::Double;
case TYPE_FLAG_STRING:
return jit::MIRType_String;
return jit::MIRType::String;
case TYPE_FLAG_SYMBOL:
return jit::MIRType_Symbol;
return jit::MIRType::Symbol;
case TYPE_FLAG_LAZYARGS:
return jit::MIRType_MagicOptimizedArguments;
return jit::MIRType::MagicOptimizedArguments;
case TYPE_FLAG_ANYOBJECT:
return jit::MIRType_Object;
return jit::MIRType::Object;
default:
return jit::MIRType_Value;
return jit::MIRType::Value;
}
}
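GetMIRTypeFromTypeFlags only maps exact, single-type flag patterns to a concrete MIRType (with Int32|Double collapsing to Double, since every int32 is representable as a double); anything mixed degrades to MIRType::Value. A standalone sketch with invented flag constants:

#include <cstdint>

enum class MIRType { Undefined, Null, Boolean, Int32, Double, String, Object, Value };

enum TypeFlag : uint32_t {
    FLAG_UNDEFINED = 1 << 0,
    FLAG_NULL      = 1 << 1,
    FLAG_BOOLEAN   = 1 << 2,
    FLAG_INT32     = 1 << 3,
    FLAG_DOUBLE    = 1 << 4,
    FLAG_STRING    = 1 << 5,
    FLAG_ANYOBJECT = 1 << 6,
};

MIRType mirTypeFromFlags(uint32_t flags) {
    switch (flags) {
      case FLAG_UNDEFINED:           return MIRType::Undefined;
      case FLAG_NULL:                return MIRType::Null;
      case FLAG_BOOLEAN:             return MIRType::Boolean;
      case FLAG_INT32:               return MIRType::Int32;
      case FLAG_INT32 | FLAG_DOUBLE: return MIRType::Double;
      case FLAG_STRING:              return MIRType::String;
      case FLAG_ANYOBJECT:           return MIRType::Object;
      default:                       return MIRType::Value;   // mixed or unknown sets
    }
}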
@ -1608,7 +1608,7 @@ TemporaryTypeSet::getKnownMIRType()
jit::MIRType type;
if (baseObjectCount())
type = flags ? jit::MIRType_Value : jit::MIRType_Object;
type = flags ? jit::MIRType::Value : jit::MIRType::Object;
else
type = GetMIRTypeFromTypeFlags(flags);
@ -1620,7 +1620,7 @@ TemporaryTypeSet::getKnownMIRType()
* added to the set.
*/
DebugOnly<bool> empty = flags == 0 && baseObjectCount() == 0;
MOZ_ASSERT_IF(empty, type == jit::MIRType_Value);
MOZ_ASSERT_IF(empty, type == jit::MIRType::Value);
return type;
}
@ -1631,17 +1631,17 @@ HeapTypeSetKey::knownMIRType(CompilerConstraintList* constraints)
TypeSet* types = maybeTypes();
if (!types || types->unknown())
return jit::MIRType_Value;
return jit::MIRType::Value;
TypeFlags flags = types->baseFlags() & ~TYPE_FLAG_ANYOBJECT;
jit::MIRType type;
if (types->unknownObject() || types->getObjectCount())
type = flags ? jit::MIRType_Value : jit::MIRType_Object;
type = flags ? jit::MIRType::Value : jit::MIRType::Object;
else
type = GetMIRTypeFromTypeFlags(flags);
if (type != jit::MIRType_Value)
if (type != jit::MIRType::Value)
freeze(constraints);
/*
@ -1651,7 +1651,7 @@ HeapTypeSetKey::knownMIRType(CompilerConstraintList* constraints)
* that the exact tag is unknown, as it will stay unknown as more types are
* added to the set.
*/
MOZ_ASSERT_IF(types->empty(), type == jit::MIRType_Value);
MOZ_ASSERT_IF(types->empty(), type == jit::MIRType::Value);
return type;
}
@ -2223,7 +2223,7 @@ TemporaryTypeSet::convertDoubleElements(CompilerConstraintList* constraints)
// Only bother with converting known packed arrays whose possible
// element types are int or double. Other arrays require type tests
// when elements are accessed regardless of the conversion.
if (property.knownMIRType(constraints) == jit::MIRType_Double &&
if (property.knownMIRType(constraints) == jit::MIRType::Double &&
!key->hasFlags(constraints, OBJECT_FLAG_NON_PACKED))
{
maybeConvert = true;


@ -665,7 +665,7 @@ class TemporaryTypeSet : public TypeSet
TemporaryTypeSet(LifoAlloc* alloc, jit::MIRType type)
: TemporaryTypeSet(alloc, PrimitiveType(ValueTypeFromMIRType(type)))
{
MOZ_ASSERT(type != jit::MIRType_Value);
MOZ_ASSERT(type != jit::MIRType::Value);
}
/*
@ -680,7 +680,7 @@ class TemporaryTypeSet : public TypeSet
/* Get any type tag which all values in this set must have. */
jit::MIRType getKnownMIRType();
bool isMagicArguments() { return getKnownMIRType() == jit::MIRType_MagicOptimizedArguments; }
bool isMagicArguments() { return getKnownMIRType() == jit::MIRType::MagicOptimizedArguments; }
/* Whether this value may be an object. */
bool maybeObject() { return unknownObject() || baseObjectCount() > 0; }


@ -186,7 +186,7 @@ UnboxedLayout::makeConstructorCode(JSContext* cx, HandleObjectGroup group)
Label notObject;
masm.branchTestObject(Assembler::NotEqual, valueOperand,
types->mightBeMIRType(MIRType_Null) ? &notObject : &failureStoreObject);
types->mightBeMIRType(MIRType::Null) ? &notObject : &failureStoreObject);
Register payloadReg = masm.extractObject(valueOperand, scratch1);
@ -196,7 +196,7 @@ UnboxedLayout::makeConstructorCode(JSContext* cx, HandleObjectGroup group)
}
masm.storeUnboxedProperty(targetAddress, JSVAL_TYPE_OBJECT,
TypedOrValueRegister(MIRType_Object,
TypedOrValueRegister(MIRType::Object,
AnyRegister(payloadReg)), nullptr);
if (notObject.used()) {