Bug 1337024 part 2 - Port Baseline stubs for adding dense/unboxed elements to CacheIR. r=evilpie

This commit is contained in:
Jan de Mooij 2017-02-13 17:38:51 +01:00
Родитель 39312086de
Коммит a196f8f3a1
16 изменённых файлов: 392 добавлений и 860 удалений

Просмотреть файл

@ -1088,6 +1088,116 @@ BaselineCacheIRCompiler::emitStoreDenseElement()
return true;
}
bool
BaselineCacheIRCompiler::emitStoreDenseElementHole()
{
    // Store |val| into a native object's dense elements at |index|. When
    // |handleAdd| is set this also supports appending at index == initLength
    // (growing the initialized length, and the length if needed).
    ObjOperandId objId = reader.objOperandId();
    Int32OperandId indexId = reader.int32OperandId();

    // Allocate the fixed registers first. These need to be fixed for
    // callTypeUpdateIC.
    AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);

    Register obj = allocator.useRegister(masm, objId);
    Register index = allocator.useRegister(masm, indexId);

    // Flag written by the IR emitter: true when this stub was attached for
    // an add-at-initLength store rather than an in-bounds hole write.
    bool handleAdd = reader.readBool();

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Load obj->elements in scratch.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

    // All three addresses below are relative to the elements pointer held in
    // |scratch|; they stay valid only while scratch holds that pointer.
    BaseObjectElementIndex element(scratch, index);
    Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
    Address elementsFlags(scratch, ObjectElements::offsetOfFlags());

    // Check for copy-on-write or frozen elements.
    masm.branchTest32(Assembler::NonZero, elementsFlags,
                      Imm32(ObjectElements::COPY_ON_WRITE |
                            ObjectElements::FROZEN),
                      failure->label());

    if (handleAdd) {
        // Fail if index > initLength.
        masm.branch32(Assembler::Below, initLength, index, failure->label());

        // Check the capacity.
        Address capacity(scratch, ObjectElements::offsetOfCapacity());
        masm.branch32(Assembler::BelowOrEqual, capacity, index, failure->label());

        // We increment initLength after the callTypeUpdateIC call, to ensure
        // the type update code doesn't read uninitialized memory.
    } else {
        // Fail if index >= initLength.
        masm.branch32(Assembler::BelowOrEqual, initLength, index, failure->label());
    }

    // Check if we have to convert a double element.
    Label noConversion;
    masm.branchTest32(Assembler::Zero, elementsFlags,
                      Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS),
                      &noConversion);

    // We need to convert int32 values being stored into doubles. Note that
    // double arrays are only created by IonMonkey, so if we have no FP support
    // Ion is disabled and there should be no double arrays.
    if (cx_->runtime()->jitSupportsFloatingPoint) {
        // It's fine to convert the value in place in Baseline. We can't do
        // this in Ion.
        masm.convertInt32ValueToDouble(val);
    } else {
        masm.assumeUnreachable("There shouldn't be double arrays when there is no FP support.");
    }

    masm.bind(&noConversion);

    // Call the type update IC. After this everything must be infallible as we
    // don't save all registers here.
    LiveGeneralRegisterSet saveRegs;
    saveRegs.add(obj);
    saveRegs.add(index);
    saveRegs.add(val);
    if (!callTypeUpdateIC(obj, val, scratch, saveRegs))
        return false;

    // Reload obj->elements as callTypeUpdateIC used the scratch register.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

    Label doStore;
    if (handleAdd) {
        // If index == initLength, increment initLength.
        Label inBounds;
        masm.branch32(Assembler::NotEqual, initLength, index, &inBounds);

        // Increment initLength.
        masm.add32(Imm32(1), initLength);

        // If length is now <= index, increment length too.
        Label skipIncrementLength;
        Address length(scratch, ObjectElements::offsetOfLength());
        masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
        masm.add32(Imm32(1), length);
        masm.bind(&skipIncrementLength);

        // Skip EmitPreBarrier as the memory is uninitialized.
        masm.jump(&doStore);

        masm.bind(&inBounds);
    }

    EmitPreBarrier(masm, element, MIRType::Value);

    masm.bind(&doStore);
    masm.storeValue(val, element);

    BaselineEmitPostWriteBarrierSlot(masm, obj, val, scratch, LiveGeneralRegisterSet(), cx_);
    return true;
}
bool
BaselineCacheIRCompiler::emitStoreUnboxedArrayElement()
{
@ -1140,6 +1250,83 @@ BaselineCacheIRCompiler::emitStoreUnboxedArrayElement()
return true;
}
bool
BaselineCacheIRCompiler::emitStoreUnboxedArrayElementHole()
{
    // Store |val| into an unboxed array at |index|, where index == initLength
    // at attach time, incrementing the initialized length (and the length if
    // needed).
    ObjOperandId objId = reader.objOperandId();
    Int32OperandId indexId = reader.int32OperandId();

    // Allocate the fixed registers first. These need to be fixed for
    // callTypeUpdateIC.
    AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);

    JSValueType elementType = reader.valueType();
    Register obj = allocator.useRegister(masm, objId);
    Register index = allocator.useRegister(masm, indexId);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Check index <= initLength. The initialized length shares a word with
    // the capacity index, so it has to be masked out before comparing.
    Address initLength(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength());
    masm.load32(initLength, scratch);
    masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), scratch);
    masm.branch32(Assembler::Below, scratch, index, failure->label());

    // Check capacity.
    masm.checkUnboxedArrayCapacity(obj, RegisterOrInt32Constant(index), scratch, failure->label());

    // Call the type update IC. After this everything must be infallible as we
    // don't save all registers here. Only object-typed elements need a type
    // update; other element types are guarded by the IR emitter.
    if (elementType == JSVAL_TYPE_OBJECT) {
        LiveGeneralRegisterSet saveRegs;
        saveRegs.add(obj);
        saveRegs.add(index);
        saveRegs.add(val);
        if (!callTypeUpdateIC(obj, val, scratch, saveRegs))
            return false;
    }

    // The store below addresses the elements vector through |scratch|, so
    // obj->elements must be (re)loaded into scratch on every path reaching
    // |doStore|. Note: it must NOT be loaded before the masked initLength
    // reload below, as that reload clobbers scratch.
    BaseIndex element(scratch, index, ScaleFromElemWidth(UnboxedTypeSize(elementType)));

    // If index == initLength, increment initialized length.
    Label inBounds, doStore;
    masm.load32(initLength, scratch);
    masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), scratch);
    masm.branch32(Assembler::NotEqual, scratch, index, &inBounds);

    masm.add32(Imm32(1), initLength);

    // If length is now <= index, increment length.
    Address length(obj, UnboxedArrayObject::offsetOfLength());
    Label skipIncrementLength;
    masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
    masm.add32(Imm32(1), length);
    masm.bind(&skipIncrementLength);

    // Load obj->elements for the store. Skip EmitUnboxedPreBarrierForBaseline
    // as the memory is uninitialized.
    masm.loadPtr(Address(obj, UnboxedArrayObject::offsetOfElements()), scratch);
    masm.jump(&doStore);

    masm.bind(&inBounds);

    // Load obj->elements and pre-barrier the previously-initialized slot.
    masm.loadPtr(Address(obj, UnboxedArrayObject::offsetOfElements()), scratch);
    EmitUnboxedPreBarrierForBaseline(masm, element, elementType);

    // Note that the storeUnboxedProperty call here is infallible, as the
    // IR emitter is responsible for guarding on |val|'s type.
    masm.bind(&doStore);
    masm.storeUnboxedProperty(element, elementType,
                              ConstantOrRegister(TypedOrValueRegister(val)),
                              /* failure = */ nullptr);

    if (UnboxedTypeNeedsPostBarrier(elementType))
        BaselineEmitPostWriteBarrierSlot(masm, obj, val, scratch, LiveGeneralRegisterSet(), cx_);
    return true;
}
// VM-call trampoline descriptor used to invoke a native (C++) property setter
// from jitcode.
typedef bool (*CallNativeSetterFn)(JSContext*, HandleFunction, HandleObject, HandleValue);
static const VMFunction CallNativeSetterInfo =
    FunctionInfo<CallNativeSetterFn>(CallNativeSetter, "CallNativeSetter");

Просмотреть файл

@ -325,11 +325,6 @@ DoTypeUpdateFallback(JSContext* cx, BaselineFrame* frame, ICUpdatedStub* stub, H
AddTypePropertyId(cx, group, maybeSingleton, id, value);
break;
}
case ICStub::SetElem_DenseOrUnboxedArrayAdd: {
id = JSID_VOID;
AddTypePropertyId(cx, obj, id, value);
break;
}
default:
MOZ_CRASH("Invalid stub");
}
@ -770,16 +765,6 @@ TypedThingRequiresFloatingPoint(JSObject* obj)
type == Scalar::Float64;
}
// Returns true if |obj|/|key| describe a dense-element access on a native or
// unboxed-array object: the key must be a non-negative int32 and the object
// must not be a typed array.
static bool
IsNativeOrUnboxedDenseElementAccess(HandleObject obj, HandleValue key)
{
    if (!obj->isNative() && !obj->is<UnboxedArrayObject>())
        return false;
    return key.isInt32() && key.toInt32() >= 0 && !obj->is<TypedArrayObject>();
}
static bool
DoGetElemFallback(JSContext* cx, BaselineFrame* frame, ICGetElem_Fallback* stub_, HandleValue lhs,
HandleValue rhs, MutableHandleValue res)
@ -918,55 +903,6 @@ LoadTypedThingLength(MacroAssembler& masm, TypedThingLayout layout, Register obj
}
}
//
// SetElem_Fallback
//
// Returns true if |obj| and its prototype chain still match the shapes
// recorded in |stub|: shape(0) is the receiver's shape and shape(i + 1) is
// the shape of the i-th prototype.
static bool
SetElemAddHasSameShapes(ICSetElem_DenseOrUnboxedArrayAdd* stub, JSObject* obj)
{
    static const size_t MAX_DEPTH = ICSetElem_DenseOrUnboxedArrayAdd::MAX_PROTO_CHAIN_DEPTH;
    ICSetElem_DenseOrUnboxedArrayAddImpl<MAX_DEPTH>* nstub = stub->toImplUnchecked<MAX_DEPTH>();

    if (obj->maybeShape() != nstub->shape(0))
        return false;

    JSObject* proto = obj->staticPrototype();
    for (size_t i = 0; i < stub->protoChainDepth(); i++) {
        if (!proto->isNative())
            return false;
        if (proto->as<NativeObject>().lastProperty() != nstub->shape(i + 1))
            return false;
        // Advance along the chain. The original code re-read the receiver's
        // proto here (obj->staticPrototype()), which never advanced the walk
        // and compared the wrong objects for depths >= 2.
        proto = proto->staticPrototype();
        if (!proto) {
            if (i != stub->protoChainDepth() - 1)
                return false;
            break;
        }
    }

    return true;
}
// Returns true if the fallback stub's chain already contains an equivalent
// SetElem_DenseOrUnboxedArrayAdd stub for |obj| (same group and shapes).
static bool
DenseOrUnboxedArraySetElemStubExists(JSContext* cx, ICStub::Kind kind,
                                     ICSetElem_Fallback* stub, HandleObject obj)
{
    MOZ_ASSERT(kind == ICStub::SetElem_DenseOrUnboxedArrayAdd);

    for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
        if (!iter->isSetElem_DenseOrUnboxedArrayAdd())
            continue;

        ICSetElem_DenseOrUnboxedArrayAdd* addStub = iter->toSetElem_DenseOrUnboxedArrayAdd();
        bool sameGroup = JSObject::getGroup(cx, obj) == addStub->group();
        if (sameGroup && SetElemAddHasSameShapes(addStub, obj))
            return true;
    }

    return false;
}
static bool
TypedArraySetElemStubExists(ICSetElem_Fallback* stub, HandleObject obj, bool expectOOB)
{
@ -999,76 +935,6 @@ RemoveExistingTypedArraySetElemStub(JSContext* cx, ICSetElem_Fallback* stub, Han
return false;
}
// Decides whether a just-performed SetElem on |obj| can be optimized with a
// dense/unboxed-array stub. |oldShape|, |oldCapacity| and |oldInitLength|
// were captured before the set; the current state is compared against them.
// On success, |*isAddingCaseOut| is true when the set appended an element at
// the old initialized length (the "add" stub case) and |*protoDepthOut| is
// the number of prototypes that must be shape-guarded.
static bool
CanOptimizeDenseOrUnboxedArraySetElem(JSObject* obj, uint32_t index,
                                      Shape* oldShape, uint32_t oldCapacity, uint32_t oldInitLength,
                                      bool* isAddingCaseOut, size_t* protoDepthOut)
{
    uint32_t initLength = GetAnyBoxedOrUnboxedInitializedLength(obj);
    uint32_t capacity = GetAnyBoxedOrUnboxedCapacity(obj);

    *isAddingCaseOut = false;
    *protoDepthOut = 0;

    // Some initial sanity checks.
    if (initLength < oldInitLength || capacity < oldCapacity)
        return false;

    // Unboxed arrays need to be able to emit floating point code.
    if (obj->is<UnboxedArrayObject>() && !obj->runtimeFromActiveCooperatingThread()->jitSupportsFloatingPoint)
        return false;

    Shape* shape = obj->maybeShape();

    // Cannot optimize if the shape changed.
    if (oldShape != shape)
        return false;

    // Cannot optimize if the capacity changed.
    if (oldCapacity != capacity)
        return false;

    // Cannot optimize if the index doesn't fit within the new initialized length.
    if (index >= initLength)
        return false;

    // Cannot optimize if the value at position after the set is a hole.
    if (obj->isNative() && !obj->as<NativeObject>().containsDenseElement(index))
        return false;

    // At this point, if we know that the initLength did not change, then
    // an optimized set is possible.
    if (oldInitLength == initLength)
        return true;

    // If it did change, ensure that it changed specifically by incrementing by 1
    // to accommodate this particular indexed set.
    if (oldInitLength + 1 != initLength)
        return false;
    if (index != oldInitLength)
        return false;

    // The checks are not complete. The object may have a setter definition,
    // either directly, or via a prototype, or via the target object for a prototype
    // which is a proxy, that handles a particular integer write.
    // Scan the prototype and shape chain to make sure that this is not the case.
    if (obj->isIndexed())
        return false;
    JSObject* curObj = obj->staticPrototype();
    while (curObj) {
        ++*protoDepthOut;
        if (!curObj->isNative() || curObj->isIndexed())
            return false;
        curObj = curObj->staticPrototype();
    }

    // The stub can only record a bounded number of prototype shapes.
    if (*protoDepthOut > ICSetElem_DenseOrUnboxedArrayAdd::MAX_PROTO_CHAIN_DEPTH)
        return false;

    *isAddingCaseOut = true;
    return true;
}
static bool
DoSetElemFallback(JSContext* cx, BaselineFrame* frame, ICSetElem_Fallback* stub_, Value* stack,
HandleValue objv, HandleValue index, HandleValue rhs)
@ -1132,14 +998,6 @@ DoSetElemFallback(JSContext* cx, BaselineFrame* frame, ICSetElem_Fallback* stub_
}
}
// Check the old capacity
uint32_t oldCapacity = 0;
uint32_t oldInitLength = 0;
if (index.isInt32() && index.toInt32() >= 0) {
oldCapacity = GetAnyBoxedOrUnboxedCapacity(obj);
oldInitLength = GetAnyBoxedOrUnboxedInitializedLength(obj);
}
if (op == JSOP_INITELEM || op == JSOP_INITHIDDENELEM) {
if (!InitElemOperation(cx, pc, obj, index, rhs))
return false;
@ -1199,45 +1057,6 @@ DoSetElemFallback(JSContext* cx, BaselineFrame* frame, ICSetElem_Fallback* stub_
}
}
// Try to generate new stubs.
if (IsNativeOrUnboxedDenseElementAccess(obj, index) && !rhs.isMagic(JS_ELEMENTS_HOLE)) {
bool addingCase;
size_t protoDepth;
if (CanOptimizeDenseOrUnboxedArraySetElem(obj, index.toInt32(),
oldShape, oldCapacity, oldInitLength,
&addingCase, &protoDepth))
{
RootedShape shape(cx, obj->maybeShape());
RootedObjectGroup group(cx, JSObject::getGroup(cx, obj));
if (!group)
return false;
if (addingCase &&
!DenseOrUnboxedArraySetElemStubExists(cx, ICStub::SetElem_DenseOrUnboxedArrayAdd,
stub, obj))
{
JitSpew(JitSpew_BaselineIC,
" Generating SetElem_DenseOrUnboxedArrayAdd stub "
"(shape=%p, group=%p, protoDepth=%" PRIuSIZE ")",
shape.get(), group.get(), protoDepth);
ICSetElemDenseOrUnboxedArrayAddCompiler compiler(cx, obj, protoDepth);
ICUpdatedStub* newStub = compiler.getStub(compiler.getStubSpace(outerScript));
if (!newStub)
return false;
if (compiler.needsUpdateStubs() &&
!newStub->addUpdateStubForValue(cx, outerScript, obj, JSID_VOIDHANDLE, rhs))
{
return false;
}
stub->addNewStub(newStub);
}
}
return true;
}
if ((obj->is<TypedArrayObject>() || IsPrimitiveArrayTypedObject(obj)) &&
index.isNumber() &&
rhs.isNumber())
@ -1343,10 +1162,6 @@ BaselineScript::noteArrayWriteHole(uint32_t pcOffset)
stub->toSetElem_Fallback()->noteArrayWriteHole();
}
//
// SetElem_DenseOrUnboxedArray
//
void
EmitUnboxedPreBarrierForBaseline(MacroAssembler &masm, const BaseIndex& address, JSValueType type)
{
@ -1358,239 +1173,6 @@ EmitUnboxedPreBarrierForBaseline(MacroAssembler &masm, const BaseIndex& address,
MOZ_ASSERT(!UnboxedTypeNeedsPreBarrier(type));
}
//
// SetElem_DenseOrUnboxedArrayAdd
//
// Creates the add-element stub: collects the receiver's shape plus the shape
// of each prototype on the chain, then instantiates the Impl template
// specialized for the recorded proto-chain depth.
ICUpdatedStub*
ICSetElemDenseOrUnboxedArrayAddCompiler::getStub(ICStubSpace* space)
{
    // shapes[0] is the receiver's shape (null for unboxed arrays);
    // GetProtoShapes appends one shape per prototype.
    Rooted<ShapeVector> shapes(cx, ShapeVector(cx));
    if (!shapes.append(obj_->maybeShape()))
        return nullptr;

    if (!GetProtoShapes(obj_, protoChainDepth_, &shapes))
        return nullptr;

    JS_STATIC_ASSERT(ICSetElem_DenseOrUnboxedArrayAdd::MAX_PROTO_CHAIN_DEPTH == 4);

    // The depth is a template parameter of the Impl class, so dispatch to the
    // matching specialization.
    ICUpdatedStub* stub = nullptr;
    switch (protoChainDepth_) {
      case 0: stub = getStubSpecific<0>(space, shapes); break;
      case 1: stub = getStubSpecific<1>(space, shapes); break;
      case 2: stub = getStubSpecific<2>(space, shapes); break;
      case 3: stub = getStubSpecific<3>(space, shapes); break;
      case 4: stub = getStubSpecific<4>(space, shapes); break;
      default: MOZ_CRASH("ProtoChainDepth too high.");
    }

    if (!stub || !stub->initUpdatingChain(cx, space))
        return nullptr;

    return stub;
}
// Emits the Baseline stub that appends an element at index == initLength to
// a dense (native) or unboxed array, after guarding on the group, the shape
// (native case) and the shapes of the whole prototype chain.
bool
ICSetElemDenseOrUnboxedArrayAddCompiler::generateStubCode(MacroAssembler& masm)
{
    MOZ_ASSERT(engine_ == Engine::Baseline);

    // R0 = object
    // R1 = key
    // Stack = { ... rhs-value, <return-addr>? }
    Label failure, failurePopR0, failureUnstow;
    masm.branchTestObject(Assembler::NotEqual, R0, &failure);
    masm.branchTestInt32(Assembler::NotEqual, R1, &failure);

    AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
    Register scratchReg = regs.takeAny();

    // Unbox R0 and guard on its group and, if this is a native access, its shape.
    Register obj = masm.extractObject(R0, ExtractTemp0);
    masm.loadPtr(Address(ICStubReg, ICSetElem_DenseOrUnboxedArrayAdd::offsetOfGroup()),
                 scratchReg);
    masm.branchTestObjGroup(Assembler::NotEqual, obj, scratchReg, &failure);
    if (unboxedType_ == JSVAL_TYPE_MAGIC) {
        masm.loadPtr(Address(ICStubReg, ICSetElem_DenseOrUnboxedArrayAddImpl<0>::offsetOfShape(0)),
                     scratchReg);
        masm.branchTestObjShape(Assembler::NotEqual, obj, scratchReg, &failure);
    }

    // Stow both R0 and R1 (object and key)
    // But R0 and R1 still hold their values.
    EmitStowICValues(masm, 2);

    // Remember the frame depth so the unstow failure path can restore it.
    uint32_t framePushedAfterStow = masm.framePushed();

    // We may need to free up some registers.
    regs = availableGeneralRegs(0);
    regs.take(R0);
    regs.take(scratchReg);

    // Shape guard objects on the proto chain.
    Register protoReg = regs.takeAny();
    for (size_t i = 0; i < protoChainDepth_; i++) {
        masm.loadObjProto(i == 0 ? obj : protoReg, protoReg);
        // A null proto before the recorded depth means the chain changed.
        masm.branchTestPtr(Assembler::Zero, protoReg, protoReg, &failureUnstow);
        masm.loadPtr(Address(ICStubReg, ICSetElem_DenseOrUnboxedArrayAddImpl<0>::offsetOfShape(i + 1)),
                     scratchReg);
        masm.branchTestObjShape(Assembler::NotEqual, protoReg, scratchReg, &failureUnstow);
    }
    regs.add(protoReg);
    regs.add(scratchReg);

    if (needsUpdateStubs()) {
        // Stack is now: { ..., rhs-value, object-value, key-value, maybe?-RET-ADDR }
        // Load rhs-value in to R0
        masm.loadValue(Address(masm.getStackPointer(), 2 * sizeof(Value) + ICStackValueOffset), R0);

        // Call the type-update stub.
        if (!callTypeUpdateIC(masm, sizeof(Value)))
            return false;
    }

    // Unstow R0 and R1 (object and key)
    EmitUnstowICValues(masm, 2);

    // Restore object.
    obj = masm.extractObject(R0, ExtractTemp0);

    if (needsUpdateStubs()) {
        // Trigger post barriers here on the value being written. Fields which
        // objects can be written to also need update stubs.
        masm.Push(R1);
        masm.loadValue(Address(masm.getStackPointer(), sizeof(Value) + ICStackValueOffset), R1);

        LiveGeneralRegisterSet saveRegs;
        saveRegs.add(R0);
        saveRegs.addUnchecked(obj);
        saveRegs.add(ICStubReg);
        BaselineEmitPostWriteBarrierSlot(masm, obj, R1, scratchReg, saveRegs, cx);
        masm.Pop(R1);
    }

    // Reset register set.
    regs = availableGeneralRegs(2);
    scratchReg = regs.takeAny();

    // Unbox key.
    Register key = masm.extractInt32(R1, ExtractTemp1);

    if (unboxedType_ == JSVAL_TYPE_MAGIC) {
        // Adding element to a native object.

        // Load obj->elements in scratchReg.
        masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratchReg);

        // Bounds check (key == initLength)
        Address initLength(scratchReg, ObjectElements::offsetOfInitializedLength());
        masm.branch32(Assembler::NotEqual, initLength, key, &failure);

        // Capacity check.
        Address capacity(scratchReg, ObjectElements::offsetOfCapacity());
        masm.branch32(Assembler::BelowOrEqual, capacity, key, &failure);

        // Check for copy on write elements.
        Address elementsFlags(scratchReg, ObjectElements::offsetOfFlags());
        masm.branchTest32(Assembler::NonZero, elementsFlags,
                          Imm32(ObjectElements::COPY_ON_WRITE |
                                ObjectElements::FROZEN),
                          &failure);

        // Failure is not possible now. Free up registers.
        regs.add(R0);
        regs.add(R1);
        regs.takeUnchecked(obj);
        regs.takeUnchecked(key);

        // Increment initLength before write.
        masm.add32(Imm32(1), initLength);

        // If length is now <= key, increment length before write.
        Label skipIncrementLength;
        Address length(scratchReg, ObjectElements::offsetOfLength());
        masm.branch32(Assembler::Above, length, key, &skipIncrementLength);
        masm.add32(Imm32(1), length);
        masm.bind(&skipIncrementLength);

        // Convert int32 values to double if convertDoubleElements is set. In this
        // case the heap typeset is guaranteed to contain both int32 and double, so
        // it's okay to store a double.
        Label dontConvertDoubles;
        masm.branchTest32(Assembler::Zero, elementsFlags,
                          Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS),
                          &dontConvertDoubles);

        Address valueAddr(masm.getStackPointer(), ICStackValueOffset);

        // Note that double arrays are only created by IonMonkey, so if we have no
        // floating-point support Ion is disabled and there should be no double arrays.
        if (cx->runtime()->jitSupportsFloatingPoint)
            masm.convertInt32ValueToDouble(valueAddr, regs.getAny(), &dontConvertDoubles);
        else
            masm.assumeUnreachable("There shouldn't be double arrays when there is no FP support.");
        masm.bind(&dontConvertDoubles);

        // Write the value. No need for pre-barrier since we're not overwriting an old value.
        ValueOperand tmpVal = regs.takeAnyValue();
        BaseIndex element(scratchReg, key, TimesEight);
        masm.loadValue(valueAddr, tmpVal);
        masm.storeValue(tmpVal, element);
    } else {
        // Adding element to an unboxed array.

        // Bounds check (key == initLength)
        Address initLengthAddr(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength());
        masm.load32(initLengthAddr, scratchReg);
        masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), scratchReg);
        masm.branch32(Assembler::NotEqual, scratchReg, key, &failure);

        // Capacity check.
        masm.checkUnboxedArrayCapacity(obj, RegisterOrInt32Constant(key), scratchReg, &failure);

        // Load obj->elements.
        masm.loadPtr(Address(obj, UnboxedArrayObject::offsetOfElements()), scratchReg);

        // Write the value first, since this can fail. No need for pre-barrier
        // since we're not overwriting an old value.
        masm.Push(R0);
        Address valueAddr(masm.getStackPointer(), ICStackValueOffset + sizeof(Value));
        masm.loadValue(valueAddr, R0);
        BaseIndex address(scratchReg, key, ScaleFromElemWidth(UnboxedTypeSize(unboxedType_)));
        masm.storeUnboxedProperty(address, unboxedType_,
                                  ConstantOrRegister(TypedOrValueRegister(R0)), &failurePopR0);
        masm.Pop(R0);

        // Increment initialized length.
        masm.add32(Imm32(1), initLengthAddr);

        // If length is now <= key, increment length.
        Address lengthAddr(obj, UnboxedArrayObject::offsetOfLength());
        Label skipIncrementLength;
        masm.branch32(Assembler::Above, lengthAddr, key, &skipIncrementLength);
        masm.add32(Imm32(1), lengthAddr);
        masm.bind(&skipIncrementLength);
    }

    EmitReturnFromIC(masm);

    if (failurePopR0.used()) {
        // Failure case: restore the value of R0
        masm.bind(&failurePopR0);
        masm.popValue(R0);
        masm.jump(&failure);
    }

    // Failure case - fail but first unstow R0 and R1
    masm.bind(&failureUnstow);
    masm.setFramePushed(framePushedAfterStow);
    EmitUnstowICValues(masm, 2);

    // Failure case - jump to next stub
    masm.bind(&failure);
    EmitStubGuardFailure(masm);
    return true;
}
//
// SetElem_TypedArray
//
@ -5092,27 +4674,6 @@ ICTypeUpdate_ObjectGroup::ICTypeUpdate_ObjectGroup(JitCode* stubCode, ObjectGrou
group_(group)
{ }
// The proto-chain depth is packed into the stub's extra_ field so that
// protoChainDepth() can recover it without extra storage.
ICSetElem_DenseOrUnboxedArrayAdd::ICSetElem_DenseOrUnboxedArrayAdd(JitCode* stubCode, ObjectGroup* group,
                                                                   size_t protoChainDepth)
  : ICUpdatedStub(SetElem_DenseOrUnboxedArrayAdd, stubCode),
    group_(group)
{
    MOZ_ASSERT(protoChainDepth <= MAX_PROTO_CHAIN_DEPTH);
    extra_ = protoChainDepth;
}
// Instantiates the Impl stub for one specific proto-chain depth; called from
// getStub()'s depth switch.
template <size_t ProtoChainDepth>
ICUpdatedStub*
ICSetElemDenseOrUnboxedArrayAddCompiler::getStubSpecific(ICStubSpace* space,
                                                         Handle<ShapeVector> shapes)
{
    RootedObjectGroup group(cx, JSObject::getGroup(cx, obj_));
    if (!group)
        return nullptr;
    Rooted<JitCode*> stubCode(cx, getStubCode());
    return newStub<ICSetElem_DenseOrUnboxedArrayAddImpl<ProtoChainDepth>>(space, stubCode, group, shapes);
}
ICSetElem_TypedArray::ICSetElem_TypedArray(JitCode* stubCode, Shape* shape, Scalar::Type type,
bool expectOutOfBounds)
: ICStub(SetElem_TypedArray, stubCode),

Просмотреть файл

@ -457,112 +457,6 @@ class ICSetElem_Fallback : public ICFallbackStub
};
};
template <size_t ProtoChainDepth> class ICSetElem_DenseOrUnboxedArrayAddImpl;

// Baseline stub that adds an element at initLength to a dense or unboxed
// array. Guards on the object group; the shapes for the receiver and its
// prototype chain live in the depth-templated Impl subclass below.
class ICSetElem_DenseOrUnboxedArrayAdd : public ICUpdatedStub
{
    friend class ICStubSpace;

  public:
    static const size_t MAX_PROTO_CHAIN_DEPTH = 4;

  protected:
    GCPtrObjectGroup group_;

    ICSetElem_DenseOrUnboxedArrayAdd(JitCode* stubCode, ObjectGroup* group, size_t protoChainDepth);

  public:
    static size_t offsetOfGroup() {
        return offsetof(ICSetElem_DenseOrUnboxedArrayAdd, group_);
    }

    GCPtrObjectGroup& group() {
        return group_;
    }

    // The depth is stored in extra_ (see the constructor).
    size_t protoChainDepth() const {
        MOZ_ASSERT(extra_ <= MAX_PROTO_CHAIN_DEPTH);
        return extra_;
    }

    // Unchecked cast to the templated subclass; callers must know the actual
    // depth (or, as in SetElemAddHasSameShapes, only touch shapes that exist).
    template <size_t ProtoChainDepth>
    ICSetElem_DenseOrUnboxedArrayAddImpl<ProtoChainDepth>* toImplUnchecked() {
        return static_cast<ICSetElem_DenseOrUnboxedArrayAddImpl<ProtoChainDepth>*>(this);
    }

    // Checked cast: asserts the template depth matches the stored depth.
    template <size_t ProtoChainDepth>
    ICSetElem_DenseOrUnboxedArrayAddImpl<ProtoChainDepth>* toImpl() {
        MOZ_ASSERT(ProtoChainDepth == protoChainDepth());
        return toImplUnchecked<ProtoChainDepth>();
    }
};
// Depth-specialized subclass storing the receiver shape plus one shape per
// prototype on the guarded chain.
template <size_t ProtoChainDepth>
class ICSetElem_DenseOrUnboxedArrayAddImpl : public ICSetElem_DenseOrUnboxedArrayAdd
{
    friend class ICStubSpace;

    // Note: for unboxed arrays, the first shape is null.
    static const size_t NumShapes = ProtoChainDepth + 1;
    mozilla::Array<GCPtrShape, NumShapes> shapes_;

    ICSetElem_DenseOrUnboxedArrayAddImpl(JitCode* stubCode, ObjectGroup* group,
                                         Handle<ShapeVector> shapes)
      : ICSetElem_DenseOrUnboxedArrayAdd(stubCode, group, ProtoChainDepth)
    {
        MOZ_ASSERT(shapes.length() == NumShapes);
        for (size_t i = 0; i < NumShapes; i++)
            shapes_[i].init(shapes[i]);
    }

  public:
    // GC tracing hook for the stored shapes (entries may be null, see above).
    void traceShapes(JSTracer* trc) {
        for (size_t i = 0; i < NumShapes; i++)
            TraceNullableEdge(trc, &shapes_[i], "baseline-setelem-denseadd-stub-shape");
    }
    Shape* shape(size_t i) const {
        MOZ_ASSERT(i < NumShapes);
        return shapes_[i];
    }
    // Byte offset of shapes_[idx]; used by the stub code generator to load
    // shapes directly from the stub.
    static size_t offsetOfShape(size_t idx) {
        return offsetof(ICSetElem_DenseOrUnboxedArrayAddImpl, shapes_) + idx * sizeof(GCPtrShape);
    }
};
// Compiler for the add-element stub above. unboxedType_ is JSVAL_TYPE_MAGIC
// for native (dense) objects, otherwise the unboxed array's element type.
class ICSetElemDenseOrUnboxedArrayAddCompiler : public ICStubCompiler {
    RootedObject obj_;
    size_t protoChainDepth_;
    JSValueType unboxedType_;

    MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);

  protected:
    // The stub-code cache key must distinguish depth and element type, since
    // both change the generated code.
    virtual int32_t getKey() const {
        return static_cast<int32_t>(engine_) |
               (static_cast<int32_t>(kind) << 1) |
               (static_cast<int32_t>(protoChainDepth_) << 17) |
               (static_cast<int32_t>(unboxedType_) << 20);
    }

  public:
    ICSetElemDenseOrUnboxedArrayAddCompiler(JSContext* cx, HandleObject obj, size_t protoChainDepth)
        : ICStubCompiler(cx, ICStub::SetElem_DenseOrUnboxedArrayAdd, Engine::Baseline),
          obj_(cx, obj),
          protoChainDepth_(protoChainDepth),
          unboxedType_(obj->is<UnboxedArrayObject>()
                       ? obj->as<UnboxedArrayObject>().elementType()
                       : JSVAL_TYPE_MAGIC)
    {}

    template <size_t ProtoChainDepth>
    ICUpdatedStub* getStubSpecific(ICStubSpace* space, Handle<ShapeVector> shapes);

    ICUpdatedStub* getStub(ICStubSpace* space);

    // Dense stores and unboxed object stores feed type information, so they
    // need type-update stubs; other unboxed element types do not.
    bool needsUpdateStubs() {
        return unboxedType_ == JSVAL_TYPE_MAGIC || unboxedType_ == JSVAL_TYPE_OBJECT;
    }
};
// Accesses scalar elements of a typed array or typed object.
class ICSetElem_TypedArray : public ICStub
{

Просмотреть файл

@ -51,7 +51,6 @@ namespace jit {
_(GetElem_Fallback) \
\
_(SetElem_Fallback) \
_(SetElem_DenseOrUnboxedArrayAdd) \
_(SetElem_TypedArray) \
\
_(In_Fallback) \

Просмотреть файл

@ -27,12 +27,6 @@ SetElemICInspector::sawOOBDenseWrite() const
if (!icEntry_)
return false;
// Check for an element adding stub.
for (ICStub* stub = icEntry_->firstStub(); stub; stub = stub->next()) {
if (stub->isSetElem_DenseOrUnboxedArrayAdd())
return true;
}
// Check for a write hole bit on the SetElem_Fallback stub.
ICStub* stub = icEntry_->fallbackStub();
if (stub->isSetElem_Fallback())

Просмотреть файл

@ -1916,8 +1916,12 @@ SetPropIRGenerator::tryAttachStub()
if (maybeGuardInt32Index(idVal_, setElemKeyValueId(), &index, &indexId)) {
if (tryAttachSetDenseElement(obj, objId, index, indexId, rhsValId))
return true;
if (tryAttachSetDenseElementHole(obj, objId, index, indexId, rhsValId))
return true;
if (tryAttachSetUnboxedArrayElement(obj, objId, index, indexId, rhsValId))
return true;
if (tryAttachSetUnboxedArrayElementHole(obj, objId, index, indexId, rhsValId))
return true;
return false;
}
return false;
@ -2266,9 +2270,126 @@ SetPropIRGenerator::tryAttachSetDenseElement(HandleObject obj, ObjOperandId objI
return true;
}
// Returns true if it's safe to attach an add-element stub for |obj|: neither
// the receiver nor any prototype may have indexed properties or class hooks
// that could intercept an indexed define/set. For property-init ops the
// prototype chain is irrelevant and only the receiver is checked.
static bool
CanAttachAddElement(JSObject* obj, bool isInit)
{
    // Make sure the objects on the prototype don't have any indexed properties
    // or that such properties can't appear without a shape change.
    for (JSObject* cur = obj; ; ) {
        // The first two checks are also relevant to the receiver object.
        if (cur->isIndexed())
            return false;

        const Class* clasp = cur->getClass();
        bool isArrayClass = clasp == &ArrayObject::class_ || clasp == &UnboxedArrayObject::class_;
        if (!isArrayClass &&
            (clasp->getAddProperty() ||
             clasp->getResolve() ||
             clasp->getOpsLookupProperty() ||
             clasp->getSetProperty() ||
             clasp->getOpsSetProperty()))
        {
            return false;
        }

        // If we're initializing a property instead of setting one, the objects
        // on the prototype are not relevant.
        if (isInit)
            return true;

        JSObject* proto = cur->staticPrototype();
        if (!proto)
            return true;
        if (!proto->isNative())
            return false;
        cur = proto;
    }
}
// Emits a shape guard for every object on |obj|'s prototype chain (not for
// |obj| itself), plus an identity guard on each proto whose shape does not
// already pin it down.
static void
ShapeGuardProtoChain(CacheIRWriter& writer, JSObject* obj, ObjOperandId objId)
{
    JSObject* cur = obj;
    ObjOperandId curId = objId;

    while (true) {
        // Guard on the proto if the shape does not imply the proto. Singleton
        // objects always trigger a shape change when the proto changes, so we
        // don't need a guard in that case.
        bool needProtoGuard = cur->hasUncacheableProto() && !cur->isSingleton();

        cur = cur->staticPrototype();
        if (!cur)
            return;

        curId = writer.loadProto(curId);
        if (needProtoGuard)
            writer.guardSpecificObject(curId, cur);
        writer.guardShape(curId, cur->as<NativeObject>().shape());
    }
}
// Attaches a CacheIR stub that stores into a native object's dense elements
// at a hole: either appending at index == initLength (the "add" case) or
// writing an in-bounds hole. Returns true when a stub was attached.
//
// NOTE(review): the original text had the old tryAttachSetUnboxedArrayElement
// signature fused onto this one (a diff-rendering artifact), producing
// invalid C++; only the tryAttachSetDenseElementHole definition belongs here.
bool
SetPropIRGenerator::tryAttachSetDenseElementHole(HandleObject obj, ObjOperandId objId,
                                                 uint32_t index, Int32OperandId indexId,
                                                 ValOperandId rhsId)
{
    // Only native objects are handled, and storing the hole magic value is
    // never optimized.
    if (!obj->isNative() || rhsVal_.isMagic(JS_ELEMENTS_HOLE))
        return false;

    JSOp op = JSOp(*pc_);
    MOZ_ASSERT(IsPropertySetOp(op) || IsPropertyInitOp(op));

    if (op == JSOP_INITHIDDENELEM)
        return false;

    NativeObject* nobj = &obj->as<NativeObject>();
    if (nobj->getElementsHeader()->isFrozen())
        return false;

    uint32_t capacity = nobj->getDenseCapacity();
    uint32_t initLength = nobj->getDenseInitializedLength();

    // Optimize if we're adding an element at initLength or writing to a hole.
    // Don't handle the adding case if the current access is in bounds, to
    // ensure we always call noteArrayWriteHole.
    bool isAdd = index == initLength;
    bool isHoleInBounds = index < initLength && !nobj->containsDenseElement(index);
    if (!isAdd && !isHoleInBounds)
        return false;

    // Checking the capacity also checks for arrays with non-writable length,
    // as the capacity is always less than or equal to the length in this case.
    if (index >= capacity)
        return false;

    MOZ_ASSERT(!nobj->is<TypedArrayObject>());

    // Check for other indexed properties or class hooks.
    if (!CanAttachAddElement(nobj, IsPropertyInitOp(op)))
        return false;

    writer.guardGroup(objId, nobj->group());
    writer.guardShape(objId, nobj->shape());

    // Also shape guard the proto chain, unless this is an INITELEM.
    if (IsPropertySetOp(op))
        ShapeGuardProtoChain(writer, obj, objId);

    writer.storeDenseElementHole(objId, indexId, rhsId, isAdd);
    writer.returnFromIC();

    // Type inference uses JSID_VOID for the element types.
    setUpdateStubInfo(nobj->group(), JSID_VOID);

    trackAttached(isAdd ? "AddDenseElement" : "StoreDenseElementHole");
    return true;
}
bool
SetPropIRGenerator::tryAttachSetUnboxedArrayElement(HandleObject obj, ObjOperandId objId,
uint32_t index, Int32OperandId indexId,
ValOperandId rhsId)
{
if (!obj->is<UnboxedArrayObject>())
return false;
@ -2294,6 +2415,52 @@ SetPropIRGenerator::tryAttachSetUnboxedArrayElement(HandleObject obj, ObjOperand
return true;
}
// Attaches a CacheIR stub that appends an element at index == initLength to
// an unboxed array. Returns true when a stub was attached.
bool
SetPropIRGenerator::tryAttachSetUnboxedArrayElementHole(HandleObject obj, ObjOperandId objId,
                                                        uint32_t index, Int32OperandId indexId,
                                                        ValOperandId rhsId)
{
    // Only unboxed arrays are handled, and storing the hole magic value is
    // never optimized.
    if (!obj->is<UnboxedArrayObject>())
        return false;
    if (rhsVal_.isMagic(JS_ELEMENTS_HOLE))
        return false;

    if (!cx_->runtime()->jitSupportsFloatingPoint)
        return false;

    JSOp op = JSOp(*pc_);
    MOZ_ASSERT(IsPropertySetOp(op) || IsPropertyInitOp(op));

    if (op == JSOP_INITHIDDENELEM)
        return false;

    // Optimize if we're adding an element at initLength. Unboxed arrays don't
    // have holes at indexes < initLength.
    UnboxedArrayObject* unboxedObj = &obj->as<UnboxedArrayObject>();
    if (index != unboxedObj->initializedLength())
        return false;
    if (index >= unboxedObj->capacity())
        return false;

    // Check for other indexed properties or class hooks.
    if (!CanAttachAddElement(unboxedObj, IsPropertyInitOp(op)))
        return false;

    writer.guardGroup(objId, unboxedObj->group());

    JSValueType elemType =
        unboxedObj->group()->unboxedLayoutDontCheckGeneration().elementType();
    EmitGuardUnboxedPropertyType(writer, elemType, rhsId);

    // Also shape guard the proto chain, unless this is an INITELEM.
    if (IsPropertySetOp(op))
        ShapeGuardProtoChain(writer, unboxedObj, objId);

    writer.storeUnboxedArrayElementHole(objId, indexId, rhsId, elemType);
    writer.returnFromIC();

    // Type inference uses JSID_VOID for the element types.
    setUpdateStubInfo(unboxedObj->group(), JSID_VOID);

    trackAttached("StoreUnboxedArrayElementHole");
    return true;
}
bool
SetPropIRGenerator::tryAttachAddSlotStub(HandleObjectGroup oldGroup, HandleShape oldShape)
{
@ -2419,24 +2586,7 @@ SetPropIRGenerator::tryAttachAddSlotStub(HandleObjectGroup oldGroup, HandleShape
}
writer.guardShape(holderId, oldShape);
// Shape guard the objects on the proto chain.
JSObject* lastObj = obj;
ObjOperandId lastObjId = objId;
while (true) {
// Guard on the proto if the shape does not imply the proto. Singleton
// objects always trigger a shape change when the proto changes, so we
// don't need a guard in that case.
bool guardProto = lastObj->hasUncacheableProto() && !lastObj->isSingleton();
lastObj = lastObj->staticPrototype();
if (!lastObj)
break;
lastObjId = writer.loadProto(lastObjId);
if (guardProto)
writer.guardSpecificObject(lastObjId, lastObj);
writer.guardShape(lastObjId, lastObj->as<NativeObject>().shape());
}
ShapeGuardProtoChain(writer, obj, objId);
ObjectGroup* newGroup = obj->group();

Просмотреть файл

@ -188,7 +188,9 @@ extern const char* CacheKindNames[];
_(StoreTypedObjectScalarProperty) \
_(StoreUnboxedProperty) \
_(StoreDenseElement) \
_(StoreDenseElementHole) \
_(StoreUnboxedArrayElement) \
_(StoreUnboxedArrayElementHole) \
_(CallNativeSetter) \
_(CallScriptedSetter) \
_(CallSetArrayLength) \
@ -663,6 +665,22 @@ class MOZ_RAII CacheIRWriter : public JS::CustomAutoRooter
writeOperandId(rhs);
buffer_.writeByte(uint32_t(elementType));
}
// Emit a StoreUnboxedArrayElementHole op: opcode + object operand, then the
// index and rhs operand ids, then the unboxed element type packed into a
// single byte of the IR buffer.
void storeUnboxedArrayElementHole(ObjOperandId obj, Int32OperandId index, ValOperandId rhs,
JSValueType elementType)
{
writeOpWithOperandId(CacheOp::StoreUnboxedArrayElementHole, obj);
writeOperandId(index);
writeOperandId(rhs);
buffer_.writeByte(uint32_t(elementType));
}
// Emit a StoreDenseElementHole op. 'handleAdd' (encoded as one byte after
// the operands) tells the compiler whether the stub may append at
// index == initializedLength or only overwrite existing elements.
void storeDenseElementHole(ObjOperandId obj, Int32OperandId index, ValOperandId rhs,
bool handleAdd)
{
writeOpWithOperandId(CacheOp::StoreDenseElementHole, obj);
writeOperandId(index);
writeOperandId(rhs);
buffer_.writeByte(handleAdd);
}
void callScriptedSetter(ObjOperandId obj, JSFunction* setter, ValOperandId rhs) {
writeOpWithOperandId(CacheOp::CallScriptedSetter, obj);
addStubField(uintptr_t(setter), StubField::Type::JSObject);
@ -1041,6 +1059,11 @@ class MOZ_RAII SetPropIRGenerator : public IRGenerator
bool tryAttachSetUnboxedArrayElement(HandleObject obj, ObjOperandId objId, uint32_t index,
Int32OperandId indexId, ValOperandId rhsId);
bool tryAttachSetDenseElementHole(HandleObject obj, ObjOperandId objId, uint32_t index,
Int32OperandId indexId, ValOperandId rhsId);
bool tryAttachSetUnboxedArrayElementHole(HandleObject obj, ObjOperandId objId, uint32_t index,
Int32OperandId indexId, ValOperandId rhsId);
void trackAttached(const char* name);
public:

Просмотреть файл

@ -891,12 +891,24 @@ IonCacheIRCompiler::emitStoreDenseElement()
MOZ_CRASH("Baseline-specific op");
}
// StoreDenseElementHole is only generated for Baseline ICs; reaching it in
// the Ion CacheIR compiler is a bug.
bool
IonCacheIRCompiler::emitStoreDenseElementHole()
{
MOZ_CRASH("Baseline-specific op");
}
// StoreUnboxedArrayElement is only generated for Baseline ICs; reaching it
// in the Ion CacheIR compiler is a bug.
bool
IonCacheIRCompiler::emitStoreUnboxedArrayElement()
{
MOZ_CRASH("Baseline-specific op");
}
// StoreUnboxedArrayElementHole is only generated for Baseline ICs; reaching
// it in the Ion CacheIR compiler is a bug.
bool
IonCacheIRCompiler::emitStoreUnboxedArrayElementHole()
{
MOZ_CRASH("Baseline-specific op");
}
bool
IonCacheIRCompiler::emitCallNativeSetter()
{

Просмотреть файл

@ -267,22 +267,6 @@ ICStub::trace(JSTracer* trc)
TraceEdge(trc, &callStub->expectedStr(), "baseline-callstringsplit-str");
break;
}
case ICStub::SetElem_DenseOrUnboxedArrayAdd: {
ICSetElem_DenseOrUnboxedArrayAdd* setElemStub = toSetElem_DenseOrUnboxedArrayAdd();
TraceEdge(trc, &setElemStub->group(), "baseline-setelem-denseadd-group");
JS_STATIC_ASSERT(ICSetElem_DenseOrUnboxedArrayAdd::MAX_PROTO_CHAIN_DEPTH == 4);
switch (setElemStub->protoChainDepth()) {
case 0: setElemStub->toImpl<0>()->traceShapes(trc); break;
case 1: setElemStub->toImpl<1>()->traceShapes(trc); break;
case 2: setElemStub->toImpl<2>()->traceShapes(trc); break;
case 3: setElemStub->toImpl<3>()->traceShapes(trc); break;
case 4: setElemStub->toImpl<4>()->traceShapes(trc); break;
default: MOZ_CRASH("Invalid proto stub.");
}
break;
}
case ICStub::SetElem_TypedArray: {
ICSetElem_TypedArray* setElemStub = toSetElem_TypedArray();
TraceEdge(trc, &setElemStub->shape(), "baseline-setelem-typedarray-shape");
@ -580,17 +564,6 @@ ICStubCompiler::callVM(const VMFunction& fun, MacroAssembler& masm)
return true;
}
// Emit a call to the type-update IC for the value already in R0; the object
// being updated is a boxed Value on the stack at 'objectOffset'. Returns
// false if the DoTypeUpdateFallback VM wrapper could not be obtained.
bool
ICStubCompiler::callTypeUpdateIC(MacroAssembler& masm, uint32_t objectOffset)
{
    JitCode* fallbackWrapper =
        cx->runtime()->jitRuntime()->getVMWrapper(DoTypeUpdateFallbackInfo);
    if (fallbackWrapper) {
        EmitCallTypeUpdateIC(masm, fallbackWrapper, objectOffset);
        return true;
    }
    return false;
}
void
ICStubCompiler::enterStubFrame(MacroAssembler& masm, Register scratch)
{

Просмотреть файл

@ -1067,10 +1067,6 @@ class ICStubCompiler
// Emits a normal (non-tail) call to a VMFunction wrapper.
MOZ_MUST_USE bool callVM(const VMFunction& fun, MacroAssembler& masm);
// Emits a call to a type-update IC, assuming that the value to be
// checked is already in R0.
MOZ_MUST_USE bool callTypeUpdateIC(MacroAssembler& masm, uint32_t objectOffset);
// A stub frame is used when a stub wants to call into the VM without
// performing a tail call. This is required for the return address
// to pc mapping to work.

Просмотреть файл

@ -268,63 +268,6 @@ EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false)
masm.adjustFrame(-values * sizeof(Value));
}
// Call the first type-update stub of the current ICUpdatedStub for the value
// in R0. If the stub chain reports failure (R1.scratchReg() != 1), enter a
// stub frame and call the type-update fallback VM wrapper 'code'. ARM flavor.
inline void
EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset)
{
MOZ_ASSERT(R2 == ValueOperand(r1, r0));
// R0 contains the value that needs to be typechecked. The object we're
// updating is a boxed Value on the stack, at offset objectOffset from esp,
// excluding the return address.
// Save the current ICStubReg to stack, as well as the TailCallReg,
// since on ARM, the LR is live.
masm.push(ICStubReg);
masm.push(ICTailCallReg);
// This is expected to be called from within an IC, when ICStubReg is
// properly initialized to point to the stub.
masm.loadPtr(Address(ICStubReg, ICUpdatedStub::offsetOfFirstUpdateStub()),
ICStubReg);
// TODO: Change r0 uses below to use masm's configurable scratch register instead.
// Load stubcode pointer from ICStubReg into r0.
masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0);
// Call the stubcode.
masm.ma_blx(r0);
// Restore the old stub reg and tailcall reg.
masm.pop(ICTailCallReg);
masm.pop(ICStubReg);
// The update IC will store 0 or 1 in R1.scratchReg() reflecting if the
// value in R0 type-checked properly or not.
Label success;
masm.cmp32(R1.scratchReg(), Imm32(1));
masm.j(Assembler::Equal, &success);
// If the IC failed, then call the update fallback function.
// Arguments pushed for the fallback: value (R0), object (reloaded from the
// stack into R1), the stub, then the BaselineFrame*.
EmitBaselineEnterStubFrame(masm, R1.scratchReg());
masm.loadValue(Address(BaselineStackReg, STUB_FRAME_SIZE + objectOffset), R1);
masm.Push(R0);
masm.Push(R1);
masm.Push(ICStubReg);
// Load previous frame pointer, push BaselineFrame*.
masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
EmitBaselineCallVM(code, masm);
EmitBaselineLeaveStubFrame(masm);
// Success at end.
masm.bind(&success);
}
template <typename AddrType>
inline void
EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)

Просмотреть файл

@ -240,57 +240,6 @@ EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false)
masm.adjustFrame(-values * sizeof(Value));
}
// Call the first type-update stub of the current ICUpdatedStub for the value
// in R0. If the stub chain reports failure (R1.scratchReg() != 1), enter a
// stub frame and call the type-update fallback VM wrapper 'code'. AArch64
// flavor (uses Blr; pushes/pops stub and tail-call registers in pairs).
inline void
EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset)
{
// R0 contains the value that needs to be typechecked.
// The object we're updating is a boxed Value on the stack, at offset
// objectOffset from stack top, excluding the return address.
MOZ_ASSERT(R2 == ValueOperand(r0));
// Save the current ICStubReg to stack, as well as the TailCallReg,
// since on AArch64, the LR is live.
masm.push(ICStubReg, ICTailCallReg);
// This is expected to be called from within an IC, when ICStubReg
// is properly initialized to point to the stub.
masm.loadPtr(Address(ICStubReg, (int32_t)ICUpdatedStub::offsetOfFirstUpdateStub()),
ICStubReg);
// Load stubcode pointer from ICStubReg into ICTailCallReg.
masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), ICTailCallReg);
// Call the stubcode.
masm.Blr(ARMRegister(ICTailCallReg, 64));
// Restore the old stub reg and tailcall reg.
masm.pop(ICTailCallReg, ICStubReg);
// The update IC will store 0 or 1 in R1.scratchReg() reflecting if the
// value in R0 type-checked properly or not.
Label success;
masm.cmp32(R1.scratchReg(), Imm32(1));
masm.j(Assembler::Equal, &success);
// If the IC failed, then call the update fallback function.
// Arguments pushed for the fallback: value (R0), object (reloaded from the
// stack into R1), the stub, then the BaselineFrame*.
EmitBaselineEnterStubFrame(masm, R1.scratchReg());
masm.loadValue(Address(masm.getStackPointer(), STUB_FRAME_SIZE + objectOffset), R1);
masm.Push(R0.valueReg());
masm.Push(R1.valueReg());
masm.Push(ICStubReg);
// Load previous frame pointer, push BaselineFrame*.
masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
EmitBaselineCallVM(code, masm);
EmitBaselineLeaveStubFrame(masm);
// Success at end.
masm.bind(&success);
}
template <typename AddrType>
inline void
EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)

Просмотреть файл

@ -272,60 +272,6 @@ EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false)
masm.adjustFrame(-values * sizeof(Value));
}
// Call the first type-update stub of the current ICUpdatedStub for the value
// in R0. If the stub chain reports failure (R1.scratchReg() != 1), enter a
// stub frame and call the type-update fallback VM wrapper 'code'. MIPS
// flavor (manual stack-slot save/restore of ICStubReg and ICTailCallReg).
inline void
EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset)
{
// R0 contains the value that needs to be typechecked.
// The object we're updating is a boxed Value on the stack, at offset
// objectOffset from $sp, excluding the return address.
// Save the current ICStubReg to stack, as well as the TailCallReg,
// since on mips, the $ra is live.
masm.subPtr(Imm32(2 * sizeof(intptr_t)), StackPointer);
masm.storePtr(ICStubReg, Address(StackPointer, sizeof(intptr_t)));
masm.storePtr(ICTailCallReg, Address(StackPointer, 0));
// This is expected to be called from within an IC, when ICStubReg
// is properly initialized to point to the stub.
masm.loadPtr(Address(ICStubReg, ICUpdatedStub::offsetOfFirstUpdateStub()),
ICStubReg);
// Load stubcode pointer from ICStubReg into R2.scratchReg().
masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
// Call the stubcode.
masm.call(R2.scratchReg());
// Restore the old stub reg and tailcall reg.
masm.loadPtr(Address(StackPointer, 0), ICTailCallReg);
masm.loadPtr(Address(StackPointer, sizeof(intptr_t)), ICStubReg);
masm.addPtr(Imm32(2 * sizeof(intptr_t)), StackPointer);
// The update IC will store 0 or 1 in R1.scratchReg() reflecting if the
// value in R0 type-checked properly or not.
Label success;
masm.ma_b(R1.scratchReg(), Imm32(1), &success, Assembler::Equal, ShortJump);
// If the IC failed, then call the update fallback function.
// Arguments pushed for the fallback: value (R0), object (reloaded from the
// stack into R1), the stub, then the BaselineFrame*.
EmitBaselineEnterStubFrame(masm, R1.scratchReg());
masm.loadValue(Address(BaselineStackReg, STUB_FRAME_SIZE + objectOffset), R1);
masm.Push(R0);
masm.Push(R1);
masm.Push(ICStubReg);
// Load previous frame pointer, push BaselineFrame*.
masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
EmitBaselineCallVM(code, masm);
EmitBaselineLeaveStubFrame(masm);
// Success at end.
masm.bind(&success);
}
template <typename AddrType>
inline void
EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)

Просмотреть файл

@ -29,7 +29,6 @@ inline void EmitBaselineEnterStubFrame(MacroAssembler&, Register) { MOZ_CRASH();
inline void EmitBaselineLeaveStubFrame(MacroAssembler&, bool v = false) { MOZ_CRASH(); }
inline void EmitStowICValues(MacroAssembler&, int) { MOZ_CRASH(); }
inline void EmitUnstowICValues(MacroAssembler&, int, bool v = false) { MOZ_CRASH(); }
inline void EmitCallTypeUpdateIC(MacroAssembler&, JitCode*, uint32_t) { MOZ_CRASH(); }
inline void EmitStubGuardFailure(MacroAssembler&) { MOZ_CRASH(); }
template <typename T> inline void EmitPreBarrier(MacroAssembler&, T, MIRType) { MOZ_CRASH(); }

Просмотреть файл

@ -275,53 +275,6 @@ EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false)
masm.adjustFrame(-values * sizeof(Value));
}
// Call the first type-update stub of the current ICUpdatedStub for the value
// in R0. If the stub chain reports failure (R1.scratchReg() != 1), enter a
// stub frame and call the type-update fallback VM wrapper 'code'.
// x86-family flavor: the return address lives on the stack, so only
// ICStubReg needs saving, and the stubcode is called through memory.
inline void
EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset)
{
// R0 contains the value that needs to be typechecked.
// The object we're updating is a boxed Value on the stack, at offset
// objectOffset from stack top, excluding the return address.
// Save the current ICStubReg to stack
masm.push(ICStubReg);
// This is expected to be called from within an IC, when ICStubReg
// is properly initialized to point to the stub.
masm.loadPtr(Address(ICStubReg, (int32_t) ICUpdatedStub::offsetOfFirstUpdateStub()),
ICStubReg);
// Call the stubcode.
masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
// Restore the old stub reg.
masm.pop(ICStubReg);
// The update IC will store 0 or 1 in R1.scratchReg() reflecting if the
// value in R0 type-checked properly or not.
Label success;
masm.cmp32(R1.scratchReg(), Imm32(1));
masm.j(Assembler::Equal, &success);
// If the IC failed, then call the update fallback function.
// Arguments pushed for the fallback: value (R0), object (reloaded from the
// stack into R1), the stub, then the BaselineFrame*.
EmitBaselineEnterStubFrame(masm, R1.scratchReg());
masm.loadValue(Address(BaselineStackReg, STUB_FRAME_SIZE + objectOffset), R1);
masm.Push(R0);
masm.Push(R1);
masm.Push(ICStubReg);
// Load previous frame pointer, push BaselineFrame*.
masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
EmitBaselineCallVM(code, masm);
EmitBaselineLeaveStubFrame(masm);
// Success at end.
masm.bind(&success);
}
template <typename AddrType>
inline void
EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)

Просмотреть файл

@ -271,53 +271,6 @@ EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false)
masm.adjustFrame(-values * sizeof(Value));
}
// Call the first type-update stub of the current ICUpdatedStub for the value
// in R0. If the stub chain reports failure (R1.scratchReg() != 1), enter a
// stub frame and call the type-update fallback VM wrapper 'code'.
// x86-family flavor: the return address lives on the stack, so only
// ICStubReg needs saving, and the stubcode is called through memory.
inline void
EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset)
{
// R0 contains the value that needs to be typechecked.
// The object we're updating is a boxed Value on the stack, at offset
// objectOffset from stack top, excluding the return address.
// Save the current ICStubReg to stack
masm.push(ICStubReg);
// This is expected to be called from within an IC, when ICStubReg
// is properly initialized to point to the stub.
masm.loadPtr(Address(ICStubReg, (int32_t) ICUpdatedStub::offsetOfFirstUpdateStub()),
ICStubReg);
// Call the stubcode.
masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
// Restore the old stub reg.
masm.pop(ICStubReg);
// The update IC will store 0 or 1 in R1.scratchReg() reflecting if the
// value in R0 type-checked properly or not.
Label success;
masm.cmp32(R1.scratchReg(), Imm32(1));
masm.j(Assembler::Equal, &success);
// If the IC failed, then call the update fallback function.
// Arguments pushed for the fallback: value (R0), object (reloaded from the
// stack into R1), the stub, then the BaselineFrame*.
EmitBaselineEnterStubFrame(masm, R1.scratchReg());
masm.loadValue(Address(BaselineStackReg, STUB_FRAME_SIZE + objectOffset), R1);
masm.Push(R0);
masm.Push(R1);
masm.Push(ICStubReg);
// Load previous frame pointer, push BaselineFrame*.
masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
EmitBaselineCallVM(code, masm);
EmitBaselineLeaveStubFrame(masm);
// Success at end.
masm.bind(&success);
}
template <typename AddrType>
inline void
EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)