Mirror of https://github.com/mozilla/gecko-dev.git

Commit 0597294242: merge mozilla-inbound to mozilla-central a=merge
@@ -330,7 +330,6 @@ SelectorAutocompleter.prototype = {
   */
  _onSearchKeypress: function (event) {
    let popup = this.searchPopup;

    switch (event.keyCode) {
      case KeyCodes.DOM_VK_RETURN:
      case KeyCodes.DOM_VK_TAB:

@@ -373,6 +372,9 @@ SelectorAutocompleter.prototype = {
      case KeyCodes.DOM_VK_ESCAPE:
        if (popup.isOpen) {
          this.hidePopup();
        } else {
          this.emit("processing-done");
          return;
        }
        break;
@@ -149,7 +149,13 @@ already_AddRefed<PaintedLayer>
ClientLayerManager::CreatePaintedLayerWithHint(PaintedLayerCreationHint aHint)
{
  NS_ASSERTION(InConstruction(), "Only allowed in construction phase");
  // The non-tiling ContentClient requires CrossProcessSemaphore which
  // isn't implemented for OSX.
#ifdef XP_MACOSX
  if (true) {
#else
  if (gfxPrefs::LayersTilesEnabled()) {
#endif
    RefPtr<ClientTiledPaintedLayer> layer = new ClientTiledPaintedLayer(this, aHint);
    CREATE_SHADOW(Painted);
    return layer.forget();
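A standalone sketch of the forced-branch pattern in the hunk above, using a hypothetical helper and a stand-in for gfxPrefs::LayersTilesEnabled(): on OSX the tiled path is taken unconditionally because the non-tiling ContentClient needs CrossProcessSemaphore, which is not implemented there.

// sketch_forced_branch.cpp -- hypothetical names, not the Gecko API.
#include <cstdio>

static bool LayersTilesEnabledPref() { return false; }  // stand-in for the real pref

static bool UseTiledPaintedLayer()
{
#ifdef XP_MACOSX
    // The non-tiling content client needs a cross-process semaphore that is
    // not implemented on OSX, so the tiled path is forced at compile time.
    return true;
#else
    return LayersTilesEnabledPref();
#endif
}

int main()
{
    std::printf("tiled: %d\n", UseTiledPaintedLayer());
}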
@@ -1843,7 +1843,7 @@ CompositorBridgeParent::NotifyDidComposite(uint64_t aTransactionId, TimeStamp& a

  MonitorAutoLock lock(*sIndirectLayerTreesLock);
  ForEachIndirectLayerTree([&] (LayerTreeState* lts, const uint64_t& aLayersId) -> void {
    if (lts->mCrossProcessParent) {
    if (lts->mCrossProcessParent && lts->mParent == this) {
      CrossProcessCompositorBridgeParent* cpcp = lts->mCrossProcessParent;
      cpcp->DidComposite(aLayersId, aCompositeStart, aCompositeEnd);
    }
@@ -930,7 +930,7 @@ BaselineCacheIRCompiler::emitStoreUnboxedProperty()

    // Note that the storeUnboxedProperty call here is infallible, as the
    // IR emitter is responsible for guarding on |val|'s type.
    EmitUnboxedPreBarrierForBaseline(masm, fieldAddr, fieldType);
    EmitICUnboxedPreBarrier(masm, fieldAddr, fieldType);
    masm.storeUnboxedProperty(fieldAddr, fieldType,
                              ConstantOrRegister(TypedOrValueRegister(val)),
                              /* failure = */ nullptr);
@@ -956,7 +956,7 @@ BaselineCacheIRCompiler::emitStoreTypedObjectReferenceProperty()
    Register obj = allocator.useRegister(masm, objId);
    AutoScratchRegister scratch2(allocator, masm);

    // We don't need a type update IC if the property is always a string.scratch
    // We don't need a type update IC if the property is always a string.
    if (type != ReferenceTypeDescr::TYPE_STRING) {
        LiveGeneralRegisterSet saveRegs;
        saveRegs.add(obj);

@@ -970,35 +970,10 @@ BaselineCacheIRCompiler::emitStoreTypedObjectReferenceProperty()
    masm.addPtr(offsetAddr, scratch1);
    Address dest(scratch1, 0);

    switch (type) {
      case ReferenceTypeDescr::TYPE_ANY:
        EmitPreBarrier(masm, dest, MIRType::Value);
        masm.storeValue(val, dest);
        break;

      case ReferenceTypeDescr::TYPE_OBJECT: {
        EmitPreBarrier(masm, dest, MIRType::Object);
        Label isNull, done;
        masm.branchTestObject(Assembler::NotEqual, val, &isNull);
        masm.unboxObject(val, scratch2);
        masm.storePtr(scratch2, dest);
        masm.jump(&done);
        masm.bind(&isNull);
        masm.storePtr(ImmWord(0), dest);
        masm.bind(&done);
        break;
      }

      case ReferenceTypeDescr::TYPE_STRING:
        EmitPreBarrier(masm, dest, MIRType::String);
        masm.unboxString(val, scratch2);
        masm.storePtr(scratch2, dest);
        break;
    }
    emitStoreTypedObjectReferenceProp(val, type, dest, scratch2);

    if (type != ReferenceTypeDescr::TYPE_STRING)
        BaselineEmitPostWriteBarrierSlot(masm, obj, val, scratch1, LiveGeneralRegisterSet(), cx_);

    return true;
}
@@ -1303,7 +1278,7 @@ BaselineCacheIRCompiler::emitStoreUnboxedArrayElement()
    // Note that the storeUnboxedProperty call here is infallible, as the
    // IR emitter is responsible for guarding on |val|'s type.
    BaseIndex element(scratch, index, ScaleFromElemWidth(UnboxedTypeSize(elementType)));
    EmitUnboxedPreBarrierForBaseline(masm, element, elementType);
    EmitICUnboxedPreBarrier(masm, element, elementType);
    masm.storeUnboxedProperty(element, elementType,
                              ConstantOrRegister(TypedOrValueRegister(val)),
                              /* failure = */ nullptr);

@@ -1370,13 +1345,13 @@ BaselineCacheIRCompiler::emitStoreUnboxedArrayElementHole()
    masm.add32(Imm32(1), length);
    masm.bind(&skipIncrementLength);

    // Skip EmitUnboxedPreBarrierForBaseline as the memory is uninitialized.
    // Skip EmitICUnboxedPreBarrier as the memory is uninitialized.
    masm.jump(&doStore);

    masm.bind(&inBounds);

    BaseIndex element(scratch, index, ScaleFromElemWidth(UnboxedTypeSize(elementType)));
    EmitUnboxedPreBarrierForBaseline(masm, element, elementType);
    EmitICUnboxedPreBarrier(masm, element, elementType);

    // Note that the storeUnboxedProperty call here is infallible, as the
    // IR emitter is responsible for guarding on |val|'s type.
@@ -903,6 +903,15 @@ LoadTypedThingLength(MacroAssembler& masm, TypedThingLayout layout, Register obj
    }
}

static void
SetUpdateStubData(ICCacheIR_Updated* stub, const PropertyTypeCheckInfo* info)
{
    if (info->isSet()) {
        stub->updateStubGroup() = info->group();
        stub->updateStubId() = info->id();
    }
}

static bool
DoSetElemFallback(JSContext* cx, BaselineFrame* frame, ICSetElem_Fallback* stub_, Value* stack,
                  HandleValue objv, HandleValue index, HandleValue rhs)
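The new SetUpdateStubData helper centralizes bookkeeping that DoSetElemFallback and DoSetPropFallback previously open-coded. Condensed from the hunks that follow (not runnable on its own), the call-site change is roughly:

// before
if (gen.needUpdateStub()) {
    newStub->toCacheIR_Updated()->updateStubGroup() = gen.updateStubGroup();
    newStub->toCacheIR_Updated()->updateStubId() = gen.updateStubId();
}

// after
SetUpdateStubData(newStub->toCacheIR_Updated(), gen.typeCheckInfo());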
@@ -953,10 +962,7 @@ DoSetElemFallback(JSContext* cx, BaselineFrame* frame, ICSetElem_Fallback* stub_
        JitSpew(JitSpew_BaselineIC, " Attached CacheIR stub");
        attached = true;

        if (gen.needUpdateStub()) {
            newStub->toCacheIR_Updated()->updateStubGroup() = gen.updateStubGroup();
            newStub->toCacheIR_Updated()->updateStubId() = gen.updateStubId();
        }
        SetUpdateStubData(newStub->toCacheIR_Updated(), gen.typeCheckInfo());

        if (gen.shouldNotePreliminaryObjectStub())
            newStub->toCacheIR_Updated()->notePreliminaryObject();

@@ -1019,8 +1025,7 @@ DoSetElemFallback(JSContext* cx, BaselineFrame* frame, ICSetElem_Fallback* stub_
        if (newStub) {
            JitSpew(JitSpew_BaselineIC, " Attached CacheIR stub");
            attached = true;
            newStub->toCacheIR_Updated()->updateStubGroup() = gen.updateStubGroup();
            newStub->toCacheIR_Updated()->updateStubId() = gen.updateStubId();
            SetUpdateStubData(newStub->toCacheIR_Updated(), gen.typeCheckInfo());
            return true;
        }
    } else {
@@ -1084,8 +1089,9 @@ BaselineScript::noteHasDenseAdd(uint32_t pcOffset)
        stub->toSetElem_Fallback()->noteHasDenseAdd();
}

template <typename T>
void
EmitUnboxedPreBarrierForBaseline(MacroAssembler &masm, const BaseIndex& address, JSValueType type)
EmitICUnboxedPreBarrier(MacroAssembler& masm, const T& address, JSValueType type)
{
    if (type == JSVAL_TYPE_OBJECT)
        EmitPreBarrier(masm, address, MIRType::Object);

@@ -1095,6 +1101,12 @@ EmitUnboxedPreBarrierForBaseline(MacroAssembler &masm, const BaseIndex& address,
        MOZ_ASSERT(!UnboxedTypeNeedsPreBarrier(type));
}

template void
EmitICUnboxedPreBarrier(MacroAssembler& masm, const Address& address, JSValueType type);

template void
EmitICUnboxedPreBarrier(MacroAssembler& masm, const BaseIndex& address, JSValueType type);

template <typename T>
void
BaselineStoreToTypedArray(JSContext* cx, MacroAssembler& masm, Scalar::Type type,
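The hunk above turns the Baseline-only helper into a template shared with Ion, and the two `template void` lines are explicit instantiations so the definition can stay in the .cpp file while callers see only a header declaration. A self-contained sketch of that pattern with hypothetical stand-in types (not the SpiderMonkey classes):

// sketch_explicit_instantiation.cpp
#include <cstdio>

struct Address   { int offset; };
struct BaseIndex { int base; int index; };

// Single templated definition; only a declaration would live in the header.
template <typename T>
void EmitBarrier(const T& addr)
{
    std::puts("pre-barrier emitted");
    (void)addr;
}

// Explicit instantiations: other translation units that only see the header
// declaration can still link against the Address and BaseIndex flavours.
template void EmitBarrier<Address>(const Address&);
template void EmitBarrier<BaseIndex>(const BaseIndex&);

int main()
{
    EmitBarrier(Address{8});
    EmitBarrier(BaseIndex{0, 1});
}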
@@ -1510,10 +1522,7 @@ DoSetPropFallback(JSContext* cx, BaselineFrame* frame, ICSetProp_Fallback* stub_
        JitSpew(JitSpew_BaselineIC, " Attached CacheIR stub");
        attached = true;

        if (gen.needUpdateStub()) {
            newStub->toCacheIR_Updated()->updateStubGroup() = gen.updateStubGroup();
            newStub->toCacheIR_Updated()->updateStubId() = gen.updateStubId();
        }
        SetUpdateStubData(newStub->toCacheIR_Updated(), gen.typeCheckInfo());

        if (gen.shouldNotePreliminaryObjectStub())
            newStub->toCacheIR_Updated()->notePreliminaryObject();

@@ -1578,8 +1587,7 @@ DoSetPropFallback(JSContext* cx, BaselineFrame* frame, ICSetProp_Fallback* stub_
        if (newStub) {
            JitSpew(JitSpew_BaselineIC, " Attached CacheIR stub");
            attached = true;
            newStub->toCacheIR_Updated()->updateStubGroup() = gen.updateStubGroup();
            newStub->toCacheIR_Updated()->updateStubId() = gen.updateStubId();
            SetUpdateStubData(newStub->toCacheIR_Updated(), gen.typeCheckInfo());
        }
    } else {
        gen.trackNotAttached();
@@ -1711,8 +1711,8 @@ IsCacheableDOMProxy(JSObject* obj)

struct IonOsrTempData;

void EmitUnboxedPreBarrierForBaseline(MacroAssembler &masm, const BaseIndex& address,
                                      JSValueType type);
template <typename T>
void EmitICUnboxedPreBarrier(MacroAssembler &masm, const T& address, JSValueType type);

// Write an arbitrary value to a typed array or typed object address at dest.
// If the value could not be converted to the appropriate format, jump to
@@ -1925,17 +1925,17 @@ IRGenerator::maybeGuardInt32Index(const Value& index, ValOperandId indexId,

SetPropIRGenerator::SetPropIRGenerator(JSContext* cx, HandleScript script, jsbytecode* pc,
                                       CacheKind cacheKind, bool* isTemporarilyUnoptimizable,
                                       HandleValue lhsVal, HandleValue idVal, HandleValue rhsVal)
                                       HandleValue lhsVal, HandleValue idVal, HandleValue rhsVal,
                                       bool needsTypeBarrier, bool maybeHasExtraIndexedProps)
  : IRGenerator(cx, script, pc, cacheKind),
    lhsVal_(lhsVal),
    idVal_(idVal),
    rhsVal_(rhsVal),
    isTemporarilyUnoptimizable_(isTemporarilyUnoptimizable),
    typeCheckInfo_(cx, needsTypeBarrier),
    preliminaryObjectAction_(PreliminaryObjectAction::None),
    attachedTypedArrayOOBStub_(false),
    updateStubGroup_(cx),
    updateStubId_(cx, JSID_EMPTY),
    needUpdateStub_(false)
    maybeHasExtraIndexedProps_(maybeHasExtraIndexedProps)
{}

bool
@@ -2060,12 +2060,12 @@ SetPropIRGenerator::tryAttachNativeSetSlot(HandleObject obj, ObjOperandId objId,

    maybeEmitIdGuard(id);

    // For Baseline, we have to guard on both the shape and group, because the
    // type update IC applies to a single group. When we port the Ion IC, we can
    // do a bit better and avoid the group guard if we don't have to guard on
    // the property types.
    // If we need a property type barrier (always in Baseline, sometimes in
    // Ion), guard on both the shape and the group. If Ion knows the property
    // types match, we don't need the group guard.
    NativeObject* nobj = &obj->as<NativeObject>();
    writer.guardGroup(objId, nobj->group());
    if (typeCheckInfo_.needsTypeBarrier())
        writer.guardGroup(objId, nobj->group());
    writer.guardShape(objId, nobj->lastProperty());

    if (IsPreliminaryObject(obj))

@@ -2073,7 +2073,7 @@ SetPropIRGenerator::tryAttachNativeSetSlot(HandleObject obj, ObjOperandId objId,
    else
        preliminaryObjectAction_ = PreliminaryObjectAction::Unlink;

    setUpdateStubInfo(nobj->group(), id);
    typeCheckInfo_.set(nobj->group(), id);
    EmitStoreSlotAndReturn(writer, objId, nobj, propShape, rhsId);

    trackAttached("NativeSlot");
@@ -2102,7 +2102,7 @@ SetPropIRGenerator::tryAttachUnboxedExpandoSetSlot(HandleObject obj, ObjOperandI

    // Property types must be added to the unboxed object's group, not the
    // expando's group (it has unknown properties).
    setUpdateStubInfo(obj->group(), id);
    typeCheckInfo_.set(obj->group(), id);
    EmitStoreSlotAndReturn(writer, expandoId, expando, propShape, rhsId);

    trackAttached("UnboxedExpando");

@@ -2139,7 +2139,7 @@ SetPropIRGenerator::tryAttachUnboxedProperty(HandleObject obj, ObjOperandId objI
                               rhsId);
    writer.returnFromIC();

    setUpdateStubInfo(obj->group(), id);
    typeCheckInfo_.set(obj->group(), id);
    preliminaryObjectAction_ = PreliminaryObjectAction::Unlink;

    trackAttached("Unboxed");

@@ -2176,7 +2176,7 @@ SetPropIRGenerator::tryAttachTypedObjectProperty(HandleObject obj, ObjOperandId
    writer.guardShape(objId, obj->as<TypedObject>().shape());
    writer.guardGroup(objId, obj->group());

    setUpdateStubInfo(obj->group(), id);
    typeCheckInfo_.set(obj->group(), id);

    // Scalar types can always be stored without a type update stub.
    if (fieldDescr->is<ScalarTypeDescr>()) {
@@ -2348,14 +2348,15 @@ SetPropIRGenerator::tryAttachSetDenseElement(HandleObject obj, ObjOperandId objI
    if (!nobj->containsDenseElement(index) || nobj->getElementsHeader()->isFrozen())
        return false;

    writer.guardGroup(objId, nobj->group());
    if (typeCheckInfo_.needsTypeBarrier())
        writer.guardGroup(objId, nobj->group());
    writer.guardShape(objId, nobj->shape());

    writer.storeDenseElement(objId, indexId, rhsId);
    writer.returnFromIC();

    // Type inference uses JSID_VOID for the element types.
    setUpdateStubInfo(nobj->group(), JSID_VOID);
    typeCheckInfo_.set(nobj->group(), JSID_VOID);

    trackAttached("SetDenseElement");
    return true;

@@ -2460,18 +2461,20 @@ SetPropIRGenerator::tryAttachSetDenseElementHole(HandleObject obj, ObjOperandId
    if (!CanAttachAddElement(nobj, IsPropertyInitOp(op)))
        return false;

    writer.guardGroup(objId, nobj->group());
    if (typeCheckInfo_.needsTypeBarrier())
        writer.guardGroup(objId, nobj->group());
    writer.guardShape(objId, nobj->shape());

    // Also shape guard the proto chain, unless this is an INITELEM.
    if (IsPropertySetOp(op))
    // Also shape guard the proto chain, unless this is an INITELEM or we know
    // the proto chain has no indexed props.
    if (IsPropertySetOp(op) && maybeHasExtraIndexedProps_)
        ShapeGuardProtoChain(writer, obj, objId);

    writer.storeDenseElementHole(objId, indexId, rhsId, isAdd);
    writer.returnFromIC();

    // Type inference uses JSID_VOID for the element types.
    setUpdateStubInfo(nobj->group(), JSID_VOID);
    typeCheckInfo_.set(nobj->group(), JSID_VOID);

    trackAttached(isAdd ? "AddDenseElement" : "StoreDenseElementHole");
    return true;
@@ -2500,7 +2503,7 @@ SetPropIRGenerator::tryAttachSetUnboxedArrayElement(HandleObject obj, ObjOperand
    writer.returnFromIC();

    // Type inference uses JSID_VOID for the element types.
    setUpdateStubInfo(obj->group(), JSID_VOID);
    typeCheckInfo_.set(obj->group(), JSID_VOID);

    trackAttached("SetUnboxedArrayElement");
    return true;

@@ -2593,7 +2596,7 @@ SetPropIRGenerator::tryAttachSetUnboxedArrayElementHole(HandleObject obj, ObjOpe
    writer.returnFromIC();

    // Type inference uses JSID_VOID for the element types.
    setUpdateStubInfo(aobj->group(), JSID_VOID);
    typeCheckInfo_.set(aobj->group(), JSID_VOID);

    trackAttached("StoreUnboxedArrayElementHole");
    return true;

@@ -2883,6 +2886,6 @@ SetPropIRGenerator::tryAttachAddSlotStub(HandleObjectGroup oldGroup, HandleShape
    }
    writer.returnFromIC();

    setUpdateStubInfo(oldGroup, id);
    typeCheckInfo_.set(oldGroup, id);
    return true;
}
@@ -125,6 +125,9 @@ class TypedOperandId : public OperandId
    MOZ_IMPLICIT TypedOperandId(Int32OperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_INT32)
    {}
    TypedOperandId(ValOperandId val, JSValueType type)
      : OperandId(val.id()), type_(type)
    {}

    JSValueType type() const { return type_; }
};
@@ -1053,6 +1056,36 @@ class MOZ_RAII GetNameIRGenerator : public IRGenerator
    bool tryAttachStub();
};

// Information used by SetProp/SetElem stubs to check/update property types.
class MOZ_RAII PropertyTypeCheckInfo
{
    RootedObjectGroup group_;
    RootedId id_;
    bool needsTypeBarrier_;

    PropertyTypeCheckInfo(const PropertyTypeCheckInfo&) = delete;
    void operator=(const PropertyTypeCheckInfo&) = delete;

  public:
    PropertyTypeCheckInfo(JSContext* cx, bool needsTypeBarrier)
      : group_(cx), id_(cx), needsTypeBarrier_(needsTypeBarrier)
    {}

    bool needsTypeBarrier() const { return needsTypeBarrier_; }
    bool isSet() const { return group_ != nullptr; }
    ObjectGroup* group() const { MOZ_ASSERT(isSet()); return group_; }
    jsid id() const { MOZ_ASSERT(isSet()); return id_; }

    void set(ObjectGroup* group, jsid id) {
        MOZ_ASSERT(!group_);
        MOZ_ASSERT(group);
        if (needsTypeBarrier_) {
            group_ = group;
            id_ = id;
        }
    }
};
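A self-contained toy mirroring the PropertyTypeCheckInfo contract above, with hypothetical stand-in types: set() records the (group, id) pair only when a type barrier is needed, and isSet() is what later decides whether a Baseline update stub gets that data via SetUpdateStubData.

// toy_type_check_info.cpp -- stand-ins, not the SpiderMonkey classes.
#include <cassert>
#include <cstdio>

struct Group { const char* name; };

class TypeCheckInfo
{
    Group* group_ = nullptr;
    int id_ = 0;
    bool needsTypeBarrier_;

  public:
    explicit TypeCheckInfo(bool needsTypeBarrier) : needsTypeBarrier_(needsTypeBarrier) {}

    bool isSet() const { return group_ != nullptr; }

    // Mirrors PropertyTypeCheckInfo::set: only record when a barrier is needed.
    void set(Group* group, int id) {
        assert(!group_ && group);
        if (needsTypeBarrier_) {
            group_ = group;
            id_ = id;
        }
    }
};

int main()
{
    Group g{"plain-object-group"};

    TypeCheckInfo barriered(true), unbarriered(false);
    barriered.set(&g, 1);
    unbarriered.set(&g, 1);

    // Only the barriered variant ends up "set", so only it would feed an
    // update stub.
    std::printf("barriered: %d, unbarriered: %d\n", barriered.isSet(), unbarriered.isSet());
}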

// SetPropIRGenerator generates CacheIR for a SetProp IC.
class MOZ_RAII SetPropIRGenerator : public IRGenerator
{
@ -1060,22 +1093,13 @@ class MOZ_RAII SetPropIRGenerator : public IRGenerator
|
|||
HandleValue idVal_;
|
||||
HandleValue rhsVal_;
|
||||
bool* isTemporarilyUnoptimizable_;
|
||||
PropertyTypeCheckInfo typeCheckInfo_;
|
||||
|
||||
enum class PreliminaryObjectAction { None, Unlink, NotePreliminary };
|
||||
PreliminaryObjectAction preliminaryObjectAction_;
|
||||
bool attachedTypedArrayOOBStub_;
|
||||
|
||||
// If Baseline needs an update stub, this contains information to create it.
|
||||
RootedObjectGroup updateStubGroup_;
|
||||
RootedId updateStubId_;
|
||||
bool needUpdateStub_;
|
||||
|
||||
void setUpdateStubInfo(ObjectGroup* group, jsid id) {
|
||||
MOZ_ASSERT(!needUpdateStub_);
|
||||
needUpdateStub_ = true;
|
||||
updateStubGroup_ = group;
|
||||
updateStubId_ = id;
|
||||
}
|
||||
bool maybeHasExtraIndexedProps_;
|
||||
|
||||
ValOperandId setElemKeyValueId() const {
|
||||
MOZ_ASSERT(cacheKind_ == CacheKind::SetElem);
|
||||
|
@ -1131,7 +1155,8 @@ class MOZ_RAII SetPropIRGenerator : public IRGenerator
|
|||
public:
|
||||
SetPropIRGenerator(JSContext* cx, HandleScript script, jsbytecode* pc, CacheKind cacheKind,
|
||||
bool* isTemporarilyUnoptimizable, HandleValue lhsVal, HandleValue idVal,
|
||||
HandleValue rhsVal);
|
||||
HandleValue rhsVal, bool needsTypeBarrier = true,
|
||||
bool maybeHasExtraIndexedProps = true);
|
||||
|
||||
bool tryAttachStub();
|
||||
bool tryAttachAddSlotStub(HandleObjectGroup oldGroup, HandleShape oldShape);
|
||||
|
@ -1144,20 +1169,13 @@ class MOZ_RAII SetPropIRGenerator : public IRGenerator
|
|||
return preliminaryObjectAction_ == PreliminaryObjectAction::NotePreliminary;
|
||||
}
|
||||
|
||||
bool needUpdateStub() const { return needUpdateStub_; }
|
||||
const PropertyTypeCheckInfo* typeCheckInfo() const {
|
||||
return &typeCheckInfo_;
|
||||
}
|
||||
|
||||
bool attachedTypedArrayOOBStub() const {
|
||||
return attachedTypedArrayOOBStub_;
|
||||
}
|
||||
|
||||
ObjectGroup* updateStubGroup() const {
|
||||
MOZ_ASSERT(updateStubGroup_);
|
||||
return updateStubGroup_;
|
||||
}
|
||||
jsid updateStubId() const {
|
||||
MOZ_ASSERT(needUpdateStub_);
|
||||
return updateStubId_;
|
||||
}
|
||||
};
|
||||
|
||||
// InIRGenerator generates CacheIR for a In IC.
|
||||
|
|
|
@ -7,6 +7,7 @@
|
|||
#include "jit/CacheIRCompiler.h"
|
||||
|
||||
#include "jit/IonIC.h"
|
||||
#include "jit/SharedICHelpers.h"
|
||||
|
||||
#include "jscompartmentinlines.h"
|
||||
|
||||
|
@ -49,9 +50,12 @@ CacheRegisterAllocator::useValueRegister(MacroAssembler& masm, ValOperandId op)
|
|||
}
|
||||
|
||||
case OperandLocation::PayloadReg: {
|
||||
// Temporarily add the payload register to currentOpRegs_ so
|
||||
// allocateValueRegister will stay away from it.
|
||||
currentOpRegs_.add(loc.payloadReg());
|
||||
ValueOperand reg = allocateValueRegister(masm);
|
||||
masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
|
||||
MOZ_ASSERT(!currentOpRegs_.has(loc.payloadReg()), "Payload register shouldn't be in use");
|
||||
currentOpRegs_.take(loc.payloadReg());
|
||||
availableRegs_.add(loc.payloadReg());
|
||||
loc.setValueReg(reg);
|
||||
return reg;
|
||||
|
@ -64,6 +68,14 @@ CacheRegisterAllocator::useValueRegister(MacroAssembler& masm, ValOperandId op)
|
|||
loc.setValueReg(reg);
|
||||
return reg;
|
||||
}
|
||||
|
||||
case OperandLocation::DoubleReg: {
|
||||
ValueOperand reg = allocateValueRegister(masm);
|
||||
masm.boxDouble(loc.doubleReg(), reg);
|
||||
loc.setValueReg(reg);
|
||||
return reg;
|
||||
}
|
||||
|
||||
case OperandLocation::Uninitialized:
|
||||
break;
|
||||
}
|
||||
|
@ -104,6 +116,9 @@ CacheRegisterAllocator::useFixedValueRegister(MacroAssembler& masm, ValOperandId
|
|||
popPayload(masm, &loc, reg.scratchReg());
|
||||
masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
|
||||
break;
|
||||
case OperandLocation::DoubleReg:
|
||||
masm.boxDouble(loc.doubleReg(), reg);
|
||||
break;
|
||||
case OperandLocation::Uninitialized:
|
||||
MOZ_CRASH();
|
||||
}
|
||||
|
@ -179,6 +194,37 @@ CacheRegisterAllocator::useRegister(MacroAssembler& masm, TypedOperandId typedId
|
|||
return reg;
|
||||
}
|
||||
|
||||
case OperandLocation::DoubleReg:
|
||||
case OperandLocation::Uninitialized:
|
||||
break;
|
||||
}
|
||||
|
||||
MOZ_CRASH();
|
||||
}
|
||||
|
||||
ConstantOrRegister
|
||||
CacheRegisterAllocator::useConstantOrRegister(MacroAssembler& masm, ValOperandId val)
|
||||
{
|
||||
OperandLocation& loc = operandLocations_[val.id()];
|
||||
switch (loc.kind()) {
|
||||
case OperandLocation::Constant:
|
||||
return loc.constant();
|
||||
|
||||
case OperandLocation::PayloadReg:
|
||||
case OperandLocation::PayloadStack: {
|
||||
JSValueType payloadType = loc.payloadType();
|
||||
Register reg = useRegister(masm, TypedOperandId(val, payloadType));
|
||||
return TypedOrValueRegister(MIRTypeFromValueType(payloadType), AnyRegister(reg));
|
||||
}
|
||||
|
||||
case OperandLocation::ValueReg:
|
||||
case OperandLocation::ValueStack:
|
||||
case OperandLocation::BaselineFrame:
|
||||
return TypedOrValueRegister(useValueRegister(masm, val));
|
||||
|
||||
case OperandLocation::DoubleReg:
|
||||
return TypedOrValueRegister(MIRType::Double, AnyRegister(loc.doubleReg()));
|
||||
|
||||
case OperandLocation::Uninitialized:
|
||||
break;
|
||||
}
|
||||
|
@ -231,6 +277,7 @@ CacheRegisterAllocator::freeDeadOperandRegisters()
|
|||
case OperandLocation::ValueStack:
|
||||
case OperandLocation::BaselineFrame:
|
||||
case OperandLocation::Constant:
|
||||
case OperandLocation::DoubleReg:
|
||||
break;
|
||||
}
|
||||
loc.setUninitialized();
|
||||
|
@@ -389,6 +436,42 @@ CacheRegisterAllocator::initAvailableRegsAfterSpill()
                                GeneralRegisterSet::Not(inputRegisterSet()));
}

void
CacheRegisterAllocator::fixupAliasedInputs(MacroAssembler& masm)
{
    // If IC inputs alias each other, make sure they are stored in different
    // locations so we don't have to deal with this complexity in the rest of
    // the allocator.
    //
    // Note that this can happen in IonMonkey with something like |o.foo = o|
    // or |o[i] = i|.

    size_t numInputs = writer_.numInputOperands();
    MOZ_ASSERT(origInputLocations_.length() == numInputs);

    for (size_t i = 1; i < numInputs; i++) {
        OperandLocation& loc1 = operandLocations_[i];
        if (!loc1.isInRegister())
            continue;

        for (size_t j = 0; j < i; j++) {
            OperandLocation& loc2 = operandLocations_[j];
            if (!loc1.aliasesReg(loc2))
                continue;

            if (loc1.kind() == OperandLocation::ValueReg) {
                MOZ_ASSERT_IF(loc2.kind() == OperandLocation::ValueReg,
                              loc1 == loc2);
                spillOperandToStack(masm, &loc1);
                break;
            }

            MOZ_ASSERT(loc1.kind() == OperandLocation::PayloadReg);
            spillOperandToStack(masm, &loc2);
        }
    }
}
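A standalone toy of the fixup pass above, under a hypothetical data model: walk the IC inputs and, whenever two of them were handed the same register, push the later one to a stack slot so every input ends up in a distinct location. The real code decides which of the two operands to spill based on their ValueReg/PayloadReg kinds; the toy always spills the later one.

// toy_fixup_aliased_inputs.cpp
#include <cstdio>
#include <vector>

struct Loc { bool inReg; int reg; int stackSlot; };

int main()
{
    // Inputs 0 and 2 alias register 3, as can happen when the same value feeds
    // two IC inputs (the |o[i] = i| case mentioned in the comment above).
    std::vector<Loc> locs = { {true, 3, -1}, {true, 5, -1}, {true, 3, -1} };

    int nextSlot = 0;
    for (size_t i = 1; i < locs.size(); i++) {
        if (!locs[i].inReg)
            continue;
        for (size_t j = 0; j < i; j++) {
            if (locs[j].inReg && locs[j].reg == locs[i].reg) {
                locs[i] = Loc{false, -1, nextSlot++};  // "spillOperandToStack"
                break;
            }
        }
    }

    for (size_t i = 0; i < locs.size(); i++) {
        if (locs[i].inReg)
            std::printf("input %zu: reg r%d\n", i, locs[i].reg);
        else
            std::printf("input %zu: stack slot %d\n", i, locs[i].stackSlot);
    }
}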

GeneralRegisterSet
CacheRegisterAllocator::inputRegisterSet() const
{
@ -401,15 +484,16 @@ CacheRegisterAllocator::inputRegisterSet() const
|
|||
|
||||
switch (loc.kind()) {
|
||||
case OperandLocation::PayloadReg:
|
||||
result.add(loc.payloadReg());
|
||||
result.addUnchecked(loc.payloadReg());
|
||||
continue;
|
||||
case OperandLocation::ValueReg:
|
||||
result.add(loc.valueReg());
|
||||
result.addUnchecked(loc.valueReg());
|
||||
continue;
|
||||
case OperandLocation::PayloadStack:
|
||||
case OperandLocation::ValueStack:
|
||||
case OperandLocation::BaselineFrame:
|
||||
case OperandLocation::Constant:
|
||||
case OperandLocation::DoubleReg:
|
||||
continue;
|
||||
case OperandLocation::Uninitialized:
|
||||
break;
|
||||
|
@ -440,6 +524,9 @@ CacheRegisterAllocator::knownType(ValOperandId val) const
|
|||
? JSVAL_TYPE_DOUBLE
|
||||
: loc.constant().extractNonDoubleType();
|
||||
|
||||
case OperandLocation::DoubleReg:
|
||||
return JSVAL_TYPE_DOUBLE;
|
||||
|
||||
case OperandLocation::Uninitialized:
|
||||
break;
|
||||
}
|
||||
|
@ -452,8 +539,10 @@ CacheRegisterAllocator::initInputLocation(size_t i, const TypedOrValueRegister&
|
|||
{
|
||||
if (reg.hasValue()) {
|
||||
initInputLocation(i, reg.valueReg());
|
||||
} else if (reg.typedReg().isFloat()) {
|
||||
MOZ_ASSERT(reg.type() == MIRType::Double);
|
||||
initInputLocation(i, reg.typedReg().fpu());
|
||||
} else {
|
||||
MOZ_ASSERT(!reg.typedReg().isFloat());
|
||||
initInputLocation(i, reg.typedReg().gpr(), ValueTypeFromMIRType(reg.type()));
|
||||
}
|
||||
}
|
||||
|
@ -566,6 +655,7 @@ OperandLocation::aliasesReg(const OperandLocation& other) const
|
|||
case ValueStack:
|
||||
case BaselineFrame:
|
||||
case Constant:
|
||||
case DoubleReg:
|
||||
return false;
|
||||
case Uninitialized:
|
||||
break;
|
||||
|
@ -617,6 +707,7 @@ CacheRegisterAllocator::restoreInputState(MacroAssembler& masm, bool shouldDisca
|
|||
continue;
|
||||
case OperandLocation::Constant:
|
||||
case OperandLocation::BaselineFrame:
|
||||
case OperandLocation::DoubleReg:
|
||||
case OperandLocation::Uninitialized:
|
||||
break;
|
||||
}
|
||||
|
@ -645,11 +736,13 @@ CacheRegisterAllocator::restoreInputState(MacroAssembler& masm, bool shouldDisca
|
|||
continue;
|
||||
case OperandLocation::Constant:
|
||||
case OperandLocation::BaselineFrame:
|
||||
case OperandLocation::DoubleReg:
|
||||
case OperandLocation::Uninitialized:
|
||||
break;
|
||||
}
|
||||
} else if (dest.kind() == OperandLocation::Constant ||
|
||||
dest.kind() == OperandLocation::BaselineFrame)
|
||||
dest.kind() == OperandLocation::BaselineFrame ||
|
||||
dest.kind() == OperandLocation::DoubleReg)
|
||||
{
|
||||
// Nothing to do.
|
||||
continue;
|
||||
|
@ -984,6 +1077,8 @@ OperandLocation::operator==(const OperandLocation& other) const
|
|||
return baselineFrameSlot() == other.baselineFrameSlot();
|
||||
case Constant:
|
||||
return constant() == other.constant();
|
||||
case DoubleReg:
|
||||
return doubleReg() == other.doubleReg();
|
||||
}
|
||||
|
||||
MOZ_CRASH("Invalid OperandLocation kind");
|
||||
|
@ -1403,7 +1498,6 @@ CacheIRCompiler::emitGuardAndGetIndexFromString()
|
|||
AllocatableRegisterSet regs(RegisterSet::Volatile());
|
||||
LiveRegisterSet save(regs.asLiveSet());
|
||||
masm.PushRegsInMask(save);
|
||||
regs.takeUnchecked(str);
|
||||
|
||||
masm.setupUnalignedABICall(output);
|
||||
masm.passABIArg(str);
|
||||
|
@@ -1951,6 +2045,38 @@ CacheIRCompiler::emitLoadTypedObjectResultShared(const Address& fieldAddr, Regis
    }
}

void
CacheIRCompiler::emitStoreTypedObjectReferenceProp(ValueOperand val, ReferenceTypeDescr::Type type,
                                                   const Address& dest, Register scratch)
{
    switch (type) {
      case ReferenceTypeDescr::TYPE_ANY:
        EmitPreBarrier(masm, dest, MIRType::Value);
        masm.storeValue(val, dest);
        break;

      case ReferenceTypeDescr::TYPE_OBJECT: {
        EmitPreBarrier(masm, dest, MIRType::Object);
        Label isNull, done;
        masm.branchTestObject(Assembler::NotEqual, val, &isNull);
        masm.unboxObject(val, scratch);
        masm.storePtr(scratch, dest);
        masm.jump(&done);
        masm.bind(&isNull);
        masm.storePtr(ImmWord(0), dest);
        masm.bind(&done);
        break;
      }

      case ReferenceTypeDescr::TYPE_STRING:
        EmitPreBarrier(masm, dest, MIRType::String);
        masm.unboxString(val, scratch);
        masm.storePtr(scratch, dest);
        break;
    }
}
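The switch above is the code removed earlier in this commit from BaselineCacheIRCompiler::emitStoreTypedObjectReferenceProperty, now shared by Baseline and Ion. A standalone toy of its TYPE_OBJECT branch, with plain C++ stand-ins for the masm calls: store the unboxed pointer when the value is an object, otherwise store a null pointer.

// toy_store_object_or_null.cpp (requires C++17)
#include <cstdio>
#include <variant>

using Value = std::variant<std::monostate, int*>;   // "non-object" or object pointer

static void StoreObjectOrNull(const Value& val, int** dest)
{
    if (auto pp = std::get_if<int*>(&val))
        *dest = *pp;        // masm.unboxObject + masm.storePtr
    else
        *dest = nullptr;    // masm.storePtr(ImmWord(0), dest)
}

int main()
{
    int obj = 42;
    int* slot = nullptr;

    StoreObjectOrNull(Value{&obj}, &slot);
    std::printf("slot points to %d\n", slot ? *slot : -1);

    StoreObjectOrNull(Value{}, &slot);
    std::printf("slot is %s\n", slot ? "non-null" : "null");
}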


bool
CacheIRCompiler::emitWrapResult()
{
@ -75,6 +75,7 @@ class OperandLocation
|
|||
enum Kind {
|
||||
Uninitialized = 0,
|
||||
PayloadReg,
|
||||
DoubleReg,
|
||||
ValueReg,
|
||||
PayloadStack,
|
||||
ValueStack,
|
||||
|
@ -90,6 +91,7 @@ class OperandLocation
|
|||
Register reg;
|
||||
JSValueType type;
|
||||
} payloadReg;
|
||||
FloatRegister doubleReg;
|
||||
ValueOperand valueReg;
|
||||
struct {
|
||||
uint32_t stackPushed;
|
||||
|
@ -120,6 +122,10 @@ class OperandLocation
|
|||
MOZ_ASSERT(kind_ == PayloadReg);
|
||||
return data_.payloadReg.reg;
|
||||
}
|
||||
FloatRegister doubleReg() const {
|
||||
MOZ_ASSERT(kind_ == DoubleReg);
|
||||
return data_.doubleReg;
|
||||
}
|
||||
uint32_t payloadStack() const {
|
||||
MOZ_ASSERT(kind_ == PayloadStack);
|
||||
return data_.payloadStack.stackPushed;
|
||||
|
@ -148,6 +154,10 @@ class OperandLocation
|
|||
data_.payloadReg.reg = reg;
|
||||
data_.payloadReg.type = type;
|
||||
}
|
||||
void setDoubleReg(FloatRegister reg) {
|
||||
kind_ = DoubleReg;
|
||||
data_.doubleReg = reg;
|
||||
}
|
||||
void setValueReg(ValueOperand reg) {
|
||||
kind_ = ValueReg;
|
||||
data_.valueReg = reg;
|
||||
|
@ -294,6 +304,8 @@ class MOZ_RAII CacheRegisterAllocator
|
|||
}
|
||||
void initAvailableRegsAfterSpill();
|
||||
|
||||
void fixupAliasedInputs(MacroAssembler& masm);
|
||||
|
||||
OperandLocation operandLocation(size_t i) const {
|
||||
return operandLocations_[i];
|
||||
}
|
||||
|
@ -312,6 +324,10 @@ class MOZ_RAII CacheRegisterAllocator
|
|||
origInputLocations_[i].setPayloadReg(reg, type);
|
||||
operandLocations_[i].setPayloadReg(reg, type);
|
||||
}
|
||||
void initInputLocation(size_t i, FloatRegister reg) {
|
||||
origInputLocations_[i].setDoubleReg(reg);
|
||||
operandLocations_[i].setDoubleReg(reg);
|
||||
}
|
||||
void initInputLocation(size_t i, const Value& v) {
|
||||
origInputLocations_[i].setConstant(v);
|
||||
operandLocations_[i].setConstant(v);
|
||||
|
@ -381,6 +397,8 @@ class MOZ_RAII CacheRegisterAllocator
|
|||
ValueOperand useFixedValueRegister(MacroAssembler& masm, ValOperandId valId, ValueOperand reg);
|
||||
Register useRegister(MacroAssembler& masm, TypedOperandId typedId);
|
||||
|
||||
ConstantOrRegister useConstantOrRegister(MacroAssembler& masm, ValOperandId val);
|
||||
|
||||
// Allocates an output register for the given operand.
|
||||
Register defineRegister(MacroAssembler& masm, TypedOperandId typedId);
|
||||
ValueOperand defineValueRegister(MacroAssembler& masm, ValOperandId val);
|
||||
|
@ -547,6 +565,9 @@ class MOZ_RAII CacheIRCompiler
|
|||
TypedThingLayout layout, uint32_t typeDescr,
|
||||
const AutoOutputRegister& output);
|
||||
|
||||
void emitStoreTypedObjectReferenceProp(ValueOperand val, ReferenceTypeDescr::Type type,
|
||||
const Address& dest, Register scratch);
|
||||
|
||||
#define DEFINE_SHARED_OP(op) MOZ_MUST_USE bool emit##op();
|
||||
CACHE_IR_SHARED_OPS(DEFINE_SHARED_OP)
|
||||
#undef DEFINE_SHARED_OP
|
||||
|
|
|
@ -222,9 +222,14 @@ CodeGenerator::visitOutOfLineCache(OutOfLineUpdateCache* ool)
|
|||
|
||||
typedef bool (*IonGetPropertyICFn)(JSContext*, HandleScript, IonGetPropertyIC*, HandleValue, HandleValue,
|
||||
MutableHandleValue);
|
||||
const VMFunction IonGetPropertyICInfo =
|
||||
static const VMFunction IonGetPropertyICInfo =
|
||||
FunctionInfo<IonGetPropertyICFn>(IonGetPropertyIC::update, "IonGetPropertyIC::update");
|
||||
|
||||
typedef bool (*IonSetPropertyICFn)(JSContext*, HandleScript, IonSetPropertyIC*, HandleObject,
|
||||
HandleValue, HandleValue);
|
||||
static const VMFunction IonSetPropertyICInfo =
|
||||
FunctionInfo<IonSetPropertyICFn>(IonSetPropertyIC::update, "IonSetPropertyIC::update");
|
||||
|
||||
void
|
||||
CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool)
|
||||
{
|
||||
|
@ -257,9 +262,26 @@ CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool)
|
|||
masm.jump(ool->rejoin());
|
||||
return;
|
||||
}
|
||||
case CacheKind::GetName:
|
||||
case CacheKind::SetProp:
|
||||
case CacheKind::SetElem:
|
||||
case CacheKind::SetElem: {
|
||||
IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();
|
||||
|
||||
saveLive(lir);
|
||||
|
||||
pushArg(setPropIC->rhs());
|
||||
pushArg(setPropIC->id());
|
||||
pushArg(setPropIC->object());
|
||||
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
|
||||
pushArg(ImmGCPtr(gen->info().script()));
|
||||
|
||||
callVM(IonSetPropertyICInfo, lir);
|
||||
|
||||
restoreLive(lir);
|
||||
|
||||
masm.jump(ool->rejoin());
|
||||
return;
|
||||
}
|
||||
case CacheKind::GetName:
|
||||
case CacheKind::In:
|
||||
MOZ_CRASH("Baseline-specific for now");
|
||||
}
|
||||
|
@@ -10278,16 +10300,22 @@ CodeGenerator::addGetPropertyCache(LInstruction* ins, LiveRegisterSet liveRegs,

void
CodeGenerator::addSetPropertyCache(LInstruction* ins, LiveRegisterSet liveRegs, Register objReg,
                                   Register temp, Register tempUnbox, FloatRegister tempDouble,
                                   Register temp, FloatRegister tempDouble,
                                   FloatRegister tempF32, const ConstantOrRegister& id,
                                   const ConstantOrRegister& value,
                                   bool strict, bool needsTypeBarrier, bool guardHoles,
                                   jsbytecode* profilerLeavePc)
{
    SetPropertyIC cache(liveRegs, objReg, temp, tempUnbox, tempDouble, tempF32, id, value, strict,
                        needsTypeBarrier, guardHoles);
    cache.setProfilerLeavePC(profilerLeavePc);
    addCache(ins, allocateCache(cache));
    CacheKind kind = CacheKind::SetElem;
    if (id.constant() && id.value().isString()) {
        JSString* idString = id.value().toString();
        uint32_t dummy;
        if (idString->isAtom() && !idString->asAtom().isIndex(&dummy))
            kind = CacheKind::SetProp;
    }
    IonSetPropertyIC cache(kind, liveRegs, objReg, temp, tempDouble, tempF32,
                           id, value, strict, needsTypeBarrier, guardHoles);
    addIC(ins, allocateIC(cache));
}
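A standalone toy of the kind selection above, with hypothetical stand-ins for the jsid representation: a constant string id that names a non-index atom becomes a SetProp IC, while anything that looks like an integer index stays a SetElem IC.

// toy_cache_kind.cpp
#include <cctype>
#include <cstdio>
#include <string>

enum class CacheKind { SetProp, SetElem };

static bool IsIndex(const std::string& s)
{
    if (s.empty())
        return false;
    for (char c : s) {
        if (!std::isdigit(static_cast<unsigned char>(c)))
            return false;
    }
    return true;  // simplified: the real isIndex also checks range and canonical form
}

static CacheKind KindForConstantId(const std::string& id)
{
    return IsIndex(id) ? CacheKind::SetElem : CacheKind::SetProp;
}

int main()
{
    std::printf("obj.foo   -> %s\n", KindForConstantId("foo") == CacheKind::SetProp ? "SetProp" : "SetElem");
    std::printf("obj[\"3\"]  -> %s\n", KindForConstantId("3") == CacheKind::SetProp ? "SetProp" : "SetElem");
}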

ConstantOrRegister
@ -10427,7 +10455,6 @@ CodeGenerator::visitSetPropertyCache(LSetPropertyCache* ins)
|
|||
LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
|
||||
Register objReg = ToRegister(ins->getOperand(0));
|
||||
Register temp = ToRegister(ins->temp());
|
||||
Register tempUnbox = ToTempUnboxRegister(ins->tempToUnboxIndex());
|
||||
FloatRegister tempDouble = ToTempFloatRegisterOrInvalid(ins->tempDouble());
|
||||
FloatRegister tempF32 = ToTempFloatRegisterOrInvalid(ins->tempFloat32());
|
||||
|
||||
|
@ -10436,33 +10463,11 @@ CodeGenerator::visitSetPropertyCache(LSetPropertyCache* ins)
|
|||
ConstantOrRegister value =
|
||||
toConstantOrRegister(ins, LSetPropertyCache::Value, ins->mir()->value()->type());
|
||||
|
||||
addSetPropertyCache(ins, liveRegs, objReg, temp, tempUnbox, tempDouble, tempF32,
|
||||
addSetPropertyCache(ins, liveRegs, objReg, temp, tempDouble, tempF32,
|
||||
id, value, ins->mir()->strict(), ins->mir()->needsTypeBarrier(),
|
||||
ins->mir()->guardHoles(), ins->mir()->profilerLeavePc());
|
||||
}
|
||||
|
||||
typedef bool (*SetPropertyICFn)(JSContext*, HandleScript, size_t, HandleObject, HandleValue,
|
||||
HandleValue);
|
||||
const VMFunction SetPropertyIC::UpdateInfo =
|
||||
FunctionInfo<SetPropertyICFn>(SetPropertyIC::update, "SetPropertyIC::update");
|
||||
|
||||
void
|
||||
CodeGenerator::visitSetPropertyIC(OutOfLineUpdateCache* ool, DataPtr<SetPropertyIC>& ic)
|
||||
{
|
||||
LInstruction* lir = ool->lir();
|
||||
saveLive(lir);
|
||||
|
||||
pushArg(ic->value());
|
||||
pushArg(ic->id());
|
||||
pushArg(ic->object());
|
||||
pushArg(Imm32(ool->getCacheIndex()));
|
||||
pushArg(ImmGCPtr(gen->info().script()));
|
||||
callVM(SetPropertyIC::UpdateInfo, lir);
|
||||
restoreLive(lir);
|
||||
|
||||
masm.jump(ool->rejoin());
|
||||
}
|
||||
|
||||
typedef bool (*ThrowFn)(JSContext*, HandleValue);
|
||||
static const VMFunction ThrowInfoCodeGen = FunctionInfo<ThrowFn>(js::Throw, "Throw");
|
||||
|
||||
|
|
|
@ -421,7 +421,6 @@ class CodeGenerator final : public CodeGeneratorSpecific
|
|||
void visitSetPropertyCache(LSetPropertyCache* ins);
|
||||
void visitGetNameCache(LGetNameCache* ins);
|
||||
|
||||
void visitSetPropertyIC(OutOfLineUpdateCache* ool, DataPtr<SetPropertyIC>& ic);
|
||||
void visitBindNameIC(OutOfLineUpdateCache* ool, DataPtr<BindNameIC>& ic);
|
||||
void visitNameIC(OutOfLineUpdateCache* ool, DataPtr<NameIC>& ic);
|
||||
|
||||
|
@ -461,7 +460,7 @@ class CodeGenerator final : public CodeGeneratorSpecific
|
|||
TypedOrValueRegister output, Register maybeTemp, bool monitoredResult,
|
||||
bool allowDoubleResult, jsbytecode* profilerLeavePc);
|
||||
void addSetPropertyCache(LInstruction* ins, LiveRegisterSet liveRegs, Register objReg,
|
||||
Register temp, Register tempUnbox, FloatRegister tempDouble,
|
||||
Register temp, FloatRegister tempDouble,
|
||||
FloatRegister tempF32, const ConstantOrRegister& id,
|
||||
const ConstantOrRegister& value,
|
||||
bool strict, bool needsTypeBarrier, bool guardHoles,
|
||||
|
|
|
@ -1275,6 +1275,9 @@ void
|
|||
IonScript::toggleBarriers(bool enabled, ReprotectCode reprotect)
|
||||
{
|
||||
method()->togglePreBarriers(enabled, reprotect);
|
||||
|
||||
for (size_t i = 0; i < numICs(); i++)
|
||||
getICFromIndex(i).togglePreBarriers(enabled, reprotect);
|
||||
}
|
||||
|
||||
void
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include "jscompartmentinlines.h"
|
||||
|
||||
#include "jit/MacroAssembler-inl.h"
|
||||
#include "vm/TypeInference-inl.h"
|
||||
|
||||
using namespace js;
|
||||
using namespace js::jit;
|
||||
|
@ -33,12 +34,13 @@ class MOZ_RAII IonCacheIRCompiler : public CacheIRCompiler
|
|||
friend class AutoSaveLiveRegisters;
|
||||
|
||||
IonCacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, IonIC* ic, IonScript* ionScript,
|
||||
IonICStub* stub)
|
||||
IonICStub* stub, const PropertyTypeCheckInfo* typeCheckInfo)
|
||||
: CacheIRCompiler(cx, writer, Mode::Ion),
|
||||
writer_(writer),
|
||||
ic_(ic),
|
||||
ionScript_(ionScript),
|
||||
stub_(stub),
|
||||
typeCheckInfo_(typeCheckInfo),
|
||||
nextStubField_(0),
|
||||
#ifdef DEBUG
|
||||
calledPrepareVMCall_(false),
|
||||
|
@ -60,6 +62,10 @@ class MOZ_RAII IonCacheIRCompiler : public CacheIRCompiler
|
|||
// The stub we're generating code for.
|
||||
IonICStub* stub_;
|
||||
|
||||
// Information necessary to generate property type checks. Non-null iff
|
||||
// this is a SetProp/SetElem stub.
|
||||
const PropertyTypeCheckInfo* typeCheckInfo_;
|
||||
|
||||
CodeOffsetJump rejoinOffset_;
|
||||
Vector<CodeOffset, 4, SystemAllocPolicy> nextCodeOffsets_;
|
||||
Maybe<LiveRegisterSet> liveRegs_;
|
||||
|
@ -125,6 +131,8 @@ class MOZ_RAII IonCacheIRCompiler : public CacheIRCompiler
|
|||
void prepareVMCall(MacroAssembler& masm);
|
||||
MOZ_MUST_USE bool callVM(MacroAssembler& masm, const VMFunction& fun);
|
||||
|
||||
MOZ_MUST_USE bool emitAddAndStoreSlotShared(CacheOp op);
|
||||
|
||||
void pushStubCodePointer() {
|
||||
stubJitCodeOffset_.emplace(masm.PushWithPatch(ImmPtr((void*)-1)));
|
||||
}
|
||||
|
@ -174,8 +182,6 @@ void
|
|||
CacheRegisterAllocator::saveIonLiveRegisters(MacroAssembler& masm, LiveRegisterSet liveRegs,
|
||||
Register scratch, IonScript* ionScript)
|
||||
{
|
||||
MOZ_ASSERT(!liveRegs.has(scratch));
|
||||
|
||||
// We have to push all registers in liveRegs on the stack. It's possible we
|
||||
// stored other values in our live registers and stored operands on the
|
||||
// stack (where our live registers should go), so this requires some careful
|
||||
|
@ -290,6 +296,10 @@ CacheRegisterAllocator::saveIonLiveRegisters(MacroAssembler& masm, LiveRegisterS
|
|||
// available.
|
||||
availableRegs_.set() = GeneralRegisterSet::Not(inputRegisterSet());
|
||||
availableRegsAfterSpill_.set() = GeneralRegisterSet();
|
||||
|
||||
// Step 8. We restored our input state, so we have to fix up aliased input
|
||||
// registers again.
|
||||
fixupAliasedInputs(masm);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -350,7 +360,9 @@ IonCacheIRCompiler::init()
|
|||
|
||||
AllocatableGeneralRegisterSet available;
|
||||
|
||||
if (ic_->kind() == CacheKind::GetProp || ic_->kind() == CacheKind::GetElem) {
|
||||
switch (ic_->kind()) {
|
||||
case CacheKind::GetProp:
|
||||
case CacheKind::GetElem: {
|
||||
IonGetPropertyIC* ic = ic_->asGetPropertyIC();
|
||||
TypedOrValueRegister output = ic->output();
|
||||
|
||||
|
@ -372,7 +384,30 @@ IonCacheIRCompiler::init()
|
|||
allocator.initInputLocation(0, ic->value());
|
||||
if (numInputs > 1)
|
||||
allocator.initInputLocation(1, ic->id());
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
case CacheKind::SetProp:
|
||||
case CacheKind::SetElem: {
|
||||
IonSetPropertyIC* ic = ic_->asSetPropertyIC();
|
||||
|
||||
available.add(ic->temp1());
|
||||
|
||||
liveRegs_.emplace(ic->liveRegs());
|
||||
|
||||
allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);
|
||||
|
||||
if (ic->kind() == CacheKind::SetProp) {
|
||||
MOZ_ASSERT(numInputs == 2);
|
||||
allocator.initInputLocation(1, ic->rhs());
|
||||
} else {
|
||||
MOZ_ASSERT(numInputs == 3);
|
||||
allocator.initInputLocation(1, ic->id());
|
||||
allocator.initInputLocation(2, ic->rhs());
|
||||
}
|
||||
break;
|
||||
}
|
||||
case CacheKind::GetName:
|
||||
case CacheKind::In:
|
||||
MOZ_CRASH("Invalid cache");
|
||||
}
|
||||
|
||||
|
@ -388,6 +423,8 @@ IonCacheIRCompiler::compile()
|
|||
if (cx_->runtime()->geckoProfiler().enabled())
|
||||
masm.enableProfilingInstrumentation();
|
||||
|
||||
allocator.fixupAliasedInputs(masm);
|
||||
|
||||
do {
|
||||
switch (reader.readOp()) {
|
||||
#define DEFINE_OP(op) \
|
||||
|
@@ -861,112 +898,708 @@ IonCacheIRCompiler::emitLoadEnvironmentDynamicSlotResult()
    MOZ_CRASH("Baseline-specific op");
}

static bool
GroupHasPropertyTypes(ObjectGroup* group, jsid* id, Value* v)
{
    if (group->unknownProperties())
        return true;
    HeapTypeSet* propTypes = group->maybeGetProperty(*id);
    if (!propTypes)
        return true;
    if (!propTypes->nonConstantProperty())
        return false;
    return propTypes->hasType(TypeSet::GetValueType(*v));
}
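GroupHasPropertyTypes backs the out-of-line recheck in EmitCheckPropertyTypes further down this hunk: the stub's inline guard tests a snapshot of the property's type set, and on failure this ABI call consults the live set in case the type was added after the stub was generated. A standalone toy of that two-tier check, under a hypothetical data model:

// toy_two_tier_type_check.cpp
#include <cstdio>
#include <set>
#include <string>

using TypeSet = std::set<std::string>;

static bool SnapshotHasType(const TypeSet& snapshot, const std::string& t)
{
    return snapshot.count(t) != 0;     // the inline masm.guardTypeSet path
}

static bool LiveSetHasType(const TypeSet& live, const std::string& t)
{
    return live.count(t) != 0;         // the GroupHasPropertyTypes ABI call
}

int main()
{
    TypeSet snapshot = {"Int32"};
    TypeSet live     = {"Int32", "String"};   // "String" was added after stub generation

    for (const std::string& t : {std::string("Int32"), std::string("String"), std::string("Object")}) {
        bool ok = SnapshotHasType(snapshot, t) || LiveSetHasType(live, t);
        std::printf("store of %s: %s\n", t.c_str(), ok ? "take stub" : "jump to failure path");
    }
}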
static void
|
||||
EmitCheckPropertyTypes(MacroAssembler& masm, const PropertyTypeCheckInfo* typeCheckInfo,
|
||||
Register obj, const ConstantOrRegister& val,
|
||||
const LiveRegisterSet& liveRegs, Label* failures)
|
||||
{
|
||||
// Emit code to check |val| is part of the property's HeapTypeSet.
|
||||
|
||||
if (!typeCheckInfo->isSet())
|
||||
return;
|
||||
|
||||
ObjectGroup* group = typeCheckInfo->group();
|
||||
if (group->unknownProperties())
|
||||
return;
|
||||
|
||||
jsid id = typeCheckInfo->id();
|
||||
HeapTypeSet* propTypes = group->maybeGetProperty(id);
|
||||
if (propTypes && propTypes->unknown())
|
||||
return;
|
||||
|
||||
// Use the object register as scratch, as we don't need it here.
|
||||
masm.Push(obj);
|
||||
Register scratch1 = obj;
|
||||
|
||||
bool checkTypeSet = true;
|
||||
Label failedFastPath;
|
||||
|
||||
if (propTypes && !propTypes->nonConstantProperty())
|
||||
masm.jump(&failedFastPath);
|
||||
|
||||
if (val.constant()) {
|
||||
// If the input is a constant, then don't bother if the barrier will always fail.
|
||||
if (!propTypes || !propTypes->hasType(TypeSet::GetValueType(val.value())))
|
||||
masm.jump(&failedFastPath);
|
||||
checkTypeSet = false;
|
||||
} else {
|
||||
// We can do the same trick as above for primitive types of specialized
|
||||
// registers.
|
||||
TypedOrValueRegister reg = val.reg();
|
||||
if (reg.hasTyped() && reg.type() != MIRType::Object) {
|
||||
JSValueType valType = ValueTypeFromMIRType(reg.type());
|
||||
if (!propTypes || !propTypes->hasType(TypeSet::PrimitiveType(valType)))
|
||||
masm.jump(&failedFastPath);
|
||||
checkTypeSet = false;
|
||||
}
|
||||
}
|
||||
|
||||
Label done;
|
||||
if (checkTypeSet) {
|
||||
TypedOrValueRegister valReg = val.reg();
|
||||
if (propTypes) {
|
||||
// guardTypeSet can read from type sets without triggering read barriers.
|
||||
TypeSet::readBarrier(propTypes);
|
||||
masm.guardTypeSet(valReg, propTypes, BarrierKind::TypeSet, scratch1, &failedFastPath);
|
||||
masm.jump(&done);
|
||||
} else {
|
||||
masm.jump(&failedFastPath);
|
||||
}
|
||||
}
|
||||
|
||||
if (failedFastPath.used()) {
|
||||
// The inline type check failed. Do a callWithABI to check the current
|
||||
// TypeSet in case the type was added after we generated this stub.
|
||||
masm.bind(&failedFastPath);
|
||||
|
||||
AllocatableRegisterSet regs(GeneralRegisterSet::Volatile(), liveRegs.fpus());
|
||||
LiveRegisterSet save(regs.asLiveSet());
|
||||
masm.PushRegsInMask(save);
|
||||
|
||||
regs.takeUnchecked(scratch1);
|
||||
|
||||
// Push |val| first to make sure everything is fine if |val| aliases
|
||||
// scratch2.
|
||||
Register scratch2 = regs.takeAnyGeneral();
|
||||
masm.Push(val);
|
||||
masm.moveStackPtrTo(scratch2);
|
||||
|
||||
Register scratch3 = regs.takeAnyGeneral();
|
||||
masm.Push(id, scratch3);
|
||||
masm.moveStackPtrTo(scratch3);
|
||||
|
||||
masm.setupUnalignedABICall(scratch1);
|
||||
masm.movePtr(ImmGCPtr(group), scratch1);
|
||||
masm.passABIArg(scratch1);
|
||||
masm.passABIArg(scratch3);
|
||||
masm.passABIArg(scratch2);
|
||||
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, GroupHasPropertyTypes));
|
||||
masm.mov(ReturnReg, scratch1);
|
||||
|
||||
masm.adjustStack(sizeof(Value) + sizeof(jsid));
|
||||
|
||||
LiveRegisterSet ignore;
|
||||
ignore.add(scratch1);
|
||||
masm.PopRegsInMaskIgnore(save, ignore);
|
||||
|
||||
masm.branchIfTrueBool(scratch1, &done);
|
||||
masm.pop(obj);
|
||||
masm.jump(failures);
|
||||
}
|
||||
|
||||
masm.bind(&done);
|
||||
masm.Pop(obj);
|
||||
}
|
||||
|
||||
bool
|
||||
IonCacheIRCompiler::emitStoreFixedSlot()
|
||||
{
|
||||
MOZ_CRASH("Baseline-specific op");
|
||||
Register obj = allocator.useRegister(masm, reader.objOperandId());
|
||||
int32_t offset = int32StubField(reader.stubOffset());
|
||||
ConstantOrRegister val = allocator.useConstantOrRegister(masm, reader.valOperandId());
|
||||
|
||||
if (typeCheckInfo_->isSet()) {
|
||||
FailurePath* failure;
|
||||
if (!addFailurePath(&failure))
|
||||
return false;
|
||||
|
||||
EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, val, *liveRegs_, failure->label());
|
||||
}
|
||||
|
||||
Address slot(obj, offset);
|
||||
EmitPreBarrier(masm, slot, MIRType::Value);
|
||||
masm.storeConstantOrRegister(val, slot);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
IonCacheIRCompiler::emitStoreDynamicSlot()
|
||||
{
|
||||
MOZ_CRASH("Baseline-specific op");
|
||||
Register obj = allocator.useRegister(masm, reader.objOperandId());
|
||||
int32_t offset = int32StubField(reader.stubOffset());
|
||||
ConstantOrRegister val = allocator.useConstantOrRegister(masm, reader.valOperandId());
|
||||
AutoScratchRegister scratch(allocator, masm);
|
||||
|
||||
if (typeCheckInfo_->isSet()) {
|
||||
FailurePath* failure;
|
||||
if (!addFailurePath(&failure))
|
||||
return false;
|
||||
|
||||
EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, val, *liveRegs_, failure->label());
|
||||
}
|
||||
|
||||
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
|
||||
Address slot(scratch, offset);
|
||||
EmitPreBarrier(masm, slot, MIRType::Value);
|
||||
masm.storeConstantOrRegister(val, slot);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
IonCacheIRCompiler::emitAddAndStoreSlotShared(CacheOp op)
|
||||
{
|
||||
Register obj = allocator.useRegister(masm, reader.objOperandId());
|
||||
int32_t offset = int32StubField(reader.stubOffset());
|
||||
ConstantOrRegister val = allocator.useConstantOrRegister(masm, reader.valOperandId());
|
||||
|
||||
AutoScratchRegister scratch1(allocator, masm);
|
||||
|
||||
Maybe<AutoScratchRegister> scratch2;
|
||||
if (op == CacheOp::AllocateAndStoreDynamicSlot)
|
||||
scratch2.emplace(allocator, masm);
|
||||
|
||||
bool changeGroup = reader.readBool();
|
||||
ObjectGroup* newGroup = groupStubField(reader.stubOffset());
|
||||
Shape* newShape = shapeStubField(reader.stubOffset());
|
||||
|
||||
FailurePath* failure;
|
||||
if (!addFailurePath(&failure))
|
||||
return false;
|
||||
|
||||
EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, val, *liveRegs_, failure->label());
|
||||
|
||||
if (op == CacheOp::AllocateAndStoreDynamicSlot) {
|
||||
// We have to (re)allocate dynamic slots. Do this first, as it's the
|
||||
// only fallible operation here. This simplifies the callTypeUpdateIC
|
||||
// call below: it does not have to worry about saving registers used by
|
||||
// failure paths.
|
||||
int32_t numNewSlots = int32StubField(reader.stubOffset());
|
||||
MOZ_ASSERT(numNewSlots > 0);
|
||||
|
||||
AllocatableRegisterSet regs(RegisterSet::Volatile());
|
||||
LiveRegisterSet save(regs.asLiveSet());
|
||||
|
||||
masm.PushRegsInMask(save);
|
||||
|
||||
masm.setupUnalignedABICall(scratch1);
|
||||
masm.loadJSContext(scratch1);
|
||||
masm.passABIArg(scratch1);
|
||||
masm.passABIArg(obj);
|
||||
masm.move32(Imm32(numNewSlots), scratch2.ref());
|
||||
masm.passABIArg(scratch2.ref());
|
||||
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NativeObject::growSlotsDontReportOOM));
|
||||
masm.mov(ReturnReg, scratch1);
|
||||
|
||||
LiveRegisterSet ignore;
|
||||
ignore.add(scratch1);
|
||||
masm.PopRegsInMaskIgnore(save, ignore);
|
||||
|
||||
masm.branchIfFalseBool(scratch1, failure->label());
|
||||
}
|
||||
|
||||
if (changeGroup) {
|
||||
// Changing object's group from a partially to fully initialized group,
|
||||
// per the acquired properties analysis. Only change the group if the
|
||||
// old group still has a newScript. This only applies to PlainObjects.
|
||||
Label noGroupChange;
|
||||
masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch1);
|
||||
masm.branchPtr(Assembler::Equal,
|
||||
Address(scratch1, ObjectGroup::offsetOfAddendum()),
|
||||
ImmWord(0),
|
||||
&noGroupChange);
|
||||
|
||||
Address groupAddr(obj, JSObject::offsetOfGroup());
|
||||
EmitPreBarrier(masm, groupAddr, MIRType::ObjectGroup);
|
||||
masm.storePtr(ImmGCPtr(newGroup), groupAddr);
|
||||
|
||||
masm.bind(&noGroupChange);
|
||||
}
|
||||
|
||||
// Update the object's shape.
|
||||
Address shapeAddr(obj, ShapedObject::offsetOfShape());
|
||||
EmitPreBarrier(masm, shapeAddr, MIRType::Shape);
|
||||
masm.storePtr(ImmGCPtr(newShape), shapeAddr);
|
||||
|
||||
// Perform the store. No pre-barrier required since this is a new
|
||||
// initialization.
|
||||
if (op == CacheOp::AddAndStoreFixedSlot) {
|
||||
Address slot(obj, offset);
|
||||
masm.storeConstantOrRegister(val, slot);
|
||||
} else {
|
||||
MOZ_ASSERT(op == CacheOp::AddAndStoreDynamicSlot ||
|
||||
op == CacheOp::AllocateAndStoreDynamicSlot);
|
||||
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
|
||||
Address slot(scratch1, offset);
|
||||
masm.storeConstantOrRegister(val, slot);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
IonCacheIRCompiler::emitAddAndStoreFixedSlot()
|
||||
{
|
||||
MOZ_CRASH("Baseline-specific op");
|
||||
return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot);
|
||||
}
|
||||
|
||||
bool
|
||||
IonCacheIRCompiler::emitAddAndStoreDynamicSlot()
|
||||
{
|
||||
MOZ_CRASH("Baseline-specific op");
|
||||
return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot);
|
||||
}
|
||||
|
||||
bool
|
||||
IonCacheIRCompiler::emitAllocateAndStoreDynamicSlot()
|
||||
{
|
||||
MOZ_CRASH("Baseline-specific op");
|
||||
return emitAddAndStoreSlotShared(CacheOp::AllocateAndStoreDynamicSlot);
|
||||
}
|
||||
|
||||
bool
|
||||
IonCacheIRCompiler::emitStoreUnboxedProperty()
|
||||
{
|
||||
MOZ_CRASH("Baseline-specific op");
|
||||
Register obj = allocator.useRegister(masm, reader.objOperandId());
|
||||
JSValueType fieldType = reader.valueType();
|
||||
int32_t offset = int32StubField(reader.stubOffset());
|
||||
ConstantOrRegister val = allocator.useConstantOrRegister(masm, reader.valOperandId());
|
||||
|
||||
if (fieldType == JSVAL_TYPE_OBJECT && typeCheckInfo_->isSet()) {
|
||||
FailurePath* failure;
|
||||
if (!addFailurePath(&failure))
|
||||
return false;
|
||||
EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, val, *liveRegs_, failure->label());
|
||||
}
|
||||
|
||||
// Note that the storeUnboxedProperty call here is infallible, as the
|
||||
// IR emitter is responsible for guarding on |val|'s type.
|
||||
Address fieldAddr(obj, offset);
|
||||
EmitICUnboxedPreBarrier(masm, fieldAddr, fieldType);
|
||||
masm.storeUnboxedProperty(fieldAddr, fieldType, val, /* failure = */ nullptr);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
IonCacheIRCompiler::emitStoreTypedObjectReferenceProperty()
{
    MOZ_CRASH("Baseline-specific op");
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    int32_t offset = int32StubField(reader.stubOffset());
    TypedThingLayout layout = reader.typedThingLayout();
    ReferenceTypeDescr::Type type = reader.referenceTypeDescrType();

    ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());

    AutoScratchRegister scratch1(allocator, masm);
    AutoScratchRegister scratch2(allocator, masm);

    // We don't need to check property types if the property is always a
    // string.
    if (type != ReferenceTypeDescr::TYPE_STRING) {
        FailurePath* failure;
        if (!addFailurePath(&failure))
            return false;
        EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, TypedOrValueRegister(val),
                               *liveRegs_, failure->label());
    }

    // Compute the address being written to.
    LoadTypedThingData(masm, layout, obj, scratch1);
    Address dest(scratch1, offset);

    emitStoreTypedObjectReferenceProp(val, type, dest, scratch2);
    return true;
}

bool
IonCacheIRCompiler::emitStoreTypedObjectScalarProperty()
{
    MOZ_CRASH("Baseline-specific op");
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    int32_t offset = int32StubField(reader.stubOffset());
    TypedThingLayout layout = reader.typedThingLayout();
    Scalar::Type type = reader.scalarType();
    ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
    AutoScratchRegister scratch1(allocator, masm);
    AutoScratchRegister scratch2(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Compute the address being written to.
    LoadTypedThingData(masm, layout, obj, scratch1);
    Address dest(scratch1, offset);

    BaselineStoreToTypedArray(cx_, masm, type, val, dest, scratch2, failure->label());
    return true;
}

bool
IonCacheIRCompiler::emitStoreDenseElement()
{
    MOZ_CRASH("Baseline-specific op");
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Register index = allocator.useRegister(masm, reader.int32OperandId());
    ConstantOrRegister val = allocator.useConstantOrRegister(masm, reader.valOperandId());

    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, val, *liveRegs_, failure->label());

    // Load obj->elements in scratch.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

    // Bounds check.
    Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
    masm.branch32(Assembler::BelowOrEqual, initLength, index, failure->label());

    // Hole check.
    BaseObjectElementIndex element(scratch, index);
    masm.branchTestMagic(Assembler::Equal, element, failure->label());

    EmitPreBarrier(masm, element, MIRType::Value);
    EmitIonStoreDenseElement(masm, val, scratch, element);
    return true;
}

bool
IonCacheIRCompiler::emitStoreDenseElementHole()
{
    MOZ_CRASH("Baseline-specific op");
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Register index = allocator.useRegister(masm, reader.int32OperandId());
    ConstantOrRegister val = allocator.useConstantOrRegister(masm, reader.valOperandId());

    // handleAdd boolean is only relevant for Baseline. Ion ICs can always
    // handle adds as we don't have to set any flags on the fallback stub to
    // track this.
    reader.readBool();

    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, val, *liveRegs_, failure->label());

    // Load obj->elements in scratch.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

    Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
    BaseObjectElementIndex element(scratch, index);

    Label inBounds, doStore;
    masm.branch32(Assembler::Above, initLength, index, &inBounds);
    masm.branch32(Assembler::NotEqual, initLength, index, failure->label());

    // Check the capacity.
    Address capacity(scratch, ObjectElements::offsetOfCapacity());
    masm.branch32(Assembler::BelowOrEqual, capacity, index, failure->label());

    // Increment initLength.
    masm.add32(Imm32(1), initLength);

    // If length is now <= index, increment length too.
    Label skipIncrementLength;
    Address length(scratch, ObjectElements::offsetOfLength());
    masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
    masm.add32(Imm32(1), length);
    masm.bind(&skipIncrementLength);

    // Skip EmitPreBarrier as the memory is uninitialized.
    masm.jump(&doStore);

    masm.bind(&inBounds);

    EmitPreBarrier(masm, element, MIRType::Value);

    masm.bind(&doStore);
    EmitIonStoreDenseElement(masm, val, scratch, element);
    return true;
}

bool
IonCacheIRCompiler::emitStoreTypedElement()
{
    MOZ_CRASH("Baseline-specific op");
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Register index = allocator.useRegister(masm, reader.int32OperandId());
    ConstantOrRegister val = allocator.useConstantOrRegister(masm, reader.valOperandId());

    TypedThingLayout layout = reader.typedThingLayout();
    Scalar::Type arrayType = reader.scalarType();
    bool handleOOB = reader.readBool();

    AutoScratchRegister scratch1(allocator, masm);

    Maybe<AutoScratchRegister> scratch2;
    if (arrayType != Scalar::Float32 && arrayType != Scalar::Float64)
        scratch2.emplace(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Bounds check.
    Label done;
    LoadTypedThingLength(masm, layout, obj, scratch1);
    masm.branch32(Assembler::BelowOrEqual, scratch1, index, handleOOB ? &done : failure->label());

    // Load the elements vector.
    LoadTypedThingData(masm, layout, obj, scratch1);

    BaseIndex dest(scratch1, index, ScaleFromElemWidth(Scalar::byteSize(arrayType)));

    FloatRegister maybeTempDouble = ic_->asSetPropertyIC()->maybeTempDouble();
    FloatRegister maybeTempFloat32 = ic_->asSetPropertyIC()->maybeTempFloat32();
    MOZ_ASSERT(maybeTempDouble != InvalidFloatReg);
    MOZ_ASSERT_IF(jit::hasUnaliasedDouble(), maybeTempFloat32 != InvalidFloatReg);

    if (arrayType == Scalar::Float32) {
        FloatRegister tempFloat = hasUnaliasedDouble() ? maybeTempFloat32 : maybeTempDouble;
        if (!masm.convertConstantOrRegisterToFloat(cx_, val, tempFloat, failure->label()))
            return false;
        masm.storeToTypedFloatArray(arrayType, tempFloat, dest);
    } else if (arrayType == Scalar::Float64) {
        if (!masm.convertConstantOrRegisterToDouble(cx_, val, maybeTempDouble, failure->label()))
            return false;
        masm.storeToTypedFloatArray(arrayType, maybeTempDouble, dest);
    } else {
        Register valueToStore = scratch2.ref();
        if (arrayType == Scalar::Uint8Clamped) {
            if (!masm.clampConstantOrRegisterToUint8(cx_, val, maybeTempDouble, valueToStore,
                                                     failure->label()))
            {
                return false;
            }
        } else {
            if (!masm.truncateConstantOrRegisterToInt32(cx_, val, maybeTempDouble, valueToStore,
                                                        failure->label()))
            {
                return false;
            }
        }
        masm.storeToTypedIntArray(arrayType, valueToStore, dest);
    }

    masm.bind(&done);
    return true;
}

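The integer branches in emitStoreTypedElement above lean on MacroAssembler helpers to coerce the incoming value before the store. As a standalone illustration of what those coercions mean at the language level (this sketch is not from the patch, uses plain C++ doubles rather than JS::Value, and the function names are placeholders): Uint8Clamped elements clamp and round, while every other integer element type uses a wrapping ToInt32-style truncation.

// Illustration only: host-side equivalents of the two integer conversions.
#include <cmath>
#include <cstdint>

static uint8_t ClampToUint8(double d) {
    if (std::isnan(d) || d <= 0.0)
        return 0;
    if (d >= 255.0)
        return 255;
    return static_cast<uint8_t>(std::nearbyint(d)); // ties round to even
}

static int32_t TruncateToInt32(double d) {
    if (std::isnan(d) || std::isinf(d))
        return 0;
    const double two32 = 4294967296.0;
    double m = std::fmod(std::trunc(d), two32);     // wrap modulo 2^32
    if (m < 0)
        m += two32;
    uint32_t bits = static_cast<uint32_t>(m);
    // Reinterpret as signed without relying on implementation-defined casts.
    return bits >= 2147483648u
               ? static_cast<int32_t>(bits - 2147483648u) - 2147483647 - 1
               : static_cast<int32_t>(bits);
}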
bool
|
||||
IonCacheIRCompiler::emitStoreUnboxedArrayElement()
|
||||
{
|
||||
// --unboxed-arrays is currently untested and broken.
|
||||
MOZ_CRASH("Baseline-specific op");
|
||||
}
|
||||
|
||||
bool
|
||||
IonCacheIRCompiler::emitStoreUnboxedArrayElementHole()
|
||||
{
|
||||
// --unboxed-arrays is currently untested and broken.
|
||||
MOZ_CRASH("Baseline-specific op");
|
||||
}
|
||||
|
||||
bool
|
||||
IonCacheIRCompiler::emitCallNativeSetter()
|
||||
{
|
||||
MOZ_CRASH("Baseline-specific op");
|
||||
AutoSaveLiveRegisters save(*this);
|
||||
|
||||
Register obj = allocator.useRegister(masm, reader.objOperandId());
|
||||
JSFunction* target = &objectStubField(reader.stubOffset())->as<JSFunction>();
|
||||
MOZ_ASSERT(target->isNative());
|
||||
ConstantOrRegister val = allocator.useConstantOrRegister(masm, reader.valOperandId());
|
||||
|
||||
AutoScratchRegister argJSContext(allocator, masm);
|
||||
AutoScratchRegister argVp(allocator, masm);
|
||||
AutoScratchRegister argUintN(allocator, masm);
|
||||
AutoScratchRegister scratch(allocator, masm);
|
||||
|
||||
allocator.discardStack(masm);
|
||||
|
||||
// Set up the call:
|
||||
// bool (*)(JSContext*, unsigned, Value* vp)
|
||||
// vp[0] is callee/outparam
|
||||
// vp[1] is |this|
|
||||
// vp[2] is the value
|
||||
|
||||
// Build vp and move the base into argVpReg.
|
||||
masm.Push(val);
|
||||
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
|
||||
masm.Push(ObjectValue(*target));
|
||||
masm.moveStackPtrTo(argVp.get());
|
||||
|
||||
// Preload other regs.
|
||||
masm.loadJSContext(argJSContext);
|
||||
masm.move32(Imm32(1), argUintN);
|
||||
|
||||
// Push marking data for later use.
|
||||
masm.Push(argUintN);
|
||||
pushStubCodePointer();
|
||||
|
||||
if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save))
|
||||
return false;
|
||||
masm.enterFakeExitFrame(scratch, IonOOLNativeExitFrameLayoutToken);
|
||||
|
||||
// Make the call.
|
||||
masm.setupUnalignedABICall(scratch);
|
||||
masm.passABIArg(argJSContext);
|
||||
masm.passABIArg(argUintN);
|
||||
masm.passABIArg(argVp);
|
||||
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, target->native()));
|
||||
|
||||
// Test for failure.
|
||||
masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
|
||||
|
||||
masm.adjustStack(IonOOLNativeExitFrameLayout::Size(1));
|
||||
return true;
|
||||
}
|
||||
|
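For context on the vp layout emitCallNativeSetter constructs: the three pushes match the standard JSNative calling convention, so the setter unpacks them with CallArgs. The setter below is hypothetical, but the JSAPI calls it uses (JS::CallArgsFromVp and friends) are real; it is only meant to show which slot is which.

// Hypothetical native setter, showing how the vp[0]/vp[1]/vp[2] slots pushed
// by the stub above are seen on the callee side.
static bool ExampleNativeSetter(JSContext* cx, unsigned argc, JS::Value* vp)
{
    JS::CallArgs args = JS::CallArgsFromVp(argc, vp); // vp[0] = callee, vp[1] = |this|, vp[2] = args[0]
    JS::RootedValue newValue(cx, args.get(0));        // the value being assigned
    // ... store newValue on the object behind |this| ...
    args.rval().setUndefined();                       // vp[0] doubles as the out-param
    return true;                                      // returning false signals a pending exception
}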
bool
IonCacheIRCompiler::emitCallScriptedSetter()
{
    MOZ_CRASH("Baseline-specific op");
    AutoSaveLiveRegisters save(*this);

    Register obj = allocator.useRegister(masm, reader.objOperandId());
    JSFunction* target = &objectStubField(reader.stubOffset())->as<JSFunction>();
    ConstantOrRegister val = allocator.useConstantOrRegister(masm, reader.valOperandId());

    AutoScratchRegister scratch(allocator, masm);

    allocator.discardStack(masm);

    uint32_t framePushedBefore = masm.framePushed();

    // Construct IonICCallFrameLayout.
    uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS,
                                              IonICCallFrameLayout::Size());
    pushStubCodePointer();
    masm.Push(Imm32(descriptor));
    masm.Push(ImmPtr(GetReturnAddressToIonCode(cx_)));

    // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
    // so we just have to make sure the stack is aligned after we push the
    // |this| + argument Values.
    size_t numArgs = Max<size_t>(1, target->nargs());
    uint32_t argSize = (numArgs + 1) * sizeof(Value);
    uint32_t padding = ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
    MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
    MOZ_ASSERT(padding < JitStackAlignment);
    masm.reserveStack(padding);

    for (size_t i = 1; i < target->nargs(); i++)
        masm.Push(UndefinedValue());
    masm.Push(val);
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));

    masm.movePtr(ImmGCPtr(target), scratch);

    descriptor = MakeFrameDescriptor(argSize + padding, JitFrame_IonICCall,
                                     JitFrameLayout::Size());
    masm.Push(Imm32(1)); // argc
    masm.Push(scratch);
    masm.Push(Imm32(descriptor));

    // Check stack alignment. Add sizeof(uintptr_t) for the return address.
    MOZ_ASSERT(((masm.framePushed() + sizeof(uintptr_t)) % JitStackAlignment) == 0);

    // The setter has JIT code now and we will only discard the setter's JIT
    // code when discarding all JIT code in the Zone, so we can assume it'll
    // still have JIT code.
    MOZ_ASSERT(target->hasJITCode());
    masm.loadPtr(Address(scratch, JSFunction::offsetOfNativeOrScript()), scratch);
    masm.loadBaselineOrIonRaw(scratch, scratch, nullptr);
    masm.callJit(scratch);

    masm.freeStack(masm.framePushed() - framePushedBefore);
    return true;
}

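The padding computation in emitCallScriptedSetter is easier to sanity-check with concrete numbers. A minimal sketch, assuming a 16-byte JitStackAlignment and 8-byte Values (typical for x64, but assumptions here), with ComputeByteAlignment reimplemented as the usual round-up-to-alignment helper:

// Standalone arithmetic check; not SpiderMonkey code.
#include <cassert>
#include <cstdint>

static uint32_t ByteAlignment(uint32_t bytes, uint32_t alignment)
{
    return (alignment - (bytes % alignment)) % alignment;
}

int main()
{
    const uint32_t kJitStackAlignment = 16;         // assumed
    const uint32_t kValueSize = 8;                  // assumed sizeof(Value)
    uint32_t framePushed = 40;                      // hypothetical masm.framePushed()
    uint32_t numArgs = 1;                           // Max(1, target->nargs()) for a setter
    uint32_t argSize = (numArgs + 1) * kValueSize;  // |this| + argument Values
    uint32_t padding = ByteAlignment(framePushed + argSize, kJitStackAlignment);
    // After reserving |padding| and pushing |this| + args, the stack is aligned.
    assert((framePushed + padding + argSize) % kJitStackAlignment == 0);
    return 0;
}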
typedef bool (*SetArrayLengthFn)(JSContext*, HandleObject, HandleValue, bool);
static const VMFunction SetArrayLengthInfo =
    FunctionInfo<SetArrayLengthFn>(SetArrayLength, "SetArrayLength");

bool
IonCacheIRCompiler::emitCallSetArrayLength()
{
    MOZ_CRASH("Baseline-specific op");
    AutoSaveLiveRegisters save(*this);

    Register obj = allocator.useRegister(masm, reader.objOperandId());
    bool strict = reader.readBool();
    ConstantOrRegister val = allocator.useConstantOrRegister(masm, reader.valOperandId());

    allocator.discardStack(masm);
    prepareVMCall(masm);

    masm.Push(Imm32(strict));
    masm.Push(val);
    masm.Push(obj);

    return callVM(masm, SetArrayLengthInfo);
}

typedef bool (*ProxySetPropertyFn)(JSContext*, HandleObject, HandleId, HandleValue, bool);
static const VMFunction ProxySetPropertyInfo =
    FunctionInfo<ProxySetPropertyFn>(ProxySetProperty, "ProxySetProperty");

bool
IonCacheIRCompiler::emitCallProxySet()
{
    MOZ_CRASH("Baseline-specific op");
    AutoSaveLiveRegisters save(*this);

    Register obj = allocator.useRegister(masm, reader.objOperandId());
    ConstantOrRegister val = allocator.useConstantOrRegister(masm, reader.valOperandId());
    jsid id = idStubField(reader.stubOffset());
    bool strict = reader.readBool();

    AutoScratchRegister scratch(allocator, masm);

    allocator.discardStack(masm);
    prepareVMCall(masm);

    masm.Push(Imm32(strict));
    masm.Push(val);
    masm.Push(id, scratch);
    masm.Push(obj);

    return callVM(masm, ProxySetPropertyInfo);
}

typedef bool (*ProxySetPropertyByValueFn)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
static const VMFunction ProxySetPropertyByValueInfo =
    FunctionInfo<ProxySetPropertyByValueFn>(ProxySetPropertyByValue, "ProxySetPropertyByValue");

bool
IonCacheIRCompiler::emitCallProxySetByValue()
{
    MOZ_CRASH("Baseline-specific op");
    AutoSaveLiveRegisters save(*this);

    Register obj = allocator.useRegister(masm, reader.objOperandId());
    ConstantOrRegister idVal = allocator.useConstantOrRegister(masm, reader.valOperandId());
    ConstantOrRegister val = allocator.useConstantOrRegister(masm, reader.valOperandId());
    bool strict = reader.readBool();

    allocator.discardStack(masm);
    prepareVMCall(masm);

    masm.Push(Imm32(strict));
    masm.Push(val);
    masm.Push(idVal);
    masm.Push(obj);

    return callVM(masm, ProxySetPropertyByValueInfo);
}

bool

@@ -1079,15 +1712,18 @@ IonCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration()

bool
IonIC::attachCacheIRStub(JSContext* cx, const CacheIRWriter& writer, CacheKind kind,
                         HandleScript outerScript)
                         IonScript* ionScript, const PropertyTypeCheckInfo* typeCheckInfo)
{
    // We shouldn't GC or report OOM (or any other exception) here.
    AutoAssertNoPendingException aanpe(cx);
    JS::AutoCheckCannotGC nogc;

    // SetProp/SetElem stubs must have non-null typeCheckInfo.
    MOZ_ASSERT(!!typeCheckInfo == (kind == CacheKind::SetProp || kind == CacheKind::SetElem));

    // Do nothing if the IR generator failed or triggered a GC that invalidated
    // the script.
    if (writer.failed() || !outerScript->hasIonScript())
    if (writer.failed() || ionScript->invalidated())
        return false;

    JitZone* jitZone = cx->zone()->jitZone();

@@ -1144,7 +1780,7 @@ IonIC::attachCacheIRStub(JSContext* cx, const CacheIRWriter& writer, CacheKind k
    writer.copyStubData(newStub->stubDataStart());

    JitContext jctx(cx, nullptr);
    IonCacheIRCompiler compiler(cx, writer, this, outerScript->ionScript(), newStub);
    IonCacheIRCompiler compiler(cx, writer, this, ionScript, newStub, typeCheckInfo);
    if (!compiler.init())
        return false;

(Diff for this file not shown because of its large size.)

@@ -29,7 +29,6 @@ namespace jit {
class LInstruction;

#define IONCACHE_KIND_LIST(_) \
    _(SetProperty) \
    _(BindName) \
    _(Name)

@@ -360,158 +359,6 @@ class IonCache
// Subclasses of IonCache for the various kinds of caches. These do not define
// new data members; all caches must be of the same size.

class SetPropertyIC : public IonCache
{
  protected:
    // Registers live after the cache, excluding output registers. The initial
    // value of these registers must be preserved by the cache.
    LiveRegisterSet liveRegs_;

    Register object_;
    Register temp_;
    Register tempToUnboxIndex_;
    FloatRegister tempDouble_;
    FloatRegister tempFloat32_;
    ConstantOrRegister id_;
    ConstantOrRegister value_;
    bool strict_ : 1;
    bool needsTypeBarrier_ : 1;
    bool guardHoles_ : 1;

    bool hasGenericProxyStub_ : 1;
    bool hasDenseStub_ : 1;

    void emitIdGuard(MacroAssembler& masm, jsid id, Label* fail);

  public:
    SetPropertyIC(LiveRegisterSet liveRegs, Register object, Register temp, Register tempToUnboxIndex,
                  FloatRegister tempDouble, FloatRegister tempFloat32,
                  const ConstantOrRegister& id, const ConstantOrRegister& value,
                  bool strict, bool needsTypeBarrier, bool guardHoles)
      : liveRegs_(liveRegs),
        object_(object),
        temp_(temp),
        tempToUnboxIndex_(tempToUnboxIndex),
        tempDouble_(tempDouble),
        tempFloat32_(tempFloat32),
        id_(id),
        value_(value),
        strict_(strict),
        needsTypeBarrier_(needsTypeBarrier),
        guardHoles_(guardHoles),
        hasGenericProxyStub_(false),
        hasDenseStub_(false)
    {
    }

    CACHE_HEADER(SetProperty)

    void reset(ReprotectCode reprotect);

    Register object() const {
        return object_;
    }
    Register temp() const {
        return temp_;
    }
    Register tempToUnboxIndex() const {
        return tempToUnboxIndex_;
    }
    FloatRegister tempDouble() const {
        return tempDouble_;
    }
    FloatRegister tempFloat32() const {
        return tempFloat32_;
    }
    ConstantOrRegister id() const {
        return id_;
    }
    ConstantOrRegister value() const {
        return value_;
    }
    bool strict() const {
        return strict_;
    }
    bool needsTypeBarrier() const {
        return needsTypeBarrier_;
    }
    bool guardHoles() const {
        return guardHoles_;
    }
    bool hasGenericProxyStub() const {
        return hasGenericProxyStub_;
    }

    bool hasDenseStub() const {
        return hasDenseStub_;
    }
    void setHasDenseStub() {
        MOZ_ASSERT(!hasDenseStub());
        hasDenseStub_ = true;
    }

    enum NativeSetPropCacheability {
        CanAttachNone,
        CanAttachSetSlot,
        MaybeCanAttachAddSlot,
        CanAttachCallSetter
    };

    MOZ_MUST_USE bool attachSetSlot(JSContext* cx, HandleScript outerScript, IonScript* ion,
                                    HandleObject obj, HandleShape shape, bool checkTypeset);

    MOZ_MUST_USE bool attachCallSetter(JSContext* cx, HandleScript outerScript, IonScript* ion,
                                       HandleObject obj, HandleObject holder, HandleShape shape,
                                       void* returnAddr);

    MOZ_MUST_USE bool attachAddSlot(JSContext* cx, HandleScript outerScript, IonScript* ion,
                                    HandleObject obj, HandleId id, HandleShape oldShape,
                                    HandleObjectGroup oldGroup, bool checkTypeset);

    MOZ_MUST_USE bool attachGenericProxy(JSContext* cx, HandleScript outerScript, IonScript* ion,
                                         HandleId id, void* returnAddr);

    MOZ_MUST_USE bool attachDOMProxyShadowed(JSContext* cx, HandleScript outerScript,
                                             IonScript* ion, HandleObject obj, HandleId id,
                                             void* returnAddr);

    MOZ_MUST_USE bool attachDOMProxyUnshadowed(JSContext* cx, HandleScript outerScript,
                                               IonScript* ion, HandleObject obj, HandleId id,
                                               void* returnAddr);

    static MOZ_MUST_USE bool update(JSContext* cx, HandleScript outerScript, size_t cacheIndex,
                                    HandleObject obj, HandleValue idval, HandleValue value);

    MOZ_MUST_USE bool tryAttachNative(JSContext* cx, HandleScript outerScript, IonScript* ion,
                                      HandleObject obj, HandleId id, bool* emitted,
                                      bool* tryNativeAddSlot);

    MOZ_MUST_USE bool tryAttachUnboxed(JSContext* cx, HandleScript outerScript, IonScript* ion,
                                       HandleObject obj, HandleId id, bool* emitted);

    MOZ_MUST_USE bool tryAttachUnboxedExpando(JSContext* cx, HandleScript outerScript,
                                              IonScript* ion, HandleObject obj, HandleId id,
                                              bool* emitted);

    MOZ_MUST_USE bool tryAttachProxy(JSContext* cx, HandleScript outerScript, IonScript* ion,
                                     HandleObject obj, HandleId id, bool* emitted);

    MOZ_MUST_USE bool tryAttachStub(JSContext* cx, HandleScript outerScript, IonScript* ion,
                                    HandleObject obj, HandleValue idval, HandleValue value,
                                    MutableHandleId id, bool* emitted, bool* tryNativeAddSlot);

    MOZ_MUST_USE bool tryAttachAddSlot(JSContext* cx, HandleScript outerScript, IonScript* ion,
                                       HandleObject obj, HandleId id, HandleObjectGroup oldGroup,
                                       HandleShape oldShape, bool tryNativeAddSlot, bool* emitted);

    MOZ_MUST_USE bool tryAttachDenseElement(JSContext* cx, HandleScript outerScript, IonScript* ion,
                                            HandleObject obj, const Value& idval, bool* emitted);

    MOZ_MUST_USE bool tryAttachTypedArrayElement(JSContext* cx, HandleScript outerScript,
                                                 IonScript* ion, HandleObject obj,
                                                 HandleValue idval, HandleValue val, bool* emitted);
};

class BindNameIC : public IonCache
{
  protected:

@@ -639,6 +486,9 @@ bool ValueToNameOrSymbolId(JSContext* cx, HandleValue idval, MutableHandleId id,

void* GetReturnAddressToIonCode(JSContext* cx);

void EmitIonStoreDenseElement(MacroAssembler& masm, const ConstantOrRegister& value,
                              Register elements, BaseObjectElementIndex target);

} // namespace jit
} // namespace js

@@ -41,9 +41,10 @@ IonIC::scratchRegisterForEntryJump()
        TypedOrValueRegister output = asGetPropertyIC()->output();
        return output.hasValue() ? output.valueReg().scratchReg() : output.typedReg().gpr();
      }
      case CacheKind::GetName:
      case CacheKind::SetProp:
      case CacheKind::SetElem:
        return asSetPropertyIC()->temp1();
      case CacheKind::GetName:
      case CacheKind::In:
        MOZ_CRASH("Baseline-specific for now");
    }

@@ -93,6 +94,19 @@ IonIC::trace(JSTracer* trc)
    MOZ_ASSERT(nextCodeRaw == fallbackLabel_.raw());
}

void
IonIC::togglePreBarriers(bool enabled, ReprotectCode reprotect)
{
    uint8_t* nextCodeRaw = codeRaw_;
    for (IonICStub* stub = firstStub_; stub; stub = stub->next()) {
        JitCode* code = JitCode::FromExecutable(nextCodeRaw);
        code->togglePreBarriers(enabled, reprotect);
        nextCodeRaw = stub->nextCodeRaw();
    }

    MOZ_ASSERT(nextCodeRaw == fallbackLabel_.raw());
}

void
IonGetPropertyIC::maybeDisable(Zone* zone, bool attached)
{

@@ -121,7 +135,8 @@ IonGetPropertyIC::update(JSContext* cx, HandleScript outerScript, IonGetProperty
                         HandleValue val, HandleValue idVal, MutableHandleValue res)
{
    // Override the return value if we are invalidated (bug 728188).
    AutoDetectInvalidation adi(cx, res, outerScript->ionScript());
    IonScript* ionScript = outerScript->ionScript();
    AutoDetectInvalidation adi(cx, res, ionScript);

    // If the IC is idempotent, we will redo the op in the interpreter.
    if (ic->idempotent())

@@ -141,8 +156,7 @@ IonGetPropertyIC::update(JSContext* cx, HandleScript outerScript, IonGetProperty
        GetPropIRGenerator gen(cx, outerScript, pc, ic->kind(), &isTemporarilyUnoptimizable,
                               val, idVal, canAttachGetter);
        if (ic->idempotent() ? gen.tryAttachIdempotentStub() : gen.tryAttachStub()) {
            attached = ic->attachCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
                                             outerScript);
            attached = ic->attachCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), ionScript);
        }
    }
    ic->maybeDisable(cx->zone(), attached);

@@ -187,6 +201,74 @@ IonGetPropertyIC::update(JSContext* cx, HandleScript outerScript, IonGetProperty
    return true;
}

/* static */ bool
IonSetPropertyIC::update(JSContext* cx, HandleScript outerScript, IonSetPropertyIC* ic,
                         HandleObject obj, HandleValue idVal, HandleValue rhs)
{
    RootedShape oldShape(cx);
    RootedObjectGroup oldGroup(cx);
    IonScript* ionScript = outerScript->ionScript();

    bool attached = false;
    if (!JitOptions.disableCacheIR && ic->canAttachStub()) {
        oldShape = obj->maybeShape();
        oldGroup = JSObject::getGroup(cx, obj);
        if (!oldGroup)
            return false;
        if (obj->is<UnboxedPlainObject>()) {
            MOZ_ASSERT(!oldShape);
            if (UnboxedExpandoObject* expando = obj->as<UnboxedPlainObject>().maybeExpando())
                oldShape = expando->lastProperty();
        }

        RootedValue objv(cx, ObjectValue(*obj));
        RootedScript script(cx, ic->script());
        jsbytecode* pc = ic->pc();
        bool isTemporarilyUnoptimizable;
        SetPropIRGenerator gen(cx, script, pc, ic->kind(), &isTemporarilyUnoptimizable,
                               objv, idVal, rhs, ic->needsTypeBarrier(), ic->guardHoles());
        if (gen.tryAttachStub()) {
            attached = ic->attachCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
                                             ionScript, gen.typeCheckInfo());
        }
    }

    if (ic->kind() == CacheKind::SetElem) {
        if (!SetObjectElement(cx, obj, idVal, rhs, ic->strict()))
            return false;
    } else {
        MOZ_ASSERT(ic->kind() == CacheKind::SetProp);

        jsbytecode* pc = ic->pc();
        if (*pc == JSOP_INITGLEXICAL) {
            RootedScript script(cx, ic->script());
            MOZ_ASSERT(!script->hasNonSyntacticScope());
            InitGlobalLexicalOperation(cx, &cx->global()->lexicalEnvironment(), script, pc, rhs);
        } else {
            RootedPropertyName name(cx, idVal.toString()->asAtom().asPropertyName());
            if (!SetProperty(cx, obj, name, rhs, ic->strict(), pc))
                return false;
        }
    }

    if (!attached && !JitOptions.disableCacheIR && ic->canAttachStub()) {
        RootedValue objv(cx, ObjectValue(*obj));
        RootedScript script(cx, ic->script());
        jsbytecode* pc = ic->pc();
        bool isTemporarilyUnoptimizable;
        SetPropIRGenerator gen(cx, script, pc, ic->kind(), &isTemporarilyUnoptimizable,
                               objv, idVal, rhs, ic->needsTypeBarrier(), ic->guardHoles());
        if (gen.tryAttachAddSlotStub(oldGroup, oldShape)) {
            attached = ic->attachCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
                                             ionScript, gen.typeCheckInfo());
        } else {
            gen.trackNotAttached();
        }
    }

    return true;
}

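Stepping back from the diff, IonSetPropertyIC::update above follows an attach, do-the-set, attach-again shape. The outline below only paraphrases that flow with placeholder helpers standing in for the real calls (it is not code from the patch); the point is that the add-slot retry needs the shape and group captured before the store mutates the object.

// Paraphrased control flow of IonSetPropertyIC::update; every helper is a
// stand-in so the sketch compiles on its own.
static void captureOldShapeAndGroup() {}
static bool tryAttachCacheIRStub() { return false; }  // stand-in for gen.tryAttachStub()
static bool performTheStore() { return true; }        // stand-in for SetObjectElement/SetProperty
static bool tryAttachAddSlotStub() { return true; }   // stand-in for gen.tryAttachAddSlotStub()

static bool SetPropertyICUpdateSketch()
{
    captureOldShapeAndGroup();              // before the store can change shape/group
    bool attached = tryAttachCacheIRStub(); // existing slot, setter, proxy, element, ...
    if (!performTheStore())                 // the VM fallback always performs the actual set
        return false;
    if (!attached)
        tryAttachAddSlotStub();             // uses the shape/group captured up front
    return true;
}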
uint8_t*
IonICStub::stubDataStart()
{

@@ -57,6 +57,7 @@ class IonICStub
};

class IonGetPropertyIC;
class IonSetPropertyIC;

class IonIC
{

@@ -125,6 +126,8 @@ class IonIC
    // Discard all stubs.
    void reset(Zone* zone);

    void togglePreBarriers(bool enabled, ReprotectCode reprotect);

    CacheKind kind() const { return kind_; }
    uint8_t** codeRawPtr() { return &codeRaw_; }

@@ -138,6 +141,10 @@ class IonIC
        MOZ_ASSERT(kind_ == CacheKind::GetProp || kind_ == CacheKind::GetElem);
        return (IonGetPropertyIC*)this;
    }
    IonSetPropertyIC* asSetPropertyIC() {
        MOZ_ASSERT(kind_ == CacheKind::SetProp || kind_ == CacheKind::SetElem);
        return (IonSetPropertyIC*)this;
    }

    void updateBaseAddress(JitCode* code, MacroAssembler& masm);

@@ -148,7 +155,8 @@ class IonIC
    void trace(JSTracer* trc);

    bool attachCacheIRStub(JSContext* cx, const CacheIRWriter& writer, CacheKind kind,
                           HandleScript outerScript);
                           IonScript* ionScript,
                           const PropertyTypeCheckInfo* typeCheckInfo = nullptr);
};

class IonGetPropertyIC : public IonIC

@@ -195,6 +203,55 @@ class IonGetPropertyIC : public IonIC
                        HandleValue val, HandleValue idVal, MutableHandleValue res);
};

class IonSetPropertyIC : public IonIC
{
    LiveRegisterSet liveRegs_;

    Register object_;
    Register temp1_;
    FloatRegister maybeTempDouble_;
    FloatRegister maybeTempFloat32_;
    ConstantOrRegister id_;
    ConstantOrRegister rhs_;
    bool strict_ : 1;
    bool needsTypeBarrier_ : 1;
    bool guardHoles_ : 1;

  public:
    IonSetPropertyIC(CacheKind kind, LiveRegisterSet liveRegs, Register object, Register temp1,
                     FloatRegister maybeTempDouble, FloatRegister maybeTempFloat32,
                     const ConstantOrRegister& id, const ConstantOrRegister& rhs, bool strict,
                     bool needsTypeBarrier, bool guardHoles)
      : IonIC(kind),
        liveRegs_(liveRegs),
        object_(object),
        temp1_(temp1),
        maybeTempDouble_(maybeTempDouble),
        maybeTempFloat32_(maybeTempFloat32),
        id_(id),
        rhs_(rhs),
        strict_(strict),
        needsTypeBarrier_(needsTypeBarrier),
        guardHoles_(guardHoles)
    { }

    LiveRegisterSet liveRegs() const { return liveRegs_; }
    Register object() const { return object_; }
    ConstantOrRegister id() const { return id_; }
    ConstantOrRegister rhs() const { return rhs_; }

    Register temp1() const { return temp1_; }
    FloatRegister maybeTempDouble() const { return maybeTempDouble_; }
    FloatRegister maybeTempFloat32() const { return maybeTempFloat32_; }

    bool strict() const { return strict_; }
    bool needsTypeBarrier() const { return needsTypeBarrier_; }
    bool guardHoles() const { return guardHoles_; }

    static MOZ_MUST_USE bool update(JSContext* cx, HandleScript outerScript, IonSetPropertyIC* ic,
                                    HandleObject obj, HandleValue idVal, HandleValue rhs);
};

} // namespace jit
} // namespace js

@@ -3975,15 +3975,11 @@ LIRGenerator::visitSetPropertyCache(MSetPropertyCache* ins)
    // that calls this script recursively.
    gen->setPerformsCall();

    // If the index might be an integer, we need some extra temp registers for
    // the dense and typed array element stubs.
    LDefinition tempToUnboxIndex = LDefinition::BogusTemp();
    // We need a double/float32 temp register for typed array stubs if this is
    // a SETELEM.
    LDefinition tempD = LDefinition::BogusTemp();
    LDefinition tempF32 = LDefinition::BogusTemp();

    if (id->mightBeType(MIRType::Int32)) {
        if (id->type() != MIRType::Int32)
            tempToUnboxIndex = tempToUnbox();
        if (IsSetElemPC(ins->resumePoint()->pc())) {
            tempD = tempDouble();
            tempF32 = hasUnaliasedDouble() ? tempFloat32() : LDefinition::BogusTemp();
        }

@@ -3992,8 +3988,7 @@ LIRGenerator::visitSetPropertyCache(MSetPropertyCache* ins)
        new(alloc()) LSetPropertyCache(useRegister(ins->object()),
                                       useBoxOrTypedOrConstant(id, useConstId),
                                       useBoxOrTypedOrConstant(ins->value(), useConstValue),
                                       temp(),
                                       tempToUnboxIndex, tempD, tempF32);
                                       temp(), tempD, tempF32);
    add(lir, ins);
    assignSafepoint(lir, ins);
}

@@ -1031,6 +1031,19 @@ class CommonRegSet : public SpecializedRegSet<Accessors, Set>
#error "Bad architecture"
#endif
    }

    using Parent::addUnchecked;
    void addUnchecked(ValueOperand value) {
#if defined(JS_NUNBOX32)
        addUnchecked(value.payloadReg());
        addUnchecked(value.typeReg());
#elif defined(JS_PUNBOX64)
        addUnchecked(value.valueReg());
#else
#error "Bad architecture"
#endif
    }

    void add(TypedOrValueRegister reg) {
        if (reg.hasValue())
            add(reg.valueReg());

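The JS_NUNBOX32 / JS_PUNBOX64 split in the hunk above comes down to how many general-purpose registers a boxed Value occupies. A toy layout, purely illustrative and not SpiderMonkey's real Value representation (the struct name and fields are invented for this sketch):

// Why ValueOperand maps to two GPRs on 32-bit builds and one on 64-bit builds.
#include <cstdint>

#if defined(SKETCH_NUNBOX32)
struct SketchBoxedValue {
    uint32_t payload; // -> value.payloadReg()
    uint32_t type;    // -> value.typeReg()
};
#else
struct SketchBoxedValue {
    uint64_t bits;    // tag packed into the word -> value.valueReg()
};
#endif

static_assert(sizeof(SketchBoxedValue) == 8, "a boxed Value is 64 bits either way");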
@@ -414,9 +414,10 @@ class VFPRegister
      : kind(Double), code_(id), _isInvalid(false), _isMissing(false)
    { }
    bool operator==(const VFPRegister& other) const {
        MOZ_ASSERT(!isInvalid());
        MOZ_ASSERT(!other.isInvalid());
        return kind == other.kind && code_ == other.code_;
        return kind == other.kind && code_ == other.code_ && isInvalid() == other.isInvalid();
    }
    bool operator!=(const VFPRegister& other) const {
        return !operator==(other);
    }

    bool isSingle() const { return kind == Single; }

@@ -488,9 +489,6 @@ class VFPRegister
        return FloatRegisters::GetDoubleName(Encoding(code_));
        return FloatRegisters::GetSingleName(Encoding(code_));
    }
    bool operator != (const VFPRegister& other) const {
        return other.kind != kind || code_ != other.code_;
    }
    bool aliases(const VFPRegister& other) {
        if (kind == other.kind)
            return code_ == other.code_;

@@ -4841,8 +4841,6 @@ MacroAssembler::PushRegsInMask(LiveRegisterSet set)
void
MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest, Register scratch)
{
    MOZ_ASSERT(!set.has(scratch));

    int32_t diffF = set.fpus().getPushSizeInBytes();
    int32_t diffG = set.gprs().size() * sizeof(intptr_t);

@@ -7213,22 +7213,20 @@ class LCallDeleteElement : public LCallInstructionHelper<1, 2 * BOX_PIECES, 0>
};

// Patchable jump to stubs generated for a SetProperty cache.
class LSetPropertyCache : public LInstructionHelper<0, 1 + 2 * BOX_PIECES, 4>
class LSetPropertyCache : public LInstructionHelper<0, 1 + 2 * BOX_PIECES, 3>
{
  public:
    LIR_HEADER(SetPropertyCache)

    LSetPropertyCache(const LAllocation& object, const LBoxAllocation& id,
                      const LBoxAllocation& value, const LDefinition& temp,
                      const LDefinition& tempToUnboxIndex, const LDefinition& tempDouble,
                      const LDefinition& tempFloat32) {
                      const LDefinition& tempDouble, const LDefinition& tempFloat32) {
        setOperand(0, object);
        setBoxOperand(Id, id);
        setBoxOperand(Value, value);
        setTemp(0, temp);
        setTemp(1, tempToUnboxIndex);
        setTemp(2, tempDouble);
        setTemp(3, tempFloat32);
        setTemp(1, tempDouble);
        setTemp(2, tempFloat32);
    }

    static const size_t Id = 1;

@@ -7241,16 +7239,13 @@ class LSetPropertyCache : public LInstructionHelper<0, 1 + 2 * BOX_PIECES, 4>
    const LDefinition* temp() {
        return getTemp(0);
    }
    const LDefinition* tempToUnboxIndex() {
        return getTemp(1);
    }
    const LDefinition* tempDouble() {
        return getTemp(2);
        return getTemp(1);
    }
    const LDefinition* tempFloat32() {
        if (hasUnaliasedDouble())
            return getTemp(3);
            return getTemp(2);
        return getTemp(2);
        return getTemp(1);
    }
};

@@ -462,11 +462,8 @@ MacroAssembler::PushRegsInMask(LiveRegisterSet set)
}

void
MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest, Register scratch)
MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest, Register)
{
    // We don't use |scratch| here, but assert this for other platforms.
    MOZ_ASSERT(!set.has(scratch));

    FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
    unsigned numFpu = fpuSet.size();
    int32_t diffF = fpuSet.getPushSizeInBytes();

@@ -474,8 +471,6 @@ MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest, Register scra

    MOZ_ASSERT(dest.offset >= diffG + diffF);

    // On x86, always use push to push the integer registers, as it's fast
    // on modern hardware and it's a small instruction.
    for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
        diffG -= sizeof(intptr_t);
        dest.offset -= sizeof(intptr_t);