Mirror of https://github.com/mozilla/gecko-dev.git
Backed out changeset 5d72e00c604a (bug 1326067) for near-permaorange in wasm-06.js in Windows cgc builds
Parent: ab29b8925b
Commit: 2a6cfd0c07
@@ -18,8 +18,6 @@ using namespace js::jit;
 
 using mozilla::Maybe;
 
-class AutoStubFrame;
-
 // BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code.
 class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
 {
@@ -34,11 +32,6 @@ class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
 
     MOZ_MUST_USE bool callVM(MacroAssembler& masm, const VMFunction& fun);
 
-    MOZ_MUST_USE bool callTypeUpdateIC(AutoStubFrame& stubFrame, Register obj, ValueOperand val,
-                                       Register scratch, LiveGeneralRegisterSet saveRegs);
-
-    MOZ_MUST_USE bool emitStoreSlotShared(bool isFixed);
 
   public:
-    friend class AutoStubFrame;
 
@@ -72,8 +65,6 @@ class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
     CACHE_IR_SHARED_OPS(DEFINE_SHARED_OP)
 #undef DEFINE_SHARED_OP
 
-enum class CallCanGC { CanGC, CanNotGC };
-
 // Instructions that have to perform a callVM require a stub frame. Use
 // AutoStubFrame before allocating any registers, then call its enter() and
 // leave() methods to enter/leave the stub frame.
@@ -102,7 +93,7 @@ class MOZ_RAII AutoStubFrame
             tail.emplace(compiler.allocator, compiler.masm, ICTailCallReg);
     }
 
-    void enter(MacroAssembler& masm, Register scratch, CallCanGC canGC = CallCanGC::CanGC) {
+    void enter(MacroAssembler& masm, Register scratch) {
         if (compiler.engine_ == ICStubEngine::Baseline) {
             EmitBaselineEnterStubFrame(masm, scratch);
 #ifdef DEBUG
@@ -114,8 +105,7 @@ class MOZ_RAII AutoStubFrame
 
         MOZ_ASSERT(!compiler.inStubFrame_);
         compiler.inStubFrame_ = true;
-        if (canGC == CallCanGC::CanGC)
-            compiler.makesGCCalls_ = true;
+        compiler.makesGCCalls_ = true;
     }
     void leave(MacroAssembler& masm, bool calledIntoIon = false) {
         MOZ_ASSERT(compiler.inStubFrame_);
@@ -659,112 +649,6 @@ BaselineCacheIRCompiler::emitLoadEnvironmentDynamicSlotResult()
     return true;
 }
 
-bool
-BaselineCacheIRCompiler::callTypeUpdateIC(AutoStubFrame& stubFrame, Register obj, ValueOperand val,
-                                          Register scratch, LiveGeneralRegisterSet saveRegs)
-{
-    // R0 contains the value that needs to be typechecked.
-    MOZ_ASSERT(val == R0);
-    MOZ_ASSERT(scratch == R1.scratchReg());
-
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
-    static const bool CallClobbersTailReg = false;
-#else
-    static const bool CallClobbersTailReg = true;
-#endif
-
-    // Call the first type update stub.
-    if (CallClobbersTailReg)
-        masm.push(ICTailCallReg);
-    masm.push(ICStubReg);
-    masm.loadPtr(Address(ICStubReg, ICUpdatedStub::offsetOfFirstUpdateStub()),
-                 ICStubReg);
-    masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
-    masm.pop(ICStubReg);
-    if (CallClobbersTailReg)
-        masm.pop(ICTailCallReg);
-
-    // The update IC will store 0 or 1 in |scratch|, R1.scratchReg(), reflecting
-    // if the value in R0 type-checked properly or not.
-    Label done;
-    masm.branch32(Assembler::Equal, scratch, Imm32(1), &done);
-
-    stubFrame.enter(masm, scratch, CallCanGC::CanNotGC);
-
-    masm.PushRegsInMask(saveRegs);
-
-    masm.Push(val);
-    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
-    masm.Push(ICStubReg);
-
-    // Load previous frame pointer, push BaselineFrame*.
-    masm.loadPtr(Address(BaselineFrameReg, 0), scratch);
-    masm.pushBaselineFramePtr(scratch, scratch);
-
-    if (!callVM(masm, DoTypeUpdateFallbackInfo))
-        return false;
-
-    masm.PopRegsInMask(saveRegs);
-
-    stubFrame.leave(masm);
-
-    masm.bind(&done);
-    return true;
-}
-
-bool
-BaselineCacheIRCompiler::emitStoreSlotShared(bool isFixed)
-{
-    ObjOperandId objId = reader.objOperandId();
-    Address offsetAddr = stubAddress(reader.stubOffset());
-
-    // Allocate the fixed registers first. These need to be fixed for
-    // callTypeUpdateIC.
-    AutoStubFrame stubFrame(*this);
-    AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
-    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);
-
-    Register obj = allocator.useRegister(masm, objId);
-
-    LiveGeneralRegisterSet saveRegs;
-    saveRegs.add(obj);
-    saveRegs.add(val);
-    if (!callTypeUpdateIC(stubFrame, obj, val, scratch, saveRegs))
-        return false;
-
-    masm.load32(offsetAddr, scratch);
-
-    if (isFixed) {
-        BaseIndex slot(obj, scratch, TimesOne);
-        EmitPreBarrier(masm, slot, MIRType::Value);
-        masm.storeValue(val, slot);
-    } else {
-        // To avoid running out of registers on x86, use ICStubReg as scratch.
-        // We don't need it anymore.
-        Register slots = ICStubReg;
-        masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), slots);
-        BaseIndex slot(slots, scratch, TimesOne);
-        EmitPreBarrier(masm, slot, MIRType::Value);
-        masm.storeValue(val, slot);
-    }
-
-    if (cx_->gc.nursery.exists())
-        BaselineEmitPostWriteBarrierSlot(masm, obj, val, scratch, LiveGeneralRegisterSet(), cx_);
-    return true;
-}
-
-bool
-BaselineCacheIRCompiler::emitStoreFixedSlot()
-{
-    return emitStoreSlotShared(true);
-}
-
-bool
-BaselineCacheIRCompiler::emitStoreDynamicSlot()
-{
-    return emitStoreSlotShared(false);
-}
-
 bool
 BaselineCacheIRCompiler::emitTypeMonitorResult()
 {
@@ -864,7 +748,6 @@ BaselineCacheIRCompiler::init(CacheKind kind)
         allocator.initInputLocation(0, R0);
         break;
       case CacheKind::GetElem:
-      case CacheKind::SetProp:
         MOZ_ASSERT(numInputs == 2);
         allocator.initInputLocation(0, R0);
         allocator.initInputLocation(1, R1);
@@ -903,22 +786,9 @@ jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
     // unlimited number of stubs.
     MOZ_ASSERT(stub->numOptimizedStubs() < MaxOptimizedCacheIRStubs);
 
-    enum class CacheIRStubKind { Monitored, Updated };
-
-    uint32_t stubDataOffset;
-    CacheIRStubKind stubKind;
-    switch (kind) {
-      case CacheKind::GetProp:
-      case CacheKind::GetElem:
-      case CacheKind::GetName:
-        stubDataOffset = sizeof(ICCacheIR_Monitored);
-        stubKind = CacheIRStubKind::Monitored;
-        break;
-      case CacheKind::SetProp:
-        stubDataOffset = sizeof(ICCacheIR_Updated);
-        stubKind = CacheIRStubKind::Updated;
-        break;
-    }
+    MOZ_ASSERT(kind == CacheKind::GetProp || kind == CacheKind::GetElem ||
+               kind == CacheKind::GetName, "sizeof needs to change for SetProp!");
+    uint32_t stubDataOffset = sizeof(ICCacheIR_Monitored);
 
     JitCompartment* jitCompartment = cx->compartment()->jitCompartment();
 
@@ -952,34 +822,21 @@ jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
 
     MOZ_ASSERT(code);
     MOZ_ASSERT(stubInfo);
+    MOZ_ASSERT(stub->isMonitoredFallback());
     MOZ_ASSERT(stubInfo->stubDataSize() == writer.stubDataSize());
 
     // Ensure we don't attach duplicate stubs. This can happen if a stub failed
     // for some reason and the IR generator doesn't check for exactly the same
     // conditions.
     for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
-        switch (stubKind) {
-          case CacheIRStubKind::Monitored: {
-            if (!iter->isCacheIR_Monitored())
-                continue;
-            auto otherStub = iter->toCacheIR_Monitored();
-            if (otherStub->stubInfo() != stubInfo)
-                continue;
-            if (!writer.stubDataEquals(otherStub->stubDataStart()))
-                continue;
-            break;
-          }
-          case CacheIRStubKind::Updated: {
-            if (!iter->isCacheIR_Updated())
-                continue;
-            auto otherStub = iter->toCacheIR_Updated();
-            if (otherStub->stubInfo() != stubInfo)
-                continue;
-            if (!writer.stubDataEquals(otherStub->stubDataStart()))
-                continue;
-            break;
-          }
-        }
+        if (!iter->isCacheIR_Monitored())
+            continue;
+
+        ICCacheIR_Monitored* otherStub = iter->toCacheIR_Monitored();
+        if (otherStub->stubInfo() != stubInfo)
+            continue;
+        if (!writer.stubDataEquals(otherStub->stubDataStart()))
+            continue;
 
         // We found a stub that's exactly the same as the stub we're about to
        // attach. Just return nullptr, the caller should do nothing in this
@@ -997,28 +854,12 @@ jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
     if (!newStubMem)
         return nullptr;
 
-    switch (stubKind) {
-      case CacheIRStubKind::Monitored: {
-        ICStub* monitorStub =
-            stub->toMonitoredFallbackStub()->fallbackMonitorStub()->firstMonitorStub();
-        auto newStub = new(newStubMem) ICCacheIR_Monitored(code, monitorStub, stubInfo);
-        writer.copyStubData(newStub->stubDataStart());
-        stub->addNewStub(newStub);
-        return newStub;
-      }
-      case CacheIRStubKind::Updated: {
-        auto newStub = new(newStubMem) ICCacheIR_Updated(code, stubInfo);
-        if (!newStub->initUpdatingChain(cx, stubSpace)) {
-            cx->recoverFromOutOfMemory();
-            return nullptr;
-        }
-        writer.copyStubData(newStub->stubDataStart());
-        stub->addNewStub(newStub);
-        return newStub;
-      }
-    }
+    ICStub* monitorStub = stub->toMonitoredFallbackStub()->fallbackMonitorStub()->firstMonitorStub();
+    auto newStub = new(newStubMem) ICCacheIR_Monitored(code, monitorStub, stubInfo);
 
-    MOZ_CRASH("Invalid kind");
+    writer.copyStubData(newStub->stubDataStart());
+    stub->addNewStub(newStub);
+    return newStub;
 }
 
 uint8_t*
@@ -1027,12 +868,6 @@ ICCacheIR_Monitored::stubDataStart()
     return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
 }
 
-uint8_t*
-ICCacheIR_Updated::stubDataStart()
-{
-    return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
-}
-
 /* static */ ICCacheIR_Monitored*
 ICCacheIR_Monitored::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
                            ICCacheIR_Monitored& other)
@@ -280,18 +280,14 @@ DoTypeUpdateFallback(JSContext* cx, BaselineFrame* frame, ICUpdatedStub* stub, H
     RootedObject obj(cx, &objval.toObject());
     RootedId id(cx);
 
-    switch (stub->kind()) {
-      case ICStub::CacheIR_Updated:
-        id = stub->toCacheIR_Updated()->updateStubId();
-        MOZ_ASSERT(id != JSID_EMPTY);
-        AddTypePropertyId(cx, obj, id, value);
-        break;
+    switch(stub->kind()) {
+      case ICStub::SetElem_DenseOrUnboxedArray:
       case ICStub::SetElem_DenseOrUnboxedArrayAdd: {
         id = JSID_VOID;
         AddTypePropertyId(cx, obj, id, value);
         break;
       }
       case ICStub::SetProp_Native:
       case ICStub::SetProp_NativeAdd:
       case ICStub::SetProp_Unboxed: {
         MOZ_ASSERT(obj->isNative() || obj->is<UnboxedPlainObject>());
@@ -741,6 +737,23 @@ LastPropertyForSetProp(JSObject* obj)
     return nullptr;
 }
 
+static bool
+IsCacheableSetPropWriteSlot(JSObject* obj, Shape* oldShape, Shape* propertyShape)
+{
+    // Object shape must not have changed during the property set.
+    if (LastPropertyForSetProp(obj) != oldShape)
+        return false;
+
+    if (!propertyShape->hasSlot() ||
+        !propertyShape->hasDefaultSetter() ||
+        !propertyShape->writable())
+    {
+        return false;
+    }
+
+    return true;
+}
+
 static bool
 IsCacheableSetPropAddSlot(JSContext* cx, JSObject* obj, Shape* oldShape,
                           jsid id, Shape* propertyShape, size_t* protoChainDepth)
@@ -1518,7 +1531,7 @@ ICSetElem_DenseOrUnboxedArray::Compiler::generateStubCode(MacroAssembler& masm)
         saveRegs.add(R0);
         saveRegs.addUnchecked(obj);
         saveRegs.add(ICStubReg);
-        BaselineEmitPostWriteBarrierSlot(masm, obj, R1, scratchReg, saveRegs, cx);
+        emitPostWriteBarrierSlot(masm, obj, R1, scratchReg, saveRegs);
 
         masm.Pop(R1);
     }
@@ -1726,7 +1739,7 @@ ICSetElemDenseOrUnboxedArrayAddCompiler::generateStubCode(MacroAssembler& masm)
         saveRegs.add(R0);
         saveRegs.addUnchecked(obj);
         saveRegs.add(ICStubReg);
-        BaselineEmitPostWriteBarrierSlot(masm, obj, R1, scratchReg, saveRegs, cx);
+        emitPostWriteBarrierSlot(masm, obj, R1, scratchReg, saveRegs);
 
         masm.Pop(R1);
     }
@@ -2623,6 +2636,41 @@ TryAttachSetValuePropStub(JSContext* cx, HandleScript script, jsbytecode* pc, IC
         return true;
     }
 
+    if (IsCacheableSetPropWriteSlot(obj, oldShape, shape)) {
+        // For some property writes, such as the initial overwrite of global
+        // properties, TI will not mark the property as having been
+        // overwritten. Don't attach a stub in this case, so that we don't
+        // execute another write to the property without TI seeing that write.
+        EnsureTrackPropertyTypes(cx, obj, id);
+        if (!PropertyHasBeenMarkedNonConstant(obj, id)) {
+            *attached = true;
+            return true;
+        }
+
+        bool isFixedSlot;
+        uint32_t offset;
+        GetFixedOrDynamicSlotOffset(shape, &isFixedSlot, &offset);
+
+        JitSpew(JitSpew_BaselineIC, "  Generating SetProp(NativeObject.PROP) stub");
+        MOZ_ASSERT(LastPropertyForSetProp(obj) == oldShape,
+                   "Should this really be a SetPropWriteSlot?");
+        ICSetProp_Native::Compiler compiler(cx, obj, isFixedSlot, offset);
+        ICSetProp_Native* newStub = compiler.getStub(compiler.getStubSpace(script));
+        if (!newStub)
+            return false;
+        if (!newStub->addUpdateStubForValue(cx, script, obj, id, rhs))
+            return false;
+
+        if (IsPreliminaryObject(obj))
+            newStub->notePreliminaryObject();
+        else
+            StripPreliminaryObjectStubs(cx, stub);
+
+        stub->addNewStub(newStub);
+        *attached = true;
+        return true;
+    }
+
     return true;
 }
 
@@ -2887,30 +2935,6 @@ DoSetPropFallback(JSContext* cx, BaselineFrame* frame, ICSetProp_Fallback* stub_
         return false;
     }
 
-    if (!attached &&
-        stub->numOptimizedStubs() < ICSetProp_Fallback::MAX_OPTIMIZED_STUBS &&
-        !JitOptions.disableCacheIR)
-    {
-        RootedValue idVal(cx, StringValue(name));
-        SetPropIRGenerator gen(cx, pc, CacheKind::SetProp, &isTemporarilyUnoptimizable,
-                               lhs, idVal, rhs);
-        if (gen.tryAttachStub()) {
-            ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
-                                                        ICStubEngine::Baseline, frame->script(), stub);
-            if (newStub) {
-                JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
-                attached = true;
-
-                newStub->toCacheIR_Updated()->updateStubId() = gen.updateStubId();
-
-                if (gen.shouldNotePreliminaryObjectStub())
-                    newStub->toCacheIR_Updated()->notePreliminaryObject();
-                else if (gen.shouldUnlinkPreliminaryObjectStubs())
-                    StripPreliminaryObjectStubs(cx, stub);
-            }
-        }
-    }
-
     if (op == JSOP_INITPROP ||
         op == JSOP_INITLOCKEDPROP ||
         op == JSOP_INITHIDDENPROP)
@@ -3083,6 +3107,77 @@ GuardGroupAndShapeMaybeUnboxedExpando(MacroAssembler& masm, JSObject* obj,
     }
 }
 
+bool
+ICSetProp_Native::Compiler::generateStubCode(MacroAssembler& masm)
+{
+    MOZ_ASSERT(engine_ == Engine::Baseline);
+
+    Label failure;
+
+    // Guard input is an object.
+    masm.branchTestObject(Assembler::NotEqual, R0, &failure);
+    Register objReg = masm.extractObject(R0, ExtractTemp0);
+
+    AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
+    Register scratch = regs.takeAny();
+
+    GuardGroupAndShapeMaybeUnboxedExpando(masm, obj_, objReg, scratch,
+                                          ICSetProp_Native::offsetOfGroup(),
+                                          ICSetProp_Native::offsetOfShape(),
+                                          &failure);
+
+    // Stow both R0 and R1 (object and value).
+    EmitStowICValues(masm, 2);
+
+    // Type update stub expects the value to check in R0.
+    masm.moveValue(R1, R0);
+
+    // Call the type-update stub.
+    if (!callTypeUpdateIC(masm, sizeof(Value)))
+        return false;
+
+    // Unstow R0 and R1 (object and key)
+    EmitUnstowICValues(masm, 2);
+
+    regs.add(R0);
+    regs.takeUnchecked(objReg);
+
+    Register holderReg;
+    if (obj_->is<UnboxedPlainObject>()) {
+        // We are loading off the expando object, so use that for the holder.
+        holderReg = regs.takeAny();
+        masm.loadPtr(Address(objReg, UnboxedPlainObject::offsetOfExpando()), holderReg);
+        if (!isFixedSlot_)
+            masm.loadPtr(Address(holderReg, NativeObject::offsetOfSlots()), holderReg);
+    } else if (isFixedSlot_) {
+        holderReg = objReg;
+    } else {
+        holderReg = regs.takeAny();
+        masm.loadPtr(Address(objReg, NativeObject::offsetOfSlots()), holderReg);
+    }
+
+    // Perform the store.
+    masm.load32(Address(ICStubReg, ICSetProp_Native::offsetOfOffset()), scratch);
+    EmitPreBarrier(masm, BaseIndex(holderReg, scratch, TimesOne), MIRType::Value);
+    masm.storeValue(R1, BaseIndex(holderReg, scratch, TimesOne));
+    if (holderReg != objReg)
+        regs.add(holderReg);
+    if (cx->runtime()->gc.nursery.exists()) {
+        Register scr = regs.takeAny();
+        LiveGeneralRegisterSet saveRegs;
+        saveRegs.add(R1);
+        emitPostWriteBarrierSlot(masm, objReg, R1, scr, saveRegs);
+        regs.add(scr);
+    }
+
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
 ICUpdatedStub*
 ICSetPropNativeAddCompiler::getStub(ICStubSpace* space)
 {
@@ -3228,7 +3323,7 @@ ICSetPropNativeAddCompiler::generateStubCode(MacroAssembler& masm)
         Register scr = regs.takeAny();
         LiveGeneralRegisterSet saveRegs;
         saveRegs.add(R1);
-        BaselineEmitPostWriteBarrierSlot(masm, objReg, R1, scr, saveRegs, cx);
+        emitPostWriteBarrierSlot(masm, objReg, R1, scr, saveRegs);
     }
 
     EmitReturnFromIC(masm);
@@ -3285,7 +3380,7 @@ ICSetProp_Unboxed::Compiler::generateStubCode(MacroAssembler& masm)
         saveRegs.add(R1);
         saveRegs.addUnchecked(object);
         saveRegs.add(ICStubReg);
-        BaselineEmitPostWriteBarrierSlot(masm, object, R1, scratch, saveRegs, cx);
+        emitPostWriteBarrierSlot(masm, object, R1, scratch, saveRegs);
     }
 
     // Compute the address being written to.
@@ -3352,7 +3447,7 @@ ICSetProp_TypedObject::Compiler::generateStubCode(MacroAssembler& masm)
         saveRegs.add(R1);
         saveRegs.addUnchecked(object);
         saveRegs.add(ICStubReg);
-        BaselineEmitPostWriteBarrierSlot(masm, object, R1, scratch, saveRegs, cx);
+        emitPostWriteBarrierSlot(masm, object, R1, scratch, saveRegs);
     }
 
     // Save the rhs on the stack so we can get a second scratch register.
@@ -6587,6 +6682,28 @@ ICInstanceOf_Function::ICInstanceOf_Function(JitCode* stubCode, Shape* shape,
     slot_(slot)
 { }
 
+ICSetProp_Native::ICSetProp_Native(JitCode* stubCode, ObjectGroup* group, Shape* shape,
+                                   uint32_t offset)
+  : ICUpdatedStub(SetProp_Native, stubCode),
+    group_(group),
+    shape_(shape),
+    offset_(offset)
+{ }
+
+ICSetProp_Native*
+ICSetProp_Native::Compiler::getStub(ICStubSpace* space)
+{
+    RootedObjectGroup group(cx, JSObject::getGroup(cx, obj_));
+    if (!group)
+        return nullptr;
+
+    RootedShape shape(cx, LastPropertyForSetProp(obj_));
+    ICSetProp_Native* stub = newStub<ICSetProp_Native>(space, getStubCode(), group, shape, offset_);
+    if (!stub || !stub->initUpdatingChain(cx, space))
+        return nullptr;
+    return stub;
+}
+
 ICSetProp_NativeAdd::ICSetProp_NativeAdd(JitCode* stubCode, ObjectGroup* group,
                                          size_t protoChainDepth,
                                          Shape* newShape,
@@ -1111,6 +1111,69 @@ class ICSetProp_Fallback : public ICFallbackStub
     };
 };
 
+// Optimized SETPROP/SETGNAME/SETNAME stub.
+class ICSetProp_Native : public ICUpdatedStub
+{
+    friend class ICStubSpace;
+
+  protected: // Protected to silence Clang warning.
+    GCPtrObjectGroup group_;
+    GCPtrShape shape_;
+    uint32_t offset_;
+
+    ICSetProp_Native(JitCode* stubCode, ObjectGroup* group, Shape* shape, uint32_t offset);
+
+  public:
+    GCPtrObjectGroup& group() {
+        return group_;
+    }
+    GCPtrShape& shape() {
+        return shape_;
+    }
+    void notePreliminaryObject() {
+        extra_ = 1;
+    }
+    bool hasPreliminaryObject() const {
+        return extra_;
+    }
+    static size_t offsetOfGroup() {
+        return offsetof(ICSetProp_Native, group_);
+    }
+    static size_t offsetOfShape() {
+        return offsetof(ICSetProp_Native, shape_);
+    }
+    static size_t offsetOfOffset() {
+        return offsetof(ICSetProp_Native, offset_);
+    }
+
+    class Compiler : public ICStubCompiler {
+        RootedObject obj_;
+        bool isFixedSlot_;
+        uint32_t offset_;
+
+      protected:
+        virtual int32_t getKey() const {
+            return static_cast<int32_t>(engine_) |
+                   (static_cast<int32_t>(kind) << 1) |
+                   (static_cast<int32_t>(isFixedSlot_) << 17) |
+                   (static_cast<int32_t>(obj_->is<UnboxedPlainObject>()) << 18);
+        }
+
+        MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+      public:
+        Compiler(JSContext* cx, HandleObject obj, bool isFixedSlot, uint32_t offset)
+          : ICStubCompiler(cx, ICStub::SetProp_Native, Engine::Baseline),
+            obj_(cx, obj),
+            isFixedSlot_(isFixedSlot),
+            offset_(offset)
+        {}
+
+        ICSetProp_Native* getStub(ICStubSpace* space);
+    };
+};
+
+
 template <size_t ProtoChainDepth> class ICSetProp_NativeAddImpl;
 
 class ICSetProp_NativeAdd : public ICUpdatedStub
@@ -69,6 +69,7 @@ namespace jit {
     _(GetIntrinsic_Constant) \
     \
     _(SetProp_Fallback) \
+    _(SetProp_Native) \
     _(SetProp_NativeAdd) \
     _(SetProp_Unboxed) \
     _(SetProp_TypedObject) \
@@ -171,49 +171,6 @@ GetCacheIRReceiverForUnboxedProperty(ICCacheIR_Monitored* stub, ReceiverGuard* r
     return reader.matchOp(CacheOp::LoadUnboxedPropertyResult, objId);
 }
 
-static bool
-GetCacheIRReceiverForNativeSetSlot(ICCacheIR_Updated* stub, ReceiverGuard* receiver)
-{
-    // We match either:
-    //
-    //   GuardIsObject 0
-    //   GuardGroup 0
-    //   GuardShape 0
-    //   StoreFixedSlot 0 or StoreDynamicSlot 0
-    //
-    // or
-    //
-    //   GuardIsObject 0
-    //   GuardGroup 0
-    //   1: GuardAndLoadUnboxedExpando 0
-    //   GuardShape 1
-    //   StoreFixedSlot 1 or StoreDynamicSlot 1
-
-    *receiver = ReceiverGuard();
-    CacheIRReader reader(stub->stubInfo());
-
-    ObjOperandId objId = ObjOperandId(0);
-    if (!reader.matchOp(CacheOp::GuardIsObject, objId))
-        return false;
-
-    if (!reader.matchOp(CacheOp::GuardGroup, objId))
-        return false;
-    ObjectGroup* group = stub->stubInfo()->getStubField<ObjectGroup*>(stub, reader.stubOffset());
-
-    if (reader.matchOp(CacheOp::GuardAndLoadUnboxedExpando, objId))
-        objId = reader.objOperandId();
-
-    if (!reader.matchOp(CacheOp::GuardShape, objId))
-        return false;
-    Shape* shape = stub->stubInfo()->getStubField<Shape*>(stub, reader.stubOffset());
-
-    if (!reader.matchOpEither(CacheOp::StoreFixedSlot, CacheOp::StoreDynamicSlot))
-        return false;
-
-    *receiver = ReceiverGuard(group, shape);
-    return true;
-}
-
 bool
 BaselineInspector::maybeInfoForPropertyOp(jsbytecode* pc, ReceiverVector& receivers,
                                           ObjectGroupVector& convertUnboxedGroups)
@@ -242,11 +199,9 @@ BaselineInspector::maybeInfoForPropertyOp(jsbytecode* pc, ReceiverVector& receiv
                 receivers.clear();
                 return true;
             }
-        } else if (stub->isCacheIR_Updated()) {
-            if (!GetCacheIRReceiverForNativeSetSlot(stub->toCacheIR_Updated(), &receiver)) {
-                receivers.clear();
-                return true;
-            }
+        } else if (stub->isSetProp_Native()) {
+            receiver = ReceiverGuard(stub->toSetProp_Native()->group(),
+                                     stub->toSetProp_Native()->shape());
         } else if (stub->isSetProp_Unboxed()) {
             receiver = ReceiverGuard(stub->toSetProp_Unboxed()->group(), nullptr);
         } else {
@@ -1523,141 +1523,3 @@ IRGenerator::maybeGuardInt32Index(const Value& index, ValOperandId indexId,
 
     return false;
 }
-
-SetPropIRGenerator::SetPropIRGenerator(JSContext* cx, jsbytecode* pc, CacheKind cacheKind,
-                                       bool* isTemporarilyUnoptimizable, HandleValue lhsVal,
-                                       HandleValue idVal, HandleValue rhsVal)
-  : IRGenerator(cx, pc, cacheKind),
-    lhsVal_(lhsVal),
-    idVal_(idVal),
-    rhsVal_(rhsVal),
-    isTemporarilyUnoptimizable_(isTemporarilyUnoptimizable),
-    preliminaryObjectAction_(PreliminaryObjectAction::None),
-    updateStubId_(cx, JSID_EMPTY),
-    needUpdateStub_(false)
-{}
-
-bool
-SetPropIRGenerator::tryAttachStub()
-{
-    AutoAssertNoPendingException aanpe(cx_);
-
-    ValOperandId lhsValId(writer.setInputOperandId(0));
-    ValOperandId rhsValId(writer.setInputOperandId(1));
-
-    RootedId id(cx_);
-    bool nameOrSymbol;
-    if (!ValueToNameOrSymbolId(cx_, idVal_, &id, &nameOrSymbol)) {
-        cx_->clearPendingException();
-        return false;
-    }
-
-    if (lhsVal_.isObject()) {
-        RootedObject obj(cx_, &lhsVal_.toObject());
-        if (obj->watched())
-            return false;
-
-        ObjOperandId objId = writer.guardIsObject(lhsValId);
-        if (nameOrSymbol) {
-            if (tryAttachNativeSetSlot(obj, objId, id, rhsValId))
-                return true;
-            if (tryAttachUnboxedExpandoSetSlot(obj, objId, id, rhsValId))
-                return true;
-        }
-        return false;
-    }
-
-    return false;
-}
-
-static void
-EmitStoreSlotAndReturn(CacheIRWriter& writer, ObjOperandId objId, NativeObject* nobj, Shape* shape,
-                       ValOperandId rhsId)
-{
-    if (nobj->isFixedSlot(shape->slot())) {
-        size_t offset = NativeObject::getFixedSlotOffset(shape->slot());
-        writer.storeFixedSlot(objId, offset, rhsId);
-    } else {
-        size_t offset = nobj->dynamicSlotIndex(shape->slot()) * sizeof(Value);
-        writer.storeDynamicSlot(objId, offset, rhsId);
-    }
-    writer.returnFromIC();
-}
-
-static Shape*
-LookupShapeForSetSlot(NativeObject* obj, jsid id)
-{
-    Shape* shape = obj->lookupPure(id);
-    if (shape && shape->hasSlot() && shape->hasDefaultSetter() && shape->writable())
-        return shape;
-    return nullptr;
-}
-
-bool
-SetPropIRGenerator::tryAttachNativeSetSlot(HandleObject obj, ObjOperandId objId, HandleId id,
-                                           ValOperandId rhsId)
-{
-    if (!obj->isNative())
-        return false;
-
-    RootedShape propShape(cx_, LookupShapeForSetSlot(&obj->as<NativeObject>(), id));
-    if (!propShape)
-        return false;
-
-    RootedObjectGroup group(cx_, JSObject::getGroup(cx_, obj));
-    if (!group) {
-        cx_->recoverFromOutOfMemory();
-        return false;
-    }
-
-    // For some property writes, such as the initial overwrite of global
-    // properties, TI will not mark the property as having been
-    // overwritten. Don't attach a stub in this case, so that we don't
-    // execute another write to the property without TI seeing that write.
-    EnsureTrackPropertyTypes(cx_, obj, id);
-    if (!PropertyHasBeenMarkedNonConstant(obj, id)) {
-        *isTemporarilyUnoptimizable_ = true;
-        return false;
-    }
-
-    // For Baseline, we have to guard on both the shape and group, because the
-    // type update IC applies to a single group. When we port the Ion IC, we can
-    // do a bit better and avoid the group guard if we don't have to guard on
-    // the property types.
-    NativeObject* nobj = &obj->as<NativeObject>();
-    writer.guardGroup(objId, nobj->group());
-    writer.guardShape(objId, nobj->lastProperty());
-
-    if (IsPreliminaryObject(obj))
-        preliminaryObjectAction_ = PreliminaryObjectAction::NotePreliminary;
-    else
-        preliminaryObjectAction_ = PreliminaryObjectAction::Unlink;
-
-    setUpdateStubInfo(id);
-    EmitStoreSlotAndReturn(writer, objId, nobj, propShape, rhsId);
-    return true;
-}
-
-bool
-SetPropIRGenerator::tryAttachUnboxedExpandoSetSlot(HandleObject obj, ObjOperandId objId,
-                                                   HandleId id, ValOperandId rhsId)
-{
-    if (!obj->is<UnboxedPlainObject>())
-        return false;
-
-    UnboxedExpandoObject* expando = obj->as<UnboxedPlainObject>().maybeExpando();
-    if (!expando)
-        return false;
-
-    Shape* propShape = LookupShapeForSetSlot(expando, id);
-    if (!propShape)
-        return false;
-
-    writer.guardGroup(objId, obj->group());
-    ObjOperandId expandoId = writer.guardAndLoadUnboxedExpando(objId);
-    writer.guardShape(expandoId, expando->lastProperty());
-
-    setUpdateStubInfo(id);
-    EmitStoreSlotAndReturn(writer, expandoId, expando, propShape, rhsId);
-    return true;
-}
@@ -134,7 +134,6 @@ enum class CacheKind : uint8_t
     GetProp,
     GetElem,
     GetName,
-    SetProp,
 };
 
 #define CACHE_IR_OPS(_) \
@@ -169,9 +168,6 @@ enum class CacheKind : uint8_t
     _(LoadDOMExpandoValueIgnoreGeneration)\
     _(GuardDOMExpandoMissingOrGuardShape) \
     \
-    _(StoreFixedSlot) \
-    _(StoreDynamicSlot) \
-    \
     /* The *Result ops load a value into the cache's result register. */ \
     _(LoadFixedSlotResult) \
     _(LoadDynamicSlotResult) \
@@ -551,17 +547,6 @@ class MOZ_RAII CacheIRWriter : public JS::CustomAutoRooter
         return res;
     }
 
-    void storeFixedSlot(ObjOperandId obj, size_t offset, ValOperandId rhs) {
-        writeOpWithOperandId(CacheOp::StoreFixedSlot, obj);
-        addStubField(offset, StubField::Type::RawWord);
-        writeOperandId(rhs);
-    }
-    void storeDynamicSlot(ObjOperandId obj, size_t offset, ValOperandId rhs) {
-        writeOpWithOperandId(CacheOp::StoreDynamicSlot, obj);
-        addStubField(offset, StubField::Type::RawWord);
-        writeOperandId(rhs);
-    }
-
     void loadUndefinedResult() {
         writeOp(CacheOp::LoadUndefinedResult);
     }
@@ -839,51 +824,6 @@ class MOZ_RAII GetNameIRGenerator : public IRGenerator
     bool tryAttachStub();
 };
 
-// SetPropIRGenerator generates CacheIR for a SetProp IC.
-class MOZ_RAII SetPropIRGenerator : public IRGenerator
-{
-    HandleValue lhsVal_;
-    HandleValue idVal_;
-    HandleValue rhsVal_;
-    bool* isTemporarilyUnoptimizable_;
-
-    enum class PreliminaryObjectAction { None, Unlink, NotePreliminary };
-    PreliminaryObjectAction preliminaryObjectAction_;
-
-    // If Baseline needs an update stub, this contains information to create it.
-    RootedId updateStubId_;
-    bool needUpdateStub_;
-
-    void setUpdateStubInfo(jsid id) {
-        MOZ_ASSERT(!needUpdateStub_);
-        needUpdateStub_ = true;
-        updateStubId_ = id;
-    }
-
-    bool tryAttachNativeSetSlot(HandleObject obj, ObjOperandId objId, HandleId id,
-                                ValOperandId rhsId);
-    bool tryAttachUnboxedExpandoSetSlot(HandleObject obj, ObjOperandId objId, HandleId id,
-                                        ValOperandId rhsId);
-
-  public:
-    SetPropIRGenerator(JSContext* cx, jsbytecode* pc, CacheKind cacheKind,
-                       bool* isTemporarilyUnoptimizable, HandleValue lhsVal, HandleValue idVal,
-                       HandleValue rhsVal);
-
-    bool tryAttachStub();
-
-    bool shouldUnlinkPreliminaryObjectStubs() const {
-        return preliminaryObjectAction_ == PreliminaryObjectAction::Unlink;
-    }
-    bool shouldNotePreliminaryObjectStub() const {
-        return preliminaryObjectAction_ == PreliminaryObjectAction::NotePreliminary;
-    }
-    jsid updateStubId() const {
-        MOZ_ASSERT(needUpdateStub_);
-        return updateStubId_;
-    }
-};
-
 } // namespace jit
 } // namespace js
 
@@ -61,42 +61,6 @@ CacheRegisterAllocator::useValueRegister(MacroAssembler& masm, ValOperandId op)
     MOZ_CRASH();
 }
 
-ValueOperand
-CacheRegisterAllocator::useFixedValueRegister(MacroAssembler& masm, ValOperandId valId,
-                                              ValueOperand reg)
-{
-    allocateFixedValueRegister(masm, reg);
-
-    OperandLocation& loc = operandLocations_[valId.id()];
-    switch (loc.kind()) {
-      case OperandLocation::ValueReg:
-        masm.moveValue(loc.valueReg(), reg);
-        MOZ_ASSERT(!currentOpRegs_.aliases(loc.valueReg()), "Register shouldn't be in use");
-        availableRegs_.add(loc.valueReg());
-        break;
-      case OperandLocation::ValueStack:
-        popValue(masm, &loc, reg);
-        break;
-      case OperandLocation::Constant:
-        masm.moveValue(loc.constant(), reg);
-        break;
-      case OperandLocation::PayloadReg:
-        masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
-        MOZ_ASSERT(!currentOpRegs_.has(loc.payloadReg()), "Register shouldn't be in use");
-        availableRegs_.add(loc.payloadReg());
-        break;
-      case OperandLocation::PayloadStack:
-        popPayload(masm, &loc, reg.scratchReg());
-        masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
-        break;
-      case OperandLocation::Uninitialized:
-        MOZ_CRASH();
-    }
-
-    loc.setValueReg(reg);
-    return reg;
-}
-
 Register
 CacheRegisterAllocator::useRegister(MacroAssembler& masm, TypedOperandId typedId)
 {
@@ -339,7 +339,6 @@ class MOZ_RAII CacheRegisterAllocator
     // Returns the register for the given operand. If the operand is currently
     // not in a register, it will load it into one.
     ValueOperand useValueRegister(MacroAssembler& masm, ValOperandId val);
-    ValueOperand useFixedValueRegister(MacroAssembler& masm, ValOperandId valId, ValueOperand reg);
     Register useRegister(MacroAssembler& masm, TypedOperandId typedId);
 
     // Allocates an output register for the given operand.
@@ -257,7 +257,6 @@ CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool)
         return;
       }
       case CacheKind::GetName:
-      case CacheKind::SetProp:
         MOZ_CRASH("Baseline-specific for now");
     }
     MOZ_CRASH();
@@ -820,18 +820,6 @@ IonCacheIRCompiler::emitLoadEnvironmentDynamicSlotResult()
     MOZ_CRASH("Baseline-specific op");
 }
 
-bool
-IonCacheIRCompiler::emitStoreFixedSlot()
-{
-    MOZ_CRASH("Baseline-specific op");
-}
-
-bool
-IonCacheIRCompiler::emitStoreDynamicSlot()
-{
-    MOZ_CRASH("Baseline-specific op");
-}
-
 bool
 IonCacheIRCompiler::emitLoadTypedObjectResult()
 {
@@ -42,7 +42,6 @@ IonIC::scratchRegisterForEntryJump()
         return output.hasValue() ? output.valueReg().scratchReg() : output.typedReg().gpr();
       }
       case CacheKind::GetName:
-      case CacheKind::SetProp:
         MOZ_CRASH("Baseline-specific for now");
     }
 
@@ -488,7 +488,7 @@ class MacroAssembler : public MacroAssemblerSpecific
 
     CodeOffset call(Register reg) PER_SHARED_ARCH;
     CodeOffset call(Label* label) PER_SHARED_ARCH;
-    void call(const Address& addr) DEFINED_ON(x86_shared, arm, arm64);
+    void call(const Address& addr) DEFINED_ON(x86_shared);
     void call(ImmWord imm) PER_SHARED_ARCH;
     // Call a target native function, which is neither traceable nor movable.
     void call(ImmPtr imm) PER_SHARED_ARCH;
@@ -193,14 +193,9 @@ ICStub::NonCacheIRStubMakesGCCalls(Kind kind)
 bool
 ICStub::makesGCCalls() const
 {
-    switch (kind()) {
-      case CacheIR_Monitored:
+    if (isCacheIR_Monitored())
         return toCacheIR_Monitored()->stubInfo()->makesGCCalls();
-      case CacheIR_Updated:
-        return toCacheIR_Updated()->stubInfo()->makesGCCalls();
-      default:
-        return NonCacheIRStubMakesGCCalls(kind());
-    }
+    return NonCacheIRStubMakesGCCalls(kind());
 }
 
 void
@@ -356,6 +351,12 @@ ICStub::trace(JSTracer* trc)
         TraceEdge(trc, &constantStub->value(), "baseline-getintrinsic-constant-value");
         break;
       }
+      case ICStub::SetProp_Native: {
+        ICSetProp_Native* propStub = toSetProp_Native();
+        TraceEdge(trc, &propStub->shape(), "baseline-setpropnative-stub-shape");
+        TraceEdge(trc, &propStub->group(), "baseline-setpropnative-stub-group");
+        break;
+      }
       case ICStub::SetProp_NativeAdd: {
         ICSetProp_NativeAdd* propStub = toSetProp_NativeAdd();
         TraceEdge(trc, &propStub->group(), "baseline-setpropnativeadd-stub-group");
@@ -424,12 +425,6 @@ ICStub::trace(JSTracer* trc)
       case ICStub::CacheIR_Monitored:
         TraceCacheIRStub(trc, this, toCacheIR_Monitored()->stubInfo());
         break;
-      case ICStub::CacheIR_Updated: {
-        ICCacheIR_Updated* stub = toCacheIR_Updated();
-        TraceEdge(trc, &stub->updateStubId(), "baseline-updated-id");
-        TraceCacheIRStub(trc, this, stub->stubInfo());
-        break;
-      }
       default:
         break;
     }
@@ -736,9 +731,8 @@ ICStubCompiler::PushStubPayload(MacroAssembler& masm, Register scratch)
 }
 
 void
-BaselineEmitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, ValueOperand val,
-                                 Register scratch, LiveGeneralRegisterSet saveRegs,
-                                 JSRuntime* rt)
+ICStubCompiler::emitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, ValueOperand val,
+                                         Register scratch, LiveGeneralRegisterSet saveRegs)
 {
     Label skipBarrier;
     masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, &skipBarrier);
@@ -751,7 +745,7 @@ BaselineEmitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, ValueOperan
     saveRegs.set() = GeneralRegisterSet::Intersect(saveRegs.set(), GeneralRegisterSet::Volatile());
     masm.PushRegsInMask(saveRegs);
     masm.setupUnalignedABICall(scratch);
-    masm.movePtr(ImmPtr(rt), scratch);
+    masm.movePtr(ImmPtr(cx->runtime()), scratch);
     masm.passABIArg(scratch);
     masm.passABIArg(obj);
     masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostWriteBarrier));
@@ -1981,7 +1975,7 @@ StripPreliminaryObjectStubs(JSContext* cx, ICFallbackStub* stub)
     for (ICStubIterator iter = stub->beginChain(); !iter.atEnd(); iter++) {
         if (iter->isCacheIR_Monitored() && iter->toCacheIR_Monitored()->hasPreliminaryObject())
             iter.unlink(cx);
-        else if (iter->isCacheIR_Updated() && iter->toCacheIR_Updated()->hasPreliminaryObject())
+        else if (iter->isSetProp_Native() && iter->toSetProp_Native()->hasPreliminaryObject())
             iter.unlink(cx);
     }
 }
@@ -508,7 +508,7 @@ class ICStub
         return (k > INVALID) && (k < LIMIT);
     }
     static bool IsCacheIRKind(Kind k) {
-        return k == CacheIR_Monitored || k == CacheIR_Updated;
+        return k == CacheIR_Monitored;
     }
 
     static const char* KindString(Kind k) {
@@ -956,36 +956,6 @@ class ICUpdatedStub : public ICStub
     }
 };
 
-class ICCacheIR_Updated : public ICUpdatedStub
-{
-    const CacheIRStubInfo* stubInfo_;
-    GCPtrId updateStubId_;
-
-  public:
-    ICCacheIR_Updated(JitCode* stubCode, const CacheIRStubInfo* stubInfo)
-      : ICUpdatedStub(ICStub::CacheIR_Updated, stubCode),
-        stubInfo_(stubInfo),
-        updateStubId_(JSID_EMPTY)
-    {}
-
-    GCPtrId& updateStubId() {
-        return updateStubId_;
-    }
-
-    void notePreliminaryObject() {
-        extra_ = 1;
-    }
-    bool hasPreliminaryObject() const {
-        return extra_;
-    }
-
-    const CacheIRStubInfo* stubInfo() const {
-        return stubInfo_;
-    }
-
-    uint8_t* stubDataStart();
-};
-
 // Base class for stubcode compilers.
 class ICStubCompiler
 {
@@ -1092,6 +1062,9 @@ class ICStubCompiler
     }
 
   protected:
+    void emitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, ValueOperand val,
+                                  Register scratch, LiveGeneralRegisterSet saveRegs);
+
     template <typename T, typename... Args>
     T* newStub(Args&&... args) {
         return ICStub::New<T>(cx, mozilla::Forward<Args>(args)...);
@@ -1113,10 +1086,6 @@ class ICStubCompiler
     }
 };
 
-void BaselineEmitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, ValueOperand val,
-                                      Register scratch, LiveGeneralRegisterSet saveRegs,
-                                      JSRuntime* rt);
-
 class SharedStubInfo
 {
     BaselineFrame* maybeFrame_;
@@ -38,7 +38,6 @@ namespace jit {
     _(GetProp_Generic) \
     \
     _(CacheIR_Monitored) \
-    _(CacheIR_Updated) \
     \
 
 } // namespace jit
@@ -5021,13 +5021,6 @@ MacroAssembler::call(wasm::SymbolicAddress imm)
     call(CallReg);
 }
 
-void
-MacroAssembler::call(const Address& addr)
-{
-    loadPtr(addr, CallReg);
-    call(CallReg);
-}
-
 void
 MacroAssembler::call(JitCode* c)
 {
@@ -528,16 +528,6 @@ MacroAssembler::call(wasm::SymbolicAddress imm)
     call(scratch);
 }
 
-void
-MacroAssembler::call(const Address& addr)
-{
-    vixl::UseScratchRegisterScope temps(this);
-    const Register scratch = temps.AcquireX().asUnsized();
-    syncStackPtr();
-    loadPtr(addr, scratch);
-    call(scratch);
-}
-
 void
 MacroAssembler::call(JitCode* c)
 {