Mirror of https://github.com/mozilla/pjs.git
Bug 561359 - Predication of method optimization is too dynamic, causing "Assertion failure: &shape.methodObject() == &prev.toObject()". r=dvander.
--HG-- extra : rebase_source : 0a101adb7b237c56bde65ff7af3fdfcee0ad13df
Parent: 2d3dda6542
Commit: 81c001571c
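For orientation, the sketch below restates the second regression test added by this patch, with editorial comments. The interpretation in the comments is an assumption drawn from the commit message, not text from the patch; assertEq is the jit-test shell assertion these tests already use.

// Sketch of the failure mode this patch guards against (mirrors the new
// regression test below): the lambda stored in obj.m is a candidate for the
// "joined function" / method optimization, but eval(s) can introduce a new
// binding for 'a' in f's scope at runtime, so the optimization must not be
// predicated on assumptions that only hold for compile-and-go code.
function f(s) {
    var obj = {m: function () { return a; }};  // method-valued lambda capturing 'a'
    eval(s);                                   // may define 'a' inside f at runtime
    return obj;
}

var obj = f("var a = 'right';");
var a = 'wrong';
assertEq(obj.m(), 'right');  // obj.m must see f's eval-introduced 'a', not the global one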
@@ -0,0 +1,4 @@
+for (let z = 0; z < 2; z++) {
+    with ({x: function () {}})
+        f = x;
+}
@@ -0,0 +1,8 @@
+function f(s) {
+    var obj = {m: function () { return a; }};
+    eval(s);
+    return obj;
+}
+var obj = f("var a = 'right';");
+var a = 'wrong';
+assertEq(obj.m(), 'right');
@@ -234,8 +234,7 @@ JSCompartment::wrap(JSContext *cx, Value *vp)
     /*
      * Wrappers should really be parented to the wrapped parent of the wrapped
      * object, but in that case a wrapped global object would have a NULL
-     * parent without being a proper global object (JSCLASS_IS_GLOBAL). Instead
-     ,
+     * parent without being a proper global object (JSCLASS_IS_GLOBAL). Instead,
      * we parent all wrappers to the global object in their home compartment.
      * This loses us some transparency, and is generally very cheesy.
      */
@@ -5014,7 +5014,7 @@ BEGIN_CASE(JSOP_LAMBDA)
     if (fun->isNullClosure()) {
         parent = &regs.fp()->scopeChain();

         if (obj->getParent() == parent) {
             if (fun->joinable()) {
                 jsbytecode *pc2 = AdvanceOverBlockchainOp(regs.pc + JSOP_LAMBDA_LENGTH);
                 JSOp op2 = JSOp(*pc2);
@@ -5049,41 +5049,39 @@ BEGIN_CASE(JSOP_LAMBDA)
                     fun->setMethodAtom(script->getAtom(GET_FULL_INDEX(pc2 - regs.pc)));
                     break;
                 }
-            } else if (fun->joinable()) {
-                if (op2 == JSOP_CALL) {
+            } else if (op2 == JSOP_CALL) {
                 /*
                  * Array.prototype.sort and String.prototype.replace are
                  * optimized as if they are special form. We know that they
                  * won't leak the joined function object in obj, therefore
-                 * we don't need to clone that compiler- created function
+                 * we don't need to clone that compiler-created function
                  * object for identity/mutation reasons.
                  */
                 int iargc = GET_ARGC(pc2);

                 /*
                  * Note that we have not yet pushed obj as the final argument,
                  * so regs.sp[1 - (iargc + 2)], and not regs.sp[-(iargc + 2)],
                  * is the callee for this JSOP_CALL.
                  */
                 const Value &cref = regs.sp[1 - (iargc + 2)];
                 JSObject *callee;

                 if (IsFunctionObject(cref, &callee)) {
                     JSFunction *calleeFun = callee->getFunctionPrivate();
                     if (Native native = calleeFun->maybeNative()) {
                         if ((iargc == 1 && native == array_sort) ||
                             (iargc == 2 && native == str_replace)) {
                             break;
                         }
                     }
                 }
             } else if (op2 == JSOP_NULL) {
                 pc2 += JSOP_NULL_LENGTH;
                 op2 = JSOp(*pc2);

                 if (op2 == JSOP_CALL && GET_ARGC(pc2) == 0)
                     break;
-                }
             }
         } else {

@@ -311,7 +311,9 @@ bool
 JSFunctionBox::joinable() const
 {
     return function()->isNullClosure() &&
-           !(tcflags & (TCF_FUN_USES_ARGUMENTS | TCF_FUN_USES_OWN_NAME));
+           (tcflags & (TCF_FUN_USES_ARGUMENTS |
+                       TCF_FUN_USES_OWN_NAME |
+                       TCF_COMPILE_N_GO)) == TCF_COMPILE_N_GO;
 }

 bool
@@ -4434,8 +4436,6 @@ CloneParseTree(JSParseNode *opn, JSTreeContext *tc)

 #endif /* JS_HAS_DESTRUCTURING */

 extern const char js_with_statement_str[];

 static JSParseNode *
 ContainsStmt(JSParseNode *pn, TokenKind tt)
 {
@@ -154,7 +154,7 @@ using namespace js::tjit;

 /* Implement embedder-specific nanojit members. */

 /*
  * Nanojit requires infallible allocations most of the time. We satisfy this by
  * reserving some space in each allocator which is used as a fallback if
  * rt->calloc_() fails. Ideally this reserve space should be big enough to allow
@@ -2433,7 +2433,7 @@ TraceRecorder::TraceRecorder(JSContext* cx, TraceMonitor *tm,
     LIns* counterValue = w.ldiVolatile(counterPtr);
     LIns* test = w.ltiN(counterValue, LOOP_COUNT_MAX);
     LIns *branch = w.jfUnoptimizable(test);
     /*
      * stiVolatile() uses ACCSET_STORE_ANY; If LICM is implemented
      * (bug 545406) this counter will need its own region.
      */
@@ -2924,7 +2924,7 @@ ContainsUnrechableGCThingImpl(JSContext *cx, TreeFragment *f)
     if (f->visiting)
         return false;
     f->visiting = true;

     if (!f->code())
         return false;

@@ -2972,7 +2972,7 @@ ClearVisitingFlag(TreeFragment *f)
  * Recursively check if the fragment and its dependent and linked trees has
  * dead GC things. As the trees can point to each other we use the visiting
  * flag to detect already visited fragments. The flag is cleared after we
  * walked the whole graph in the separated ClearVisitingFlag function.
  */
 static bool
 ContainsUnrechableGCThing(JSContext *cx, TreeFragment *f)
@@ -2994,7 +2994,7 @@ TraceMonitor::sweep(JSContext *cx)
         recorderTree = recorder->getTree();
         shouldAbortRecording = HasUnreachableGCThings(cx, recorderTree);
     }

     for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
         TreeFragment** fragp = &vmfragments[i];
         while (TreeFragment* frag = *fragp) {
@@ -3008,7 +3008,7 @@ TraceMonitor::sweep(JSContext *cx)
                 fragp = &frag->next;
                 continue;
             }

             debug_only_printf(LC_TMTracer,
                               "TreeFragment peer %p has dead gc thing."
                               "Disconnecting tree %p with ip %p\n",
@@ -4381,7 +4381,7 @@ TraceRecorder::snapshot(ExitType exitType)
         } else if (pendingSpecializedNative &&
                    (pendingSpecializedNative->flags & JSTN_RETURN_NULLABLE_OBJ)) {
             typemap[stackSlots - 1] = JSVAL_TYPE_OBJORNULL;
         }
     }

     /* Now restore the the original pc (after which early returns are ok). */
     if (resumeAfter) {
@@ -4852,7 +4852,7 @@ class SlotMap : public SlotVisitorBase
             JS_ASSERT(info.type == JSVAL_TYPE_INT32 || info.type == JSVAL_TYPE_DOUBLE);
             /*
              * This should only happen if the slot has a trivial conversion, i.e.
              * IsPromotedInt32() is true. We check this.
              *
              * Note that getFromTracker() will return NULL if the slot was
              * never used, in which case we don't do the check. We could
@@ -4862,7 +4862,7 @@ class SlotMap : public SlotVisitorBase
              */
             LIns* ins = mRecorder.getFromTrackerImpl(info.vp);
             JS_ASSERT_IF(ins, IsPromotedInt32(ins));
         } else
 #endif
         if (info.lastCheck == TypeCheck_Demote) {
             JS_ASSERT(info.type == JSVAL_TYPE_INT32 || info.type == JSVAL_TYPE_DOUBLE);
@@ -4918,7 +4918,7 @@ class DefaultSlotMap : public SlotMap
     DefaultSlotMap(TraceRecorder& tr) : SlotMap(tr)
     {
     }

     virtual ~DefaultSlotMap()
     {
     }
@@ -6268,7 +6268,7 @@ IsEntryTypeCompatible(const Value &v, JSValueType type)

     } else if (v.isDouble()) {
         int32_t _;
         ok = (type == JSVAL_TYPE_DOUBLE) ||
              (type == JSVAL_TYPE_INT32 && JSDOUBLE_IS_INT32(v.toDouble(), &_));

     } else if (v.isObject()) {
@@ -6579,7 +6579,7 @@ TracerState::~TracerState()
         JS_ASSERT(JS_THREAD_DATA(cx)->onTraceCompartment == cx->compartment);
         JS_THREAD_DATA(cx)->onTraceCompartment = NULL;
     }

     traceMonitor->tracerState = prev;
     traceMonitor->tracecx = NULL;
 }
@@ -6738,7 +6738,7 @@ ExecuteTree(JSContext* cx, TraceMonitor* tm, TreeFragment* f,
                       CurrentLine(cx),
                       js_CodeName[fp->hasImacropc() ? *fp->imacropc() : *cx->regs().pc]);
 #endif

 #ifdef JS_METHODJIT
     if (cx->methodJitEnabled) {
         if (lr->exitType == LOOP_EXIT && f->iters < MIN_LOOP_ITERS
@@ -7331,7 +7331,7 @@ TraceRecorder::monitorRecording(JSOp op)
     JS_ASSERT(!addPropShapeBefore);

     JS_ASSERT(traceMonitor == cx->compartment->traceMonitor());

     TraceMonitor &localtm = *traceMonitor;
     debug_only_stmt( JSContext *localcx = cx; )
     assertInsideLoop();
@@ -7927,7 +7927,7 @@ PurgeScriptFragments(TraceMonitor* tm, JSScript* script)
                       "Purging fragments for JSScript %p.\n", (void*)script);

     /* A recorder script is being evaluated and can not be destroyed or GC-ed. */
     JS_ASSERT_IF(tm->recorder,
                  JS_UPTRDIFF(tm->recorder->getTree()->ip, script->code) >= script->length);

     for (LoopProfileMap::Enum e(*tm->loopProfiles); !e.empty(); e.popFront()) {
@@ -8422,13 +8422,13 @@ TraceRecorder::tryToDemote(LOpcode op, jsdouble v0, jsdouble v1, LIns* s0, LIns*
      * at record-time, and the oracle doesn't direct us otherwise, we
      * speculatively emit a demoted (integer) operation, betting that at
      * runtime we will get integer results again.
      *
      * We also have to protect against various edge cases. For example,
      * to protect against overflow we emit a guard that will inform the oracle
      * on overflow and cause a non-demoted trace to be attached that uses
      * floating-point math for this operation; the exception to this case is
      * if the operands guarantee that the result will be an integer (e.g.
      * z = d0 * d1 with 0 <= (d0|d1) <= 0xffff guarantees z <= fffe0001).
      */
     if (!oracle || oracle->isInstructionUndemotable(cx->regs().pc) ||
@@ -8499,7 +8499,7 @@ TraceRecorder::tryToDemote(LOpcode op, jsdouble v0, jsdouble v1, LIns* s0, LIns*
     }

     /*
      * A would-be negative zero result can only occur if we have
      * mul(0, -n) or mul(-n, 0), where n != 0. In particular, a multiply
      * where one operand is a positive immediate cannot result in negative
      * zero.
@@ -9733,7 +9733,7 @@ TraceRecorder::stobj_set_fslot(LIns *obj_ins, unsigned slot, const Value &v, LIn
 }

 void
 TraceRecorder::stobj_set_dslot(LIns *obj_ins, unsigned slot, LIns*& slots_ins,
                                const Value &v, LIns* v_ins)
 {
     if (!slots_ins)
@@ -10116,7 +10116,7 @@ TraceRecorder::getThis(LIns*& this_ins)
         return RECORD_CONTINUE;
     }

     JS_ASSERT(fp->callee().getGlobal() == globalObj);
     Value& thisv = fp->thisValue();

     if (thisv.isObject() || fp->fun()->inStrictMode()) {
@@ -10359,7 +10359,7 @@ TraceRecorder::putActivationObjects()
     if (nslots) {
         slots_ins = w.allocp(sizeof(Value) * nslots);
         for (int i = 0; i < nslots; ++i) {
             box_value_into(fp->slots()[i], get(&fp->slots()[i]),
                            AllocSlotsAddress(slots_ins, i));
         }
     } else {
@@ -12603,7 +12603,7 @@ GetPropertyByName(JSContext* cx, JSObject* obj, JSString** namep, Value* vp)
         SetBuiltinError(tm);
         return false;
     }

     return WasBuiltinSuccessful(tm);
 }
 JS_DEFINE_CALLINFO_4(static, BOOL_FAIL, GetPropertyByName, CONTEXT, OBJECT, STRINGPTR, VALUEPTR,
@@ -13085,7 +13085,7 @@ SetPropertyByName(JSContext* cx, JSObject* obj, JSString** namep, Value* vp, JSB
     }
     return WasBuiltinSuccessful(tm);
 }
 JS_DEFINE_CALLINFO_5(static, BOOL_FAIL, SetPropertyByName,
                      CONTEXT, OBJECT, STRINGPTR, VALUEPTR, BOOL,
                      0, ACCSET_STORE_ANY)

@@ -13225,7 +13225,7 @@ TraceRecorder::setElem(int lval_spindex, int idx_spindex, int v_spindex)
         JSObject* tarray = js::TypedArray::getTypedArray(obj);

         // The index was on the stack and is therefore a LIR float; force it to
         // be an integer.
         CHECK_STATUS_A(makeNumberInt32(idx_ins, &idx_ins));

         // Ensure idx >= 0 && idx < length (by using uint32)
@@ -13291,7 +13291,7 @@ TraceRecorder::setElem(int lval_spindex, int idx_spindex, int v_spindex)
             // Do nothing, this is already a float
             break;
           default:
             JS_NOT_REACHED("Unknown typed array type in tracer");
         }

         switch (js::TypedArray::getType(tarray)) {
@@ -13315,7 +13315,7 @@ TraceRecorder::setElem(int lval_spindex, int idx_spindex, int v_spindex)
             w.stdTypedArrayElement(typed_v_ins, data_ins, pidx_ins);
             break;
           default:
             JS_NOT_REACHED("Unknown typed array type in tracer");
         }
     } else if (idx.toInt32() < 0 || !obj->isDenseArray()) {
         CHECK_STATUS_A(initOrSetPropertyByIndex(obj_ins, idx_ins, &v,
|
@ -13326,7 +13326,7 @@ TraceRecorder::setElem(int lval_spindex, int idx_spindex, int v_spindex)
|
|||
VMSideExit* mismatchExit = snapshot(MISMATCH_EXIT);
|
||||
|
||||
// Make sure the array is actually dense.
|
||||
if (!obj->isDenseArray())
|
||||
if (!obj->isDenseArray())
|
||||
return ARECORD_STOP;
|
||||
guardDenseArray(obj_ins, branchExit);
|
||||
|
||||
|
@@ -15456,30 +15456,30 @@ TraceRecorder::record_JSOP_LAMBDA()
      * JSOP_INITMETHOD logic governing the early ARECORD_CONTINUE returns below
      * must agree with the corresponding break-from-do-while(0) logic there.
      */
-    if (fun->isNullClosure() && fun->getParent() == &cx->fp()->scopeChain()) {
-        jsbytecode *pc2 = AdvanceOverBlockchainOp(cx->regs().pc + JSOP_LAMBDA_LENGTH);
-        JSOp op2 = JSOp(*pc2);
+    if (fun->isNullClosure()) {
+        if (fun->joinable()) {
+            jsbytecode *pc2 = AdvanceOverBlockchainOp(cx->regs().pc + JSOP_LAMBDA_LENGTH);
+            JSOp op2 = JSOp(*pc2);

         if (op2 == JSOP_INITMETHOD) {
             stack(0, w.immpObjGC(fun));
             return ARECORD_CONTINUE;
         }

         if (op2 == JSOP_SETMETHOD) {
             Value lval = stackval(-1);

             if (!lval.isPrimitive() && lval.toObject().canHaveMethodBarrier()) {
                 stack(0, w.immpObjGC(fun));
                 return ARECORD_CONTINUE;
             }
-        } else if (fun->joinable()) {
-            if (op2 == JSOP_CALL) {
+        } else if (op2 == JSOP_CALL) {
             /*
              * Array.prototype.sort and String.prototype.replace are
              * optimized as if they are special form. We know that they
-             * won't leak the joined function object in obj, therefore
-             * we don't need to clone that compiler- created function
-             * object for identity/mutation reasons.
+             * won't leak the joined function object in obj, therefore we
+             * don't need to clone that compiler-created function object
+             * for identity/mutation reasons.
              */
             int iargc = GET_ARGC(pc2);

@@ -16734,7 +16734,7 @@ RecordTracePoint(JSContext* cx, TraceMonitor* tm, bool* blacklist, bool execAllo
         return TPA_Error;

     JS_ASSERT(!cx->isExceptionPending());

     return TPA_RanStuff;
 }

@@ -17094,7 +17094,7 @@ LoopProfile::profileOperation(JSContext* cx, JSOp op)

     if (op == JSOP_LOOKUPSWITCH)
         branchMultiplier *= GET_UINT16(pc + JUMP_OFFSET_LEN);

     if (numAllOps >= MAX_PROFILE_OPS) {
         debug_only_print0(LC_TMProfiler, "Profiling complete (maxops)\n");
         tm->profile->decide(cx);
@@ -17165,7 +17165,7 @@ LoopProfile::profileOperation(JSContext* cx, JSOp op)
     } else {
         stackClear();
     }

     return ProfContinue;
 }

@@ -17214,7 +17214,7 @@ LoopProfile::isCompilationUnprofitable(JSContext *cx, uintN goodOps)

     if (goodOps <= 22 && allOps[OP_FWDJUMP])
         return true;

     /* Ensure that inner loops aren't fleeting. */
     for (uintN i=0; i<numInnerLoops; i++) {
         LoopProfile *prof = LookupLoopProfile(traceMonitor, innerLoops[i].top);
@@ -17231,7 +17231,7 @@ LoopProfile::decide(JSContext *cx)
 {
     bool wasUndecided = undecided;
     bool wasTraceOK = traceOK;

     profiled = true;
     traceOK = false;
     undecided = false;
@@ -17364,7 +17364,7 @@ AbortProfiling(JSContext *cx)
 {
     JS_ASSERT(TRACE_PROFILER(cx));
     LoopProfile *prof = TRACE_PROFILER(cx);

     debug_only_print0(LC_TMProfiler, "Profiling complete (aborted)\n");
     prof->profiled = true;
     prof->traceOK = false;
@@ -110,7 +110,7 @@ mjit::Compiler::Compiler(JSContext *cx, JSScript *outerScript, bool isConstructi
     traceICs(CompilerAllocPolicy(cx, *thisFromCtor())),
 #endif
 #if defined JS_POLYIC
     pics(CompilerAllocPolicy(cx, *thisFromCtor())),
     getElemICs(CompilerAllocPolicy(cx, *thisFromCtor())),
     setElemICs(CompilerAllocPolicy(cx, *thisFromCtor())),
 #endif
@@ -870,7 +870,7 @@ mjit::Compiler::finishThisUp(JITScript **jitp)
     JSC::ExecutableAllocator::makeWritable(result, codeSize);
     masm.executableCopy(result);
     stubcc.masm.executableCopy(result + masm.size());

     JSC::LinkBuffer fullCode(result, codeSize);
     JSC::LinkBuffer stubCode(result + masm.size(), stubcc.size());

@@ -927,7 +927,7 @@ mjit::Compiler::finishThisUp(JITScript **jitp)
     }
     jit->pcLengths = pcLengths;

     /*
      * WARNING: mics(), callICs() et al depend on the ordering of these
      * variable-length sections. See JITScript's declaration for details.
      */
@@ -1107,7 +1107,7 @@ mjit::Compiler::finishThisUp(JITScript **jitp)
                      fullCode.locationOf(callICs[i].funGuard);
         jitCallICs[i].joinPointOffset = offset;
         JS_ASSERT(jitCallICs[i].joinPointOffset == offset);

         /* Compute the OOL call offset. */
         offset = stubCode.locationOf(callICs[i].oolCall) -
                  stubCode.locationOf(callICs[i].slowPathStart);
@@ -1167,7 +1167,7 @@ mjit::Compiler::finishThisUp(JITScript **jitp)
         if (equalityICs[i].jumpToStub.isSet())
             jitEqualityICs[i].jumpToStub = fullCode.locationOf(equalityICs[i].jumpToStub.get());
         jitEqualityICs[i].fallThrough = fullCode.locationOf(equalityICs[i].fallThrough);

         stubCode.patch(equalityICs[i].addrLabel, &jitEqualityICs[i]);
     }

@@ -1204,7 +1204,7 @@ mjit::Compiler::finishThisUp(JITScript **jitp)
         jitTraceICs[i].loopCounterStart = hotloop;
         jitTraceICs[i].loopCounter = hotloop < prevCount ? 1 : hotloop - prevCount;
 #endif

         stubCode.patch(traceICs[i].addrLabel, &jitTraceICs[i]);
     }
 #endif /* JS_MONOIC */
@@ -1565,7 +1565,7 @@ mjit::Compiler::generateMethod()

     /**********************
      * BEGIN COMPILER OPS *
      **********************/

     lastPC = PC;

@@ -1936,7 +1936,7 @@ mjit::Compiler::generateMethod()
             frame.pop();
             pushSyncedEntry(0);
           }
           END_CASE(JSOP_DELPROP)

           BEGIN_CASE(JSOP_DELELEM)
           {
@@ -2465,20 +2465,26 @@ mjit::Compiler::generateMethod()
             JSObjStubFun stub = stubs::Lambda;
             uint32 uses = 0;

-            jsbytecode *pc2 = AdvanceOverBlockchainOp(PC + JSOP_LAMBDA_LENGTH);
-            JSOp next = JSOp(*pc2);
-
-            if (next == JSOP_INITMETHOD) {
-                stub = stubs::LambdaForInit;
-            } else if (next == JSOP_SETMETHOD) {
-                stub = stubs::LambdaForSet;
-                uses = 1;
-            } else if (fun->joinable()) {
-                if (next == JSOP_CALL) {
-                    stub = stubs::LambdaJoinableForCall;
-                    uses = frame.frameSlots();
+            jsbytecode *pc2 = NULL;
+            if (fun->joinable()) {
+                pc2 = AdvanceOverBlockchainOp(PC + JSOP_LAMBDA_LENGTH);
+                JSOp next = JSOp(*pc2);
+
+                if (next == JSOP_INITMETHOD) {
+                    stub = stubs::LambdaJoinableForInit;
+                } else if (next == JSOP_SETMETHOD) {
+                    stub = stubs::LambdaJoinableForSet;
+                    uses = 1;
+                } else if (next == JSOP_CALL) {
+                    int iargc = GET_ARGC(pc2);
+                    if (iargc == 1 || iargc == 2) {
+                        stub = stubs::LambdaJoinableForCall;
+                        uses = frame.frameSlots();
+                    }
                 } else if (next == JSOP_NULL) {
-                    stub = stubs::LambdaJoinableForNull;
+                    pc2 += JSOP_NULL_LENGTH;
+                    if (JSOp(*pc2) == JSOP_CALL && GET_ARGC(pc2) == 0)
+                        stub = stubs::LambdaJoinableForNull;
                 }
             }

@@ -2712,7 +2718,7 @@ mjit::Compiler::generateMethod()

     /**********************
      *  END COMPILER OPS  *
      **********************/

     if (cx->typeInferenceEnabled() && PC == lastPC + analyze::GetBytecodeLength(lastPC)) {
         /*
@@ -4031,7 +4037,7 @@ mjit::Compiler::compareTwoValues(JSContext *cx, JSOp op, const Value &lhs, const
         }
     } else {
         double ld, rd;

         /* These should be infallible w/ primitives. */
         JS_ALWAYS_TRUE(ToNumber(cx, lhs, &ld));
         JS_ALWAYS_TRUE(ToNumber(cx, rhs, &rd));
@@ -4525,7 +4531,7 @@ mjit::Compiler::jsop_callprop_generic(JSAtom *atom)

     RETURN_IF_OOM(false);

     /*
      * Initialize op labels. We use GetPropLabels here because we have the same patching
      * requirements for CallProp.
      */
@@ -4559,7 +4565,7 @@ mjit::Compiler::jsop_callprop_str(JSAtom *atom)
 {
     if (!globalObj) {
         jsop_callprop_slow(atom);
         return true;
     }

     /*
@@ -4622,7 +4628,7 @@ mjit::Compiler::jsop_callprop_obj(JSAtom *atom)

     JS_ASSERT(top->isTypeKnown());
     JS_ASSERT(top->getKnownType() == JSVAL_TYPE_OBJECT);

     RESERVE_IC_SPACE(masm);

     pic.pc = PC;
@@ -4683,7 +4689,7 @@ mjit::Compiler::jsop_callprop_obj(JSAtom *atom)
     frame.storeRegs(-2, shapeReg, objReg, knownPushedType(0));
     BarrierState barrier = testBarrier(shapeReg, objReg);

     /*
      * Assert correctness of hardcoded offsets.
      * No type guard: type is asserted.
      */
@@ -5420,7 +5426,7 @@ mjit::Compiler::jsop_this()
 {
     frame.pushThis();

     /*
      * In strict mode code, we don't wrap 'this'.
      * In direct-call eval code, we wrapped 'this' before entering the eval.
      * In global code, 'this' is always an object.
@@ -5840,7 +5846,7 @@ mjit::Compiler::jsop_getgname(uint32 index)

     masm.loadPtr(Address(objReg, offsetof(JSObject, slots)), objReg);
     Address address(objReg, slot);

     /* Allocate any register other than objReg. */
     RegisterID treg = frame.allocReg();
     /* After dreg is loaded, it's safe to clobber objReg. */
@@ -6138,7 +6144,7 @@ mjit::Compiler::jsop_instanceof()
         OOL_STUBCALL(stubs::InstanceOf, REJOIN_FALLTHROUGH);
         firstSlow = stubcc.masm.jump();
     }

     /* This is sadly necessary because the error case needs the object. */
     frame.dup();
@@ -1481,28 +1481,23 @@ stubs::RegExp(VMFrame &f, JSObject *regex)
 }

 JSObject * JS_FASTCALL
-stubs::LambdaForInit(VMFrame &f, JSFunction *fun)
+stubs::LambdaJoinableForInit(VMFrame &f, JSFunction *fun)
 {
-    JSObject *obj = fun;
     jsbytecode *nextpc = (jsbytecode *) f.scratch;
-    if (fun->isNullClosure() && obj->getParent() == &f.fp()->scopeChain()) {
-        fun->setMethodAtom(f.script()->getAtom(GET_SLOTNO(nextpc)));
-        return obj;
-    }
-    return Lambda(f, fun);
+    JS_ASSERT(fun->joinable());
+    fun->setMethodAtom(f.fp()->script()->getAtom(GET_SLOTNO(nextpc)));
+    return fun;
 }

 JSObject * JS_FASTCALL
-stubs::LambdaForSet(VMFrame &f, JSFunction *fun)
+stubs::LambdaJoinableForSet(VMFrame &f, JSFunction *fun)
 {
-    JSObject *obj = fun;
+    JS_ASSERT(fun->joinable());
     jsbytecode *nextpc = (jsbytecode *) f.scratch;
-    if (fun->isNullClosure() && obj->getParent() == &f.fp()->scopeChain()) {
-        const Value &lref = f.regs.sp[-1];
-        if (lref.isObject() && lref.toObject().canHaveMethodBarrier()) {
-            fun->setMethodAtom(f.script()->getAtom(GET_SLOTNO(nextpc)));
-            return obj;
-        }
+    const Value &lref = f.regs.sp[-1];
+    if (lref.isObject() && lref.toObject().canHaveMethodBarrier()) {
+        fun->setMethodAtom(f.fp()->script()->getAtom(GET_SLOTNO(nextpc)));
+        return fun;
     }
     return Lambda(f, fun);
 }
@@ -1510,36 +1505,34 @@ stubs::LambdaForSet(VMFrame &f, JSFunction *fun)
 JSObject * JS_FASTCALL
 stubs::LambdaJoinableForCall(VMFrame &f, JSFunction *fun)
 {
-    JSObject *obj = fun;
+    JS_ASSERT(fun->joinable());
     jsbytecode *nextpc = (jsbytecode *) f.scratch;
-    if (fun->isNullClosure() && obj->getParent() == &f.fp()->scopeChain()) {
-        /*
-         * Array.prototype.sort and String.prototype.replace are
-         * optimized as if they are special form. We know that they
-         * won't leak the joined function object in obj, therefore
-         * we don't need to clone that compiler- created function
-         * object for identity/mutation reasons.
-         */
-        int iargc = GET_ARGC(nextpc);

-        /*
-         * Note that we have not yet pushed obj as the final argument,
-         * so regs.sp[1 - (iargc + 2)], and not regs.sp[-(iargc + 2)],
-         * is the callee for this JSOP_CALL.
-         */
-        const Value &cref = f.regs.sp[1 - (iargc + 2)];
-        JSObject *callee;
+    /*
+     * Array.prototype.sort and String.prototype.replace are optimized as if
+     * they are special form. We know that they won't leak the joined function
+     * object fun, therefore we don't need to clone that compiler-created
+     * function object for identity/mutation reasons.
+     */
+    int iargc = GET_ARGC(nextpc);

-        if (IsFunctionObject(cref, &callee)) {
-            JSFunction *calleeFun = callee->getFunctionPrivate();
-            Native native = calleeFun->maybeNative();
+    /*
+     * Note that we have not yet pushed fun as the final argument, so
+     * regs.sp[1 - (iargc + 2)], and not regs.sp[-(iargc + 2)], is the callee
+     * for this JSOP_CALL.
+     */
+    const Value &cref = f.regs.sp[1 - (iargc + 2)];
+    JSObject *callee;

-            if (native) {
-                if (iargc == 1 && native == array_sort)
-                    return obj;
-                if (iargc == 2 && native == str_replace)
-                    return obj;
-            }
-        }
+    if (IsFunctionObject(cref, &callee)) {
+        JSFunction *calleeFun = callee->getFunctionPrivate();
+        Native native = calleeFun->maybeNative();
+
+        if (native) {
+            if (iargc == 1 && native == array_sort)
+                return fun;
+            if (iargc == 2 && native == str_replace)
+                return fun;
         }
     }
     return Lambda(f, fun);
@@ -1548,23 +1541,13 @@ stubs::LambdaJoinableForCall(VMFrame &f, JSFunction *fun)
 JSObject * JS_FASTCALL
 stubs::LambdaJoinableForNull(VMFrame &f, JSFunction *fun)
 {
-    JSObject *obj = fun;
-    jsbytecode *nextpc = (jsbytecode *) f.scratch;
-    if (fun->isNullClosure() && obj->getParent() == &f.fp()->scopeChain()) {
-        jsbytecode *pc2 = nextpc + JSOP_NULL_LENGTH;
-        JSOp op2 = JSOp(*pc2);
-
-        if (op2 == JSOP_CALL && GET_ARGC(pc2) == 0)
-            return obj;
-    }
-    return Lambda(f, fun);
+    JS_ASSERT(fun->joinable());
+    return fun;
 }

 JSObject * JS_FASTCALL
 stubs::Lambda(VMFrame &f, JSFunction *fun)
 {
-    JSObject *obj = fun;
-
     JSObject *parent;
     if (fun->isNullClosure()) {
         parent = &f.fp()->scopeChain();
@@ -1574,7 +1557,7 @@ stubs::Lambda(VMFrame &f, JSFunction *fun)
         THROWV(NULL);
     }

-    obj = CloneFunctionObject(f.cx, fun, parent, true);
+    JSObject *obj = CloneFunctionObject(f.cx, fun, parent, true);
     if (!obj)
         THROWV(NULL);

@@ -156,8 +156,8 @@ JSObject * JS_FASTCALL DefLocalFun(VMFrame &f, JSFunction *fun);
 JSObject * JS_FASTCALL DefLocalFun_FC(VMFrame &f, JSFunction *fun);
 JSObject * JS_FASTCALL RegExp(VMFrame &f, JSObject *regex);
 JSObject * JS_FASTCALL Lambda(VMFrame &f, JSFunction *fun);
-JSObject * JS_FASTCALL LambdaForInit(VMFrame &f, JSFunction *fun);
-JSObject * JS_FASTCALL LambdaForSet(VMFrame &f, JSFunction *fun);
+JSObject * JS_FASTCALL LambdaJoinableForInit(VMFrame &f, JSFunction *fun);
+JSObject * JS_FASTCALL LambdaJoinableForSet(VMFrame &f, JSFunction *fun);
 JSObject * JS_FASTCALL LambdaJoinableForCall(VMFrame &f, JSFunction *fun);
 JSObject * JS_FASTCALL LambdaJoinableForNull(VMFrame &f, JSFunction *fun);
 JSObject * JS_FASTCALL FlatLambda(VMFrame &f, JSFunction *fun);