Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1846051 - Optimize array destructuring r=anba,arai
This is, I think, the simplest possible approach: the patch essentially mirrors the OptimizeSpreadCall optimization, but for array destructuring. It does add one constraint whose effects reach beyond destructuring: the array iterator prototype chain must not have a `return` method defined. In practice only very unusual code should do that, but we can make the check more targeted if this turns out to be a real issue.

Differential Revision: https://phabricator.services.mozilla.com/D184843
Parent: df3c6bd6a8
Commit: 7983ab7c2a
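To make the intent of the patch concrete before the diff, here is a rough JavaScript sketch of what the new fast path amounts to. `isOptimizableArray` is a hypothetical stand-in for the check performed by the new JSOp::OptimizeGetIterator opcode; it is not a real API, and the fallback branch is only an approximation of the iterator protocol the emitter already uses.

```js
// Behavioral sketch of `let [a, b] = x;` under this patch (assumptions noted above).
function destructureTwo(x) {
  if (isOptimizableArray(x)) {
    // x is a packed array, Array.prototype[Symbol.iterator] and
    // %ArrayIteratorPrototype%.next are untouched, and no "return" method is
    // visible anywhere on the iterator's prototype chain: use indexed loads,
    // bounds-checked against the length so holes/protos are never consulted.
    const len = x.length;
    const a = 0 < len ? x[0] : undefined;
    const b = 1 < len ? x[1] : undefined;
    return [a, b];
  }

  // Otherwise fall back to the generic iterator protocol.
  const iter = x[Symbol.iterator]();
  const r0 = iter.next();
  const a = r0.done ? undefined : r0.value;
  const r1 = r0.done ? r0 : iter.next();
  const b = r1.done ? undefined : r1.value;
  if (!r1.done && typeof iter.return === "function") {
    iter.return(); // IteratorClose: this is why a visible "return" disables the fast path
  }
  return [a, b];
}
```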
@@ -1605,6 +1605,7 @@ static bool BytecodeIsEffectful(JSScript* script, size_t offset) {
     case JSOp::IsNoIter:
     case JSOp::EndIter:
     case JSOp::CloseIter:
+    case JSOp::OptimizeGetIterator:
     case JSOp::IsNullOrUndefined:
     case JSOp::In:
     case JSOp::HasOwn:
@@ -3167,6 +3167,16 @@ bool BytecodeEmitter::emitDestructuringOpsArray(ListNode* pattern,
   //   let a, b, c, d;
   //   let iter, next, lref, result, done, value; // stack values
   //
+  //   // NOTE: the fast path for this example is not applicable, because of
+  //   // the spread and the assignment |c=y|, but it is documented here for a
+  //   // simpler example, |let [a,b] = x;|
+  //   //
+  //   // if (IsOptimizableArray(x)) {
+  //   //   a = x[0];
+  //   //   b = x[1];
+  //   //   goto end: // (skip everything below)
+  //   // }
+  //   //
   //   iter = x[Symbol.iterator]();
   //   next = iter.next;
   //
@@ -3243,6 +3253,36 @@ bool BytecodeEmitter::emitDestructuringOpsArray(ListNode* pattern,
   //   // === emitted after loop ===
   //   if (!done)
   //     IteratorClose(iter);
+  //
+  //   end:
+
+  bool isEligibleForArrayOptimizations = true;
+  for (ParseNode* member : pattern->contents()) {
+    switch (member->getKind()) {
+      case ParseNodeKind::Elision:
+        break;
+      case ParseNodeKind::Name: {
+        auto name = member->as<NameNode>().name();
+        NameLocation loc = lookupName(name);
+        if (loc.kind() != NameLocation::Kind::ArgumentSlot &&
+            loc.kind() != NameLocation::Kind::FrameSlot &&
+            loc.kind() != NameLocation::Kind::EnvironmentCoordinate) {
+          isEligibleForArrayOptimizations = false;
+        }
+        break;
+      }
+      default:
+        // Unfortunately we can't handle any recursive destructuring,
+        // because we can't guarantee that the recursed-into parts
+        // won't run code which invalidates our constraints. We also
+        // cannot handle ParseNodeKind::AssignExpr for similar reasons.
+        isEligibleForArrayOptimizations = false;
+        break;
+    }
+    if (!isEligibleForArrayOptimizations) {
+      break;
+    }
+  }
+
   // Use an iterator to destructure the RHS, instead of index lookup. We
   // must leave the *original* value on the stack.
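Editorial note (not part of the patch): the eligibility loop above only admits patterns whose targets are plain bindings stored in argument, frame, or environment slots, plus elisions. A hedged JavaScript illustration of what does and does not qualify:

```js
// Eligible: every target is a simple local binding; elisions are fine.
function eligible(xs) {
  let [a, , b] = xs;
  return a + b;
}

// Not eligible: these targets can run arbitrary code (defaults, nested
// patterns, rest elements, property setters), which could invalidate the
// guards while destructuring is in progress.
function notEligible(xs, obj) {
  let [c = 0] = xs;   // default value: ParseNodeKind::AssignExpr
  let [[d]] = xs;     // nested pattern: recursive destructuring
  let [...rest] = xs; // rest element
  [obj.prop] = xs;    // property target may invoke a setter
  return [c, d, rest, obj];
}

// e.g. eligible([1, 2, 3]) === 4; notEligible([[1], 2], {}) exercises the slow path.
```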
@@ -3250,6 +3290,126 @@ bool BytecodeEmitter::emitDestructuringOpsArray(ListNode* pattern,
     //          [stack] ... OBJ OBJ
     return false;
   }
+
+  Maybe<InternalIfEmitter> ifArrayOptimizable;
+
+  if (isEligibleForArrayOptimizations) {
+    ifArrayOptimizable.emplace(
+        this, BranchEmitterBase::LexicalKind::MayContainLexicalAccessInBranch);
+
+    if (!emit1(JSOp::Dup)) {
+      //        [stack] OBJ OBJ
+      return false;
+    }
+
+    if (!emit1(JSOp::OptimizeGetIterator)) {
+      //        [stack] OBJ OBJ IS_OPTIMIZABLE
+      return false;
+    }
+
+    if (!ifArrayOptimizable->emitThenElse()) {
+      //        [stack] OBJ OBJ
+      return false;
+    }
+
+    if (!emitAtomOp(JSOp::GetProp,
+                    TaggedParserAtomIndex::WellKnown::length())) {
+      //        [stack] OBJ LENGTH
+      return false;
+    }
+
+    if (!emit1(JSOp::Swap)) {
+      //        [stack] LENGTH OBJ
+      return false;
+    }
+
+    uint32_t idx = 0;
+    for (ParseNode* member : pattern->contents()) {
+      if (member->isKind(ParseNodeKind::Elision)) {
+        idx += 1;
+        continue;
+      }
+
+      if (!emit1(JSOp::Dup)) {
+        //      [stack] LENGTH OBJ OBJ
+        return false;
+      }
+
+      if (!emitNumberOp(idx)) {
+        //      [stack] LENGTH OBJ OBJ IDX
+        return false;
+      }
+
+      if (!emit1(JSOp::Dup)) {
+        //      [stack] LENGTH OBJ OBJ IDX IDX
+        return false;
+      }
+
+      if (!emitDupAt(4)) {
+        //      [stack] LENGTH OBJ OBJ IDX IDX LENGTH
+        return false;
+      }
+
+      if (!emit1(JSOp::Lt)) {
+        //      [stack] LENGTH OBJ OBJ IDX IS_IN_DENSE_BOUNDS
+        return false;
+      }
+
+      InternalIfEmitter isInDenseBounds(this);
+      if (!isInDenseBounds.emitThenElse()) {
+        //      [stack] LENGTH OBJ OBJ IDX
+        return false;
+      }
+
+      if (!emit1(JSOp::GetElem)) {
+        //      [stack] LENGTH OBJ VALUE
+        return false;
+      }
+
+      if (!isInDenseBounds.emitElse()) {
+        //      [stack] LENGTH OBJ OBJ IDX
+        return false;
+      }
+
+      if (!emitPopN(2)) {
+        //      [stack] LENGTH OBJ
+        return false;
+      }
+
+      if (!emit1(JSOp::Undefined)) {
+        //      [stack] LENGTH OBJ UNDEFINED
+        return false;
+      }
+
+      if (!isInDenseBounds.emitEnd()) {
+        //      [stack] LENGTH OBJ VALUE|UNDEFINED
+        return false;
+      }
+
+      if (!emitSetOrInitializeDestructuring(member, flav)) {
+        //      [stack] LENGTH OBJ
+        return false;
+      }
+
+      idx += 1;
+    }
+
+    if (!emit1(JSOp::Swap)) {
+      //        [stack] OBJ LENGTH
+      return false;
+    }
+
+    if (!emit1(JSOp::Pop)) {
+      //        [stack] OBJ
+      return false;
+    }
+
+    if (!ifArrayOptimizable->emitElse()) {
+      //        [stack] OBJ OBJ
+      return false;
+    }
+  }
+
   if (!emitIterator(SelfHostedIter::Deny)) {
     //          [stack] ... OBJ NEXT ITER
     return false;
@@ -3267,8 +3427,19 @@ bool BytecodeEmitter::emitDestructuringOpsArray(ListNode* pattern,
       return false;
     }
 
-    return emitIteratorCloseInInnermostScope();
+    if (!emitIteratorCloseInInnermostScope()) {
       //        [stack] ... OBJ
+      return false;
+    }
+
+    if (ifArrayOptimizable.isSome()) {
+      if (!ifArrayOptimizable->emitEnd()) {
+        //      [stack] OBJ
+        return false;
+      }
+    }
+
+    return true;
   }
 
   // Push an initial FALSE value for DONE.
@@ -3573,6 +3744,13 @@ bool BytecodeEmitter::emitDestructuringOpsArray(ListNode* pattern,
     return false;
   }
 
+  if (ifArrayOptimizable.isSome()) {
+    if (!ifArrayOptimizable->emitEnd()) {
+      //        [stack] OBJ
+      return false;
+    }
+  }
+
   return true;
 }
@@ -226,8 +226,9 @@ bool IfEmitter::emitEnd() {
   return true;
 }
 
-InternalIfEmitter::InternalIfEmitter(BytecodeEmitter* bce)
-    : IfEmitter(bce, LexicalKind::NoLexicalAccessInBranch) {
+InternalIfEmitter::InternalIfEmitter(BytecodeEmitter* bce,
+                                     LexicalKind lexicalKind)
+    : IfEmitter(bce, lexicalKind) {
 #ifdef DEBUG
   // Skip emitIf (see the comment above InternalIfEmitter declaration).
   state_ = State::If;
@@ -21,23 +21,7 @@ namespace frontend {
 struct BytecodeEmitter;
 
 class MOZ_STACK_CLASS BranchEmitterBase {
- protected:
-  BytecodeEmitter* bce_;
-
-  // Jump around the then clause, to the beginning of the else clause.
-  JumpList jumpAroundThen_;
-
-  // Jump around the else clause, to the end of the entire branch.
-  JumpList jumpsAroundElse_;
-
-  // The stack depth before emitting the then block.
-  // Used for restoring stack depth before emitting the else block.
-  // Also used for assertion to make sure then and else blocks pushed the
-  // same number of values.
-  int32_t thenDepth_ = 0;
-
-  enum class ConditionKind { Positive, Negative };
-
+ public:
   // Whether the then-clause, the else-clause, or else-if condition may
   // contain declaration or access to lexical variables, which means they
   // should have their own TDZCheckCache. Basically TDZCheckCache should be
@@ -57,6 +41,23 @@ class MOZ_STACK_CLASS BranchEmitterBase {
     // inside then-clause, else-clause, nor else-if condition.
     NoLexicalAccessInBranch
   };
 
+ protected:
+  BytecodeEmitter* bce_;
+
+  // Jump around the then clause, to the beginning of the else clause.
+  JumpList jumpAroundThen_;
+
+  // Jump around the else clause, to the end of the entire branch.
+  JumpList jumpsAroundElse_;
+
+  // The stack depth before emitting the then block.
+  // Used for restoring stack depth before emitting the else block.
+  // Also used for assertion to make sure then and else blocks pushed the
+  // same number of values.
+  int32_t thenDepth_ = 0;
+
+  enum class ConditionKind { Positive, Negative };
+
   LexicalKind lexicalKind_;
 
   mozilla::Maybe<TDZCheckCache> tdzCache_;
@@ -246,7 +247,10 @@ class MOZ_STACK_CLASS IfEmitter : public BranchEmitterBase {
 //
 class MOZ_STACK_CLASS InternalIfEmitter : public IfEmitter {
  public:
-  explicit InternalIfEmitter(BytecodeEmitter* bce);
+  explicit InternalIfEmitter(
+      BytecodeEmitter* bce,
+      LexicalKind lexicalKind =
+          BranchEmitterBase::LexicalKind::NoLexicalAccessInBranch);
 };
 
 // Class for emitting bytecode for conditional expression.
@@ -0,0 +1,13 @@
+(() => {
+  let returnCalled = false;
+  ({}).__proto__.return = () => {
+    returnCalled = true;
+    return { value: 3, done: true };
+  };
+
+  assertEq(returnCalled, false);
+  let [a,b] = [1,2,3];
+  assertEq(returnCalled, true);
+  assertEq(a, 1);
+  assertEq(b, 2);
+})();
@@ -0,0 +1,17 @@
+(() => {
+  let returnCalled = false;
+
+  function foo() {
+    ({}).__proto__.return = () => {
+      returnCalled = true;
+      return { value: 3, done: true };
+    };
+    return 2;
+  }
+
+  assertEq(returnCalled, false);
+  let [a,[b=foo()]] = [1,[],3];
+  assertEq(returnCalled, true);
+  assertEq(a, 1);
+  assertEq(b, 2);
+})();
@@ -0,0 +1,13 @@
+(() => {
+  let nextCalled = 0;
+  ([])[Symbol.iterator]().__proto__.next = () => {
+    nextCalled++;
+    return {value: nextCalled, done: false};
+  };
+
+  assertEq(nextCalled, 0);
+  let [a,b] = [1,2,3];
+  assertEq(nextCalled, 2);
+  assertEq(a, 1);
+  assertEq(b, 2);
+})();
@@ -0,0 +1,36 @@
+(() => {
+  let iterablesBase = [
+    [1,2],
+    [1,2,3],
+    [1,2,3],
+    [3,2,1],
+  ];
+
+  let iterables = [];
+  for (let i = 0; i < 1000; i++) {
+    iterables.push([...iterablesBase[i % iterablesBase.length]]);
+  }
+
+  iterables.push(new Map([[1, 3], [2,4]]).keys());
+
+  function testDestructuringInitialization(a) {
+    let [x,y] = a;
+    return y;
+  }
+
+  function testDestructuringAssignment(a) {
+    let x, y;
+    [x,y] = a;
+    return y;
+  }
+
+  for (let i = 0; i < iterables.length; i++) {
+    assertEq(testDestructuringInitialization(iterables[i]), 2);
+  }
+
+  // refresh the last iterator
+  iterables[iterables.length - 1] = new Map([[1, 3], [2,4]]).keys();
+  for (let i = 0; i < iterables.length; i++) {
+    assertEq(testDestructuringAssignment(iterables[i]), 2);
+  }
+})();
@@ -0,0 +1,19 @@
+(() => {
+  var returnCalled = false;
+  Object.defineProperty(globalThis, 'x', {
+    get() {
+      return 42;
+    },
+    set(value) {
+      ({}).__proto__.return = () => {
+        returnCalled = true;
+        return { value: 3, done: true };
+      };
+    }
+  });
+
+  [x] = [1, 2];
+
+  assertEq(x, 42);
+  assertEq(returnCalled, true);
+})();
@@ -0,0 +1,7 @@
+(() => {
+  ({}).__proto__[1] = 2;
+  let [x,y] = [1];
+
+  assertEq(x, 1);
+  assertEq(y, undefined);
+})();
@@ -0,0 +1,10 @@
+setJitCompilerOption("ion.forceinlineCaches", 1);
+
+function f(arr) {
+  var [a, b] = arr;
+  return b;
+}
+
+for (var i = 0; i < 10_000; ++i) {
+  assertEq(f([0, 1]), 1);
+}
@@ -2011,6 +2011,7 @@ bool BaselineCacheIRCompiler::init(CacheKind kind) {
     case CacheKind::ToPropertyKey:
     case CacheKind::GetIterator:
     case CacheKind::OptimizeSpreadCall:
+    case CacheKind::OptimizeGetIterator:
     case CacheKind::ToBool:
     case CacheKind::UnaryArith:
       MOZ_ASSERT(numInputs == 1);
@@ -5323,6 +5323,18 @@ bool BaselineCodeGen<Handler>::emit_CloseIter() {
   return emitNextIC();
 }
 
+template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_OptimizeGetIterator() {
+  frame.popRegsAndSync(1);
+
+  if (!emitNextIC()) {
+    return false;
+  }
+
+  frame.push(R0);
+  return true;
+}
+
 template <typename Handler>
 bool BaselineCodeGen<Handler>::emit_IsGenClosing() {
   return emitIsMagicValue();
@@ -367,6 +367,8 @@ class MOZ_STATIC_CLASS OpToFallbackKindTable {
     setKind(JSOp::Rest, BaselineICFallbackKind::Rest);
 
     setKind(JSOp::CloseIter, BaselineICFallbackKind::CloseIter);
+    setKind(JSOp::OptimizeGetIterator,
+            BaselineICFallbackKind::OptimizeGetIterator);
   }
 };
@@ -2510,6 +2512,40 @@ bool FallbackICCodeCompiler::emit_CloseIter() {
   return tailCallVM<Fn, DoCloseIterFallback>(masm);
 }
 
+//
+// OptimizeGetIterator_Fallback
+//
+
+bool DoOptimizeGetIteratorFallback(JSContext* cx, BaselineFrame* frame,
+                                   ICFallbackStub* stub, HandleValue value,
+                                   MutableHandleValue res) {
+  stub->incrementEnteredCount();
+  MaybeNotifyWarp(frame->outerScript(), stub);
+  FallbackICSpew(cx, stub, "OptimizeGetIterator");
+
+  TryAttachStub<OptimizeGetIteratorIRGenerator>("OptimizeGetIterator", cx,
+                                                frame, stub, value);
+
+  bool result;
+  if (!OptimizeGetIterator(cx, value, &result)) {
+    return false;
+  }
+  res.setBoolean(result);
+  return true;
+}
+
+bool FallbackICCodeCompiler::emit_OptimizeGetIterator() {
+  EmitRestoreTailCallReg(masm);
+
+  masm.pushValue(R0);
+  masm.push(ICStubReg);
+  pushStubPayload(masm, R0.scratchReg());
+
+  using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleValue,
+                      MutableHandleValue);
+  return tailCallVM<Fn, DoOptimizeGetIteratorFallback>(masm);
+}
+
 bool JitRuntime::generateBaselineICFallbackCode(JSContext* cx) {
   TempAllocator temp(&cx->tempLifoAlloc());
   StackMacroAssembler masm(cx, temp);
@@ -436,6 +436,11 @@ extern bool DoCompareFallback(JSContext* cx, BaselineFrame* frame,
 extern bool DoCloseIterFallback(JSContext* cx, BaselineFrame* frame,
                                 ICFallbackStub* stub, HandleObject iter);
 
+extern bool DoOptimizeGetIteratorFallback(JSContext* cx, BaselineFrame* frame,
+                                          ICFallbackStub* stub,
+                                          HandleValue value,
+                                          MutableHandleValue res);
+
 }  // namespace jit
 }  // namespace js
@@ -41,7 +41,8 @@ namespace jit {
   _(Compare)      \
   _(GetProp)      \
   _(GetPropSuper) \
-  _(CloseIter)
+  _(CloseIter)    \
+  _(OptimizeGetIterator)
 
 }  // namespace jit
 }  // namespace js
@@ -114,6 +114,7 @@ size_t js::jit::NumInputsForCacheKind(CacheKind kind) {
     case CacheKind::Call:
     case CacheKind::OptimizeSpreadCall:
     case CacheKind::CloseIter:
+    case CacheKind::OptimizeGetIterator:
       return 1;
     case CacheKind::Compare:
     case CacheKind::GetElem:
@@ -5603,8 +5604,13 @@ static bool IsArrayPrototypeOptimizable(JSContext* cx, Handle<ArrayObject*> arr,
   return IsSelfHostedFunctionWithName(iterFun, cx->names().dollar_ArrayValues_);
 }
 
+enum class AllowIteratorReturn : bool {
+  No,
+  Yes,
+};
 static bool IsArrayIteratorPrototypeOptimizable(
-    JSContext* cx, MutableHandle<NativeObject*> arrIterProto, uint32_t* slot,
+    JSContext* cx, AllowIteratorReturn allowReturn,
+    MutableHandle<NativeObject*> arrIterProto, uint32_t* slot,
     MutableHandle<JSFunction*> nextFun) {
   auto* proto =
       GlobalObject::getOrCreateArrayIteratorPrototype(cx, cx->global());
@@ -5629,7 +5635,18 @@ static bool IsArrayIteratorPrototypeOptimizable(
   }
 
   nextFun.set(&nextVal.toObject().as<JSFunction>());
-  return IsSelfHostedFunctionWithName(nextFun, cx->names().ArrayIteratorNext);
+  if (!IsSelfHostedFunctionWithName(nextFun, cx->names().ArrayIteratorNext)) {
+    return false;
+  }
+
+  if (allowReturn == AllowIteratorReturn::No) {
+    // Ensure that %ArrayIteratorPrototype% doesn't define "return".
+    if (!CheckHasNoSuchProperty(cx, proto, NameToId(cx->names().return_))) {
+      return false;
+    }
+  }
+
+  return true;
 }
 
 AttachDecision OptimizeSpreadCallIRGenerator::tryAttachArray() {
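Editorial note (not part of the patch): the `AllowIteratorReturn::No` mode exists because destructuring can stop iterating before the iterator is exhausted, and IteratorClose then calls any `return` method visible on the iterator's prototype chain. A hedged, shell-style sketch (using `assertEq` as in the jit-tests above) of the tampering this check must catch:

```js
// Defining "return" on %ArrayIteratorPrototype% must disable the fast path:
// `let [a, b] = [1, 2, 3]` closes the iterator early and must observe the hook.
const arrayIterProto = Object.getPrototypeOf([][Symbol.iterator]());
let closed = 0;
arrayIterProto.return = function () {
  closed++;
  return { done: true };
};

let [a, b] = [1, 2, 3];
assertEq(a, 1);
assertEq(b, 2);
assertEq(closed, 1); // the return hook still fires exactly once
```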
@@ -5660,8 +5677,9 @@ AttachDecision OptimizeSpreadCallIRGenerator::tryAttachArray() {
   Rooted<NativeObject*> arrayIteratorProto(cx_);
   uint32_t iterNextSlot;
   Rooted<JSFunction*> nextFun(cx_);
-  if (!IsArrayIteratorPrototypeOptimizable(cx_, &arrayIteratorProto,
-                                           &iterNextSlot, &nextFun)) {
+  if (!IsArrayIteratorPrototypeOptimizable(cx_, AllowIteratorReturn::Yes,
+                                           &arrayIteratorProto, &iterNextSlot,
+                                           &nextFun)) {
     return AttachDecision::NoAction;
   }
@@ -5720,7 +5738,8 @@ AttachDecision OptimizeSpreadCallIRGenerator::tryAttachArguments() {
   Rooted<NativeObject*> arrayIteratorProto(cx_);
   uint32_t slot;
   Rooted<JSFunction*> nextFun(cx_);
-  if (!IsArrayIteratorPrototypeOptimizable(cx_, &arrayIteratorProto, &slot,
+  if (!IsArrayIteratorPrototypeOptimizable(cx_, AllowIteratorReturn::Yes,
+                                           &arrayIteratorProto, &slot,
                                            &nextFun)) {
     return AttachDecision::NoAction;
   }
@@ -9895,7 +9914,8 @@ InlinableNativeIRGenerator::tryAttachArrayIteratorPrototypeOptimizable() {
   Rooted<NativeObject*> arrayIteratorProto(cx_);
   uint32_t slot;
   Rooted<JSFunction*> nextFun(cx_);
-  if (!IsArrayIteratorPrototypeOptimizable(cx_, &arrayIteratorProto, &slot,
+  if (!IsArrayIteratorPrototypeOptimizable(cx_, AllowIteratorReturn::Yes,
+                                           &arrayIteratorProto, &slot,
                                            &nextFun)) {
     return AttachDecision::NoAction;
   }
@@ -13407,6 +13427,107 @@ AttachDecision CloseIterIRGenerator::tryAttachStub() {
   return AttachDecision::NoAction;
 }
 
+OptimizeGetIteratorIRGenerator::OptimizeGetIteratorIRGenerator(
+    JSContext* cx, HandleScript script, jsbytecode* pc, ICState state,
+    HandleValue value)
+    : IRGenerator(cx, script, pc, CacheKind::OptimizeGetIterator, state),
+      val_(value) {}
+
+AttachDecision OptimizeGetIteratorIRGenerator::tryAttachStub() {
+  MOZ_ASSERT(cacheKind_ == CacheKind::OptimizeGetIterator);
+
+  AutoAssertNoPendingException aanpe(cx_);
+
+  TRY_ATTACH(tryAttachArray());
+  TRY_ATTACH(tryAttachNotOptimizable());
+
+  MOZ_CRASH("Failed to attach unoptimizable case.");
+}
+
+AttachDecision OptimizeGetIteratorIRGenerator::tryAttachArray() {
+  if (!isFirstStub_) {
+    return AttachDecision::NoAction;
+  }
+
+  // The value must be a packed array.
+  if (!val_.isObject()) {
+    return AttachDecision::NoAction;
+  }
+  Rooted<JSObject*> obj(cx_, &val_.toObject());
+  if (!IsPackedArray(obj)) {
+    return AttachDecision::NoAction;
+  }
+
+  // Prototype must be Array.prototype and Array.prototype[@@iterator] must not
+  // be modified.
+  Rooted<NativeObject*> arrProto(cx_);
+  uint32_t arrProtoIterSlot;
+  Rooted<JSFunction*> iterFun(cx_);
+  if (!IsArrayPrototypeOptimizable(cx_, obj.as<ArrayObject>(), &arrProto,
+                                   &arrProtoIterSlot, &iterFun)) {
+    return AttachDecision::NoAction;
+  }
+
+  // %ArrayIteratorPrototype%.next must not be modified and
+  // %ArrayIteratorPrototype%.return must not be present.
+  Rooted<NativeObject*> arrayIteratorProto(cx_);
+  uint32_t slot;
+  Rooted<JSFunction*> nextFun(cx_);
+  if (!IsArrayIteratorPrototypeOptimizable(
+          cx_, AllowIteratorReturn::No, &arrayIteratorProto, &slot, &nextFun)) {
+    return AttachDecision::NoAction;
+  }
+
+  ValOperandId valId(writer.setInputOperandId(0));
+  ObjOperandId objId = writer.guardToObject(valId);
+
+  // Guard the object is a packed array with Array.prototype as proto.
+  MOZ_ASSERT(obj->is<ArrayObject>());
+  writer.guardShape(objId, obj->shape());
+  writer.guardArrayIsPacked(objId);
+
+  // Guard on Array.prototype[@@iterator].
+  ObjOperandId arrProtoId = writer.loadObject(arrProto);
+  ObjOperandId iterId = writer.loadObject(iterFun);
+  writer.guardShape(arrProtoId, arrProto->shape());
+  writer.guardDynamicSlotIsSpecificObject(arrProtoId, iterId, arrProtoIterSlot);
+
+  // Guard on %ArrayIteratorPrototype%.next.
+  ObjOperandId iterProtoId = writer.loadObject(arrayIteratorProto);
+  ObjOperandId nextId = writer.loadObject(nextFun);
+  writer.guardShape(iterProtoId, arrayIteratorProto->shape());
+  writer.guardDynamicSlotIsSpecificObject(iterProtoId, nextId, slot);
+
+  // Guard on the prototype chain to ensure no "return" method is present.
+  ShapeGuardProtoChain(writer, arrayIteratorProto, iterProtoId);
+
+  writer.loadBooleanResult(true);
+  writer.returnFromIC();
+
+  trackAttached("OptimizeGetIterator.Array");
+  return AttachDecision::Attach;
+}
+
+AttachDecision OptimizeGetIteratorIRGenerator::tryAttachNotOptimizable() {
+  ValOperandId valId(writer.setInputOperandId(0));
+
+  writer.loadBooleanResult(false);
+  writer.returnFromIC();
+
+  trackAttached("OptimizeGetIterator.NotOptimizable");
+  return AttachDecision::Attach;
+}
+
+void OptimizeGetIteratorIRGenerator::trackAttached(const char* name) {
+  stubName_ = name ? name : "NotAttached";
+
+#ifdef JS_CACHEIR_SPEW
+  if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+    sp.valueProperty("val", val_);
+  }
+#endif
+}
+
 #ifdef JS_SIMULATOR
 bool js::jit::CallAnyNative(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
@@ -186,6 +186,7 @@ class TypedOperandId : public OperandId {
   _(InstanceOf)          \
   _(GetIterator)         \
   _(CloseIter)           \
+  _(OptimizeGetIterator) \
   _(OptimizeSpreadCall)  \
   _(Compare)             \
   _(ToBool)              \
@@ -488,6 +488,22 @@ class MOZ_RAII OptimizeSpreadCallIRGenerator : public IRGenerator {
   void trackAttached(const char* name /* must be a C string literal */);
 };
 
+class MOZ_RAII OptimizeGetIteratorIRGenerator : public IRGenerator {
+  HandleValue val_;
+
+  AttachDecision tryAttachArray();
+  AttachDecision tryAttachNotOptimizable();
+
+ public:
+  OptimizeGetIteratorIRGenerator(JSContext* cx, HandleScript script,
+                                 jsbytecode* pc, ICState state,
+                                 HandleValue value);
+
+  AttachDecision tryAttachStub();
+
+  void trackAttached(const char* name /* must be a C string literal */);
+};
+
 enum class StringChar { CodeAt, At };
 enum class ScriptedThisResult { NoAction, UninitializedThis, PlainObjectShape };
@@ -948,6 +948,26 @@ void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
       masm.jump(ool->rejoin());
       return;
     }
+    case CacheKind::OptimizeGetIterator: {
+      auto* optimizeGetIteratorIC = ic->asOptimizeGetIteratorIC();
+
+      saveLive(lir);
+
+      pushArg(optimizeGetIteratorIC->value());
+      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
+      pushArg(ImmGCPtr(gen->outerInfo().script()));
+
+      using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeGetIteratorIC*,
+                          HandleValue, bool* res);
+      callVM<Fn, IonOptimizeGetIteratorIC::update>(lir);
+
+      StoreRegisterTo(optimizeGetIteratorIC->output()).generate(this);
+      restoreLiveIgnore(
+          lir, StoreRegisterTo(optimizeGetIteratorIC->output()).clobbered());
+
+      masm.jump(ool->rejoin());
+      return;
+    }
     case CacheKind::Call:
     case CacheKind::TypeOf:
     case CacheKind::ToBool:
@@ -13746,6 +13766,17 @@ void CodeGenerator::visitCloseIterCache(LCloseIterCache* lir) {
   addIC(lir, allocateIC(ic));
 }
 
+void CodeGenerator::visitOptimizeGetIteratorCache(
+    LOptimizeGetIteratorCache* lir) {
+  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
+  ValueOperand val = ToValue(lir, LOptimizeGetIteratorCache::ValueIndex);
+  Register output = ToRegister(lir->output());
+  Register temp = ToRegister(lir->temp0());
+
+  IonOptimizeGetIteratorIC ic(liveRegs, val, output, temp);
+  addIC(lir, allocateIC(ic));
+}
+
 void CodeGenerator::visitIteratorMore(LIteratorMore* lir) {
   const Register obj = ToRegister(lir->iterator());
   const ValueOperand output = ToOutValue(lir);
@@ -546,6 +546,21 @@ bool IonCacheIRCompiler::init() {
       allocator.initInputLocation(0, ic->iter(), JSVAL_TYPE_OBJECT);
       break;
     }
+    case CacheKind::OptimizeGetIterator: {
+      auto* ic = ic_->asOptimizeGetIteratorIC();
+      Register output = ic->output();
+
+      available.add(output);
+      available.add(ic->temp());
+
+      liveRegs_.emplace(ic->liveRegs());
+      outputUnchecked_.emplace(
+          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));
+
+      MOZ_ASSERT(numInputs == 1);
+      allocator.initInputLocation(0, ic->value());
+      break;
+    }
     case CacheKind::Call:
     case CacheKind::TypeOf:
     case CacheKind::ToBool:
@@ -646,6 +661,7 @@ void IonCacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg) {
     case CacheKind::ToPropertyKey:
     case CacheKind::OptimizeSpreadCall:
    case CacheKind::CloseIter:
+    case CacheKind::OptimizeGetIterator:
      MOZ_CRASH("No float registers available");
    case CacheKind::SetProp:
    case CacheKind::SetElem:
@@ -69,6 +69,8 @@ Register IonIC::scratchRegisterForEntryJump() {
       return asCompareIC()->output();
     case CacheKind::CloseIter:
       return asCloseIterIC()->temp();
+    case CacheKind::OptimizeGetIterator:
+      return asOptimizeGetIteratorIC()->temp();
     case CacheKind::Call:
     case CacheKind::TypeOf:
     case CacheKind::ToBool:
@@ -488,6 +490,17 @@ bool IonCloseIterIC::update(JSContext* cx, HandleScript outerScript,
   return CloseIterOperation(cx, iter, kind);
 }
 
+/* static */
+bool IonOptimizeGetIteratorIC::update(JSContext* cx, HandleScript outerScript,
+                                      IonOptimizeGetIteratorIC* ic,
+                                      HandleValue value, bool* result) {
+  IonScript* ionScript = outerScript->ionScript();
+
+  TryAttachIonStub<OptimizeGetIteratorIRGenerator>(cx, ic, ionScript, value);
+
+  return OptimizeGetIterator(cx, value, result);
+}
+
 /* static */
 bool IonUnaryArithIC::update(JSContext* cx, HandleScript outerScript,
                              IonUnaryArithIC* ic, HandleValue val,
@@ -80,6 +80,7 @@ class IonBinaryArithIC;
 class IonToPropertyKeyIC;
 class IonOptimizeSpreadCallIC;
 class IonCloseIterIC;
+class IonOptimizeGetIteratorIC;
 
 class IonIC {
   // This either points at the OOL path for the fallback path, or the code for
@@ -220,6 +221,10 @@ class IonIC {
     MOZ_ASSERT(kind_ == CacheKind::CloseIter);
     return (IonCloseIterIC*)this;
   }
+  IonOptimizeGetIteratorIC* asOptimizeGetIteratorIC() {
+    MOZ_ASSERT(kind_ == CacheKind::OptimizeGetIterator);
+    return (IonOptimizeGetIteratorIC*)this;
+  }
 
   // Returns the Register to use as scratch when entering IC stubs. This
   // should either be an output register or a temp.
@@ -658,6 +663,31 @@ class IonCloseIterIC : public IonIC {
                      IonCloseIterIC* ic, HandleObject iter);
 };
 
+class IonOptimizeGetIteratorIC : public IonIC {
+  LiveRegisterSet liveRegs_;
+  ValueOperand value_;
+  Register output_;
+  Register temp_;
+
+ public:
+  IonOptimizeGetIteratorIC(LiveRegisterSet liveRegs, ValueOperand value,
+                           Register output, Register temp)
+      : IonIC(CacheKind::OptimizeGetIterator),
+        liveRegs_(liveRegs),
+        value_(value),
+        output_(output),
+        temp_(temp) {}
+
+  ValueOperand value() const { return value_; }
+  Register output() const { return output_; }
+  Register temp() const { return temp_; }
+  LiveRegisterSet liveRegs() const { return liveRegs_; }
+
+  static bool update(JSContext* cx, HandleScript outerScript,
+                     IonOptimizeGetIteratorIC* ic, HandleValue value,
+                     bool* result);
+};
+
 }  // namespace jit
 }  // namespace js
@@ -2362,6 +2362,13 @@
   num_temps: 1
   mir_op: true
 
+- name: OptimizeGetIteratorCache
+  result_type: WordSized
+  operands:
+    value: BoxedValue
+  num_temps: 1
+  mir_op: true
+
 # Read the number of actual arguments.
 - name: ArgumentsLength
   result_type: WordSized
@@ -5185,6 +5185,16 @@ void LIRGenerator::visitCloseIterCache(MCloseIterCache* ins) {
   assignSafepoint(lir, ins);
 }
 
+void LIRGenerator::visitOptimizeGetIteratorCache(
+    MOptimizeGetIteratorCache* ins) {
+  MDefinition* value = ins->value();
+  MOZ_ASSERT(value->type() == MIRType::Value);
+
+  auto* lir = new (alloc()) LOptimizeGetIteratorCache(useBox(value), temp());
+  define(lir, ins);
+  assignSafepoint(lir, ins);
+}
+
 void LIRGenerator::visitStringLength(MStringLength* ins) {
   MOZ_ASSERT(ins->string()->type() == MIRType::String);
   define(new (alloc()) LStringLength(useRegisterAtStart(ins->string())), ins);
@@ -2157,6 +2157,11 @@
   completionKind: uint8_t
   possibly_calls: true
 
+- name: OptimizeGetIteratorCache
+  operands:
+    value: Value
+  result_type: Boolean
+
 - name: InCache
   gen_boilerplate: false
@@ -160,6 +160,7 @@ namespace jit {
   _(DoInstanceOfFallback, js::jit::DoInstanceOfFallback, 2) \
   _(DoNewArrayFallback, js::jit::DoNewArrayFallback) \
   _(DoNewObjectFallback, js::jit::DoNewObjectFallback) \
+  _(DoOptimizeGetIteratorFallback, js::jit::DoOptimizeGetIteratorFallback) \
   _(DoOptimizeSpreadCallFallback, js::jit::DoOptimizeSpreadCallFallback) \
   _(DoRestFallback, js::jit::DoRestFallback) \
   _(DoSetElemFallback, js::jit::DoSetElemFallback, 2) \
@@ -221,6 +222,7 @@ namespace jit {
   _(IonHasOwnICUpdate, js::jit::IonHasOwnIC::update) \
   _(IonInICUpdate, js::jit::IonInIC::update) \
   _(IonInstanceOfICUpdate, js::jit::IonInstanceOfIC::update) \
+  _(IonOptimizeGetIteratorICUpdate, js::jit::IonOptimizeGetIteratorIC::update) \
   _(IonOptimizeSpreadCallICUpdate, js::jit::IonOptimizeSpreadCallIC::update) \
   _(IonSetPropertyICUpdate, js::jit::IonSetPropertyIC::update) \
   _(IonToPropertyKeyICUpdate, js::jit::IonToPropertyKeyIC::update) \
@@ -1716,6 +1716,11 @@ bool WarpBuilder::build_IsNoIter(BytecodeLocation) {
   return true;
 }
 
+bool WarpBuilder::build_OptimizeGetIterator(BytecodeLocation loc) {
+  MDefinition* value = current->pop();
+  return buildIC(loc, CacheKind::OptimizeGetIterator, {value});
+}
+
 bool WarpBuilder::transpileCall(BytecodeLocation loc,
                                 const WarpCacheIR* cacheIRSnapshot,
                                 CallInfo* callInfo) {
@@ -3394,6 +3399,13 @@ bool WarpBuilder::buildIC(BytecodeLocation loc, CacheKind kind,
       current->add(ins);
       return resumeAfter(ins, loc);
     }
+    case CacheKind::OptimizeGetIterator: {
+      MOZ_ASSERT(numInputs == 1);
+      auto* ins = MOptimizeGetIteratorCache::New(alloc(), getInput(0));
+      current->add(ins);
+      current->push(ins);
+      return resumeAfter(ins, loc);
+    }
     case CacheKind::GetIntrinsic:
     case CacheKind::ToBool:
     case CacheKind::Call:
@@ -3441,6 +3453,7 @@ bool WarpBuilder::buildBailoutForColdIC(BytecodeLocation loc, CacheKind kind) {
     case CacheKind::HasOwn:
     case CacheKind::CheckPrivateField:
     case CacheKind::InstanceOf:
+    case CacheKind::OptimizeGetIterator:
       resultType = MIRType::Boolean;
       break;
     case CacheKind::SetProp:
@@ -585,6 +585,7 @@ AbortReasonOr<WarpScriptSnapshot*> WarpScriptOracle::createScriptSnapshot() {
       case JSOp::Or:
       case JSOp::Not:
       case JSOp::CloseIter:
+      case JSOp::OptimizeGetIterator:
         MOZ_TRY(maybeInlineIC(opSnapshots, loc));
         break;
@@ -2165,6 +2165,7 @@ bool ExpressionDecompiler::decompilePC(jsbytecode* pc, uint8_t defIndex) {
       case JSOp::ObjWithProto:
         return write("OBJ");
 
+      case JSOp::OptimizeGetIterator:
       case JSOp::OptimizeSpreadCall:
         return write("OPTIMIZED");
@@ -2572,6 +2572,17 @@ bool MOZ_NEVER_INLINE JS_HAZ_JSNATIVE_CALLER js::Interpret(JSContext* cx,
     }
     END_CASE(CloseIter)
 
+    CASE(OptimizeGetIterator) {
+      ReservedRooted<Value> val(&rootValue0, REGS.sp[-1]);
+      MutableHandleValue rval = REGS.stackHandleAt(-1);
+      bool result;
+      if (!OptimizeGetIterator(cx, val, &result)) {
+        goto error;
+      }
+      rval.setBoolean(result);
+    }
+    END_CASE(OptimizeGetIterator)
+
     CASE(IsGenClosing) {
       bool b = REGS.sp[-1].isMagic(JS_GENERATOR_CLOSING);
       PUSH_BOOLEAN(b);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool OptimizeArraySpreadCall(JSContext* cx, HandleObject obj,
|
static bool OptimizeArrayIteration(JSContext* cx, HandleObject obj,
|
||||||
MutableHandleValue result) {
|
bool* optimized) {
|
||||||
MOZ_ASSERT(result.isUndefined());
|
*optimized = false;
|
||||||
|
|
||||||
// Optimize spread call by skipping spread operation when following
|
// Optimize spread call by skipping spread operation when following
|
||||||
// conditions are met:
|
// conditions are met:
|
||||||
|
@ -5225,6 +5236,8 @@ static bool OptimizeArraySpreadCall(JSContext* cx, HandleObject obj,
|
||||||
// * the array's prototype is Array.prototype
|
// * the array's prototype is Array.prototype
|
||||||
// * Array.prototype[@@iterator] is not modified
|
// * Array.prototype[@@iterator] is not modified
|
||||||
// * %ArrayIteratorPrototype%.next is not modified
|
// * %ArrayIteratorPrototype%.next is not modified
|
||||||
|
// * %ArrayIteratorPrototype%.return is not defined
|
||||||
|
// * return is nowhere on the proto chain
|
||||||
if (!IsPackedArray(obj)) {
|
if (!IsPackedArray(obj)) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@@ -5234,15 +5247,10 @@ static bool OptimizeArrayIteration(JSContext* cx, HandleObject obj,
     return false;
   }
 
-  bool optimized;
-  if (!stubChain->tryOptimizeArray(cx, obj.as<ArrayObject>(), &optimized)) {
+  if (!stubChain->tryOptimizeArray(cx, obj.as<ArrayObject>(), optimized)) {
     return false;
   }
-  if (!optimized) {
-    return true;
-  }
-
-  result.setObject(*obj);
   return true;
 }
 
@@ -5301,12 +5309,15 @@ bool js::OptimizeSpreadCall(JSContext* cx, HandleValue arg,
   }
 
   RootedObject obj(cx, &arg.toObject());
-  if (!OptimizeArraySpreadCall(cx, obj, result)) {
+  bool optimized;
+  if (!OptimizeArrayIteration(cx, obj, &optimized)) {
     return false;
   }
-  if (result.isObject()) {
+  if (optimized) {
+    result.setObject(*obj);
     return true;
   }
 
   if (!OptimizeArgumentsSpreadCall(cx, obj, result)) {
     return false;
   }
@@ -5318,6 +5329,30 @@ bool js::OptimizeSpreadCall(JSContext* cx, HandleValue arg,
   return true;
 }
 
+bool js::OptimizeGetIterator(JSContext* cx, HandleValue arg, bool* result) {
+  // This function returns |false| if the iteration can't be optimized.
+  *result = false;
+
+  if (!arg.isObject()) {
+    return true;
+  }
+
+  RootedObject obj(cx, &arg.toObject());
+
+  bool optimized;
+  if (!OptimizeArrayIteration(cx, obj, &optimized)) {
+    return false;
+  }
+
+  if (optimized) {
+    *result = true;
+    return true;
+  }
+
+  MOZ_ASSERT(!*result);
+  return true;
+}
+
 ArrayObject* js::ArrayFromArgumentsObject(JSContext* cx,
                                           Handle<ArgumentsObject*> args) {
   MOZ_ASSERT(!args->hasOverriddenLength());
@@ -623,6 +623,8 @@ bool SpreadCallOperation(JSContext* cx, HandleScript script, jsbytecode* pc,
 bool OptimizeSpreadCall(JSContext* cx, HandleValue arg,
                         MutableHandleValue result);
 
+bool OptimizeGetIterator(JSContext* cx, HandleValue arg, bool* result);
+
 ArrayObject* ArrayFromArgumentsObject(JSContext* cx,
                                       Handle<ArgumentsObject*> args);
@@ -1410,6 +1410,18 @@
      *   Stack: iter =>
      */ \
     MACRO(CloseIter, close_iter, NULL, 2, 1, 0, JOF_UINT8|JOF_IC) \
+    /*
+     * If we can optimize iteration for `iterable`, meaning that it is a packed
+     * array and nothing important has been tampered with, then we replace it
+     * with `true`, otherwise we replace it with `false`. This is similar in
+     * operation to OptimizeSpreadCall.
+     *
+     *   Category: Objects
+     *   Type: Iteration
+     *   Operands:
+     *   Stack: iterable => is_optimizable
+     */ \
+    MACRO(OptimizeGetIterator, optimize_get_iterator, NULL, 1, 1, 1, JOF_BYTE|JOF_IC) \
     /*
      * Check that the top value on the stack is an object, and throw a
      * TypeError if not. `kind` is used only to generate an appropriate error
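Editorial note (not part of the patch): a rough, hedged illustration of the opcode's contract in JavaScript terms. The first group is expected to answer `true` (assuming nothing on the relevant prototypes has been touched); everything else answers `false` and the emitter's generic iterator path runs instead.

```js
// Expected to be optimizable (answer true) with untouched prototypes:
const packed = [1, 2, 3];        // packed array literal

// Expected to answer false:
const holey = [1, , 3];          // hole => not a packed array
const mapIter = new Map().keys();// not an array at all
const str = "abc";               // primitive, always the generic path

// Any of these tamperings also forces the slow path even for packed arrays:
//   Array.prototype[Symbol.iterator] = function* () {};
//   Object.getPrototypeOf([][Symbol.iterator]()).next = () => ({ done: true });
//   Object.prototype.return = () => ({ done: true });
```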
@@ -3573,14 +3585,13 @@
  * a power of two. Use this macro to do so.
  */
 #define FOR_EACH_TRAILING_UNUSED_OPCODE(MACRO) \
-  IF_RECORD_TUPLE(/* empty */, MACRO(231)) \
   IF_RECORD_TUPLE(/* empty */, MACRO(232)) \
   IF_RECORD_TUPLE(/* empty */, MACRO(233)) \
   IF_RECORD_TUPLE(/* empty */, MACRO(234)) \
   IF_RECORD_TUPLE(/* empty */, MACRO(235)) \
   IF_RECORD_TUPLE(/* empty */, MACRO(236)) \
   IF_RECORD_TUPLE(/* empty */, MACRO(237)) \
-  MACRO(238) \
+  IF_RECORD_TUPLE(/* empty */, MACRO(238)) \
   MACRO(239) \
   MACRO(240) \
   MACRO(241) \
@@ -55,11 +55,25 @@ bool js::ForOfPIC::Chain::initialize(JSContext* cx) {
     return false;
   }
 
+  // Get the canonical Iterator.prototype
+  Rooted<NativeObject*> iteratorProto(
+      cx, MaybeNativeObject(
+              GlobalObject::getOrCreateIteratorPrototype(cx, cx->global())));
+  if (!iteratorProto) {
+    return false;
+  }
+
+  Rooted<NativeObject*> objectProto(
+      cx, MaybeNativeObject(&cx->global()->getObjectPrototype()));
+  MOZ_ASSERT(objectProto);
+
   // From this point on, we can't fail. Set initialized and fill the fields
   // for the canonical Array.prototype and ArrayIterator.prototype objects.
   initialized_ = true;
   arrayProto_ = arrayProto;
   arrayIteratorProto_ = arrayIteratorProto;
+  iteratorProto_ = iteratorProto;
+  objectProto_ = objectProto;
 
   // Shortcut returns below means Array for-of will never be optimizable,
   // do set disabled_ now, and clear it later when we succeed.
@@ -101,6 +115,31 @@ bool js::ForOfPIC::Chain::initialize(JSContext* cx) {
     return true;
   }
 
+  // Ensure ArrayIterator.prototype doesn't define a "return" property
+  if (arrayIteratorProto->lookup(cx, cx->names().return_).isSome()) {
+    return true;
+  }
+
+  // Ensure ArrayIterator.prototype's prototype is Iterator.prototype
+  if (arrayIteratorProto->staticPrototype() != iteratorProto) {
+    return true;
+  }
+
+  // Ensure Iterator.prototype doesn't define a "return" property
+  if (iteratorProto->lookup(cx, cx->names().return_).isSome()) {
+    return true;
+  }
+
+  // Ensure Iterator.prototype's prototype is Object.prototype
+  if (iteratorProto->staticPrototype() != objectProto) {
+    return true;
+  }
+
+  // Ensure Object.prototype doesn't define a "return" property
+  if (objectProto->lookup(cx, cx->names().return_).isSome()) {
+    return true;
+  }
+
   disabled_ = false;
   arrayProtoShape_ = arrayProto->shape();
   arrayProtoIteratorSlot_ = iterProp->slot();
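Editorial note (not part of the patch): the checks above walk the canonical chain %ArrayIteratorPrototype% → Iterator.prototype → Object.prototype, so a `return` property anywhere on it defeats the optimization. A hedged shell-style sketch (using `assertEq`, as in the jit-tests above) of why even Object.prototype matters:

```js
// Object.prototype is on the array iterator's prototype chain, so a "return"
// defined there is visible to IteratorClose and must still be honored.
let closed = false;
Object.prototype.return = function () {
  closed = true;
  return { done: true };
};

let [x] = [1, 2];   // stops before the iterator is exhausted, so it is closed
assertEq(x, 1);
assertEq(closed, true);

delete Object.prototype.return;  // clean up the global tampering
```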
@@ -108,6 +147,8 @@ bool js::ForOfPIC::Chain::initialize(JSContext* cx) {
   arrayIteratorProtoShape_ = arrayIteratorProto->shape();
   arrayIteratorProtoNextSlot_ = nextProp->slot();
   canonicalNextFunc_ = next;
+  iteratorProtoShape_ = iteratorProto->shape();
+  objectProtoShape_ = objectProto->shape();
   return true;
 }
@@ -221,7 +262,7 @@ bool js::ForOfPIC::Chain::tryOptimizeArrayIteratorNext(JSContext* cx,
     if (!initialize(cx)) {
       return false;
     }
-  } else if (!disabled_ && !isArrayNextStillSane()) {
+  } else if (!disabled_ && !isArrayIteratorStateStillSane()) {
     // Otherwise, if array iterator state is no longer sane, reinitialize.
     reset(cx);
@@ -237,7 +278,7 @@ bool js::ForOfPIC::Chain::tryOptimizeArrayIteratorNext(JSContext* cx,
   }
 
   // By the time we get here, we should have a sane iterator state to work with.
-  MOZ_ASSERT(isArrayNextStillSane());
+  MOZ_ASSERT(isArrayIteratorStateStillSane());
 
   *optimized = true;
   return true;
@@ -269,8 +310,8 @@ bool js::ForOfPIC::Chain::isArrayStateStillSane() {
     return false;
   }
 
-  // Chain to isArrayNextStillSane.
-  return isArrayNextStillSane();
+  // Chain to isArrayIteratorStateStillSane.
+  return isArrayIteratorStateStillSane();
 }
 
 void js::ForOfPIC::Chain::reset(JSContext* cx) {
@@ -282,6 +323,8 @@ void js::ForOfPIC::Chain::reset(JSContext* cx) {
 
   arrayProto_ = nullptr;
   arrayIteratorProto_ = nullptr;
+  iteratorProto_ = nullptr;
+  objectProto_ = nullptr;
 
   arrayProtoShape_ = nullptr;
   arrayProtoIteratorSlot_ = -1;
@@ -291,6 +334,9 @@ void js::ForOfPIC::Chain::reset(JSContext* cx) {
   arrayIteratorProtoNextSlot_ = -1;
   canonicalNextFunc_ = UndefinedValue();
 
+  iteratorProtoShape_ = nullptr;
+  objectProtoShape_ = nullptr;
+
   initialized_ = false;
 }
@@ -310,10 +356,14 @@ void js::ForOfPIC::Chain::trace(JSTracer* trc) {
 
   TraceEdge(trc, &arrayProto_, "ForOfPIC Array.prototype.");
   TraceEdge(trc, &arrayIteratorProto_, "ForOfPIC ArrayIterator.prototype.");
+  TraceEdge(trc, &iteratorProto_, "ForOfPIC Iterator.prototype.");
+  TraceEdge(trc, &objectProto_, "ForOfPIC Object.prototype.");
 
   TraceEdge(trc, &arrayProtoShape_, "ForOfPIC Array.prototype shape.");
   TraceEdge(trc, &arrayIteratorProtoShape_,
             "ForOfPIC ArrayIterator.prototype shape.");
+  TraceEdge(trc, &iteratorProtoShape_, "ForOfPIC Iterator.prototype shape.");
+  TraceEdge(trc, &objectProtoShape_, "ForOfPIC Object.prototype shape.");
 
   TraceEdge(trc, &canonicalIteratorFunc_, "ForOfPIC ArrayValues builtin.");
   TraceEdge(trc, &canonicalNextFunc_,
@@ -148,9 +148,12 @@ struct ForOfPIC {
   // Pointer to owning JSObject for memory accounting purposes.
   const GCPtr<JSObject*> picObject_;
 
-  // Pointer to canonical Array.prototype and ArrayIterator.prototype
+  // Pointer to canonical Array.prototype, ArrayIterator.prototype,
+  // Iterator.prototype, and Object.prototype
   GCPtr<NativeObject*> arrayProto_;
   GCPtr<NativeObject*> arrayIteratorProto_;
+  GCPtr<NativeObject*> iteratorProto_;
+  GCPtr<NativeObject*> objectProto_;
 
   // Shape of matching Array.prototype object, and slot containing
   // the @@iterator for it, and the canonical value.
@@ -164,6 +167,11 @@ struct ForOfPIC {
   uint32_t arrayIteratorProtoNextSlot_;
   GCPtr<Value> canonicalNextFunc_;
 
+  // Shape of matching Iterator.prototype object.
+  GCPtr<Shape*> iteratorProtoShape_;
+  // Shape of matching Object.prototype object.
+  GCPtr<Shape*> objectProtoShape_;
+
   // Initialization flag marking lazy initialization of above fields.
   bool initialized_;
@@ -210,11 +218,25 @@ struct ForOfPIC {
   // in a way that would disable this PIC.
   bool isArrayStateStillSane();
 
-  // Check if ArrayIterator.next is still optimizable.
-  inline bool isArrayNextStillSane() {
-    return (arrayIteratorProto_->shape() == arrayIteratorProtoShape_) &&
-           (arrayIteratorProto_->getSlot(arrayIteratorProtoNextSlot_) ==
-            canonicalNextFunc_);
+  // Check if ArrayIterator.next and ArrayIterator.return are still
+  // optimizable.
+  inline bool isArrayIteratorStateStillSane() {
+    // Ensure the prototype chain is intact, which will ensure that "return"
+    // has not been defined.
+    if (arrayIteratorProto_->shape() != arrayIteratorProtoShape_) {
+      return false;
+    }
+
+    if (iteratorProto_->shape() != iteratorProtoShape_) {
+      return false;
+    }
+
+    if (objectProto_->shape() != objectProtoShape_) {
+      return false;
+    }
+
+    return arrayIteratorProto_->getSlot(arrayIteratorProtoNextSlot_) ==
+           canonicalNextFunc_;
   }
 
   // Check if a matching optimized stub for the given object exists.