Merge inbound to mozilla-central. a=merge

Bogdan Tara 2018-08-03 13:16:27 +03:00
Parents: 5d438c8022 879bf0a2bd
Commit: f2cb75b28c
63 changed files: 663 additions and 442 deletions


@ -113,6 +113,8 @@ export default class AddressPicker extends RichPicker {
throw new Error(`${this.selectedStateKey} option ${selectedAddressGUID} ` +
`does not exist in the address picker`);
}
super.render(state);
}
get selectedStateKey() {


@ -65,6 +65,8 @@ export default class PaymentMethodPicker extends RichPicker {
throw new Error(`The option ${selectedPaymentCardGUID} ` +
`does not exist in the payment method picker`);
}
super.render(state);
}
get selectedStateKey() {


@ -49,6 +49,10 @@ export default class RichPicker extends PaymentStateSubscriberMixin(HTMLElement)
}
}
render(state) {
this.editLink.hidden = !this.dropdown.value;
}
get value() {
return this.dropdown &&
this.dropdown.selectedOption;


@ -38,6 +38,7 @@ add_task(async function test_empty() {
ok(picker1, "Check picker1 exists");
let {savedAddresses} = picker1.requestStore.getState();
is(Object.keys(savedAddresses).length, 0, "Check empty initial state");
is(picker1.editLink.hidden, true, "Check that picker edit link is hidden");
is(picker1.dropdown.popupBox.children.length, 0, "Check dropdown is empty");
});
@ -112,6 +113,7 @@ add_task(async function test_update() {
add_task(async function test_change_selected_address() {
let options = picker1.dropdown.popupBox.children;
is(picker1.dropdown.selectedOption, null, "Should default to no selected option");
is(picker1.editLink.hidden, true, "Picker edit link should be hidden when no option is selected");
let {selectedShippingAddress} = picker1.requestStore.getState();
is(selectedShippingAddress, null, "store should have no option selected");


@ -39,6 +39,8 @@ add_task(async function test_empty() {
let {shippingOptions} = state && state.request && state.request.paymentDetails;
is(Object.keys(shippingOptions).length, 0, "Check empty initial state");
is(picker1.dropdown.popupBox.children.length, 0, "Check dropdown is empty");
is(picker1.editLink.hidden, true, "Check that picker edit link is always hidden");
is(picker1.addLink.hidden, true, "Check that picker add link is always hidden");
});
add_task(async function test_initialSet() {


@ -589,6 +589,7 @@ def get_vc_paths(topsrcdir):
'x64': [os.path.join(tools_path, 'x64')],
# The x64->x86 cross toolchain requires DLLs from the native x64 toolchain.
'x86': [os.path.join(tools_path, 'x86'), os.path.join(tools_path, 'x64')],
'arm64': [os.path.join(tools_path, 'x64')],
})
@ -614,6 +615,7 @@ def vc_compiler_path(host, target, vs_major_version, env, vs_release_name):
'x86': 'x86',
'x86_64': 'x64',
'arm': 'arm',
'aarch64': 'arm64'
}.get(target.cpu)
if vc_target is None:
return


@ -356,12 +356,14 @@ def lib_path(target, c_compiler, vc_path, windows_sdk_dir, ucrt_sdk_dir, dia_sdk
'x86': 'x86',
'x86_64': 'x64',
'arm': 'arm',
'aarch64': 'arm64',
}.get(target.cpu)
old_target = {
'x86': '',
'x86_64': 'amd64',
'arm': 'arm',
'aarch64': 'arm64'
}.get(target.cpu)
if old_target is None:
return


@ -50,7 +50,7 @@ JS::ubi::Concrete<nsINode>::edges(JSContext* cx, bool wantNames) const
return nullptr;
}
}
return range;
return js::UniquePtr<EdgeRange>(range.release());
}
JS::ubi::Node::Size


@ -1956,13 +1956,13 @@ BytecodeEmitter::emitPropOp(ParseNode* pn, JSOp op)
}
bool
BytecodeEmitter::emitSuperPropOp(ParseNode* pn, JSOp op, bool isCall)
BytecodeEmitter::emitSuperGetProp(ParseNode* pn, bool isCall)
{
ParseNode* base = &pn->as<PropertyAccess>().expression();
if (!emitSuperPropLHS(base, isCall))
return false;
if (!emitAtomOp(pn, op))
if (!emitAtomOp(pn, JSOP_GETPROP_SUPER))
return false;
if (isCall && !emit1(JSOP_SWAP))
@ -2110,10 +2110,7 @@ BytecodeEmitter::emitElemOperands(ParseNode* pn, EmitElemOption opts)
if (!emitTree(pn->pn_right))
return false;
if (opts == EmitElemOption::Set) {
if (!emit2(JSOP_PICK, 2))
return false;
} else if (opts == EmitElemOption::IncDec || opts == EmitElemOption::CompoundAssign) {
if (opts == EmitElemOption::IncDec || opts == EmitElemOption::CompoundAssign) {
if (!emit1(JSOP_TOID))
return false;
}
@ -2125,37 +2122,27 @@ BytecodeEmitter::emitSuperElemOperands(ParseNode* pn, EmitElemOption opts)
{
MOZ_ASSERT(pn->isKind(ParseNodeKind::Elem) && pn->as<PropertyByValue>().isSuper());
// The ordering here is somewhat screwy. We need to evaluate the propval
// first, by spec. Do a little dance to not emit more than one JSOP_THIS.
// Since JSOP_THIS might throw in derived class constructors, we cannot
// just push it earlier as the receiver. We have to swap it down instead.
if (!emitGetThisForSuperBase(pn->pn_left)) // THIS
return false;
if (!emitTree(pn->pn_right))
if (opts == EmitElemOption::Call) {
// We need a second |this| that will be consumed during computation of
// the property value. (The original |this| is passed to the call.)
if (!emit1(JSOP_DUP)) // THIS THIS
return false;
}
if (!emitTree(pn->pn_right)) // THIS? THIS KEY
return false;
// We need to convert the key to an object id first, so that we do not do
// it inside both the GETELEM and the SETELEM.
if (opts == EmitElemOption::IncDec || opts == EmitElemOption::CompoundAssign) {
if (!emit1(JSOP_TOID))
if (!emit1(JSOP_TOID)) // THIS? THIS KEY
return false;
}
if (!emitGetThisForSuperBase(pn->pn_left))
return false;
if (opts == EmitElemOption::Call) {
if (!emit1(JSOP_SWAP))
return false;
// We need another |this| on top, also
if (!emitDupAt(1))
return false;
}
if (!emit1(JSOP_SUPERBASE))
return false;
if (opts == EmitElemOption::Set && !emit2(JSOP_PICK, 3))
if (!emit1(JSOP_SUPERBASE)) // THIS? THIS KEY SUPERBASE
return false;
return true;
@ -2174,30 +2161,27 @@ BytecodeEmitter::emitElemOpBase(JSOp op)
bool
BytecodeEmitter::emitElemOp(ParseNode* pn, JSOp op)
{
EmitElemOption opts = EmitElemOption::Get;
if (op == JSOP_CALLELEM)
opts = EmitElemOption::Call;
else if (op == JSOP_SETELEM || op == JSOP_STRICTSETELEM)
opts = EmitElemOption::Set;
MOZ_ASSERT(op == JSOP_GETELEM ||
op == JSOP_CALLELEM ||
op == JSOP_DELELEM ||
op == JSOP_STRICTDELELEM);
EmitElemOption opts = op == JSOP_CALLELEM ? EmitElemOption::Call : EmitElemOption::Get;
return emitElemOperands(pn, opts) && emitElemOpBase(op);
}
bool
BytecodeEmitter::emitSuperElemOp(ParseNode* pn, JSOp op, bool isCall)
BytecodeEmitter::emitSuperGetElem(ParseNode* pn, bool isCall)
{
EmitElemOption opts = EmitElemOption::Get;
if (isCall)
opts = EmitElemOption::Call;
else if (op == JSOP_SETELEM_SUPER || op == JSOP_STRICTSETELEM_SUPER)
opts = EmitElemOption::Set;
EmitElemOption opts = isCall ? EmitElemOption::Call : EmitElemOption::Get;
if (!emitSuperElemOperands(pn, opts))
if (!emitSuperElemOperands(pn, opts)) // THIS? THIS KEY SUPERBASE
return false;
if (!emitElemOpBase(op))
if (!emitElemOpBase(JSOP_GETELEM_SUPER)) // THIS? VALUE
return false;
if (isCall && !emit1(JSOP_SWAP))
if (isCall && !emit1(JSOP_SWAP)) // VALUE THIS
return false;
return true;
@ -2228,11 +2212,11 @@ BytecodeEmitter::emitElemIncDec(ParseNode* pn)
if (isSuper) {
// There's no such thing as JSOP_DUP3, so we have to be creative.
// Note that pushing things again is no fewer JSOps.
if (!emitDupAt(2)) // KEY THIS OBJ KEY
if (!emitDupAt(2)) // THIS KEY OBJ THIS
return false;
if (!emitDupAt(2)) // KEY THIS OBJ KEY THIS
if (!emitDupAt(2)) // THIS KEY OBJ THIS KEY
return false;
if (!emitDupAt(2)) // KEY THIS OBJ KEY THIS OBJ
if (!emitDupAt(2)) // THIS KEY OBJ THIS KEY OBJ
return false;
getOp = JSOP_GETELEM_SUPER;
} else {
@ -2245,27 +2229,16 @@ BytecodeEmitter::emitElemIncDec(ParseNode* pn)
return false;
if (!emit1(JSOP_POS)) // OBJ KEY N
return false;
if (post && !emit1(JSOP_DUP)) // OBJ KEY N? N
return false;
if (!emit1(JSOP_ONE)) // OBJ KEY N? N 1
return false;
if (!emit1(binop)) // OBJ KEY N? N+1
return false;
if (post) {
if (isSuper) {
// We have one more value to rotate around, because of |this|
// on the stack
if (!emit2(JSOP_PICK, 4))
return false;
}
if (!emit2(JSOP_PICK, 3 + isSuper)) // KEY N N+1 OBJ
if (!emit1(JSOP_DUP)) // OBJ KEY N N
return false;
if (!emit2(JSOP_PICK, 3 + isSuper)) // N N+1 OBJ KEY
return false;
if (!emit2(JSOP_PICK, 2 + isSuper)) // N OBJ KEY N+1
if (!emit2(JSOP_UNPICK, 3 + isSuper)) // N OBJ KEY N
return false;
}
if (!emit1(JSOP_ONE)) // N? OBJ KEY N 1
return false;
if (!emit1(binop)) // N? OBJ KEY N+1
return false;
JSOp setOp = isSuper ? (sc->strict() ? JSOP_STRICTSETELEM_SUPER : JSOP_SETELEM_SUPER)
: (sc->strict() ? JSOP_STRICTSETELEM : JSOP_SETELEM);
@ -6481,17 +6454,26 @@ BytecodeEmitter::emitDeleteProperty(ParseNode* node)
MOZ_ASSERT(node->isKind(ParseNodeKind::DeleteProp));
MOZ_ASSERT(node->isArity(PN_UNARY));
ParseNode* propExpr = node->pn_kid;
MOZ_ASSERT(propExpr->isKind(ParseNodeKind::Dot));
PropertyAccess* propExpr = &node->pn_kid->as<PropertyAccess>();
if (propExpr->isSuper()) {
// The expression |delete super.foo;| has to evaluate |super.foo|,
// which could throw if |this| hasn't yet been set by a |super(...)|
// call or the super-base is not an object, before throwing a
// ReferenceError for attempting to delete a super-reference.
if (!emitGetThisForSuperBase(&propExpr->expression()))
return false;
if (propExpr->as<PropertyAccess>().isSuper()) {
Still have to calculate the base, even though we are going
// to throw unconditionally, as calculating the base could also
// throw.
if (!emit1(JSOP_SUPERBASE))
return false;
return emitUint16Operand(JSOP_THROWMSG, JSMSG_CANT_DELETE_SUPER);
// Unconditionally throw when attempting to delete a super-reference.
if (!emitUint16Operand(JSOP_THROWMSG, JSMSG_CANT_DELETE_SUPER))
return false;
// Another wrinkle: Balance the stack from the emitter's point of view.
// Execution will not reach here, as the last bytecode threw.
return emit1(JSOP_POP);
}
JSOp delOp = sc->strict() ? JSOP_STRICTDELPROP : JSOP_DELPROP;
@ -6504,23 +6486,32 @@ BytecodeEmitter::emitDeleteElement(ParseNode* node)
MOZ_ASSERT(node->isKind(ParseNodeKind::DeleteElem));
MOZ_ASSERT(node->isArity(PN_UNARY));
ParseNode* elemExpr = node->pn_kid;
MOZ_ASSERT(elemExpr->isKind(ParseNodeKind::Elem));
PropertyByValue* elemExpr = &node->pn_kid->as<PropertyByValue>();
if (elemExpr->isSuper()) {
// The expression |delete super[foo];| has to evaluate |super[foo]|,
// which could throw if |this| hasn't yet been set by a |super(...)|
// call, or trigger side-effects when evaluating ToPropertyKey(foo),
// or also throw when the super-base is not an object, before throwing
// a ReferenceError for attempting to delete a super-reference.
if (!emitGetThisForSuperBase(elemExpr->pn_left))
return false;
if (elemExpr->as<PropertyByValue>().isSuper()) {
Still have to calculate everything, even though we're going to
throw, since the evaluation may have side effects.
if (!emitTree(elemExpr->pn_right))
return false;
if (!emit1(JSOP_TOID))
return false;
if (!emit1(JSOP_SUPERBASE))
return false;
// Unconditionally throw when attempting to delete a super-reference.
if (!emitUint16Operand(JSOP_THROWMSG, JSMSG_CANT_DELETE_SUPER))
return false;
// Another wrinkle: Balance the stack from the emitter's point of view.
// Execution will not reach here, as the last bytecode threw.
return emit1(JSOP_POP);
return emitPopN(2);
}
JSOp delOp = sc->strict() ? JSOP_STRICTDELELEM : JSOP_DELELEM;
@ -6754,10 +6745,10 @@ BytecodeEmitter::emitSelfHostedGetPropertySuper(ParseNode* pn)
ParseNode* idNode = objNode->pn_next;
ParseNode* receiverNode = idNode->pn_next;
if (!emitTree(idNode))
if (!emitTree(receiverNode))
return false;
if (!emitTree(receiverNode))
if (!emitTree(idNode))
return false;
if (!emitTree(objNode))
@ -6816,7 +6807,7 @@ BytecodeEmitter::emitCallee(ParseNode* callee, ParseNode* call, bool* callop)
case ParseNodeKind::Dot:
MOZ_ASSERT(emitterMode != BytecodeEmitter::SelfHosting);
if (callee->as<PropertyAccess>().isSuper()) {
if (!emitSuperPropOp(callee, JSOP_GETPROP_SUPER, /* isCall = */ *callop))
if (!emitSuperGetProp(callee, /* isCall = */ *callop))
return false;
} else {
if (!emitPropOp(callee, *callop ? JSOP_CALLPROP : JSOP_GETPROP))
@ -6827,7 +6818,7 @@ BytecodeEmitter::emitCallee(ParseNode* callee, ParseNode* call, bool* callop)
case ParseNodeKind::Elem:
MOZ_ASSERT(emitterMode != BytecodeEmitter::SelfHosting);
if (callee->as<PropertyByValue>().isSuper()) {
if (!emitSuperElemOp(callee, JSOP_GETELEM_SUPER, /* isCall = */ *callop))
if (!emitSuperGetElem(callee, /* isCall = */ *callop))
return false;
} else {
if (!emitElemOp(callee, *callop ? JSOP_CALLELEM : JSOP_GETELEM))
@ -8546,7 +8537,7 @@ BytecodeEmitter::emitTree(ParseNode* pn, ValueUsage valueUsage /* = ValueUsage::
case ParseNodeKind::Dot:
if (pn->as<PropertyAccess>().isSuper()) {
if (!emitSuperPropOp(pn, JSOP_GETPROP_SUPER))
if (!emitSuperGetProp(pn))
return false;
} else {
if (!emitPropOp(pn, JSOP_GETPROP))
@ -8556,7 +8547,7 @@ BytecodeEmitter::emitTree(ParseNode* pn, ValueUsage valueUsage /* = ValueUsage::
case ParseNodeKind::Elem:
if (pn->as<PropertyByValue>().isSuper()) {
if (!emitSuperElemOp(pn, JSOP_GETELEM_SUPER))
if (!emitSuperGetElem(pn))
return false;
} else {
if (!emitElemOp(pn, JSOP_GETELEM))


@ -673,7 +673,7 @@ struct MOZ_STACK_CLASS BytecodeEmitter
// Emit bytecode to put operands for a JSOP_GETELEM/CALLELEM/SETELEM/DELELEM
// opcode onto the stack in the right order. In the case of SETELEM, the
// value to be assigned must already be pushed.
enum class EmitElemOption { Get, Set, Call, IncDec, CompoundAssign, Ref };
enum class EmitElemOption { Get, Call, IncDec, CompoundAssign, Ref };
MOZ_MUST_USE bool emitElemOperands(ParseNode* pn, EmitElemOption opts);
MOZ_MUST_USE bool emitElemOpBase(JSOp op);
@ -846,10 +846,10 @@ struct MOZ_STACK_CLASS BytecodeEmitter
MOZ_MUST_USE bool emitClass(ParseNode* pn);
MOZ_MUST_USE bool emitSuperPropLHS(ParseNode* superBase, bool isCall = false);
MOZ_MUST_USE bool emitSuperPropOp(ParseNode* pn, JSOp op, bool isCall = false);
MOZ_MUST_USE bool emitSuperGetProp(ParseNode* pn, bool isCall = false);
MOZ_MUST_USE bool emitSuperElemOperands(ParseNode* pn,
EmitElemOption opts = EmitElemOption::Get);
MOZ_MUST_USE bool emitSuperElemOp(ParseNode* pn, JSOp op, bool isCall = false);
MOZ_MUST_USE bool emitSuperGetElem(ParseNode* pn, bool isCall = false);
MOZ_MUST_USE bool emitCallee(ParseNode* callee, ParseNode* call, bool* callop);


@ -0,0 +1,22 @@
function f(x) {
var y, z;
arguments; // Force creation of mapped arguments, so modifying |x| writes to the arguments object.
Math; // Access a global variable to create a resume point.
z = x + 1; // Was executed twice because only the resume point for 'Math' was present before the fix.
x = z; // Modifying |x| writes into the arguments object. We previously failed to create a resume point here.
y = 2 * x; // Triggers a bailout when overflowing int32 boundaries.
return [x, y];
}
var x = [0, 0, 0x3FFFFFFF];
for (var j = 0; j < 3; ++j) {
var value = x[j];
var expected = [value + 1, (value + 1) * 2];
var actual = f(value);
assertEq(actual[0], expected[0]);
assertEq(actual[1], expected[1]);
}


@ -2303,7 +2303,7 @@ BaselineCompiler::emit_JSOP_GETELEM_SUPER()
storeValue(frame.peek(-1), frame.addressOfScratchValue(), R2);
frame.pop();
// Keep index and receiver in R0 and R1.
// Keep receiver and index in R0 and R1.
frame.popRegsAndSync(2);
// Keep obj on the stack.
@ -2356,10 +2356,10 @@ BaselineCompiler::emit_JSOP_SETELEM_SUPER()
{
bool strict = IsCheckStrictOp(JSOp(*pc));
// Incoming stack is |propval, receiver, obj, rval|. We need to shuffle
// Incoming stack is |receiver, propval, obj, rval|. We need to shuffle
// stack to leave rval when operation is complete.
// Pop rval into R0, then load propval into R1 and replace with rval.
// Pop rval into R0, then load receiver into R1 and replace with rval.
frame.popRegsAndSync(1);
masm.loadValue(frame.addressOfStackValue(frame.peek(-3)), R1);
masm.storeValue(R0, frame.addressOfStackValue(frame.peek(-3)));
@ -2367,10 +2367,10 @@ BaselineCompiler::emit_JSOP_SETELEM_SUPER()
prepareVMCall();
pushArg(Imm32(strict));
masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R2);
pushArg(R2); // receiver
pushArg(R1); // receiver
pushArg(R0); // rval
pushArg(R1); // propval
masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R0);
pushArg(R0); // propval
masm.unboxObject(frame.addressOfStackValue(frame.peek(-1)), R0.scratchReg());
pushArg(R0.scratchReg()); // obj
@ -2727,7 +2727,6 @@ BaselineCompiler::getEnvironmentCoordinateAddressFromObject(Register objReg, Reg
EnvironmentCoordinate ec(pc);
Shape* shape = EnvironmentCoordinateToEnvironmentShape(script, pc);
Address addr;
if (shape->numFixedSlots() <= ec.slot()) {
masm.loadPtr(Address(objReg, NativeObject::offsetOfSlots()), reg);
return Address(reg, (ec.slot() - shape->numFixedSlots()) * sizeof(Value));


@ -639,7 +639,7 @@ DoGetElemFallback(JSContext* cx, BaselineFrame* frame, ICGetElem_Fallback* stub_
static bool
DoGetElemSuperFallback(JSContext* cx, BaselineFrame* frame, ICGetElem_Fallback* stub_,
HandleValue lhs, HandleValue receiver, HandleValue rhs,
HandleValue lhs, HandleValue rhs, HandleValue receiver,
MutableHandleValue res)
{
// This fallback stub may trigger debug mode toggling.
@ -734,17 +734,17 @@ ICGetElem_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
// Super property getters use a |this| that differs from base object
if (hasReceiver_) {
// State: index in R0, receiver in R1, obj on the stack
// State: receiver in R0, index in R1, obj on the stack
// Ensure stack is fully synced for the expression decompiler.
// We need: index, receiver, obj
// We need: receiver, index, obj
masm.pushValue(R0);
masm.pushValue(R1);
masm.pushValue(Address(masm.getStackPointer(), sizeof(Value) * 2));
// Push arguments.
masm.pushValue(R0); // Index
masm.pushValue(R1); // Receiver
masm.pushValue(R0); // Receiver
masm.pushValue(R1); // Index
masm.pushValue(Address(masm.getStackPointer(), sizeof(Value) * 5)); // Obj
masm.push(ICStubReg);
pushStubPayload(masm, R0.scratchReg());


@ -1651,7 +1651,7 @@ CacheIRCompiler::emitGuardFunctionPrototype()
StubFieldOffset slot(reader.stubOffset(), StubField::Type::RawWord);
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
emitLoadStubField(slot, scratch2);
BaseValueIndex prototypeSlot(scratch1, scratch2);
BaseObjectSlotIndex prototypeSlot(scratch1, scratch2);
masm.branchTestObject(Assembler::NotEqual, prototypeSlot, failure->label());
masm.unboxObject(prototypeSlot, scratch1);
masm.branchPtr(Assembler::NotEqual,


@ -1486,22 +1486,38 @@ PrepareAndExecuteRegExp(JSContext* cx, MacroAssembler& masm, Register regexp, Re
masm.loadStringLength(input, temp2);
masm.branch32(Assembler::AboveOrEqual, lastIndex, temp2, &done);
// For TrailSurrogateMin ≤ x ≤ TrailSurrogateMax and
// LeadSurrogateMin ≤ x ≤ LeadSurrogateMax, the following
// equations hold.
//
// SurrogateMin ≤ x ≤ SurrogateMax
// <> SurrogateMin ≤ x ≤ SurrogateMin + 2^10 - 1
// <> ((x - SurrogateMin) >>> 10) = 0 where >>> is an unsigned-shift
// See Hacker's Delight, section 4-1 for details.
//
// ((x - SurrogateMin) >>> 10) = 0
// <> floor((x - SurrogateMin) / 1024) = 0
// <> floor((x / 1024) - (SurrogateMin / 1024)) = 0
// <> floor(x / 1024) = SurrogateMin / 1024
// <> floor(x / 1024) * 1024 = SurrogateMin
// <> (x >>> 10) << 10 = SurrogateMin
// <> x & ~(2^10 - 1) = SurrogateMin
constexpr char16_t SurrogateMask = 0xFC00;
// Check if input[lastIndex] is trail surrogate.
masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
masm.computeEffectiveAddress(BaseIndex(temp2, lastIndex, TimesTwo), temp3);
masm.load16ZeroExtend(Address(temp3, 0), temp3);
masm.load16ZeroExtend(BaseIndex(temp2, lastIndex, TimesTwo), temp3);
masm.branch32(Assembler::Below, temp3, Imm32(unicode::TrailSurrogateMin), &done);
masm.branch32(Assembler::Above, temp3, Imm32(unicode::TrailSurrogateMax), &done);
masm.and32(Imm32(SurrogateMask), temp3);
masm.branch32(Assembler::NotEqual, temp3, Imm32(unicode::TrailSurrogateMin), &done);
// Check if input[lastIndex-1] is lead surrogate.
masm.move32(lastIndex, temp3);
masm.sub32(Imm32(1), temp3);
masm.computeEffectiveAddress(BaseIndex(temp2, temp3, TimesTwo), temp3);
masm.load16ZeroExtend(Address(temp3, 0), temp3);
masm.load16ZeroExtend(BaseIndex(temp2, lastIndex, TimesTwo, -int32_t(sizeof(char16_t))),
temp3);
masm.branch32(Assembler::Below, temp3, Imm32(unicode::LeadSurrogateMin), &done);
masm.branch32(Assembler::Above, temp3, Imm32(unicode::LeadSurrogateMax), &done);
masm.and32(Imm32(SurrogateMask), temp3);
masm.branch32(Assembler::NotEqual, temp3, Imm32(unicode::LeadSurrogateMin), &done);
// Move lastIndex to lead surrogate.
masm.subPtr(Imm32(1), lastIndex);
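
A quick standalone check of the mask identity derived in the comment above — a minimal sketch in plain C++, independent of the macro assembler; the surrogate constants below are the standard Unicode values that unicode::LeadSurrogateMin and friends denote:

#include <cassert>
#include <cstdint>

int main() {
    // Standard Unicode surrogate ranges.
    constexpr uint32_t LeadSurrogateMin = 0xD800, LeadSurrogateMax = 0xDBFF;
    constexpr uint32_t TrailSurrogateMin = 0xDC00, TrailSurrogateMax = 0xDFFF;
    constexpr uint32_t SurrogateMask = 0xFC00; // ~(2^10 - 1) in 16 bits

    // Each range spans exactly 2^10 code units starting at a 1024-aligned
    // value, so (x & SurrogateMask) == SurrogateMin iff x is in the range.
    for (uint32_t x = 0; x <= 0xFFFF; x++) {
        bool lead = x >= LeadSurrogateMin && x <= LeadSurrogateMax;
        bool trail = x >= TrailSurrogateMin && x <= TrailSurrogateMax;
        assert(((x & SurrogateMask) == LeadSurrogateMin) == lead);
        assert(((x & SurrogateMask) == TrailSurrogateMin) == trail);
    }
    return 0;
}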
@ -3691,7 +3707,7 @@ CodeGenerator::visitElements(LElements* lir)
masm.loadPtr(elements, ToRegister(lir->output()));
}
typedef bool (*ConvertElementsToDoublesFn)(JSContext*, uintptr_t);
typedef void (*ConvertElementsToDoublesFn)(JSContext*, uintptr_t);
static const VMFunction ConvertElementsToDoublesInfo =
FunctionInfo<ConvertElementsToDoublesFn>(ObjectElements::ConvertElementsToDoubles,
"ObjectElements::ConvertElementsToDoubles");
@ -7581,7 +7597,6 @@ CodeGenerator::visitSignDI(LSignDI* ins)
// The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
// is -Infinity instead of Infinity.
Label isNegInf;
masm.loadConstantDouble(1.0, temp);
masm.divDouble(input, temp);
masm.branchDouble(Assembler::DoubleLessThan, temp, input, &bailout);
@ -9262,7 +9277,7 @@ CodeGenerator::emitStoreHoleCheck(Register elements, const LAllocation* index,
Address dest(elements, ToInt32(index) * sizeof(js::Value) + offsetAdjustment);
masm.branchTestMagic(Assembler::Equal, dest, &bail);
} else {
BaseIndex dest(elements, ToRegister(index), TimesEight, offsetAdjustment);
BaseObjectElementIndex dest(elements, ToRegister(index), offsetAdjustment);
masm.branchTestMagic(Assembler::Equal, dest, &bail);
}
bailoutFrom(&bail, snapshot);
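
BaseObjectElementIndex replaces the hand-written BaseIndex(elements, index, TimesEight, offset) form throughout this patch. A rough model of the equivalence, assuming sizeof(js::Value) == 8; the types below are illustrative stand-ins, not the real jit classes:

#include <cassert>
#include <cstdint>

// Illustrative models only; the real classes live in js/src/jit headers.
struct BaseIndexModel {
    uintptr_t base;
    uintptr_t index;
    int scale;      // bytes per element; TimesEight == 8
    int32_t offset;
    uintptr_t effective() const { return base + index * scale + offset; }
};

// BaseObjectElementIndex fixes the scale to sizeof(js::Value), so call
// sites no longer spell out TimesEight by hand.
struct BaseObjectElementIndexModel : BaseIndexModel {
    BaseObjectElementIndexModel(uintptr_t b, uintptr_t i, int32_t off = 0)
        : BaseIndexModel{b, i, 8 /* sizeof(js::Value) */, off}
    {}
};

int main() {
    BaseIndexModel manual{0x1000, 3, 8, 16};
    BaseObjectElementIndexModel fixed(0x1000, 3, 16);
    assert(manual.effective() == fixed.effective());
    return 0;
}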
@ -9287,7 +9302,7 @@ CodeGenerator::emitStoreElementTyped(const LAllocation* value,
Address dest(elements, ToInt32(index) * sizeof(js::Value) + offsetAdjustment);
masm.storeUnboxedValue(v, valueType, dest, elementType);
} else {
BaseIndex dest(elements, ToRegister(index), TimesEight, offsetAdjustment);
BaseObjectElementIndex dest(elements, ToRegister(index), offsetAdjustment);
masm.storeUnboxedValue(v, valueType, dest, elementType);
}
}
@ -9327,8 +9342,8 @@ CodeGenerator::visitStoreElementV(LStoreElementV* lir)
ToInt32(lir->index()) * sizeof(js::Value) + lir->mir()->offsetAdjustment());
masm.storeValue(value, dest);
} else {
BaseIndex dest(elements, ToRegister(lir->index()), TimesEight,
lir->mir()->offsetAdjustment());
BaseObjectElementIndex dest(elements, ToRegister(lir->index()),
lir->mir()->offsetAdjustment());
masm.storeValue(value, dest);
}
}
@ -9360,7 +9375,7 @@ CodeGenerator::emitStoreElementHoleT(T* lir)
masm.branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::FROZEN),
ool->callStub());
if (lir->toFallibleStoreElementT()->mir()->needsHoleCheck()) {
masm.branchTestMagic(Assembler::Equal, BaseValueIndex(elements, index),
masm.branchTestMagic(Assembler::Equal, BaseObjectElementIndex(elements, index),
ool->callStub());
}
}
@ -9403,7 +9418,7 @@ CodeGenerator::emitStoreElementHoleV(T* lir)
masm.branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::FROZEN),
ool->callStub());
if (lir->toFallibleStoreElementV()->mir()->needsHoleCheck()) {
masm.branchTestMagic(Assembler::Equal, BaseValueIndex(elements, index),
masm.branchTestMagic(Assembler::Equal, BaseObjectElementIndex(elements, index),
ool->callStub());
}
}
@ -9412,7 +9427,7 @@ CodeGenerator::emitStoreElementHoleV(T* lir)
emitPreBarrier(elements, lir->index(), 0);
masm.bind(ool->rejoinStore());
masm.storeValue(value, BaseIndex(elements, index, TimesEight));
masm.storeValue(value, BaseObjectElementIndex(elements, index));
masm.bind(ool->rejoin());
}
@ -9685,7 +9700,7 @@ CodeGenerator::emitArrayPopShift(LInstruction* lir, const MArrayPopShift* mir, R
masm.sub32(Imm32(1), lengthTemp);
if (mir->mode() == MArrayPopShift::Pop) {
BaseIndex addr(elementsTemp, lengthTemp, TimesEight);
BaseObjectElementIndex addr(elementsTemp, lengthTemp);
masm.loadElementTypedOrValue(addr, out, mir->needsHoleCheck(), ool->entry());
} else {
MOZ_ASSERT(mir->mode() == MArrayPopShift::Shift);
@ -9768,7 +9783,7 @@ CodeGenerator::emitArrayPush(LInstruction* lir, Register obj,
masm.spectreBoundsCheck32(length, capacity, spectreTemp, ool->entry());
// Do the store.
masm.storeConstantOrRegister(value, BaseIndex(elementsTemp, length, TimesEight));
masm.storeConstantOrRegister(value, BaseObjectElementIndex(elementsTemp, length));
masm.add32(Imm32(1), length);
@ -11423,8 +11438,8 @@ CodeGenerator::visitLoadElementT(LLoadElementT* lir)
int32_t offset = ToInt32(index) * sizeof(js::Value) + lir->mir()->offsetAdjustment();
emitLoadElementT(lir, Address(elements, offset));
} else {
emitLoadElementT(lir, BaseIndex(elements, ToRegister(index), TimesEight,
lir->mir()->offsetAdjustment()));
emitLoadElementT(lir, BaseObjectElementIndex(elements, ToRegister(index),
lir->mir()->offsetAdjustment()));
}
}
@ -11569,7 +11584,7 @@ CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir)
const MLoadUnboxedScalar* mir = lir->mir();
Scalar::Type readType = mir->readType();
int width = Scalar::byteSize(mir->storageType());
size_t width = Scalar::byteSize(mir->storageType());
bool canonicalizeDouble = mir->canonicalizeDoubles();
Label fail;
@ -11606,7 +11621,7 @@ CodeGenerator::visitLoadTypedArrayElementHole(LLoadTypedArrayElementHole* lir)
masm.loadPtr(Address(object, TypedArrayObject::dataOffset()), scratch);
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
Label fail;
BaseIndex source(scratch, index, ScaleFromElemWidth(width));
masm.loadFromTypedArray(arrayType, source, out, lir->mir()->allowDouble(),
@ -11880,7 +11895,7 @@ CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir)
Scalar::Type writeType = mir->writeType();
int width = Scalar::byteSize(mir->storageType());
size_t width = Scalar::byteSize(mir->storageType());
if (lir->index()->isConstant()) {
Address dest(elements, ToInt32(lir->index()) * width + mir->offsetAdjustment());
@ -11899,7 +11914,7 @@ CodeGenerator::visitStoreTypedArrayElementHole(LStoreTypedArrayElementHole* lir)
const LAllocation* value = lir->value();
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
Register index = ToRegister(lir->index());
const LAllocation* length = lir->length();
@ -12053,7 +12068,7 @@ CodeGenerator::visitInArray(LInArray* lir)
masm.branch32(Assembler::BelowOrEqual, initLength, index, failedInitLength);
if (mir->needsHoleCheck()) {
BaseIndex address = BaseIndex(elements, ToRegister(lir->index()), TimesEight);
BaseObjectElementIndex address(elements, ToRegister(lir->index()));
masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
}
masm.jump(&trueBranch);
@ -12450,14 +12465,12 @@ CodeGenerator::emitIsCallableOrConstructor(Register object, Register output, Lab
if (mode == Callable) {
masm.move32(Imm32(1), output);
} else {
Label notConstructor;
static_assert(mozilla::IsPowerOfTwo(unsigned(JSFunction::CONSTRUCTOR)),
"JSFunction::CONSTRUCTOR has only one bit set");
masm.load16ZeroExtend(Address(object, JSFunction::offsetOfFlags()), output);
masm.and32(Imm32(JSFunction::CONSTRUCTOR), output);
masm.branchTest32(Assembler::Zero, output, output, &notConstructor);
masm.move32(Imm32(1), output);
masm.jump(&done);
masm.bind(&notConstructor);
masm.move32(Imm32(0), output);
masm.rshift32(Imm32(mozilla::FloorLog2(JSFunction::CONSTRUCTOR)), output);
masm.and32(Imm32(1), output);
}
masm.jump(&done);
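
The rewritten sequence turns the branchy 0/1 materialization into a shift-and-mask, which is valid because JSFunction::CONSTRUCTOR is a single bit. A scalar sketch of the same transformation; the flag value below is hypothetical, and only the power-of-two property matters:

#include <cassert>
#include <cstdint>

// Hypothetical flag layout for illustration, mirroring the static_assert
// in the jitted version: the flag must have exactly one bit set.
constexpr uint16_t CONSTRUCTOR = 0x0002;
constexpr int CONSTRUCTOR_SHIFT = 1; // FloorLog2(CONSTRUCTOR)

// Branchy version: compare-and-branch to materialize 0 or 1.
uint32_t isConstructorBranchy(uint16_t flags) {
    if (flags & CONSTRUCTOR)
        return 1;
    return 0;
}

// Branchless version: shift the flag bit down to position 0 and mask.
uint32_t isConstructorBranchless(uint16_t flags) {
    return (flags >> CONSTRUCTOR_SHIFT) & 1;
}

int main() {
    for (uint32_t f = 0; f <= 0xFFFF; f++)
        assert(isConstructorBranchy(f) == isConstructorBranchless(f));
    return 0;
}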


@ -9765,8 +9765,8 @@ AbortReasonOr<Ok>
IonBuilder::jsop_getelem_super()
{
MDefinition* obj = current->pop();
MDefinition* receiver = current->pop();
MDefinition* id = current->pop();
MDefinition* receiver = current->pop();
#if defined(JS_CODEGEN_X86)
if (instrumentedProfiling())
@ -12257,8 +12257,10 @@ IonBuilder::jsop_setarg(uint32_t arg)
if (info().argsObjAliasesFormals()) {
if (needsPostBarrier(val))
current->add(MPostWriteBarrier::New(alloc(), current->argumentsObject(), val));
current->add(MSetArgumentsObjectArg::New(alloc(), current->argumentsObject(),
GET_ARGNO(pc), val));
auto* ins = MSetArgumentsObjectArg::New(alloc(), current->argumentsObject(),
GET_ARGNO(pc), val);
current->add(ins);
MOZ_TRY(resumeAfter(ins));
return Ok();
}


@ -3208,8 +3208,10 @@ MacroAssembler::branchIfNotInterpretedConstructor(Register fun, Register scratch
{
// 16-bit loads are slow and unaligned 32-bit loads may be too so
// perform an aligned 32-bit load and adjust the bitmask accordingly.
MOZ_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
MOZ_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
static_assert(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0,
"JSFunction nargs are aligned to uint32_t");
static_assert(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2,
"JSFunction nargs and flags are stored next to each other");
// First, ensure it's a scripted function.
load32(Address(fun, JSFunction::offsetOfNargs()), scratch);
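
The two static_asserts are what make the single aligned 32-bit load safe: nargs is uint32_t-aligned and flags occupies the adjacent two bytes. A small host-side sketch of that layout assumption, using a simplified stand-in struct rather than the real JSFunction:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Simplified stand-in for the layout the asserts pin down: a
// uint32_t-aligned uint16_t pair, nargs followed immediately by flags.
struct FunFields {
    uint16_t nargs;
    uint16_t flags;
};
static_assert(offsetof(FunFields, nargs) % sizeof(uint32_t) == 0,
              "nargs is uint32_t-aligned");
static_assert(offsetof(FunFields, flags) == offsetof(FunFields, nargs) + 2,
              "flags is stored right after nargs");

int main() {
    FunFields f{7, 0x0102};
    // One aligned 32-bit load fetches both fields; which half holds the
    // flags depends on endianness, which is why the jitted code adjusts
    // its bitmask instead of doing a 16-bit load.
    uint32_t both;
    std::memcpy(&both, &f, sizeof(both));
    uint16_t lo = uint16_t(both & 0xFFFF);
    uint16_t hi = uint16_t(both >> 16);
    assert((lo == 7 && hi == 0x0102) || (lo == 0x0102 && hi == 7));
    return 0;
}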


@ -1336,17 +1336,17 @@ class Assembler : public AssemblerShared
protected:
Structure for fixing up pc-relative loads/jumps when the machine code
gets moved (executable copy, gc, etc.).
struct RelativePatch
class RelativePatch
{
void* target_;
Relocation::Kind kind_;
RelocationKind kind_;
public:
RelativePatch(void* target, Relocation::Kind kind)
RelativePatch(void* target, RelocationKind kind)
: target_(target), kind_(kind)
{ }
void* target() const { return target_; }
Relocation::Kind kind() const { return kind_; }
RelocationKind kind() const { return kind_; }
};
// TODO: this should actually be a pool-like object. It is currently a big
@ -1776,7 +1776,7 @@ class Assembler : public AssemblerShared
}
void retarget(Label* label, Label* target);
// I'm going to pretend this doesn't exist for now.
void retarget(Label* label, void* target, Relocation::Kind reloc);
void retarget(Label* label, void* target, RelocationKind reloc);
static void Bind(uint8_t* rawCode, const CodeLabel& label);
@ -1791,7 +1791,7 @@ class Assembler : public AssemblerShared
#ifdef DEBUG
MOZ_ASSERT(dataRelocations_.length() == 0);
for (auto& j : jumps_)
MOZ_ASSERT(j.kind() == Relocation::HARDCODED);
MOZ_ASSERT(j.kind() == RelocationKind::HARDCODED);
#endif
}
@ -1808,9 +1808,9 @@ class Assembler : public AssemblerShared
static bool HasRoundInstruction(RoundingMode mode) { return false; }
protected:
void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind) {
void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind) {
enoughMemory_ &= jumps_.append(RelativePatch(target.value, kind));
if (kind == Relocation::JITCODE)
if (kind == RelocationKind::JITCODE)
writeRelocation(src);
}


@ -1806,7 +1806,7 @@ CodeGenerator::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayE
Register newval = ToRegister(lir->newval());
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address dest(elements, ToInt32(lir->index()) * width);
@ -1827,7 +1827,7 @@ CodeGenerator::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayEle
Register value = ToRegister(lir->value());
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address dest(elements, ToInt32(lir->index()) * width);
@ -1850,7 +1850,7 @@ CodeGenerator::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop*
Register value = ToRegister(lir->value());
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address mem(elements, ToInt32(lir->index()) * width);
@ -1872,7 +1872,7 @@ CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayEleme
Register flagTemp = ToRegister(lir->flagTemp());
Register value = ToRegister(lir->value());
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address mem(elements, ToInt32(lir->index()) * width);


@ -3968,7 +3968,7 @@ CodeOffset
MacroAssemblerARMCompat::toggledCall(JitCode* target, bool enabled)
{
BufferOffset bo = nextOffset();
addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
ScratchRegisterScope scratch(asMasm());
ma_movPatchable(ImmPtr(target->raw()), scratch, Always);
if (enabled)
@ -4541,7 +4541,7 @@ void
MacroAssembler::call(ImmPtr imm)
{
BufferOffset bo = m_buffer.nextOffset();
addPendingJump(bo, imm, Relocation::HARDCODED);
addPendingJump(bo, imm, RelocationKind::HARDCODED);
ma_call(imm);
}
@ -4563,7 +4563,7 @@ void
MacroAssembler::call(JitCode* c)
{
BufferOffset bo = m_buffer.nextOffset();
addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
ScratchRegisterScope scratch(*this);
ma_movPatchable(ImmPtr(c->raw()), scratch, Always);
callJitNoProfiler(scratch);
@ -5023,7 +5023,7 @@ MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType value
const Address& dest, MIRType slotType);
template void
MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
const BaseIndex& dest, MIRType slotType);
const BaseObjectElementIndex& dest, MIRType slotType);
CodeOffset
MacroAssembler::wasmTrapInstruction()


@ -572,7 +572,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void branch(JitCode* c) {
BufferOffset bo = m_buffer.nextOffset();
addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
ScratchRegisterScope scratch(asMasm());
ma_movPatchable(ImmPtr(c->raw()), scratch, Always);
ma_bx(scratch);


@ -315,10 +315,10 @@ Assembler::bind(RepatchLabel* label)
}
void
Assembler::addJumpRelocation(BufferOffset src, Relocation::Kind reloc)
Assembler::addJumpRelocation(BufferOffset src, RelocationKind reloc)
{
// Only JITCODE relocations are patchable at runtime.
MOZ_ASSERT(reloc == Relocation::JITCODE);
MOZ_ASSERT(reloc == RelocationKind::JITCODE);
// The jump relocation table starts with a fixed-width integer pointing
// to the start of the extended jump table. But, we don't know the
@ -333,11 +333,11 @@ Assembler::addJumpRelocation(BufferOffset src, Relocation::Kind reloc)
}
void
Assembler::addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind reloc)
Assembler::addPendingJump(BufferOffset src, ImmPtr target, RelocationKind reloc)
{
MOZ_ASSERT(target.value != nullptr);
if (reloc == Relocation::JITCODE)
if (reloc == RelocationKind::JITCODE)
addJumpRelocation(src, reloc);
// This jump is not patchable at runtime. Extended jump table entry requirements
@ -347,10 +347,10 @@ Assembler::addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind relo
}
size_t
Assembler::addPatchableJump(BufferOffset src, Relocation::Kind reloc)
Assembler::addPatchableJump(BufferOffset src, RelocationKind reloc)
{
MOZ_CRASH("TODO: This is currently unused (and untested)");
if (reloc == Relocation::JITCODE)
if (reloc == RelocationKind::JITCODE)
addJumpRelocation(src, reloc);
size_t extendedTableIndex = pendingJumps_.length();


@ -275,16 +275,16 @@ class Assembler : public vixl::Assembler
static bool HasRoundInstruction(RoundingMode mode) { return false; }
// Tracks a jump that is patchable after finalization.
void addJumpRelocation(BufferOffset src, Relocation::Kind reloc);
void addJumpRelocation(BufferOffset src, RelocationKind reloc);
protected:
// Add a jump whose target is unknown until finalization.
// The jump may not be patched at runtime.
void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind);
void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind);
// Add a jump whose target is unknown until finalization, and may change
// thereafter. The jump is patchable at runtime.
size_t addPatchableJump(BufferOffset src, Relocation::Kind kind);
size_t addPatchableJump(BufferOffset src, RelocationKind kind);
public:
static uint32_t PatchWrite_NearCallSize() {
@ -337,7 +337,7 @@ class Assembler : public vixl::Assembler
#ifdef DEBUG
MOZ_ASSERT(dataRelocations_.length() == 0);
for (auto& j : pendingJumps_)
MOZ_ASSERT(j.kind == Relocation::HARDCODED);
MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
#endif
}
@ -376,7 +376,7 @@ class Assembler : public vixl::Assembler
protected:
// Because jumps may be relocated to a target inaccessible by a short jump,
// each relocatable jump must have a unique entry in the extended jump table.
// Valid relocatable targets are of type Relocation::JITCODE.
// Valid relocatable targets are of type RelocationKind::JITCODE.
struct JumpRelocation
{
BufferOffset jump; // Offset to the short jump, from the start of the code buffer.
@ -393,9 +393,9 @@ class Assembler : public vixl::Assembler
{
BufferOffset offset;
void* target;
Relocation::Kind kind;
RelocationKind kind;
RelativePatch(BufferOffset offset, void* target, Relocation::Kind kind)
RelativePatch(BufferOffset offset, void* target, RelocationKind kind)
: offset(offset), target(target), kind(kind)
{ }
};


@ -815,7 +815,7 @@ CodeGenerator::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayE
Register newval = ToRegister(lir->newval());
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address dest(elements, ToInt32(lir->index()) * width);
@ -836,7 +836,7 @@ CodeGenerator::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayEle
Register value = ToRegister(lir->value());
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address dest(elements, ToInt32(lir->index()) * width);


@ -623,7 +623,7 @@ MacroAssembler::call(JitCode* c)
const ARMRegister scratch64 = temps.AcquireX();
syncStackPtr();
BufferOffset off = immPool64(scratch64, uint64_t(c->raw()));
addPendingJump(off, ImmPtr(c->raw()), Relocation::JITCODE);
addPendingJump(off, ImmPtr(c->raw()), RelocationKind::JITCODE);
blr(scratch64);
}
@ -1046,7 +1046,7 @@ MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType value
const Address& dest, MIRType slotType);
template void
MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
const BaseIndex& dest, MIRType slotType);
const BaseObjectElementIndex& dest, MIRType slotType);
void
MacroAssembler::comment(const char* msg)


@ -676,7 +676,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
void jump(TrampolinePtr code) {
syncStackPtr();
BufferOffset loc = b(-1, LabelDoc()); // The jump target will be patched by executableCopy().
addPendingJump(loc, ImmPtr(code.value), Relocation::HARDCODED);
addPendingJump(loc, ImmPtr(code.value), RelocationKind::HARDCODED);
}
void jump(RepatchLabel* label) {
MOZ_CRASH("jump (repatchlabel)");
@ -1230,7 +1230,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
void branch(JitCode* target) {
syncStackPtr();
BufferOffset loc = b(-1, LabelDoc()); // The jump target will be patched by executableCopy().
addPendingJump(loc, ImmPtr(target->raw()), Relocation::JITCODE);
addPendingJump(loc, ImmPtr(target->raw()), RelocationKind::JITCODE);
}
CodeOffsetJump jumpWithPatch(RepatchLabel* label)
@ -1896,7 +1896,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
nop();
}
addPendingJump(loadOffset, ImmPtr(target->raw()), Relocation::JITCODE);
addPendingJump(loadOffset, ImmPtr(target->raw()), RelocationKind::JITCODE);
CodeOffset ret(offset.getOffset());
return ret;
}


@ -861,9 +861,9 @@ class AssemblerMIPSShared : public AssemblerShared
// we want to fix-up
BufferOffset offset;
void* target;
Relocation::Kind kind;
RelocationKind kind;
RelativePatch(BufferOffset offset, void* target, Relocation::Kind kind)
RelativePatch(BufferOffset offset, void* target, RelocationKind kind)
: offset(offset),
target(target),
kind(kind)
@ -911,7 +911,7 @@ class AssemblerMIPSShared : public AssemblerShared
#ifdef DEBUG
MOZ_ASSERT(dataRelocations_.length() == 0);
for (auto& j : jumps_)
MOZ_ASSERT(j.kind == Relocation::HARDCODED);
MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
#endif
}
@ -1285,9 +1285,9 @@ class AssemblerMIPSShared : public AssemblerShared
protected:
InstImm invertBranch(InstImm branch, BOffImm16 skipOffset);
void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind) {
void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind) {
enoughMemory_ &= jumps_.append(RelativePatch(src, target.value, kind));
if (kind == Relocation::JITCODE)
if (kind == RelocationKind::JITCODE)
writeRelocation(src);
}


@ -2417,7 +2417,7 @@ CodeGenerator::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop*
Register value = ToRegister(lir->value());
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address mem(elements, ToInt32(lir->index()) * width);
@ -2441,7 +2441,7 @@ CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayEleme
Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
Register value = ToRegister(lir->value());
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address mem(elements, ToInt32(lir->index()) * width);
@ -2468,7 +2468,7 @@ CodeGenerator::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayE
Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address dest(elements, ToInt32(lir->index()) * width);
@ -2494,7 +2494,7 @@ CodeGenerator::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayEle
Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address dest(elements, ToInt32(lir->index()) * width);


@ -1613,7 +1613,7 @@ void
MacroAssembler::call(ImmPtr target)
{
BufferOffset bo = m_buffer.nextOffset();
addPendingJump(bo, target, Relocation::HARDCODED);
addPendingJump(bo, target, RelocationKind::HARDCODED);
ma_call(target);
}
@ -1621,7 +1621,7 @@ void
MacroAssembler::call(JitCode* c)
{
BufferOffset bo = m_buffer.nextOffset();
addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
callJitNoProfiler(ScratchRegister);
}


@ -2023,7 +2023,7 @@ MacroAssemblerMIPSCompat::toggledCall(JitCode* target, bool enabled)
{
BufferOffset bo = nextOffset();
CodeOffset offset(bo.getOffset());
addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
if (enabled) {
as_jalr(ScratchRegister);
@ -2429,7 +2429,7 @@ MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType value
const Address& dest, MIRType slotType);
template void
MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
const BaseIndex& dest, MIRType slotType);
const BaseObjectElementIndex& dest, MIRType slotType);
void


@ -244,7 +244,7 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
void branch(JitCode* c) {
BufferOffset bo = m_buffer.nextOffset();
addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
as_jr(ScratchRegister);
as_nop();
@ -347,7 +347,7 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
{
auto target = ImmPtr(code.value);
BufferOffset bo = m_buffer.nextOffset();
addPendingJump(bo, target, Relocation::HARDCODED);
addPendingJump(bo, target, RelocationKind::HARDCODED);
ma_jump(target);
}


@ -1890,7 +1890,7 @@ MacroAssemblerMIPS64Compat::toggledCall(JitCode* target, bool enabled)
{
BufferOffset bo = nextOffset();
CodeOffset offset(bo.getOffset());
addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
if (enabled) {
as_jalr(ScratchRegister);
@ -2257,7 +2257,7 @@ MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType value
const Address& dest, MIRType slotType);
template void
MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
const BaseIndex& dest, MIRType slotType);
const BaseObjectElementIndex& dest, MIRType slotType);
void


@ -255,7 +255,7 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
void branch(JitCode* c) {
BufferOffset bo = m_buffer.nextOffset();
addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
as_jr(ScratchRegister);
as_nop();
@ -362,7 +362,7 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
{
auto target = ImmPtr(code.value);
BufferOffset bo = m_buffer.nextOffset();
addPendingJump(bo, target, Relocation::HARDCODED);
addPendingJump(bo, target, RelocationKind::HARDCODED);
ma_jump(target);
}


@ -322,8 +322,7 @@ struct Address
{ }
#endif
Address() : base(RegisterOrSP(Registers::Invalid)), offset(0)
{ }
Address() = delete;
};
#if JS_BITS_PER_WORD == 32
@ -363,12 +362,7 @@ struct BaseIndex
{ }
#endif
BaseIndex()
: base(RegisterOrSP(Registers::Invalid))
, index(Registers::Invalid)
, scale(TimesOne)
, offset(0)
{}
BaseIndex() = delete;
};
#if JS_BITS_PER_WORD == 32
@ -445,17 +439,14 @@ struct BaseObjectSlotIndex : BaseValueIndex
#endif
};
class Relocation {
public:
enum Kind {
// The target is immovable, so patching is only needed if the source
// buffer is relocated and the reference is relative.
HARDCODED,
enum class RelocationKind {
// The target is immovable, so patching is only needed if the source
// buffer is relocated and the reference is relative.
HARDCODED,
// The target is the start of a JitCode buffer, which must be traced
// during garbage collection. Relocations and patching may be needed.
JITCODE
};
// The target is the start of a JitCode buffer, which must be traced
// during garbage collection. Relocations and patching may be needed.
JITCODE
};
class RepatchLabel
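
The plain nested enum becomes a scoped enum, which is why every Relocation::HARDCODED / Relocation::JITCODE use across the backends is renamed to RelocationKind::*. A minimal sketch of the difference:

// Old style: a plain enum nested in a class. Enumerators leak into the
// enclosing scope and convert to int implicitly.
class Relocation {
  public:
    enum Kind { HARDCODED, JITCODE };
};

// New style: a scoped enum. Enumerators must be qualified and do not
// convert to int implicitly, catching accidental mixes at compile time.
enum class RelocationKind { HARDCODED, JITCODE };

int main() {
    int old = Relocation::HARDCODED;            // compiles: implicit conversion
    RelocationKind k = RelocationKind::HARDCODED;
    // int bad = RelocationKind::HARDCODED;     // error: no implicit conversion
    return old + int(k == RelocationKind::JITCODE);
}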


@ -1497,13 +1497,14 @@ CodeGeneratorShared::omitOverRecursedCheck() const
}
void
CodeGeneratorShared::emitPreBarrier(Register base, const LAllocation* index, int32_t offsetAdjustment)
CodeGeneratorShared::emitPreBarrier(Register elements, const LAllocation* index,
int32_t offsetAdjustment)
{
if (index->isConstant()) {
Address address(base, ToInt32(index) * sizeof(Value) + offsetAdjustment);
Address address(elements, ToInt32(index) * sizeof(Value) + offsetAdjustment);
masm.guardedCallPreBarrier(address, MIRType::Value);
} else {
BaseIndex address(base, ToRegister(index), TimesEight, offsetAdjustment);
BaseObjectElementIndex address(elements, ToRegister(index), offsetAdjustment);
masm.guardedCallPreBarrier(address, MIRType::Value);
}
}


@ -317,7 +317,7 @@ class CodeGeneratorShared : public LElementVisitor
void emitTruncateDouble(FloatRegister src, Register dest, MTruncateToInt32* mir);
void emitTruncateFloat32(FloatRegister src, Register dest, MTruncateToInt32* mir);
void emitPreBarrier(Register base, const LAllocation* index, int32_t offsetAdjustment);
void emitPreBarrier(Register elements, const LAllocation* index, int32_t offsetAdjustment);
void emitPreBarrier(Address address);
// We don't emit code for trivial blocks, so if we want to branch to the


@ -118,7 +118,7 @@ ABIArgGenerator::next(MIRType type)
}
void
Assembler::writeRelocation(JmpSrc src, Relocation::Kind reloc)
Assembler::writeRelocation(JmpSrc src, RelocationKind reloc)
{
if (!jumpRelocations_.length()) {
// The jump relocation table starts with a fixed-width integer pointing
@ -127,26 +127,26 @@ Assembler::writeRelocation(JmpSrc src, Relocation::Kind reloc)
// patch later.
jumpRelocations_.writeFixedUint32_t(0);
}
if (reloc == Relocation::JITCODE) {
if (reloc == RelocationKind::JITCODE) {
jumpRelocations_.writeUnsigned(src.offset());
jumpRelocations_.writeUnsigned(jumps_.length());
}
}
void
Assembler::addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc)
Assembler::addPendingJump(JmpSrc src, ImmPtr target, RelocationKind reloc)
{
MOZ_ASSERT(target.value != nullptr);
// Emit reloc before modifying the jump table, since it computes a 0-based
// index. This jump is not patchable at runtime.
if (reloc == Relocation::JITCODE)
if (reloc == RelocationKind::JITCODE)
writeRelocation(src, reloc);
enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, reloc));
}
size_t
Assembler::addPatchableJump(JmpSrc src, Relocation::Kind reloc)
Assembler::addPatchableJump(JmpSrc src, RelocationKind reloc)
{
// This jump is patchable at runtime so we always need to make sure the
// jump table is emitted.


@ -288,11 +288,11 @@ class Assembler : public AssemblerX86Shared
static JitCode* CodeFromJump(JitCode* code, uint8_t* jump);
private:
void writeRelocation(JmpSrc src, Relocation::Kind reloc);
void addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc);
void writeRelocation(JmpSrc src, RelocationKind reloc);
void addPendingJump(JmpSrc src, ImmPtr target, RelocationKind reloc);
protected:
size_t addPatchableJump(JmpSrc src, Relocation::Kind reloc);
size_t addPatchableJump(JmpSrc src, RelocationKind reloc);
public:
using AssemblerX86Shared::j;
@ -1060,32 +1060,31 @@ class Assembler : public AssemblerX86Shared
}
}
void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
void jmp(ImmPtr target, RelocationKind reloc = RelocationKind::HARDCODED) {
JmpSrc src = masm.jmp();
addPendingJump(src, target, reloc);
}
void j(Condition cond, ImmPtr target,
Relocation::Kind reloc = Relocation::HARDCODED) {
void j(Condition cond, ImmPtr target, RelocationKind reloc = RelocationKind::HARDCODED) {
JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
addPendingJump(src, target, reloc);
}
void jmp(JitCode* target) {
jmp(ImmPtr(target->raw()), Relocation::JITCODE);
jmp(ImmPtr(target->raw()), RelocationKind::JITCODE);
}
void j(Condition cond, JitCode* target) {
j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
j(cond, ImmPtr(target->raw()), RelocationKind::JITCODE);
}
void call(JitCode* target) {
JmpSrc src = masm.call();
addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
}
void call(ImmWord target) {
call(ImmPtr((void*)target.value));
}
void call(ImmPtr target) {
JmpSrc src = masm.call();
addPendingJump(src, target, Relocation::HARDCODED);
addPendingJump(src, target, RelocationKind::HARDCODED);
}
// Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
@ -1093,7 +1092,7 @@ class Assembler : public AssemblerX86Shared
CodeOffset toggledCall(JitCode* target, bool enabled) {
CodeOffset offset(size());
JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
return offset;
}


@ -584,7 +584,7 @@ MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType value
const Address& dest, MIRType slotType);
template void
MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
const BaseIndex& dest, MIRType slotType);
const BaseObjectElementIndex& dest, MIRType slotType);
// ========================================================================
// wasm support


@ -546,7 +546,7 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
CodeOffsetJump jumpWithPatch(RepatchLabel* label) {
JmpSrc src = jmpSrc(label);
return CodeOffsetJump(size(), addPatchableJump(src, Relocation::HARDCODED));
return CodeOffsetJump(size(), addPatchableJump(src, RelocationKind::HARDCODED));
}
void movePtr(Register src, Register dest) {


@ -457,7 +457,7 @@ JitRuntime::generateArgumentsRectifier(MacroAssembler& masm)
| - sizeof(Value)| is used to position rcx so that we can read the last
argument, and not the value that comes after it.
BaseIndex b = BaseIndex(r9, r8, TimesEight, sizeof(RectifierFrameLayout) - sizeof(Value));
BaseIndex b(r9, r8, TimesEight, sizeof(RectifierFrameLayout) - sizeof(Value));
masm.lea(Operand(b), rcx);
// Copy & Push arguments, |nargs| + 1 times (to include |this|).


@ -255,9 +255,9 @@ class AssemblerX86Shared : public AssemblerShared
struct RelativePatch {
int32_t offset;
void* target;
Relocation::Kind kind;
RelocationKind kind;
RelativePatch(int32_t offset, void* target, Relocation::Kind kind)
RelativePatch(int32_t offset, void* target, RelocationKind kind)
: offset(offset),
target(target),
kind(kind)
@ -399,7 +399,7 @@ class AssemblerX86Shared : public AssemblerShared
#ifdef DEBUG
MOZ_ASSERT(dataRelocations_.length() == 0);
for (auto& j : jumps_)
MOZ_ASSERT(j.kind == Relocation::HARDCODED);
MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
#endif
}


@ -37,7 +37,7 @@
#include "jit/JitSpewer.h"
// Spew formatting helpers.
#define PRETTYHEX(x) (((x)<0)?"-":""),(((x)<0)?-(x):(x))
#define PRETTYHEX(x) (((x)<0)?"-":""),((unsigned)((x)^((x)>>31))+((unsigned)(x)>>31))
#define MEM_o "%s0x%x"
#define MEM_os MEM_o "(,%s,%d)"
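
The new PRETTYHEX computes the magnitude without negating a signed value, avoiding undefined behavior for INT32_MIN: for negative x (assuming the usual arithmetic right shift), x ^ (x >> 31) equals ~x, i.e. -x - 1, and adding the sign bit (unsigned)x >> 31 yields -x in unsigned arithmetic. A standalone check of the identity:

#include <cassert>
#include <cstdint>

// The identity behind the new PRETTYHEX: the magnitude of a 32-bit int
// with no signed-overflow UB. For x < 0, x ^ (x >> 31) == ~x == -x - 1,
// and adding the sign bit ((unsigned)x >> 31 == 1) gives -x as unsigned.
unsigned magnitude(int32_t x) {
    return (unsigned)(x ^ (x >> 31)) + ((unsigned)x >> 31);
}

int main() {
    assert(magnitude(0) == 0u);
    assert(magnitude(5) == 5u);
    assert(magnitude(-5) == 5u);
    // The old (((x)<0)?-(x):(x)) form overflowed here; the new one is fine.
    assert(magnitude(INT32_MIN) == 2147483648u);
    return 0;
}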


@ -1802,7 +1802,7 @@ public:
void cmpl_im(int32_t rhs, int32_t offset, RegisterID base, RegisterID index, int scale)
{
spew("cmpl $0x%x, " MEM_o32b, rhs, ADDR_o32b(offset, base));
spew("cmpl $0x%x, " MEM_obs, rhs, ADDR_obs(offset, base, index, scale));
if (CAN_SIGN_EXTEND_8_32(rhs)) {
m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_CMP);
m_formatter.immediate8s(rhs);


@ -499,7 +499,7 @@ class BailoutJump {
{ }
#ifdef JS_CODEGEN_X86
void operator()(MacroAssembler& masm, uint8_t* code) const {
masm.j(cond_, ImmPtr(code), Relocation::HARDCODED);
masm.j(cond_, ImmPtr(code), RelocationKind::HARDCODED);
}
#endif
void operator()(MacroAssembler& masm, Label* label) const {
@ -515,7 +515,7 @@ class BailoutLabel {
{ }
#ifdef JS_CODEGEN_X86
void operator()(MacroAssembler& masm, uint8_t* code) const {
masm.retarget(label_, ImmPtr(code), Relocation::HARDCODED);
masm.retarget(label_, ImmPtr(code), RelocationKind::HARDCODED);
}
#endif
void operator()(MacroAssembler& masm, Label* label) const {
@ -2490,7 +2490,7 @@ CodeGenerator::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayE
Register newval = ToRegister(lir->newval());
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address dest(elements, ToInt32(lir->index()) * width);
@ -2511,7 +2511,7 @@ CodeGenerator::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayEle
Register value = ToRegister(lir->value());
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address dest(elements, ToInt32(lir->index()) * width);
@ -2549,7 +2549,7 @@ CodeGenerator::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop*
const LAllocation* value = lir->value();
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address mem(elements, ToInt32(lir->index()) * width);
@ -2582,7 +2582,7 @@ CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayEleme
Register elements = ToRegister(lir->elements());
const LAllocation* value = lir->value();
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
size_t width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address mem(elements, ToInt32(lir->index()) * width);

View file

@ -222,9 +222,9 @@ class Assembler : public AssemblerX86Shared
void writeRelocation(JmpSrc src) {
jumpRelocations_.writeUnsigned(src.offset());
}
void addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind kind) {
void addPendingJump(JmpSrc src, ImmPtr target, RelocationKind kind) {
enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, kind));
if (kind == Relocation::JITCODE)
if (kind == RelocationKind::JITCODE)
writeRelocation(src);
}
@ -506,32 +506,31 @@ class Assembler : public AssemblerX86Shared
}
}
void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
void jmp(ImmPtr target, RelocationKind reloc = RelocationKind::HARDCODED) {
JmpSrc src = masm.jmp();
addPendingJump(src, target, reloc);
}
void j(Condition cond, ImmPtr target,
Relocation::Kind reloc = Relocation::HARDCODED) {
void j(Condition cond, ImmPtr target, RelocationKind reloc = RelocationKind::HARDCODED) {
JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
addPendingJump(src, target, reloc);
}
void jmp(JitCode* target) {
jmp(ImmPtr(target->raw()), Relocation::JITCODE);
jmp(ImmPtr(target->raw()), RelocationKind::JITCODE);
}
void j(Condition cond, JitCode* target) {
j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
j(cond, ImmPtr(target->raw()), RelocationKind::JITCODE);
}
void call(JitCode* target) {
JmpSrc src = masm.call();
addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
}
void call(ImmWord target) {
call(ImmPtr((void*)target.value));
}
void call(ImmPtr target) {
JmpSrc src = masm.call();
addPendingJump(src, target, Relocation::HARDCODED);
addPendingJump(src, target, RelocationKind::HARDCODED);
}
// Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
@ -539,7 +538,7 @@ class Assembler : public AssemblerX86Shared
CodeOffset toggledCall(JitCode* target, bool enabled) {
CodeOffset offset(size());
JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
return offset;
}
@ -551,7 +550,7 @@ class Assembler : public AssemblerX86Shared
// Re-routes pending jumps to an external target, flushing the label in the
// process.
void retarget(Label* label, ImmPtr target, Relocation::Kind reloc) {
void retarget(Label* label, ImmPtr target, RelocationKind reloc) {
if (label->used()) {
bool more;
X86Encoding::JmpSrc jmp(label->offset());

View file

@ -592,7 +592,7 @@ MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType value
const Address& dest, MIRType slotType);
template void
MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
const BaseIndex& dest, MIRType slotType);
const BaseObjectElementIndex& dest, MIRType slotType);
// wasm specific methods, used in both the wasm baseline compiler and ion.

View file

@ -450,8 +450,7 @@ JitRuntime::generateArgumentsRectifier(MacroAssembler& masm)
// Get the topmost argument. We did a push of %ebp earlier, so be sure to
// account for this in the offset
BaseIndex b = BaseIndex(FramePointer, esi, TimesEight,
sizeof(RectifierFrameLayout) + sizeof(void*));
BaseIndex b(FramePointer, esi, TimesEight, sizeof(RectifierFrameLayout) + sizeof(void*));
masm.lea(Operand(b), ecx);
// Push arguments, |nargs| + 1 times (to include |this|).
@ -514,7 +513,7 @@ JitRuntime::generateArgumentsRectifier(MacroAssembler& masm)
masm.pop(edi); // Discard number of actual arguments.
// Discard pushed arguments, but not the pushed frame pointer.
BaseIndex unwind = BaseIndex(esp, ebx, TimesOne, -int32_t(sizeof(void*)));
BaseIndex unwind(esp, ebx, TimesOne, -int32_t(sizeof(void*)));
masm.lea(Operand(unwind), esp);
masm.pop(FramePointer);

View file

@ -436,9 +436,6 @@ skip script test262/annexB/language/function-code/block-decl-nested-blocks-with-
# https://bugzilla.mozilla.org/show_bug.cgi?id=1406171
skip script test262/built-ins/Reflect/ownKeys/return-on-corresponding-order-large-index.js
# https://bugzilla.mozilla.org/show_bug.cgi?id=1472211
skip script test262/language/statements/class/super/in-constructor-superproperty-evaluation.js
# https://bugzilla.mozilla.org/show_bug.cgi?id=1473228
skip script test262/intl402/RelativeTimeFormat/prototype/toStringTag/toStringTag.js

View file

@ -0,0 +1,68 @@
// Make sure we get the proper side effects.
// |delete super[expr]| applies ToPropertyKey on |expr| before throwing.
class base {
constructor() { }
}
class derived extends base {
constructor() { super(); }
testDeleteElem() {
let sideEffect = 0;
let key = {
toString() {
sideEffect++;
return "";
}
};
assertThrowsInstanceOf(() => delete super[key], ReferenceError);
assertEq(sideEffect, 1);
}
testDeleteElemPropValFirst() {
// The deletion error is a reference error, but by munging the prototype
// chain, we can force a type error from JSOP_SUPERBASE.
let key = {
toString() {
Object.setPrototypeOf(derived.prototype, null);
return "";
}
};
delete super[key];
}
}
class derivedTestDeleteElem extends base {
constructor() {
let sideEffect = 0;
let key = {
toString() {
sideEffect++;
return "";
}
};
assertThrowsInstanceOf(() => delete super[key], ReferenceError);
assertEq(sideEffect, 0);
super();
assertThrowsInstanceOf(() => delete super[key], ReferenceError);
assertEq(sideEffect, 1);
Object.setPrototypeOf(derivedTestDeleteElem.prototype, null);
assertThrowsInstanceOf(() => delete super[key], TypeError);
assertEq(sideEffect, 2);
return {};
}
}
var d = new derived();
d.testDeleteElem();
assertThrowsInstanceOf(() => d.testDeleteElemPropValFirst(), TypeError);
new derivedTestDeleteElem();
if (typeof reportCompare === 'function')
reportCompare(0,0,"OK");

View file

@ -42,5 +42,23 @@ Object.setPrototypeOf(thing2, new Proxy({}, {
}));
assertThrowsInstanceOf(() => thing2.go(), ReferenceError);
class derivedTestDeleteProp extends base {
constructor() {
// The deletion error is a reference error, but by munging the prototype
// chain, we can force a type error from JSOP_SUPERBASE.
Object.setPrototypeOf(derivedTestDeleteProp.prototype, null);
assertThrowsInstanceOf(() => delete super.prop, ReferenceError);
super();
assertThrowsInstanceOf(() => delete super.prop, TypeError);
return {};
}
}
new derivedTestDeleteProp();
if (typeof reportCompare === 'function')
reportCompare(0,0,"OK");

View file

@ -1799,7 +1799,7 @@ ExpressionDecompiler::decompilePC(jsbytecode* pc, uint8_t defIndex)
case JSOP_GETELEM_SUPER:
return write("super[") &&
decompilePCForStackOperand(pc, -3) &&
decompilePCForStackOperand(pc, -2) &&
write("]");
case JSOP_NULL:
return write(js_null_str);

View file

@ -3083,8 +3083,8 @@ END_CASE(JSOP_GETELEM)
CASE(JSOP_GETELEM_SUPER)
{
ReservedRooted<Value> rval(&rootValue0, REGS.sp[-3]);
ReservedRooted<Value> receiver(&rootValue1, REGS.sp[-2]);
ReservedRooted<Value> receiver(&rootValue1, REGS.sp[-3]);
ReservedRooted<Value> rval(&rootValue0, REGS.sp[-2]);
ReservedRooted<JSObject*> obj(&rootObject1, &REGS.sp[-1].toObject());
MutableHandleValue res = REGS.stackHandleAt(-3);
@ -3126,8 +3126,8 @@ CASE(JSOP_STRICTSETELEM_SUPER)
static_assert(JSOP_SETELEM_SUPER_LENGTH == JSOP_STRICTSETELEM_SUPER_LENGTH,
"setelem-super and strictsetelem-super must be the same size");
ReservedRooted<Value> index(&rootValue1, REGS.sp[-4]);
ReservedRooted<Value> receiver(&rootValue0, REGS.sp[-3]);
ReservedRooted<Value> receiver(&rootValue0, REGS.sp[-4]);
ReservedRooted<Value> index(&rootValue1, REGS.sp[-3]);
ReservedRooted<JSObject*> obj(&rootObject1, &REGS.sp[-2].toObject());
HandleValue value = REGS.stackHandleAt(-1);

View file

@ -76,11 +76,11 @@ NativeObject::canHaveNonEmptyElements()
#endif // DEBUG
/* static */ bool
/* static */ void
ObjectElements::ConvertElementsToDoubles(JSContext* cx, uintptr_t elementsPtr)
{
/*
* This function is infallible, but has a fallible interface so that it can
* This function has an otherwise unused JSContext argument so that it can
* be called directly from Ion code. Only arrays can have their dense
* elements converted to doubles, and arrays never have empty elements.
*/
@ -100,7 +100,6 @@ ObjectElements::ConvertElementsToDoubles(JSContext* cx, uintptr_t elementsPtr)
}
header->setShouldConvertDoubleElements();
return true;
}
/* static */ bool

View file

@ -367,7 +367,7 @@ class ObjectElements
return int(offsetof(ObjectElements, length)) - int(sizeof(ObjectElements));
}
static bool ConvertElementsToDoubles(JSContext* cx, uintptr_t elements);
static void ConvertElementsToDoubles(JSContext* cx, uintptr_t elements);
static bool MakeElementsCopyOnWrite(JSContext* cx, NativeObject* obj);
static MOZ_MUST_USE bool PreventExtensions(JSContext* cx, NativeObject* obj);

View file

@ -538,7 +538,7 @@
macro(JSOP_GETELEM, 55, "getelem", NULL, 1, 2, 1, JOF_BYTE |JOF_ELEM|JOF_TYPESET|JOF_LEFTASSOC) \
/*
* Pops the top three values on the stack as 'val', 'propval' and 'obj',
* sets 'propval' property of 'obj' as 'val', pushes 'obj' onto the
* sets 'propval' property of 'obj' as 'val', pushes 'val' onto the
* stack.
* Category: Literals
* Type: Object
@ -548,7 +548,7 @@
macro(JSOP_SETELEM, 56, "setelem", NULL, 1, 3, 1, JOF_BYTE |JOF_ELEM|JOF_PROPSET|JOF_DETECTING|JOF_CHECKSLOPPY) \
/*
* Pops the top three values on the stack as 'val', 'propval' and 'obj',
* sets 'propval' property of 'obj' as 'val', pushes 'obj' onto the
* sets 'propval' property of 'obj' as 'val', pushes 'val' onto the
* stack. Throws a TypeError if the set fails, per strict mode
* semantics.
* Category: Literals
@ -1281,7 +1281,7 @@
* Category: Literals
* Type: Object
* Operands:
* Stack: propval, receiver, obj => obj[propval]
* Stack: receiver, propval, obj => obj[propval]
*/ \
macro(JSOP_GETELEM_SUPER, 125, "getelem-super", NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_TYPESET|JOF_LEFTASSOC) \
macro(JSOP_UNUSED126, 126, "unused126", NULL, 5, 0, 1, JOF_UINT32) \
@ -1639,7 +1639,7 @@
* Category: Literals
* Type: Object
* Operands:
* Stack: propval, receiver, obj, val => val
* Stack: receiver, propval, obj, val => val
*/ \
macro(JSOP_SETELEM_SUPER, 158, "setelem-super", NULL, 1, 4, 1, JOF_BYTE |JOF_ELEM|JOF_PROPSET|JOF_DETECTING|JOF_CHECKSLOPPY) \
/*
@ -1648,7 +1648,7 @@
* Category: Literals
* Type: Object
* Operands:
* Stack: propval, receiver, obj, val => val
* Stack: receiver, propval, obj, val => val
*/ \
macro(JSOP_STRICTSETELEM_SUPER, 159, "strict-setelem-super", NULL, 1, 4, 1, JOF_BYTE |JOF_ELEM|JOF_PROPSET|JOF_DETECTING|JOF_CHECKSTRICT) \
\

View file

@ -2879,7 +2879,7 @@ ReflectorNode::edges(JSContext* cx, bool wantNames) const
}
}
}
return range;
return js::UniquePtr<EdgeRange>(range.release());
}
} // Namespace ubi

View file

@ -4,8 +4,51 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// A note on the differences between mozilla::HashTable and PLDHashTable (and
// its subclasses, such as nsTHashtable).
//---------------------------------------------------------------------------
// Overview
//---------------------------------------------------------------------------
//
// This file defines HashMap<Key, Value> and HashSet<T>, hash tables that are
// fast and have a nice API.
//
// Both hash tables have two optional template parameters.
//
// - HashPolicy. This defines the operations for hashing and matching keys. The
// default HashPolicy is appropriate when both of the following two
// conditions are true.
//
// - The key type stored in the table (|Key| for |HashMap<Key, Value>|, |T|
// for |HashSet<T>|) is an integer, pointer, UniquePtr, float, double, or
// char*.
//
// - The type used for lookups (|Lookup|) is the same as the key type. This
// is usually the case, but not always.
//
// Otherwise, you must provide your own hash policy; see the "Hash Policy"
// section below.
//
// - AllocPolicy. This defines how allocations are done by the table.
//
// - |MallocAllocPolicy| is the default and is usually appropriate; note that
// operations (such as insertions) that might cause allocations are
// fallible and must be checked for OOM. These checks are enforced by the
// use of MOZ_MUST_USE.
//
// - |InfallibleAllocPolicy| is another possibility; it allows the
// abovementioned OOM checks to be done with MOZ_ALWAYS_TRUE().
//
// See AllocPolicy.h for more details.
//
// Documentation on how to use HashMap and HashSet, including examples, is
// present within those classes. Search for "class HashMap" and "class
// HashSet".
//
// Both HashMap and HashSet are implemented on top of a third class, HashTable.
// You only need to look at HashTable if you want to understand the
// implementation.
//
// How does mozilla::HashTable (this file) compare with PLDHashTable (and its
// subclasses, such as nsTHashtable)?
//
// - mozilla::HashTable is a lot faster, largely because it uses templates
// throughout *and* inlines everything. PLDHashTable inlines operations much
@ -18,11 +61,8 @@
// - mozilla::HashTable has a nicer API, with a proper HashSet vs. HashMap
// distinction.
//
// - mozilla::HashTable requires more explicit OOM checking. Use
// mozilla::InfallibleAllocPolicy to make allocations infallible; note that
// return values of possibly-allocating methods such as add() will still need
// checking in some fashion -- e.g. with MOZ_ALWAYS_TRUE() -- due to the use
// of MOZ_MUST_USE.
// - mozilla::HashTable requires more explicit OOM checking. As mentioned
// above, the use of |InfallibleAllocPolicy| can simplify things.
//
// - mozilla::HashTable has a default capacity on creation of 32 and a minimum
// capacity of 4. PLDHashTable has a default capacity on creation of 8 and a
@ -67,8 +107,6 @@ class HashTable;
} // namespace detail
/*****************************************************************************/
// The "generation" of a hash table is an opaque value indicating the state of
// modification of the hash table through its lifetime. If the generation of
// a hash table compares equal at times T1 and T2, then lookups in the hash
@ -81,21 +119,22 @@ class HashTable;
// tables H1 and H2.
using Generation = Opaque<uint64_t>;
// A performant, STL-like container providing a hash-based map from keys to
// values. In particular, HashMap calls constructors and destructors of all
// objects added so non-PODs may be used safely.
//---------------------------------------------------------------------------
// HashMap
//---------------------------------------------------------------------------
// HashMap is a fast hash-based map from keys to values.
//
// Key/Value requirements:
// - movable, destructible, assignable
// HashPolicy requirements:
// - see Hash Policy section below
// AllocPolicy:
// - see AllocPolicy.h
// Template parameter requirements:
// - Key/Value: movable, destructible, assignable.
// - HashPolicy: see the "Hash Policy" section below.
// - AllocPolicy: see AllocPolicy.h.
//
// Note:
// - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
// called by HashMap must not call back into the same HashMap object.
// - Due to the lack of exception handling, the user must call |init()|.
//
template<class Key,
class Value,
class HashPolicy = DefaultHasher<Key>,
@ -124,47 +163,50 @@ public:
using Lookup = typename HashPolicy::Lookup;
using Entry = TableEntry;
// HashMap construction is fallible (due to OOM); thus the user must call
// init after constructing a HashMap and check the return value.
// HashMap construction is fallible (due to possible OOM). The user must
// call init() after construction and check the return value.
explicit HashMap(AllocPolicy aPolicy = AllocPolicy())
: mImpl(aPolicy)
{
}
// Initialize the map for use. Must be called after construction, before
// any other operations (other than initialized()).
MOZ_MUST_USE bool init(uint32_t aLen = 16) { return mImpl.init(aLen); }
// Has the map been initialized?
bool initialized() const { return mImpl.initialized(); }
// Return whether the given lookup value is present in the map. E.g.:
// Return a Ptr indicating whether a key/value matching |aLookup| is
// present in the map. E.g.:
//
// using HM = HashMap<int,char>;
// HM h;
// if (HM::Ptr p = h.lookup(3)) {
// const HM::Entry& e = *p; // p acts like a pointer to Entry
// assert(p->key == 3); // Entry contains the key
// char val = p->value; // and value
// assert(p->key() == 3);
// char val = p->value();
// }
//
// Also see the definition of Ptr in HashTable above (with T = Entry).
using Ptr = typename Impl::Ptr;
MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const
{
return mImpl.lookup(aLookup);
}
// Like lookup, but does not assert if two threads call lookup at the same
// Like lookup(), but does not assert if two threads call it at the same
// time. Only use this method when none of the threads will modify the map.
MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const
{
return mImpl.readonlyThreadsafeLookup(aLookup);
}
// Assuming |p.found()|, remove |*p|.
// Remove a previously found key/value (assuming aPtr.found()). The map
// must not have been mutated in the interim.
void remove(Ptr aPtr) { mImpl.remove(aPtr); }
// Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
// insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
// |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.:
// |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new key/value. E.g.:
//
// using HM = HashMap<int,char>;
// HM h;
@ -174,18 +216,15 @@ public:
// return false;
// }
// }
// const HM::Entry& e = *p; // p acts like a pointer to Entry
// assert(p->key == 3); // Entry contains the key
// char val = p->value; // and value
// assert(p->key() == 3);
// char val = p->value();
//
// Also see the definition of AddPtr in HashTable above (with T = Entry).
//
// N.B. The caller must ensure that no mutating hash table operations
// occur between a pair of |lookupForAdd| and |add| calls. To avoid
// looking up the key a second time, the caller may use the more efficient
// relookupOrAdd method. This method reuses part of the hashing computation
// to more efficiently insert the key if it has not been added. For
// example, a mutation-handling version of the previous example:
// N.B. The caller must ensure that no mutating hash table operations occur
// between a pair of lookupForAdd() and add() calls. To avoid looking up the
// key a second time, the caller may use the more efficient relookupOrAdd()
// method. This method reuses part of the hashing computation to more
// efficiently insert the key if it has not been added. For example, a
// mutation-handling version of the previous example:
//
// HM::AddPtr p = h.lookupForAdd(3);
// if (!p) {
@ -194,9 +233,8 @@ public:
// return false;
// }
// }
// const HM::Entry& e = *p;
// assert(p->key == 3);
// char val = p->value;
// assert(p->key() == 3);
// char val = p->value();
//
using AddPtr = typename Impl::AddPtr;
MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) const
@ -204,6 +242,7 @@ public:
return mImpl.lookupForAdd(aLookup);
}
// Add a key/value. Returns false on OOM.
template<typename KeyInput, typename ValueInput>
MOZ_MUST_USE bool add(AddPtr& aPtr, KeyInput&& aKey, ValueInput&& aValue)
{
@ -211,12 +250,14 @@ public:
aPtr, std::forward<KeyInput>(aKey), std::forward<ValueInput>(aValue));
}
// Add a given key and a default value. Returns false on OOM.
template<typename KeyInput>
MOZ_MUST_USE bool add(AddPtr& aPtr, KeyInput&& aKey)
{
return mImpl.add(aPtr, std::forward<KeyInput>(aKey), Value());
}
// See the comment above lookupForAdd() for details.
template<typename KeyInput, typename ValueInput>
MOZ_MUST_USE bool relookupOrAdd(AddPtr& aPtr,
KeyInput&& aKey,
@ -235,7 +276,6 @@ public:
// char c = iter.get().value();
// }
//
// Also see the definition of Iterator in HashTable above (with T = Entry).
using Iterator = typename Impl::Iterator;
Iterator iter() const { return mImpl.iter(); }
@ -248,41 +288,38 @@ public:
// }
// }
//
// Table resize may occur in ModIterator's destructor. Also see the
// definition of ModIterator in HashTable above (with T = Entry).
// Table resize may occur in ModIterator's destructor.
using ModIterator = typename Impl::ModIterator;
ModIterator modIter() { return mImpl.modIter(); }
// These are similar to Iterator/ModIterator/iter(), but use less common
// These are similar to Iterator/ModIterator/iter(), but use different
// terminology.
using Range = typename Impl::Range;
using Enum = typename Impl::Enum;
Range all() const { return mImpl.all(); }
// Remove all entries. This does not shrink the table. For that consider
// using the finish() method.
// Remove all keys/values without changing the capacity.
void clear() { mImpl.clear(); }
// Remove all entries. Unlike clear() this method tries to shrink the table.
// Unlike finish() it does not require the map to be initialized again.
// Remove all keys/values and attempt to minimize the capacity.
void clearAndShrink() { mImpl.clearAndShrink(); }
// Remove all the entries and release all internal buffers. The map must
// be initialized again before any use.
// Remove all keys/values and release entry storage. The map must be
// initialized via init() again before further use.
void finish() { mImpl.finish(); }
// Does the table contain any entries?
// Is the map empty?
bool empty() const { return mImpl.empty(); }
// Number of live elements in the map.
// Number of keys/values in the map.
uint32_t count() const { return mImpl.count(); }
// Total number of allocation in the dynamic table. Note: resize will
// happen well before count() == capacity().
// Number of key/value slots in the map. Note: resize will happen well before
// count() == capacity().
size_t capacity() const { return mImpl.capacity(); }
// Measure the size of the HashMap's entry storage. If the entries contain
// pointers to other heap blocks, you must iterate over the table and measure
// The size of the map's entry storage, in bytes. If the keys/values contain
// pointers to other heap blocks, you must iterate over the map and measure
// them separately; hence the "shallow" prefix.
size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
@ -294,16 +331,19 @@ public:
mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
}
// The map's current generation.
Generation generation() const { return mImpl.generation(); }
/************************************************** Shorthand operations */
// Does the map contain a key/value matching |aLookup|?
bool has(const Lookup& aLookup) const
{
return mImpl.lookup(aLookup).found();
}
// Overwrite existing value with aValue. Return false on oom.
// Overwrite existing value with |aValue|, or add it if not present. Returns
// false on OOM.
template<typename KeyInput, typename ValueInput>
MOZ_MUST_USE bool put(KeyInput&& aKey, ValueInput&& aValue)
{
@ -316,7 +356,7 @@ public:
p, std::forward<KeyInput>(aKey), std::forward<ValueInput>(aValue));
}
// Like put, but assert that the given key is not already present.
// Like put(), but asserts that the given key is not already present.
template<typename KeyInput, typename ValueInput>
MOZ_MUST_USE bool putNew(KeyInput&& aKey, ValueInput&& aValue)
{
@ -333,7 +373,7 @@ public:
}
// Add (aKey,aDefaultValue) if |aKey| is not found. Return a false-y Ptr on
// oom.
// OOM.
Ptr lookupWithDefault(const Key& aKey, const Value& aDefaultValue)
{
AddPtr p = lookupForAdd(aKey);
@ -341,12 +381,12 @@ public:
return p;
}
bool ok = add(p, aKey, aDefaultValue);
MOZ_ASSERT_IF(!ok, !p); // p is left false-y on oom.
MOZ_ASSERT_IF(!ok, !p); // p is left false-y on OOM.
(void)ok;
return p;
}
// Remove if present.
// Lookup and remove the key/value matching |aLookup|, if present.
void remove(const Lookup& aLookup)
{
if (Ptr p = lookup(aLookup)) {
@ -354,9 +394,8 @@ public:
}
}
// Infallibly rekey one entry, if necessary.
// Requires template parameters Key and HashPolicy::Lookup to be the same
// type.
// Infallibly rekey one entry, if necessary. Requires that template
// parameters Key and HashPolicy::Lookup are the same type.
void rekeyIfMoved(const Key& aOldKey, const Key& aNewKey)
{
if (aOldKey != aNewKey) {
@ -376,7 +415,7 @@ public:
return false;
}
// HashMap is movable
// HashMap is movable.
HashMap(HashMap&& aRhs)
: mImpl(std::move(aRhs.mImpl))
{
@ -388,63 +427,67 @@ public:
}
private:
// HashMap is not copyable or assignable
// HashMap is not copyable or assignable.
HashMap(const HashMap& hm) = delete;
HashMap& operator=(const HashMap& hm) = delete;
friend class Impl::Enum;
};
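
Taken together, the commented API above amounts to the following end-to-end usage pattern (a sketch against the documented interface: fallible init(), put(), lookup(), and iter(); error handling kept minimal):

    #include "mozilla/HashTable.h"
    #include <cstdio>

    static bool Demo() {
        mozilla::HashMap<int, char> map;
        if (!map.init()) {                           // construction is fallible;
            return false;                            // init() is mandatory
        }
        if (!map.put(3, 'a') || !map.put(4, 'b')) {  // put() adds or overwrites
            return false;
        }
        if (mozilla::HashMap<int, char>::Ptr p = map.lookup(3)) {
            std::printf("3 -> %c\n", p->value());    // Ptr acts like a pointer
        }
        for (auto iter = map.iter(); !iter.done(); iter.next()) {
            std::printf("%d -> %c\n", iter.get().key(), iter.get().value());
        }
        return true;
    }

With MOZ_MUST_USE on the fallible methods, each boolean result above must be consumed; under InfallibleAllocPolicy the checks would collapse to MOZ_ALWAYS_TRUE(...), as the overview notes.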
/*****************************************************************************/
//---------------------------------------------------------------------------
// HashSet
//---------------------------------------------------------------------------
// A performant, STL-like container providing a hash-based set of values. In
// particular, HashSet calls constructors and destructors of all objects added
// so non-PODs may be used safely.
// HashSet is a fast hash-based set of values.
//
// T requirements:
// - movable, destructible, assignable
// HashPolicy requirements:
// - see Hash Policy section below
// AllocPolicy:
// - see AllocPolicy.h
// Template parameter requirements:
// - T: movable, destructible, assignable.
// - HashPolicy: see the "Hash Policy" section below.
// - AllocPolicy: see AllocPolicy.h
//
// Note:
// - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
// HashSet must not call back into the same HashSet object.
// - Due to the lack of exception handling, the user must call |init()|.
//
template<class T,
class HashPolicy = DefaultHasher<T>,
class AllocPolicy = MallocAllocPolicy>
class HashSet
{
struct SetOps : HashPolicy
struct SetHashPolicy : HashPolicy
{
using Base = HashPolicy;
using KeyType = T;
static const KeyType& getKey(const T& aT) { return aT; }
static void setKey(T& aT, KeyType& aKey) { HashPolicy::rekey(aT, aKey); }
};
using Impl = detail::HashTable<const T, SetOps, AllocPolicy>;
using Impl = detail::HashTable<const T, SetHashPolicy, AllocPolicy>;
Impl mImpl;
public:
using Lookup = typename HashPolicy::Lookup;
using Entry = T;
// HashSet construction is fallible (due to OOM); thus the user must call
// init after constructing a HashSet and check the return value.
// HashSet construction is fallible (due to possible OOM). The user must call
// init() after construction and check the return value.
explicit HashSet(AllocPolicy a = AllocPolicy())
: mImpl(a)
{
}
// Initialize the set for use. Must be called after construction, before
// any other operations (other than initialized()).
MOZ_MUST_USE bool init(uint32_t aLen = 16) { return mImpl.init(aLen); }
// Has the set been initialized?
bool initialized() const { return mImpl.initialized(); }
// Return whether the given lookup value is present in the map. E.g.:
// Return a Ptr indicating whether an element matching |aLookup| is present
// in the set. E.g.:
//
// using HS = HashSet<int>;
// HS h;
@ -452,21 +495,21 @@ public:
// assert(*p == 3); // p acts like a pointer to int
// }
//
// Also see the definition of Ptr in HashTable above.
using Ptr = typename Impl::Ptr;
MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const
{
return mImpl.lookup(aLookup);
}
// Like lookup, but does not assert if two threads call lookup at the same
// time. Only use this method when none of the threads will modify the map.
// Like lookup(), but does not assert if two threads call it at the same
// time. Only use this method when none of the threads will modify the set.
MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const
{
return mImpl.readonlyThreadsafeLookup(aLookup);
}
// Assuming |aPtr.found()|, remove |*aPtr|.
// Remove a previously found element (assuming aPtr.found()). The set must
// not have been mutated in the interim.
void remove(Ptr aPtr) { mImpl.remove(aPtr); }
// Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
@ -483,14 +526,12 @@ public:
// }
// assert(*p == 3); // p acts like a pointer to int
//
// Also see the definition of AddPtr in HashTable above.
//
// N.B. The caller must ensure that no mutating hash table operations
// occur between a pair of |lookupForAdd| and |add| calls. To avoid
// looking up the key a second time, the caller may use the more efficient
// relookupOrAdd method. This method reuses part of the hashing computation
// to more efficiently insert the key if it has not been added. For
// example, a mutation-handling version of the previous example:
// N.B. The caller must ensure that no mutating hash table operations occur
// between a pair of lookupForAdd() and add() calls. To avoid looking up the
// key a second time, the caller may use the more efficient relookupOrAdd()
// method. This method reuses part of the hashing computation to more
// efficiently insert the key if it has not been added. For example, a
// mutation-handling version of the previous example:
//
// HS::AddPtr p = h.lookupForAdd(3);
// if (!p) {
@ -509,12 +550,14 @@ public:
return mImpl.lookupForAdd(aLookup);
}
// Add an element. Returns false on OOM.
template<typename U>
MOZ_MUST_USE bool add(AddPtr& aPtr, U&& aU)
{
return mImpl.add(aPtr, std::forward<U>(aU));
}
// See the comment above lookupForAdd() for details.
template<typename U>
MOZ_MUST_USE bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup, U&& aU)
{
@ -528,7 +571,6 @@ public:
// int i = iter.get();
// }
//
// Also see the definition of Iterator in HashTable above.
typedef typename Impl::Iterator Iterator;
Iterator iter() const { return mImpl.iter(); }
@ -541,8 +583,7 @@ public:
// }
// }
//
// Table resize may occur in ModIterator's destructor. Also see the
// definition of ModIterator in HashTable above.
// Table resize may occur in ModIterator's destructor.
typedef typename Impl::ModIterator ModIterator;
ModIterator modIter() { return mImpl.modIter(); }
@ -552,30 +593,28 @@ public:
using Enum = typename Impl::Enum;
Range all() const { return mImpl.all(); }
// Remove all entries. This does not shrink the table. For that consider
// using the finish() method.
// Remove all elements without changing the capacity.
void clear() { mImpl.clear(); }
// Remove all entries. Unlike clear() this method tries to shrink the table.
// Unlike finish() it does not require the set to be initialized again.
// Remove all elements and attempt to minimize the capacity.
void clearAndShrink() { mImpl.clearAndShrink(); }
// Remove all the entries and release all internal buffers. The set must
// be initialized again before any use.
// Remove all keys/values and release entry storage. The set must be
// initialized via init() again before further use.
void finish() { mImpl.finish(); }
// Does the table contain any entries?
// Is the set empty?
bool empty() const { return mImpl.empty(); }
// Number of live elements in the map.
// Number of elements in the set.
uint32_t count() const { return mImpl.count(); }
// Total number of allocation in the dynamic table. Note: resize will
// happen well before count() == capacity().
// Number of element slots in the set. Note: resize will happen well before
// count() == capacity().
size_t capacity() const { return mImpl.capacity(); }
// Measure the size of the HashSet's entry storage. If the entries contain
// pointers to other heap blocks, you must iterate over the table and measure
// The size of the HashSet's entry storage, in bytes. If the elements contain
// pointers to other heap blocks, you must iterate over the set and measure
// them separately; hence the "shallow" prefix.
size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
@ -587,16 +626,18 @@ public:
mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
}
// The set's current generation.
Generation generation() const { return mImpl.generation(); }
/************************************************** Shorthand operations */
// Does the set contain an element matching |aLookup|?
bool has(const Lookup& aLookup) const
{
return mImpl.lookup(aLookup).found();
}
// Add |aU| if it is not present already. Return false on oom.
// Add |aU| if it is not present already. Returns false on OOM.
template<typename U>
MOZ_MUST_USE bool put(U&& aU)
{
@ -604,13 +645,14 @@ public:
return p ? true : add(p, std::forward<U>(aU));
}
// Like put, but assert that the given key is not already present.
// Like put(), but asserts that the given key is not already present.
template<typename U>
MOZ_MUST_USE bool putNew(U&& aU)
{
return mImpl.putNew(aU, std::forward<U>(aU));
}
// Like the other putNew(), but for when |Lookup| is different to |T|.
template<typename U>
MOZ_MUST_USE bool putNew(const Lookup& aLookup, U&& aU)
{
@ -624,6 +666,7 @@ public:
mImpl.putNewInfallible(aLookup, std::forward<U>(aU));
}
// Lookup and remove the element matching |aLookup|, if present.
void remove(const Lookup& aLookup)
{
if (Ptr p = lookup(aLookup)) {
@ -631,8 +674,8 @@ public:
}
}
// Infallibly rekey one entry, if present.
// Requires template parameters T and HashPolicy::Lookup to be the same type.
// Infallibly rekey one entry, if present. Requires that template parameters
// T and HashPolicy::Lookup are the same type.
void rekeyIfMoved(const Lookup& aOldValue, const T& aNewValue)
{
if (aOldValue != aNewValue) {
@ -652,7 +695,7 @@ public:
return false;
}
// Infallibly replace the current key at |p| with an equivalent key.
// Infallibly replace the current key at |aPtr| with an equivalent key.
// Specifically, both HashPolicy::hash and HashPolicy::match must return
// identical results for the new and old key when applied against all
// possible matching values.
@ -665,7 +708,7 @@ public:
const_cast<T&>(*aPtr) = aNewValue;
}
// HashSet is movable
// HashSet is movable.
HashSet(HashSet&& aRhs)
: mImpl(std::move(aRhs.mImpl))
{
@ -684,36 +727,37 @@ private:
friend class Impl::Enum;
};
/*****************************************************************************/
//---------------------------------------------------------------------------
// Hash Policy
//---------------------------------------------------------------------------
// A hash policy |HP| for a hash table with key-type |Key| must provide:
//
// A hash policy P for a hash table with key-type Key must provide:
// - a type |P::Lookup| to use to lookup table entries;
// - a static member function |P::hash| with signature
// - a type |HP::Lookup| to use to lookup table entries;
//
// static mozilla::HashNumber hash(Lookup)
// - a static member function |HP::hash| that hashes lookup values:
//
// to use to hash the lookup type; and
// - a static member function |P::match| with signature
// static mozilla::HashNumber hash(const Lookup&);
//
// static bool match(Key, Lookup)
// - a static member function |HP::match| that tests equality of key and
// lookup values:
//
// to use to test equality of key and lookup values.
// static bool match(const Key&, const Lookup&);
//
// Normally, Lookup = Key. In general, though, different values and types of
// values can be used to lookup and store. If a Lookup value |l| is != to the
// added Key value |k|, the user must ensure that |P::match(k,l)|. E.g.:
// values can be used to lookup and store. If a Lookup value |l| is not equal
// to the added Key value |k|, the user must ensure that |HP::match(k,l)| is
// true. E.g.:
//
// mozilla::HashSet<Key, P>::AddPtr p = h.lookup(l);
// mozilla::HashSet<Key, HP>::AddPtr p = h.lookup(l);
// if (!p) {
// assert(P::match(k, l)); // must hold
// assert(HP::match(k, l)); // must hold
// h.add(p, k);
// }
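
As a concrete instance of those requirements, a policy for case-insensitive C-string keys could look like this (hypothetical example; mozilla::AddToHash comes from HashFunctions.h, and strcasecmp is POSIX, so Windows builds would use _stricmp instead):

    #include "mozilla/HashFunctions.h"
    #include <cctype>
    #include <strings.h>

    struct CaseInsensitiveCStringHasher {
        using Key = const char*;
        using Lookup = const char*;

        static mozilla::HashNumber hash(const Lookup& aLookup) {
            mozilla::HashNumber h = 0;
            for (const char* s = aLookup; *s; ++s) {
                h = mozilla::AddToHash(h, std::tolower((unsigned char)*s));
            }
            return h;
        }
        static bool match(const Key& aKey, const Lookup& aLookup) {
            return strcasecmp(aKey, aLookup) == 0;
        }
    };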
// Pointer hashing policy that uses HashGeneric() to create good hashes for
// pointers. Note that we don't shift out the lowest k bits to generate a
// good distribution for arena allocated pointers.
// A pointer hashing policy that uses HashGeneric() to create good hashes for
// pointers. Note that we don't shift out the lowest k bits because we don't
// want to assume anything about the alignment of the pointers.
template<typename Key>
struct PointerHasher
{
@ -733,10 +777,7 @@ struct PointerHasher
static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
};
// Default hash policy: just use the 'lookup' value. This of course only
// works if the lookup value is integral. HashTable applies ScrambleHashCode to
// the result of the 'hash' which means that it is 'ok' if the lookup value is
// not well distributed over the HashNumber domain.
// The default hash policy, which only works with integers.
template<class Key>
struct DefaultHasher
{
@ -744,7 +785,9 @@ struct DefaultHasher
static HashNumber hash(const Lookup& aLookup)
{
// Hash if can implicitly cast to hash number type.
// Just convert the integer to a HashNumber and use that as is. (This
// discards the high 32-bits of 64-bit integers!) ScrambleHashCode() is
// subsequently called on the value to improve the distribution.
return aLookup;
}
@ -757,19 +800,18 @@ struct DefaultHasher
static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
};
// Specialize hashing policy for pointer types. It assumes that the type is
// at least word-aligned. For types with smaller size use PointerHasher.
// A DefaultHasher specialization for pointers.
template<class T>
struct DefaultHasher<T*> : PointerHasher<T*>
{
};
// Specialize hashing policy for mozilla::UniquePtr to proxy the UniquePtr's
// raw pointer to PointerHasher.
// A DefaultHasher specialization for mozilla::UniquePtr.
template<class T, class D>
struct DefaultHasher<UniquePtr<T, D>>
{
using Lookup = UniquePtr<T, D>;
using Key = UniquePtr<T, D>;
using Lookup = Key;
using PtrHasher = PointerHasher<T*>;
static HashNumber hash(const Lookup& aLookup)
@ -777,7 +819,7 @@ struct DefaultHasher<UniquePtr<T, D>>
return PtrHasher::hash(aLookup.get());
}
static bool match(const UniquePtr<T, D>& aKey, const Lookup& aLookup)
static bool match(const Key& aKey, const Lookup& aLookup)
{
return PtrHasher::match(aKey.get(), aLookup.get());
}
@ -788,59 +830,69 @@ struct DefaultHasher<UniquePtr<T, D>>
}
};
// For doubles, we can xor the two uint32s.
// A DefaultHasher specialization for doubles.
template<>
struct DefaultHasher<double>
{
using Lookup = double;
using Key = double;
using Lookup = Key;
static HashNumber hash(double aVal)
static HashNumber hash(const Lookup& aLookup)
{
// Just xor the high bits with the low bits, and then treat the bits of the
// result as a uint32_t.
static_assert(sizeof(HashNumber) == 4,
"subsequent code assumes a four-byte hash");
uint64_t u = BitwiseCast<uint64_t>(aVal);
uint64_t u = BitwiseCast<uint64_t>(aLookup);
return HashNumber(u ^ (u >> 32));
}
static bool match(double aLhs, double aRhs)
static bool match(const Key& aKey, const Lookup& aLookup)
{
return BitwiseCast<uint64_t>(aLhs) == BitwiseCast<uint64_t>(aRhs);
return BitwiseCast<uint64_t>(aKey) == BitwiseCast<uint64_t>(aLookup);
}
};
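
One consequence of hashing and matching on the raw bits is worth keeping in mind: +0.0 and -0.0 are distinct keys under this policy, and two NaNs match only when their bit patterns agree. A standalone check of the fold (using memcpy as a stand-in for BitwiseCast):

    #include <cstdint>
    #include <cstring>

    static uint32_t HashDouble(double d) {
        uint64_t u;
        std::memcpy(&u, &d, sizeof u);   // type-pun safely, like BitwiseCast
        return uint32_t(u ^ (u >> 32));  // xor the high half into the low half
    }
    // HashDouble(1.0) == 0x3ff00000, and HashDouble(0.0) != HashDouble(-0.0).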
// A DefaultHasher specialization for floats.
template<>
struct DefaultHasher<float>
{
using Lookup = float;
using Key = float;
using Lookup = Key;
static HashNumber hash(float aVal)
static HashNumber hash(const Lookup& aLookup)
{
// Just use the value as if its bits form an integer. ScrambleHashCode() is
// subsequently called on the value to improve the distribution.
static_assert(sizeof(HashNumber) == 4,
"subsequent code assumes a four-byte hash");
return HashNumber(BitwiseCast<uint32_t>(aVal));
return HashNumber(BitwiseCast<uint32_t>(aLookup));
}
static bool match(float aLhs, float aRhs)
static bool match(const Key& aKey, const Lookup& aLookup)
{
return BitwiseCast<uint32_t>(aLhs) == BitwiseCast<uint32_t>(aRhs);
return BitwiseCast<uint32_t>(aKey) == BitwiseCast<uint32_t>(aLookup);
}
};
// A hash policy that compares C strings.
// A hash policy for C strings.
struct CStringHasher
{
using Key = const char*;
using Lookup = const char*;
static HashNumber hash(Lookup aLookup) { return HashString(aLookup); }
static HashNumber hash(const Lookup& aLookup) { return HashString(aLookup); }
static bool match(const char* key, Lookup lookup)
static bool match(const Key& aKey, const Lookup& aLookup)
{
return strcmp(key, lookup) == 0;
return strcmp(aKey, aLookup) == 0;
}
};
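
A short usage sketch (the set stores only the pointers, so the strings must outlive it):

    #include "mozilla/HashTable.h"

    static bool HasFoo() {
        mozilla::HashSet<const char*, mozilla::CStringHasher> set;
        if (!set.init() || !set.putNew("foo")) {
            return false;
        }
        return set.has("foo");   // matched via strcmp, not pointer identity
    }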
// Fallible hashing interface.
//
//---------------------------------------------------------------------------
// Fallible Hashing Interface
//---------------------------------------------------------------------------
// Most of the time generating a hash code is infallible so this class provides
// default methods that always succeed. Specialize this class for your own hash
// policy to provide fallible hashing.
@ -883,7 +935,9 @@ EnsureHash(Lookup&& aLookup)
std::forward<Lookup>(aLookup));
}
/*****************************************************************************/
//---------------------------------------------------------------------------
// Implementation Details (HashMapEntry, HashTableEntry, HashTable)
//---------------------------------------------------------------------------
// Both HashMap and HashSet are implemented by a single HashTable that is even
// more heavily parameterized than the other two. This leaves HashTable gnarly
@ -927,6 +981,9 @@ public:
using ValueType = Value;
const Key& key() const { return key_; }
// Use this method with caution! If the key is changed such that its hash
// value also changes, the map will be left in an invalid state.
Key& mutableKey() { return key_; }
const Value& value() const { return value_; }
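
The mutableKey() warning above is the reason callers that need to change a key normally go through rekeyIfMoved(), documented earlier in this file, which re-inserts the entry under its new hash instead of mutating it in place. A hedged sketch (the JSObject* key type is just illustrative):

    #include "mozilla/HashTable.h"

    class JSObject;  // illustrative key type

    static void UpdateKeyAfterMove(mozilla::HashMap<JSObject*, uint32_t>& aMap,
                                   JSObject* aOld, JSObject* aNew) {
        // Writing through mutableKey() here would change the key's hash while
        // the entry stayed in its old bucket; rekeyIfMoved() keeps the table
        // consistent by removing and re-adding the entry.
        aMap.rekeyIfMoved(aOld, aNew);
    }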

View file

@ -23,6 +23,30 @@ import org.junit.runner.RunWith
@MediumTest
class ProgressDelegateTest : BaseSessionTest() {
@Test fun loadProgress() {
sessionRule.session.loadTestPath(HELLO_HTML_PATH)
sessionRule.waitForPageStop()
var counter = 0
var lastProgress = -1
sessionRule.forCallbacksDuringWait(object : Callbacks.ProgressDelegate {
@AssertCalled
override fun onProgressChange(session: GeckoSession, progress: Int) {
assertThat("Progress must be strictly increasing", progress,
greaterThan(lastProgress))
lastProgress = progress
counter++
}
})
assertThat("Callback should be called at least twice", counter,
greaterThanOrEqualTo(2))
assertThat("Last progress value should be 100", lastProgress,
equalTo(100))
}
@Test fun load() {
sessionRule.session.loadTestPath(HELLO_HTML_PATH)
sessionRule.waitForPageStop()

View file

@ -34,7 +34,9 @@ import android.util.Log;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.View;
import android.view.WindowManager;
import android.widget.ProgressBar;
import java.util.LinkedList;
import java.util.Locale;
@ -63,6 +65,8 @@ public class GeckoViewActivity extends AppCompatActivity {
private boolean mCanGoForward;
private boolean mFullScreen;
private ProgressBar mProgressView;
private LinkedList<GeckoSession.WebResponseInfo> mPendingDownloads = new LinkedList<>();
private LocationView.CommitListener mCommitListener = new LocationView.CommitListener() {
@ -95,6 +99,7 @@ public class GeckoViewActivity extends AppCompatActivity {
getSupportActionBar().setDisplayOptions(ActionBar.DISPLAY_SHOW_CUSTOM);
mUseMultiprocess = getIntent().getBooleanExtra(USE_MULTIPROCESS_EXTRA, true);
mProgressView = (ProgressBar) findViewById(R.id.page_progress);
if (sGeckoRuntime == null) {
final GeckoRuntimeSettings.Builder runtimeSettingsBuilder =
@ -436,6 +441,14 @@ public class GeckoViewActivity extends AppCompatActivity {
@Override
public void onProgressChange(GeckoSession session, int progress) {
Log.i(LOGTAG, "onProgressChange " + progress);
mProgressView.setProgress(progress, true);
if (progress > 0 && progress < 100) {
mProgressView.setVisibility(View.VISIBLE);
} else {
mProgressView.setVisibility(View.GONE);
}
}
@Override

View file

@ -17,4 +17,12 @@
android:layout_width="match_parent"
android:layout_height="?android:actionBarSize"
android:layout_alignParentBottom="true"/>
<ProgressBar
android:id="@+id/page_progress"
style="@style/Base.Widget.AppCompat.ProgressBar.Horizontal"
android:layout_width="match_parent"
android:layout_height="3dp"
android:layout_alignTop="@id/gecko_view"
android:progress="70" />
</RelativeLayout>

View file

@ -274,6 +274,9 @@ case "$target" in
x86_64-*)
MIDL_FLAGS="${MIDL_FLAGS} -env x64"
;;
aarch64-*)
MIDL_FLAGS="${MIDL_FLAGS} -env arm64"
;;
esac
unset _MSVC_VER_FILTER

View file

@ -76,7 +76,7 @@ CPU_preprocessor_checks = OrderedDict((
('x86', '__i386__ || _M_IX86'),
('x86_64', '__x86_64__ || _M_X64'),
('arm', '__arm__ || _M_ARM'),
('aarch64', '__aarch64__'),
('aarch64', '__aarch64__ || _M_ARM64'),
('ia64', '__ia64__'),
('s390x', '__s390x__'),
('s390', '__s390__'),
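
The added _M_ARM64 matters because MSVC targeting Windows on ARM64 defines only the Microsoft-style macro, not __aarch64__. Each entry in this table is turned into a compile-time probe roughly of this shape (a sketch of the generated check, not the literal configure code):

    #if __aarch64__ || _M_ARM64
    /* 64-bit ARM: GCC/Clang define __aarch64__, MSVC defines _M_ARM64. */
    #else
    #error "not aarch64"
    #endif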