merge mozilla-inbound to mozilla-central a=merge

Carsten "Tomcat" Book 2017-01-16 16:35:14 +01:00
Parent a7c09b70d4 8292a2eba6
Commit 7cb92b4443
29 changed files with 669 additions and 552 deletions

View file

@ -14,7 +14,7 @@ let whitelist = [
{sourceName: /codemirror\.css$/i,
isFromDevTools: true},
// The debugger uses cross-browser CSS.
-  {sourceName: /devtools\/client\/debugger\/new\/styles.css/i,
+  {sourceName: /devtools\/client\/debugger\/new\/debugger.css/i,
isFromDevTools: true},
// PDFjs is futureproofing its pseudoselectors, and those rules are dropped.
{sourceName: /web\/viewer\.css$/i,

View file

@ -101,40 +101,48 @@ FormSubmitObserver.prototype =
return;
}
-    // Ensure that this is the FormSubmitObserver associated with the
-    // element / window this notification is about.
-    let element = aInvalidElements.queryElementAt(0, Ci.nsISupports);
-    if (this._content != element.ownerGlobal.top.document.defaultView) {
-      return;
-    }
+    // Show a validation message on the first focusable element.
+    for (let i = 0; i < aInvalidElements.length; i++) {
+      // Ensure that this is the FormSubmitObserver associated with the
+      // element / window this notification is about.
+      let element = aInvalidElements.queryElementAt(i, Ci.nsISupports);
+      if (this._content != element.ownerGlobal.top.document.defaultView) {
+        return;
+      }

-    if (!(element instanceof HTMLInputElement ||
-          element instanceof HTMLTextAreaElement ||
-          element instanceof HTMLSelectElement ||
-          element instanceof HTMLButtonElement)) {
-      return;
-    }
+      if (!(element instanceof HTMLInputElement ||
+            element instanceof HTMLTextAreaElement ||
+            element instanceof HTMLSelectElement ||
+            element instanceof HTMLButtonElement)) {
+        continue;
+      }

-    // Update validation message before showing notification
-    this._validationMessage = element.validationMessage;
+      if (!Services.focus.elementIsFocusable(element, 0)) {
+        continue;
+      }

+      // Update validation message before showing notification
+      this._validationMessage = element.validationMessage;

-    // Don't connect up to the same element more than once.
-    if (this._element == element) {
-      this._showPopup(element);
-      return;
-    }
-    this._element = element;
-
-    element.focus();
-
-    // Watch for input changes which may change the validation message.
-    element.addEventListener("input", this, false);
-
-    // Watch for focus changes so we can disconnect our listeners and
-    // hide the popup.
-    element.addEventListener("blur", this, false);
-
-    this._showPopup(element);
+      // Don't connect up to the same element more than once.
+      if (this._element == element) {
+        this._showPopup(element);
+        break;
+      }
+      this._element = element;
+
+      element.focus();
+
+      // Watch for input changes which may change the validation message.
+      element.addEventListener("input", this, false);
+
+      // Watch for focus changes so we can disconnect our listeners and
+      // hide the popup.
+      element.addEventListener("blur", this, false);
+
+      this._showPopup(element);
+      break;
+    }
  },
/*

View file

@ -41,3 +41,4 @@ support-files =
[browser_UsageTelemetry_content.js]
[browser_UsageTelemetry_content_aboutHome.js]
[browser_urlBar_zoom.js]
[browser_bug1319078.js]

View file

@ -0,0 +1,49 @@
"use strict";
var gInvalidFormPopup = document.getElementById('invalid-form-popup');
function checkPopupHide() {
ok(gInvalidFormPopup.state != 'showing' && gInvalidFormPopup.state != 'open',
"[Test " + testId + "] The invalid form popup should not be shown");
}
var testId = 0;
function incrementTest() {
testId++;
info("Starting next part of test");
}
/**
* In this test, we check that no popup appears if the element display is none.
*/
add_task(function* () {
ok(gInvalidFormPopup,
"The browser should have a popup to show when a form is invalid");
incrementTest();
let testPage =
'data:text/html,' +
'<form target="t"><input type="url" placeholder="url" value="http://" style="display: none;"><input id="s" type="button" value="check"></form>';
let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, testPage);
yield BrowserTestUtils.synthesizeMouse("#s", 0, 0, {}, gBrowser.selectedBrowser);
checkPopupHide();
yield BrowserTestUtils.removeTab(tab);
});
/**
* In this test, we check that no popup appears if the element visibility is hidden.
*/
add_task(function* () {
incrementTest();
let testPage =
'data:text/html,' +
'<form target="t"><input type="url" placeholder="url" value="http://" style="visibility: hidden;"><input id="s" type="button" value="check"></form>';
let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, testPage);
yield BrowserTestUtils.synthesizeMouse("#s", 0, 0, {}, gBrowser.selectedBrowser);
checkPopupHide();
yield BrowserTestUtils.removeTab(tab);
});

View file

@ -13,7 +13,7 @@
<link rel="stylesheet"
type="text/css"
href="chrome://devtools/content/sourceeditor/codemirror/mozilla.css" />
<link rel="stylesheet" type="text/css" href="resource://devtools/client/debugger/new/styles.css" />
<link rel="stylesheet" type="text/css" href="resource://devtools/client/debugger/new/debugger.css" />
</head>
<body>
<div id="mount"></div>
@ -26,6 +26,6 @@
window,
});
</script>
<script type="text/javascript" src="resource://devtools/client/debugger/new/bundle.js"></script>
<script type="text/javascript" src="resource://devtools/client/debugger/new/debugger.js"></script>
</body>
</html>

View file

@ -4,9 +4,9 @@
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
DevToolsModules(
-    'bundle.js',
+    'debugger.css',
+    'debugger.js',
     'panel.js',
     'pretty-print-worker.js',
-    'source-map-worker.js',
-    'styles.css'
+    'source-map-worker.js'
 )

View file

@ -97,12 +97,6 @@ public:
NS_IMETHOD GetScheme(nsACString &result) override;
};
class nsMediaSourceProtocolHandler : public nsHostObjectProtocolHandler
{
public:
NS_IMETHOD GetScheme(nsACString &result) override;
};
class nsFontTableProtocolHandler : public nsHostObjectProtocolHandler
{
public:

View file

@ -529,12 +529,12 @@ AudioContext::DecodeAudioData(const ArrayBuffer& aBuffer,
if (aSuccessCallback.WasPassed()) {
successCallback = &aSuccessCallback.Value();
}
-  RefPtr<WebAudioDecodeJob> job(
+  UniquePtr<WebAudioDecodeJob> job(
     new WebAudioDecodeJob(contentType, this,
                           promise, successCallback, failureCallback));
   AsyncDecodeWebAudio(contentType.get(), data, length, *job);
   // Transfer the ownership to mDecodeJobs
-  mDecodeJobs.AppendElement(job.forget());
+  mDecodeJobs.AppendElement(Move(job));
return promise.forget();
}
@ -542,7 +542,14 @@ AudioContext::DecodeAudioData(const ArrayBuffer& aBuffer,
void
AudioContext::RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob)
{
-  mDecodeJobs.RemoveElement(aDecodeJob);
+  // Since UniquePtr doesn't provide an operator== which allows you to compare
+  // against raw pointers, we need to iterate manually.
+  for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) {
+    if (mDecodeJobs[i].get() == aDecodeJob) {
+      mDecodeJobs.RemoveElementAt(i);
+      break;
+    }
+  }
}
void
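The two hunks above move mDecodeJobs from refcounted to unique ownership. A minimal standalone sketch of the same pattern, using std::vector and std::unique_ptr in place of nsTArray and mozilla::UniquePtr (the names below are illustrative, not Gecko API):

    #include <memory>
    #include <vector>

    struct DecodeJob { /* decoded buffer, callbacks, ... */ };

    class Context {
      std::vector<std::unique_ptr<DecodeJob>> mJobs;

    public:
      DecodeJob* Append() {
        auto job = std::make_unique<DecodeJob>();
        DecodeJob* raw = job.get();       // borrowed handle handed to the async decoder
        mJobs.push_back(std::move(job));  // the array is now the sole owner
        return raw;
      }

      void Remove(DecodeJob* aJob) {
        // unique_ptr has no operator== against a raw pointer, so search by
        // .get(), exactly as RemoveFromDecodeQueue does above.
        for (size_t i = 0; i < mJobs.size(); ++i) {
          if (mJobs[i].get() == aJob) {
            mJobs.erase(mJobs.begin() + i);  // destroys the job
            break;
          }
        }
      }
    };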

View file

@ -13,6 +13,7 @@
#include "mozilla/DOMEventTargetHelper.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/dom/TypedArray.h"
#include "mozilla/UniquePtr.h"
#include "nsCOMPtr.h"
#include "nsCycleCollectionParticipant.h"
#include "nsHashKeys.h"
@ -348,7 +349,7 @@ private:
AudioContextState mAudioContextState;
RefPtr<AudioDestinationNode> mDestination;
RefPtr<AudioListener> mListener;
-  nsTArray<RefPtr<WebAudioDecodeJob> > mDecodeJobs;
+  nsTArray<UniquePtr<WebAudioDecodeJob> > mDecodeJobs;
// This array is used to keep the suspend/resume/close promises alive until
// they are resolved, so we can safely pass them across threads.
nsTArray<RefPtr<Promise>> mPromiseGripArray;

View file

@ -34,27 +34,6 @@ namespace mozilla {
extern LazyLogModule gMediaDecoderLog;
NS_IMPL_CYCLE_COLLECTION_CLASS(WebAudioDecodeJob)
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(WebAudioDecodeJob)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mContext)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutput)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mSuccessCallback)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mFailureCallback)
NS_IMPL_CYCLE_COLLECTION_UNLINK_END
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(WebAudioDecodeJob)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mContext)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutput)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSuccessCallback)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mFailureCallback)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(WebAudioDecodeJob)
NS_IMPL_CYCLE_COLLECTION_TRACE_END
NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(WebAudioDecodeJob, AddRef)
NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(WebAudioDecodeJob, Release)
using namespace dom;
class ReportResultTask final : public Runnable

View file

@ -35,9 +35,7 @@ struct WebAudioDecodeJob final
dom::Promise* aPromise,
dom::DecodeSuccessCallback* aSuccessCallback = nullptr,
dom::DecodeErrorCallback* aFailureCallback = nullptr);
-  NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(WebAudioDecodeJob)
-  NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(WebAudioDecodeJob)
+  ~WebAudioDecodeJob();
enum ErrorCode {
NoError,
@ -65,9 +63,6 @@ struct WebAudioDecodeJob final
RefPtr<dom::DecodeErrorCallback> mFailureCallback; // can be null
RefPtr<dom::AudioBuffer> mOutput;
RefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
-private:
-  ~WebAudioDecodeJob();
};
void AsyncDecodeWebAudio(const char* aContentType, uint8_t* aBuffer,

View file

@ -1140,6 +1140,13 @@ ModuleBuilder::processExport(frontend::ParseNode* pn)
bool isDefault = pn->getKind() == PNK_EXPORT_DEFAULT;
ParseNode* kid = isDefault ? pn->pn_left : pn->pn_kid;
if (isDefault && pn->pn_right) {
// This is an export default containing an expression.
RootedAtom localName(cx_, cx_->names().starDefaultStar);
RootedAtom exportName(cx_, cx_->names().default_);
return appendExportEntry(exportName, localName);
}
switch (kid->getKind()) {
case PNK_EXPORT_SPEC_LIST:
MOZ_ASSERT(!isDefault);
@ -1153,53 +1160,46 @@ ModuleBuilder::processExport(frontend::ParseNode* pn)
break;
      case PNK_CLASS: {
        const ClassNode& cls = kid->as<ClassNode>();
        MOZ_ASSERT(cls.names());
        RootedAtom localName(cx_, cls.names()->innerBinding()->pn_atom);
        RootedAtom exportName(cx_, isDefault ? cx_->names().default_ : localName.get());
        if (!appendExportEntry(exportName, localName))
            return false;
        break;
      }

      case PNK_VAR:
      case PNK_CONST:
      case PNK_LET: {
        MOZ_ASSERT(kid->isArity(PN_LIST));
        for (ParseNode* var = kid->pn_head; var; var = var->pn_next) {
            if (var->isKind(PNK_ASSIGN))
                var = var->pn_left;
            MOZ_ASSERT(var->isKind(PNK_NAME));
            RootedAtom localName(cx_, var->pn_atom);
            RootedAtom exportName(cx_, isDefault ? cx_->names().default_ : localName.get());
            if (!appendExportEntry(exportName, localName))
                return false;
        }
        break;
      }

      case PNK_FUNCTION: {
-       RootedFunction func(cx_, kid->pn_funbox->function());
-       if (!func->isArrow()) {
-           RootedAtom localName(cx_, func->explicitName());
-           RootedAtom exportName(cx_, isDefault ? cx_->names().default_ : localName.get());
-           MOZ_ASSERT_IF(isDefault, localName);
-           if (!appendExportEntry(exportName, localName))
-               return false;
-           break;
-       }
-     }
-
-     MOZ_FALLTHROUGH; // Arrow functions are handled below.
-
-     default:
-       MOZ_ASSERT(isDefault);
-       RootedAtom localName(cx_, cx_->names().starDefaultStar);
-       RootedAtom exportName(cx_, cx_->names().default_);
+       RootedFunction func(cx_, kid->pn_funbox->function());
+       MOZ_ASSERT(!func->isArrow());
+       RootedAtom localName(cx_, func->explicitName());
+       RootedAtom exportName(cx_, isDefault ? cx_->names().default_ : localName.get());
+       MOZ_ASSERT_IF(isDefault, localName);
        if (!appendExportEntry(exportName, localName))
            return false;
        break;
      }

+     default:
+       MOZ_CRASH("Unexpected parse node");
    }
return true;
}
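The new early return handles "export default <expression>" (including a parenthesized class, as the fuzz test in the next file exercises): the value is bound to the internal name *default* and exported under the name "default". A hypothetical standalone sketch of that name mapping, with plain std::string stand-ins for the SpiderMonkey atoms:

    #include <string>

    struct ExportEntry {
      std::string exportName;  // name visible to importers
      std::string localName;   // binding inside the module environment
    };

    // For "export default <expression>" there is no declared name to reuse,
    // so the module stores the value under the reserved binding "*default*"
    // (cx_->names().starDefaultStar) and exports it as "default".
    ExportEntry DefaultExpressionEntry() {
      return { "default", "*default*" };
    }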

View file

@ -0,0 +1,2 @@
parseModule("export default (class {})");
parseModule("export default (class A {})");

View file

@ -1377,6 +1377,42 @@ class MacroAssembler : public MacroAssemblerSpecific
void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr) DEFINED_ON(x86, x64);
void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Operand dstAddr) DEFINED_ON(x86);
// For all the ARM wasmLoad and wasmStore functions, `ptr` MUST equal
// `ptrScratch`, and that register will be updated based on conditions
// listed below (where it is only mentioned as `ptr`).
// `ptr` will be updated if access.offset() != 0 or access.type() == Scalar::Int64.
void wasmLoad(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch, AnyRegister output) DEFINED_ON(arm);
void wasmLoadI64(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch, Register64 output) DEFINED_ON(arm);
void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Register ptr, Register ptrScratch) DEFINED_ON(arm);
void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Register ptr, Register ptrScratch) DEFINED_ON(arm);
// `ptr` will always be updated.
void wasmUnalignedLoad(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch,
Register output, Register tmp) DEFINED_ON(arm);
// `ptr` will always be updated and `tmp1` is always needed. `tmp2` is
// needed for Float32; `tmp2` and `tmp3` are needed for Float64. Temps must
// be Invalid when they are not needed.
void wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch,
FloatRegister output, Register tmp1, Register tmp2, Register tmp3) DEFINED_ON(arm);
// `ptr` will always be updated.
void wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch,
Register64 output, Register tmp) DEFINED_ON(arm);
// `ptr` and `value` will always be updated.
void wasmUnalignedStore(const wasm::MemoryAccessDesc& access, Register value, Register ptr, Register ptrScratch)
DEFINED_ON(arm);
// `ptr` will always be updated.
void wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access, FloatRegister floatValue, Register ptr,
Register ptrScratch, Register tmp) DEFINED_ON(arm);
// `ptr` will always be updated.
void wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Register ptr, Register ptrScratch,
Register tmp) DEFINED_ON(arm);
// wasm specific methods, used in both the wasm baseline compiler and ion.
void wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86, x64, arm);
void wasmTruncateDoubleToInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86_shared, arm);
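The temp conventions stated in the comments above can be summarized mechanically. A small sketch of the rule a caller (such as the baseline compiler's loadTemps(), later in this commit) must follow when allocating temps for unaligned ARM loads; the enum is illustrative only:

    enum class AccessType { I8, I16, I32, I64, F32, F64 };

    // tmp1 is always needed; tmp2 is added for Float32; tmp2 and tmp3 for
    // Float64. Unneeded temps must be passed as Register::Invalid().
    constexpr int UnalignedLoadTemps(AccessType t) {
      return t == AccessType::F32 ? 2
           : t == AccessType::F64 ? 3
           : 1;
    }

    static_assert(UnalignedLoadTemps(AccessType::F64) == 3, "doubles need three temps");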

View file

@ -2366,69 +2366,20 @@ void
CodeGeneratorARM::emitWasmLoad(T* lir)
{
    const MWasmLoad* mir = lir->mir();
+   MIRType resultType = mir->type();
+   Register ptr;
-   uint32_t offset = mir->access().offset();
-   MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
-   Register ptr = ToRegister(lir->ptr());
-   Scalar::Type type = mir->access().type();
-   // Maybe add the offset.
-   if (offset || type == Scalar::Int64) {
-       ScratchRegisterScope scratch(masm);
-       Register ptrPlusOffset = ToRegister(lir->ptrCopy());
-       if (offset)
-           masm.ma_add(Imm32(offset), ptrPlusOffset, scratch);
-       ptr = ptrPlusOffset;
+   if (mir->access().offset() || mir->access().type() == Scalar::Int64) {
+       ptr = ToRegister(lir->ptrCopy());
    } else {
        MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+       ptr = ToRegister(lir->ptr());
    }
-   bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32 ||
-                   type == Scalar::Int64;
-   unsigned byteSize = mir->access().byteSize();
-   masm.memoryBarrier(mir->access().barrierBefore());
-   BufferOffset load;
-   if (mir->type() == MIRType::Int64) {
-       Register64 output = ToOutRegister64(lir);
-       if (type == Scalar::Int64) {
-           MOZ_ASSERT(INT64LOW_OFFSET == 0);
-           load = masm.ma_dataTransferN(IsLoad, 32, /* signed = */ false, HeapReg, ptr, output.low);
-           masm.append(mir->access(), load.getOffset(), masm.framePushed());
-           masm.as_add(ptr, ptr, Imm8(INT64HIGH_OFFSET));
-           load = masm.ma_dataTransferN(IsLoad, 32, isSigned, HeapReg, ptr, output.high);
-           masm.append(mir->access(), load.getOffset(), masm.framePushed());
-       } else {
-           load = masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.low);
-           masm.append(mir->access(), load.getOffset(), masm.framePushed());
-           if (isSigned)
-               masm.ma_asr(Imm32(31), output.low, output.high);
-           else
-               masm.ma_mov(Imm32(0), output.high);
-       }
-   } else {
-       AnyRegister output = ToAnyRegister(lir->output());
-       bool isFloat = output.isFloat();
-       if (isFloat) {
-           MOZ_ASSERT((byteSize == 4) == output.fpu().isSingle());
-           ScratchRegisterScope scratch(masm);
-           masm.ma_add(HeapReg, ptr, scratch);
-           load = masm.ma_vldr(Operand(Address(scratch, 0)).toVFPAddr(), output.fpu());
-           masm.append(mir->access(), load.getOffset(), masm.framePushed());
-       } else {
-           load = masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.gpr());
-           masm.append(mir->access(), load.getOffset(), masm.framePushed());
-       }
-   }
-   masm.memoryBarrier(mir->access().barrierAfter());
+   if (resultType == MIRType::Int64)
+       masm.wasmLoadI64(mir->access(), ptr, ptr, ToOutRegister64(lir));
+   else
+       masm.wasmLoad(mir->access(), ptr, ptr, ToAnyRegister(lir->output()));
}
void
@ -2448,71 +2399,22 @@ void
CodeGeneratorARM::emitWasmUnalignedLoad(T* lir)
{
const MWasmLoad* mir = lir->mir();
-   uint32_t offset = mir->access().offset();
-   MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+   MIRType resultType = mir->type();
    Register ptr = ToRegister(lir->ptrCopy());
-   if (offset) {
-       ScratchRegisterScope scratch(masm);
-       masm.ma_add(Imm32(offset), ptr, scratch);
-   }
+   Register tmp1 = ToRegister(lir->getTemp(1));
+   if (resultType == MIRType::Int64) {
+       masm.wasmUnalignedLoadI64(mir->access(), ptr, ptr, ToOutRegister64(lir), tmp1);
+   } else if (IsFloatingPointType(resultType)) {
+       Register tmp2(ToRegister(lir->getTemp(2)));
+       Register tmp3(Register::Invalid());
+       if (mir->access().byteSize() == 8)
+           tmp3 = ToRegister(lir->getTemp(3));
+       masm.wasmUnalignedLoadFP(mir->access(), ptr, ptr, ToFloatRegister(lir->output()), tmp1, tmp2, tmp3);
+   } else {
+       masm.wasmUnalignedLoad(mir->access(), ptr, ptr, ToRegister(lir->output()), tmp1);
+   }
-   // Add HeapReg to ptr, so we can use base+index addressing in the byte loads.
-   masm.ma_add(HeapReg, ptr);
-   unsigned byteSize = mir->access().byteSize();
-   Scalar::Type type = mir->access().type();
-   bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32 ||
-                   type == Scalar::Int64;
-   MIRType mirType = mir->type();
-   Register tmp = ToRegister(lir->getTemp(1));
-   Register low;
-   if (IsFloatingPointType(mirType))
-       low = ToRegister(lir->getTemp(2));
-   else if (mirType == MIRType::Int64)
-       low = ToOutRegister64(lir).low;
-   else
-       low = ToRegister(lir->output());
-   MOZ_ASSERT(low != tmp);
-   MOZ_ASSERT(low != ptr);
-   masm.memoryBarrier(mir->access().barrierBefore());
-   masm.emitUnalignedLoad(isSigned, Min(byteSize, 4u), ptr, tmp, low);
-   if (IsFloatingPointType(mirType)) {
-       FloatRegister output = ToFloatRegister(lir->output());
-       if (byteSize == 4) {
-           MOZ_ASSERT(output.isSingle());
-           masm.ma_vxfer(low, output);
-       } else {
-           MOZ_ASSERT(byteSize == 8);
-           MOZ_ASSERT(output.isDouble());
-           Register high = ToRegister(lir->getTemp(3));
-           masm.emitUnalignedLoad(/* signed */ false, 4, ptr, tmp, high, /* offset */ 4);
-           masm.ma_vxfer(low, high, output);
-       }
-   } else if (mirType == MIRType::Int64) {
-       Register64 output = ToOutRegister64(lir);
-       if (type == Scalar::Int64) {
-           MOZ_ASSERT(byteSize == 8);
-           masm.emitUnalignedLoad(isSigned, 4, ptr, tmp, output.high, /* offset */ 4);
-       } else {
-           MOZ_ASSERT(byteSize <= 4);
-           // Propagate sign.
-           if (isSigned)
-               masm.ma_asr(Imm32(31), output.low, output.high);
-           else
-               masm.ma_mov(Imm32(0), output.high);
-       }
-   }
-   masm.memoryBarrier(mir->access().barrierAfter());
}
void
@ -2545,60 +2447,22 @@ void
CodeGeneratorARM::emitWasmStore(T* lir)
{
const MWasmStore* mir = lir->mir();
-   uint32_t offset = mir->access().offset();
-   MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
-   Register ptr = ToRegister(lir->ptr());
-   unsigned byteSize = mir->access().byteSize();
-   Scalar::Type type = mir->access().type();
+   Scalar::Type accessType = mir->access().type();
+   Register ptr;
    // Maybe add the offset.
-   if (offset || type == Scalar::Int64) {
-       ScratchRegisterScope scratch(masm);
-       Register ptrPlusOffset = ToRegister(lir->ptrCopy());
-       if (offset)
-           masm.ma_add(Imm32(offset), ptrPlusOffset, scratch);
-       ptr = ptrPlusOffset;
+   if (mir->access().offset() || accessType == Scalar::Int64) {
+       ptr = ToRegister(lir->ptrCopy());
    } else {
        MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+       ptr = ToRegister(lir->ptr());
    }
-   masm.memoryBarrier(mir->access().barrierBefore());
-   BufferOffset store;
-   if (type == Scalar::Int64) {
-       MOZ_ASSERT(INT64LOW_OFFSET == 0);
-       Register64 value = ToRegister64(lir->getInt64Operand(lir->ValueIndex));
-       store = masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ false, HeapReg, ptr, value.low);
-       masm.append(mir->access(), store.getOffset(), masm.framePushed());
-       masm.as_add(ptr, ptr, Imm8(INT64HIGH_OFFSET));
-       store = masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ true, HeapReg, ptr, value.high);
-       masm.append(mir->access(), store.getOffset(), masm.framePushed());
-   } else {
-       AnyRegister value = ToAnyRegister(lir->getOperand(lir->ValueIndex));
-       if (value.isFloat()) {
-           ScratchRegisterScope scratch(masm);
-           FloatRegister val = value.fpu();
-           MOZ_ASSERT((byteSize == 4) == val.isSingle());
-           masm.ma_add(HeapReg, ptr, scratch);
-           store = masm.ma_vstr(val, Operand(Address(scratch, 0)).toVFPAddr());
-           masm.append(mir->access(), store.getOffset(), masm.framePushed());
-       } else {
-           bool isSigned = type == Scalar::Uint32 || type == Scalar::Int32; // see AsmJSStoreHeap;
-           Register val = value.gpr();
-           store = masm.ma_dataTransferN(IsStore, 8 * byteSize /* bits */, isSigned, HeapReg, ptr, val);
-           masm.append(mir->access(), store.getOffset(), masm.framePushed());
-       }
-   }
-   masm.memoryBarrier(mir->access().barrierAfter());
+   if (accessType == Scalar::Int64)
+       masm.wasmStoreI64(mir->access(), ToRegister64(lir->getInt64Operand(lir->ValueIndex)),
+                         ptr, ptr);
+   else
+       masm.wasmStore(mir->access(), ToAnyRegister(lir->getOperand(lir->ValueIndex)), ptr, ptr);
}
void
@ -2618,51 +2482,20 @@ void
CodeGeneratorARM::emitWasmUnalignedStore(T* lir)
{
const MWasmStore* mir = lir->mir();
-   uint32_t offset = mir->access().offset();
-   MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+   Scalar::Type accessType = mir->access().type();
    Register ptr = ToRegister(lir->ptrCopy());
-   if (offset) {
-       ScratchRegisterScope scratch(masm);
-       masm.ma_add(Imm32(offset), ptr, scratch);
-   }
+   Register valOrTmp = ToRegister(lir->valueHelper());
+   if (accessType == Scalar::Int64) {
+       masm.wasmUnalignedStoreI64(mir->access(),
+                                  ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex)),
+                                  ptr, ptr, valOrTmp);
+   } else if (accessType == Scalar::Float32 || accessType == Scalar::Float64) {
+       FloatRegister value = ToFloatRegister(lir->getOperand(LWasmUnalignedStore::ValueIndex));
+       masm.wasmUnalignedStoreFP(mir->access(), value, ptr, ptr, valOrTmp);
+   } else {
+       masm.wasmUnalignedStore(mir->access(), valOrTmp, ptr, ptr);
+   }
-   // Add HeapReg to ptr, so we can use base+index addressing in the byte loads.
-   masm.ma_add(HeapReg, ptr);
-   MIRType mirType = mir->value()->type();
-   masm.memoryBarrier(mir->access().barrierAfter());
-   Register val = ToRegister(lir->valueHelper());
-   if (IsFloatingPointType(mirType)) {
-       masm.ma_vxfer(ToFloatRegister(lir->getOperand(LWasmUnalignedStore::ValueIndex)), val);
-   } else if (mirType == MIRType::Int64) {
-       Register64 input = ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex));
-       if (input.low != val)
-           masm.ma_mov(input.low, val);
-   }
-   unsigned byteSize = mir->access().byteSize();
-   masm.emitUnalignedStore(Min(byteSize, 4u), ptr, val);
-   if (byteSize > 4) {
-       // It's a double or an int64 load.
-       // Load the high 32 bits when counter == 4.
-       if (IsFloatingPointType(mirType)) {
-           FloatRegister fp = ToFloatRegister(lir->getOperand(LWasmUnalignedStore::ValueIndex));
-           MOZ_ASSERT(fp.isDouble());
-           ScratchRegisterScope scratch(masm);
-           masm.ma_vxfer(fp, scratch, val);
-       } else {
-           MOZ_ASSERT(mirType == MIRType::Int64);
-           masm.ma_mov(ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex)).high, val);
-       }
-       masm.emitUnalignedStore(4, ptr, val, /* offset */ 4);
-   }
-   masm.memoryBarrier(mir->access().barrierBefore());
}
void
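Several of the removed paths above (and the MacroAssemblerARM implementations that replace them) widen a sub-word load to Int64 by pairing the low word with either a propagated sign or zero: ma_asr(Imm32(31)) fills the high word with copies of the sign bit, ma_mov(Imm32(0)) zeroes it. In portable C++ terms, roughly:

    #include <cstdint>

    int64_t WidenLoadedWord(int32_t low, bool isSigned) {
      // ma_asr(Imm32(31), low, high): high = low >> 31 (all sign bits).
      // ma_mov(Imm32(0), high):       high = 0.
      int32_t high = isSigned ? (low >> 31) : 0;
      return (int64_t(high) << 32) | uint32_t(low);
    }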

View file

@ -5436,6 +5436,80 @@ MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input, Register output,
wasmTruncateToInt32(input, output, MIRType::Float32, /* isUnsigned= */ false, oolEntry);
}
void
MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch,
AnyRegister output)
{
wasmLoadImpl(access, ptr, ptrScratch, output, Register64::Invalid());
}
void
MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch,
Register64 output)
{
wasmLoadImpl(access, ptr, ptrScratch, AnyRegister(), output);
}
void
MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Register ptr,
Register ptrScratch)
{
wasmStoreImpl(access, value, Register64::Invalid(), ptr, ptrScratch);
}
void
MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Register ptr,
Register ptrScratch)
{
wasmStoreImpl(access, AnyRegister(), value, ptr, ptrScratch);
}
void
MacroAssembler::wasmUnalignedLoad(const wasm::MemoryAccessDesc& access, Register ptr,
Register ptrScratch, Register output, Register tmp)
{
wasmUnalignedLoadImpl(access, ptr, ptrScratch, AnyRegister(output), Register64::Invalid(), tmp,
Register::Invalid(), Register::Invalid());
}
void
MacroAssembler::wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access, Register ptr,
Register ptrScratch, FloatRegister outFP, Register tmp1,
Register tmp2, Register tmp3)
{
wasmUnalignedLoadImpl(access, ptr, ptrScratch, AnyRegister(outFP), Register64::Invalid(),
tmp1, tmp2, tmp3);
}
void
MacroAssembler::wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access, Register ptr,
Register ptrScratch, Register64 out64, Register tmp)
{
wasmUnalignedLoadImpl(access, ptr, ptrScratch, AnyRegister(), out64, tmp, Register::Invalid(),
Register::Invalid());
}
void
MacroAssembler::wasmUnalignedStore(const wasm::MemoryAccessDesc& access, Register value,
Register ptr, Register ptrScratch)
{
wasmUnalignedStoreImpl(access, FloatRegister(), Register64::Invalid(), ptr, ptrScratch, value);
}
void
MacroAssembler::wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access, FloatRegister floatVal,
Register ptr, Register ptrScratch, Register tmp)
{
wasmUnalignedStoreImpl(access, floatVal, Register64::Invalid(), ptr, ptrScratch, tmp);
}
void
MacroAssembler::wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access, Register64 val64,
Register ptr, Register ptrScratch, Register tmp)
{
wasmUnalignedStoreImpl(access, FloatRegister(), val64, ptr, ptrScratch, tmp);
}
//}}} check_macroassembler_style
void
@ -5580,6 +5654,248 @@ MacroAssemblerARM::outOfLineWasmTruncateToIntCheck(FloatRegister input, MIRType
asMasm().framePushed()));
}
void
MacroAssemblerARM::wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register ptr,
Register ptrScratch, AnyRegister output, Register64 out64)
{
MOZ_ASSERT(ptr == ptrScratch);
uint32_t offset = access.offset();
MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
Scalar::Type type = access.type();
// Maybe add the offset.
if (offset || type == Scalar::Int64) {
ScratchRegisterScope scratch(asMasm());
if (offset)
ma_add(Imm32(offset), ptr, scratch);
}
bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32 ||
type == Scalar::Int64;
unsigned byteSize = access.byteSize();
asMasm().memoryBarrier(access.barrierBefore());
uint32_t framePushed = asMasm().framePushed();
BufferOffset load;
if (out64 != Register64::Invalid()) {
if (type == Scalar::Int64) {
MOZ_ASSERT(INT64LOW_OFFSET == 0);
load = ma_dataTransferN(IsLoad, 32, /* signed = */ false, HeapReg, ptr, out64.low);
append(access, load.getOffset(), framePushed);
as_add(ptr, ptr, Imm8(INT64HIGH_OFFSET));
load = ma_dataTransferN(IsLoad, 32, isSigned, HeapReg, ptr, out64.high);
append(access, load.getOffset(), framePushed);
} else {
load = ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, out64.low);
append(access, load.getOffset(), framePushed);
if (isSigned)
ma_asr(Imm32(31), out64.low, out64.high);
else
ma_mov(Imm32(0), out64.high);
}
} else {
bool isFloat = output.isFloat();
if (isFloat) {
MOZ_ASSERT((byteSize == 4) == output.fpu().isSingle());
ScratchRegisterScope scratch(asMasm());
ma_add(HeapReg, ptr, scratch);
load = ma_vldr(Operand(Address(scratch, 0)).toVFPAddr(), output.fpu());
append(access, load.getOffset(), framePushed);
} else {
load = ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.gpr());
append(access, load.getOffset(), framePushed);
}
}
asMasm().memoryBarrier(access.barrierAfter());
}
void
MacroAssemblerARM::wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister value,
Register64 val64, Register ptr, Register ptrScratch)
{
MOZ_ASSERT(ptr == ptrScratch);
uint32_t offset = access.offset();
MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
unsigned byteSize = access.byteSize();
Scalar::Type type = access.type();
// Maybe add the offset.
if (offset || type == Scalar::Int64) {
ScratchRegisterScope scratch(asMasm());
if (offset)
ma_add(Imm32(offset), ptr, scratch);
}
asMasm().memoryBarrier(access.barrierBefore());
uint32_t framePushed = asMasm().framePushed();
BufferOffset store;
if (type == Scalar::Int64) {
MOZ_ASSERT(INT64LOW_OFFSET == 0);
store = ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ false, HeapReg, ptr, val64.low);
append(access, store.getOffset(), framePushed);
as_add(ptr, ptr, Imm8(INT64HIGH_OFFSET));
store = ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ true, HeapReg, ptr, val64.high);
append(access, store.getOffset(), framePushed);
} else {
if (value.isFloat()) {
ScratchRegisterScope scratch(asMasm());
FloatRegister val = value.fpu();
MOZ_ASSERT((byteSize == 4) == val.isSingle());
ma_add(HeapReg, ptr, scratch);
store = ma_vstr(val, Operand(Address(scratch, 0)).toVFPAddr());
append(access, store.getOffset(), framePushed);
} else {
bool isSigned = type == Scalar::Uint32 || type == Scalar::Int32; // see AsmJSStoreHeap;
Register val = value.gpr();
store = ma_dataTransferN(IsStore, 8 * byteSize /* bits */, isSigned, HeapReg, ptr, val);
append(access, store.getOffset(), framePushed);
}
}
asMasm().memoryBarrier(access.barrierAfter());
}
void
MacroAssemblerARM::wasmUnalignedLoadImpl(const wasm::MemoryAccessDesc& access, Register ptr,
Register ptrScratch, AnyRegister outAny, Register64 out64,
Register tmp, Register tmp2, Register tmp3)
{
MOZ_ASSERT(ptr == ptrScratch);
MOZ_ASSERT_IF(access.type() != Scalar::Float32 && access.type() != Scalar::Float64,
tmp2 == Register::Invalid() && tmp3 == Register::Invalid());
MOZ_ASSERT_IF(access.type() == Scalar::Float32,
tmp2 != Register::Invalid() && tmp3 == Register::Invalid());
MOZ_ASSERT_IF(access.type() == Scalar::Float64,
tmp2 != Register::Invalid() && tmp3 != Register::Invalid());
uint32_t offset = access.offset();
MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
if (offset) {
ScratchRegisterScope scratch(asMasm());
ma_add(Imm32(offset), ptr, scratch);
}
// Add HeapReg to ptr, so we can use base+index addressing in the byte loads.
ma_add(HeapReg, ptr);
unsigned byteSize = access.byteSize();
Scalar::Type type = access.type();
bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32 ||
type == Scalar::Int64;
Register low;
if (out64 != Register64::Invalid())
low = out64.low;
else if (outAny.isFloat())
low = tmp2;
else
low = outAny.gpr();
MOZ_ASSERT(low != tmp);
MOZ_ASSERT(low != ptr);
asMasm().memoryBarrier(access.barrierBefore());
emitUnalignedLoad(isSigned, Min(byteSize, 4u), ptr, tmp, low);
if (out64 != Register64::Invalid()) {
if (type == Scalar::Int64) {
MOZ_ASSERT(byteSize == 8);
emitUnalignedLoad(isSigned, 4, ptr, tmp, out64.high, /* offset */ 4);
} else {
MOZ_ASSERT(byteSize <= 4);
// Propagate sign.
if (isSigned)
ma_asr(Imm32(31), out64.low, out64.high);
else
ma_mov(Imm32(0), out64.high);
}
} else if (outAny.isFloat()) {
FloatRegister output = outAny.fpu();
if (byteSize == 4) {
MOZ_ASSERT(output.isSingle());
ma_vxfer(low, output);
} else {
MOZ_ASSERT(byteSize == 8);
MOZ_ASSERT(output.isDouble());
Register high = tmp3;
emitUnalignedLoad(/* signed */ false, 4, ptr, tmp, high, /* offset */ 4);
ma_vxfer(low, high, output);
}
}
asMasm().memoryBarrier(access.barrierAfter());
}
void
MacroAssemblerARM::wasmUnalignedStoreImpl(const wasm::MemoryAccessDesc& access, FloatRegister floatValue,
Register64 val64, Register ptr, Register ptrScratch, Register tmp)
{
MOZ_ASSERT(ptr == ptrScratch);
// They can't both be valid, but they can both be invalid.
MOZ_ASSERT_IF(!floatValue.isInvalid(), val64 == Register64::Invalid());
MOZ_ASSERT_IF(val64 != Register64::Invalid(), floatValue.isInvalid());
uint32_t offset = access.offset();
MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
unsigned byteSize = access.byteSize();
if (offset) {
ScratchRegisterScope scratch(asMasm());
ma_add(Imm32(offset), ptr, scratch);
}
// Add HeapReg to ptr, so we can use base+index addressing in the byte loads.
ma_add(HeapReg, ptr);
asMasm().memoryBarrier(access.barrierBefore());
if (val64 != Register64::Invalid()) {
if (val64.low != tmp)
ma_mov(val64.low, tmp);
} else if (!floatValue.isInvalid()) {
ma_vxfer(floatValue, tmp);
}
// Otherwise, tmp has the integer value to store.
emitUnalignedStore(Min(byteSize, 4u), ptr, tmp);
if (byteSize > 4) {
if (val64 != Register64::Invalid()) {
if (val64.high != tmp)
ma_mov(val64.high, tmp);
} else {
MOZ_ASSERT(!floatValue.isInvalid());
MOZ_ASSERT(floatValue.isDouble());
ScratchRegisterScope scratch(asMasm());
ma_vxfer(floatValue, scratch, tmp);
}
emitUnalignedStore(4, ptr, tmp, /* offset */ 4);
}
asMasm().memoryBarrier(access.barrierAfter());
}
void
MacroAssemblerARM::emitUnalignedLoad(bool isSigned, unsigned byteSize, Register ptr, Register tmp,
Register dest, unsigned offset)
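emitUnalignedLoad, whose definition begins above, is the primitive these unaligned paths bottom out in: it reads byteSize bytes one at a time and applies the indicated signedness, so the access never relies on hardware support for unaligned words. A standalone C++ sketch of the runtime effect (little-endian, as on ARM):

    #include <cstdint>

    int32_t UnalignedLoad(const uint8_t* p, unsigned byteSize, bool isSigned) {
      // byteSize is 1, 2 or 4; assemble the value low byte first.
      uint32_t value = 0;
      for (unsigned i = 0; i < byteSize; i++)
        value |= uint32_t(p[i]) << (8 * i);

      if (isSigned && byteSize < 4) {
        unsigned shift = 32 - 8 * byteSize;
        return int32_t(value << shift) >> shift;  // sign-extend from byteSize*8 bits
      }
      return int32_t(value);
    }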

View file

@ -455,6 +455,27 @@ class MacroAssemblerARM : public Assembler
MOZ_CRASH("Invalid data transfer addressing mode");
}
// `outAny` is valid if and only if `out64` == Register64::Invalid().
void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch,
AnyRegister outAny, Register64 out64);
// `valAny` is valid if and only if `val64` == Register64::Invalid().
void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister valAny, Register64 val64,
Register ptr, Register ptrScratch);
protected:
// `outAny` is valid if and only if `out64` == Register64::Invalid().
void wasmUnalignedLoadImpl(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch,
AnyRegister outAny, Register64 out64, Register tmp1, Register tmp2,
Register tmp3);
// The value to be stored is in `floatValue` (if not invalid), `val64` (if not invalid),
// or in `valOrTmp` (if `floatValue` and `val64` are both invalid). Note `valOrTmp` must
// always be valid.
void wasmUnalignedStoreImpl(const wasm::MemoryAccessDesc& access, FloatRegister floatValue,
Register64 val64, Register ptr, Register ptrScratch, Register valOrTmp);
private:
// Loads `byteSize` bytes, byte by byte, by reading from ptr[offset],
// applying the indicated signedness (defined by isSigned).
// - all three registers must be different.
@ -469,7 +490,6 @@ class MacroAssemblerARM : public Assembler
// - byteSize can be up to 4 bytes and no more (GPR are 32 bits on ARM).
void emitUnalignedStore(unsigned byteSize, Register ptr, Register val, unsigned offset = 0);
private:
// Implementation for transferMultipleByRuns so we can use different
// iterators for forward/backward traversals. The sign argument should be 1
// if we traverse forwards, -1 if we traverse backwards.

View file

@ -3418,16 +3418,16 @@ class BaseCompiler
}
// This is the temp register passed as the last argument to load()
-   MOZ_MUST_USE size_t loadStoreTemps(MemoryAccessDesc& access) {
+   MOZ_MUST_USE size_t loadTemps(MemoryAccessDesc& access) {
#if defined(JS_CODEGEN_ARM)
        if (access.isUnaligned()) {
            switch (access.type()) {
              case Scalar::Float32:
-               return 1;
-             case Scalar::Float64:
                return 2;
+             case Scalar::Float64:
+               return 3;
              default:
-               break;
+               return 1;
            }
        }
#endif
        return 0;
@ -3438,8 +3438,8 @@ class BaseCompiler
// ptr and dest may be the same iff dest is I32.
// This may destroy ptr even if ptr and dest are not the same.
-   MOZ_MUST_USE bool load(MemoryAccessDesc& access, RegI32 ptr, bool omitBoundsCheck,
-                          AnyReg dest, RegI32 tmp1, RegI32 tmp2)
+   MOZ_MUST_USE bool load(MemoryAccessDesc& access, RegI32 ptr, bool omitBoundsCheck, AnyReg dest,
+                          RegI32 tmp1, RegI32 tmp2, RegI32 tmp3)
{
checkOffset(&access, ptr);
@ -3481,40 +3481,26 @@ class BaseCompiler
masm.mov(ScratchRegX86, dest.i32());
}
#elif defined(JS_CODEGEN_ARM)
-       if (access.offset() != 0)
-           masm.add32(Imm32(access.offset()), ptr);
-
-       bool isSigned = true;
-       switch (access.type()) {
-         case Scalar::Uint8:
-         case Scalar::Uint16:
-         case Scalar::Uint32: {
-           isSigned = false;
-           MOZ_FALLTHROUGH;
-         case Scalar::Int8:
-         case Scalar::Int16:
-         case Scalar::Int32:
-           Register rt = dest.tag == AnyReg::I64 ? dest.i64().low : dest.i32();
-           loadI32(access, isSigned, ptr, rt);
-           if (dest.tag == AnyReg::I64) {
-               if (isSigned)
-                   masm.ma_asr(Imm32(31), rt, dest.i64().high);
-               else
-                   masm.move32(Imm32(0), dest.i64().high);
-           }
-           break;
-         }
-         case Scalar::Int64:
-           loadI64(access, ptr, dest.i64());
-           break;
-         case Scalar::Float32:
-           loadF32(access, ptr, dest.f32(), tmp1);
-           break;
-         case Scalar::Float64:
-           loadF64(access, ptr, dest.f64(), tmp1, tmp2);
-           break;
-         default:
-           MOZ_CRASH("Compiler bug: unexpected array type");
-       }
+       if (access.isUnaligned()) {
+           switch (dest.tag) {
+             case AnyReg::I64:
+               masm.wasmUnalignedLoadI64(access, ptr, ptr, dest.i64(), tmp1);
+               break;
+             case AnyReg::F32:
+               masm.wasmUnalignedLoadFP(access, ptr, ptr, dest.f32(), tmp1, tmp2, Register::Invalid());
+               break;
+             case AnyReg::F64:
+               masm.wasmUnalignedLoadFP(access, ptr, ptr, dest.f64(), tmp1, tmp2, tmp3);
+               break;
+             default:
+               masm.wasmUnalignedLoad(access, ptr, ptr, dest.i32(), tmp1);
+               break;
+           }
+       } else {
+           if (dest.tag == AnyReg::I64)
+               masm.wasmLoadI64(access, ptr, ptr, dest.i64());
+           else
+               masm.wasmLoad(access, ptr, ptr, dest.any());
+       }
#else
MOZ_CRASH("BaseCompiler platform hook: load");
@ -3525,10 +3511,21 @@ class BaseCompiler
return true;
}
MOZ_MUST_USE size_t storeTemps(MemoryAccessDesc& access) {
#if defined(JS_CODEGEN_ARM)
if (access.isUnaligned()) {
// See comment in store() about how this temp could be avoided for
// unaligned i8/i16/i32 stores with some restructuring elsewhere.
return 1;
}
#endif
return 0;
}
// ptr and src must not be the same register.
-   // This may destroy ptr.
-   MOZ_MUST_USE bool store(MemoryAccessDesc access, RegI32 ptr, bool omitBoundsCheck,
-                           AnyReg src, RegI32 tmp1, RegI32 tmp2)
+   // This may destroy ptr but will not destroy src.
+   MOZ_MUST_USE bool store(MemoryAccessDesc access, RegI32 ptr, bool omitBoundsCheck, AnyReg src,
+                           RegI32 tmp)
{
checkOffset(&access, ptr);
@ -3571,36 +3568,36 @@ class BaseCompiler
masm.wasmStore(access, value, dstAddr);
}
#elif defined(JS_CODEGEN_ARM)
-       if (access.offset() != 0)
-           masm.add32(Imm32(access.offset()), ptr);
-
-       switch (access.type()) {
-         case Scalar::Uint8:
-           MOZ_FALLTHROUGH;
-         case Scalar::Uint16:
-           MOZ_FALLTHROUGH;
-         case Scalar::Int8:
-           MOZ_FALLTHROUGH;
-         case Scalar::Int16:
-           MOZ_FALLTHROUGH;
-         case Scalar::Int32:
-           MOZ_FALLTHROUGH;
-         case Scalar::Uint32: {
-           Register rt = src.tag == AnyReg::I64 ? src.i64().low : src.i32();
-           storeI32(access, ptr, rt);
-           break;
-         }
-         case Scalar::Int64:
-           storeI64(access, ptr, src.i64());
-           break;
-         case Scalar::Float32:
-           storeF32(access, ptr, src.f32(), tmp1);
-           break;
-         case Scalar::Float64:
-           storeF64(access, ptr, src.f64(), tmp1, tmp2);
-           break;
-         default:
-           MOZ_CRASH("Compiler bug: unexpected array type");
-       }
+       if (access.isUnaligned()) {
+           // TODO / OPTIMIZE (bug 1331264): We perform the copy on the i32
+           // path (and allocate the temp for the copy) because we will destroy
+           // the value in the temp. We could avoid the copy and the temp if
+           // the caller would instead preserve src when it needs to return its
+           // value as a result (for teeStore). If unaligned accesses are
+           // common it will be worthwhile to make that change, but there's no
+           // evidence yet that they will be common.
+           switch (src.tag) {
+             case AnyReg::I64:
+               masm.wasmUnalignedStoreI64(access, src.i64(), ptr, ptr, tmp);
+               break;
+             case AnyReg::F32:
+               masm.wasmUnalignedStoreFP(access, src.f32(), ptr, ptr, tmp);
+               break;
+             case AnyReg::F64:
+               masm.wasmUnalignedStoreFP(access, src.f64(), ptr, ptr, tmp);
+               break;
+             default:
+               moveI32(src.i32(), tmp);
+               masm.wasmUnalignedStore(access, tmp, ptr, ptr);
+               break;
+           }
+       } else {
+           if (access.type() == Scalar::Int64)
+               masm.wasmStoreI64(access, src.i64(), ptr, ptr);
+           else if (src.tag == AnyReg::I64)
+               masm.wasmStore(access, AnyRegister(src.i64().low), ptr, ptr);
+           else
+               masm.wasmStore(access, src.any(), ptr, ptr);
+       }
#else
MOZ_CRASH("BaseCompiler platform hook: store");
@ -3612,131 +3609,6 @@ class BaseCompiler
return true;
}
#ifdef JS_CODEGEN_ARM
void
loadI32(MemoryAccessDesc access, bool isSigned, RegI32 ptr, Register rt) {
if (access.byteSize() > 1 && access.isUnaligned()) {
masm.add32(HeapReg, ptr);
SecondScratchRegisterScope scratch(*this);
masm.emitUnalignedLoad(isSigned, access.byteSize(), ptr, scratch, rt, 0);
} else {
BufferOffset ld =
masm.ma_dataTransferN(js::jit::IsLoad, BitSize(access.byteSize()*8),
isSigned, HeapReg, ptr, rt, Offset, Assembler::Always);
masm.append(access, ld.getOffset(), masm.framePushed());
}
}
void
storeI32(MemoryAccessDesc access, RegI32 ptr, Register rt) {
if (access.byteSize() > 1 && access.isUnaligned()) {
masm.add32(HeapReg, ptr);
masm.emitUnalignedStore(access.byteSize(), ptr, rt, 0);
} else {
BufferOffset st =
masm.ma_dataTransferN(js::jit::IsStore, BitSize(access.byteSize()*8),
IsSigned(false), ptr, HeapReg, rt, Offset,
Assembler::Always);
masm.append(access, st.getOffset(), masm.framePushed());
}
}
void
loadI64(MemoryAccessDesc access, RegI32 ptr, RegI64 dest) {
if (access.isUnaligned()) {
masm.add32(HeapReg, ptr);
SecondScratchRegisterScope scratch(*this);
masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr, scratch, dest.low,
0);
masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr, scratch, dest.high,
4);
} else {
BufferOffset ld;
ld = masm.ma_dataTransferN(js::jit::IsLoad, BitSize(32), IsSigned(false), HeapReg,
ptr, dest.low, Offset, Assembler::Always);
masm.append(access, ld.getOffset(), masm.framePushed());
masm.add32(Imm32(4), ptr);
ld = masm.ma_dataTransferN(js::jit::IsLoad, BitSize(32), IsSigned(false), HeapReg,
ptr, dest.high, Offset, Assembler::Always);
masm.append(access, ld.getOffset(), masm.framePushed());
}
}
void
storeI64(MemoryAccessDesc access, RegI32 ptr, RegI64 src) {
if (access.isUnaligned()) {
masm.add32(HeapReg, ptr);
masm.emitUnalignedStore(ByteSize(4), ptr, src.low, 0);
masm.emitUnalignedStore(ByteSize(4), ptr, src.high, 4);
} else {
BufferOffset st;
st = masm.ma_dataTransferN(js::jit::IsStore, BitSize(32), IsSigned(false), HeapReg,
ptr, src.low, Offset, Assembler::Always);
masm.append(access, st.getOffset(), masm.framePushed());
masm.add32(Imm32(4), ptr);
st = masm.ma_dataTransferN(js::jit::IsStore, BitSize(32), IsSigned(false), HeapReg,
ptr, src.high, Offset, Assembler::Always);
masm.append(access, st.getOffset(), masm.framePushed());
}
}
void
loadF32(MemoryAccessDesc access, RegI32 ptr, RegF32 dest, RegI32 tmp1) {
masm.add32(HeapReg, ptr);
if (access.isUnaligned()) {
SecondScratchRegisterScope scratch(*this);
masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr, scratch, tmp1, 0);
masm.ma_vxfer(tmp1, dest);
} else {
BufferOffset ld = masm.ma_vldr(VFPAddr(ptr, VFPOffImm(0)), dest,
Assembler::Always);
masm.append(access, ld.getOffset(), masm.framePushed());
}
}
void
storeF32(MemoryAccessDesc access, RegI32 ptr, RegF32 src, RegI32 tmp1) {
masm.add32(HeapReg, ptr);
if (access.isUnaligned()) {
masm.ma_vxfer(src, tmp1);
masm.emitUnalignedStore(ByteSize(4), ptr, tmp1, 0);
} else {
BufferOffset st =
masm.ma_vstr(src, VFPAddr(ptr, VFPOffImm(0)), Assembler::Always);
masm.append(access, st.getOffset(), masm.framePushed());
}
}
void
loadF64(MemoryAccessDesc access, RegI32 ptr, RegF64 dest, RegI32 tmp1, RegI32 tmp2) {
masm.add32(HeapReg, ptr);
if (access.isUnaligned()) {
SecondScratchRegisterScope scratch(*this);
masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr, scratch, tmp1, 0);
masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr, scratch, tmp2, 4);
masm.ma_vxfer(tmp1, tmp2, dest);
} else {
BufferOffset ld = masm.ma_vldr(VFPAddr(ptr, VFPOffImm(0)), dest,
Assembler::Always);
masm.append(access, ld.getOffset(), masm.framePushed());
}
}
void
storeF64(MemoryAccessDesc access, RegI32 ptr, RegF64 src, RegI32 tmp1, RegI32 tmp2) {
masm.add32(HeapReg, ptr);
if (access.isUnaligned()) {
masm.ma_vxfer(src, tmp1, tmp2);
masm.emitUnalignedStore(ByteSize(4), ptr, tmp1, 0);
masm.emitUnalignedStore(ByteSize(4), ptr, tmp2, 4);
} else {
BufferOffset st =
masm.ma_vstr(src, VFPAddr(ptr, VFPOffImm(0)), Assembler::Always);
masm.append(access, st.getOffset(), masm.framePushed());
}
}
#endif // JS_CODEGEN_ARM
////////////////////////////////////////////////////////////
// Generally speaking, ABOVE this point there should be no value
@ -6668,9 +6540,10 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
bool omitBoundsCheck = false;
MemoryAccessDesc access(viewType, addr.align, addr.offset, trapIfNotAsmJS());
-   size_t temps = loadStoreTemps(access);
+   size_t temps = loadTemps(access);
    RegI32 tmp1 = temps >= 1 ? needI32() : invalidI32();
    RegI32 tmp2 = temps >= 2 ? needI32() : invalidI32();
+   RegI32 tmp3 = temps >= 3 ? needI32() : invalidI32();
switch (type) {
case ValType::I32: {
@ -6680,7 +6553,7 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
#else
RegI32 rv = rp;
#endif
-   if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+   if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2, tmp3))
return false;
pushI32(rv);
if (rp != rv)
@ -6698,7 +6571,7 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
rp = popMemoryAccess(&access, &omitBoundsCheck);
rv = needI64();
#endif
-   if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+   if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2, tmp3))
return false;
pushI64(rv);
freeI32(rp);
@ -6707,7 +6580,7 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
case ValType::F32: {
RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
RegF32 rv = needF32();
-   if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+   if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2, tmp3))
return false;
pushF32(rv);
freeI32(rp);
@ -6716,7 +6589,7 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
case ValType::F64: {
RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
RegF64 rv = needF64();
-   if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+   if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2, tmp3))
return false;
pushF64(rv);
freeI32(rp);
@ -6731,6 +6604,8 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
freeI32(tmp1);
if (temps >= 2)
freeI32(tmp2);
if (temps >= 3)
freeI32(tmp3);
return true;
}
@ -6746,15 +6621,14 @@ BaseCompiler::emitStoreOrTeeStore(ValType resultType, Scalar::Type viewType,
bool omitBoundsCheck = false;
MemoryAccessDesc access(viewType, addr.align, addr.offset, trapIfNotAsmJS());
-   size_t temps = loadStoreTemps(access);
+   size_t temps = storeTemps(access);
    RegI32 tmp1 = temps >= 1 ? needI32() : invalidI32();
-   RegI32 tmp2 = temps >= 2 ? needI32() : invalidI32();
switch (resultType) {
case ValType::I32: {
RegI32 rv = popI32();
RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
-   if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+   if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1))
return false;
freeI32(rp);
freeOrPushI32<isStore>(rv);
@ -6763,7 +6637,7 @@ BaseCompiler::emitStoreOrTeeStore(ValType resultType, Scalar::Type viewType,
case ValType::I64: {
RegI64 rv = popI64();
RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
-   if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+   if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1))
return false;
freeI32(rp);
freeOrPushI64<isStore>(rv);
@ -6772,7 +6646,7 @@ BaseCompiler::emitStoreOrTeeStore(ValType resultType, Scalar::Type viewType,
case ValType::F32: {
RegF32 rv = popF32();
RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
-   if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+   if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1))
return false;
freeI32(rp);
freeOrPushF32<isStore>(rv);
@ -6781,7 +6655,7 @@ BaseCompiler::emitStoreOrTeeStore(ValType resultType, Scalar::Type viewType,
case ValType::F64: {
RegF64 rv = popF64();
RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
-   if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+   if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1))
return false;
freeI32(rp);
freeOrPushF64<isStore>(rv);
@ -6794,8 +6668,6 @@ BaseCompiler::emitStoreOrTeeStore(ValType resultType, Scalar::Type viewType,
if (temps >= 1)
freeI32(tmp1);
if (temps >= 2)
freeI32(tmp2);
return true;
}
@ -6990,16 +6862,15 @@ BaseCompiler::emitTeeStoreWithCoercion(ValType resultType, Scalar::Type viewType
bool omitBoundsCheck = false;
MemoryAccessDesc access(viewType, addr.align, addr.offset, trapIfNotAsmJS());
-   size_t temps = loadStoreTemps(access);
+   size_t temps = storeTemps(access);
    RegI32 tmp1 = temps >= 1 ? needI32() : invalidI32();
-   RegI32 tmp2 = temps >= 2 ? needI32() : invalidI32();
if (resultType == ValType::F32 && viewType == Scalar::Float64) {
RegF32 rv = popF32();
RegF64 rw = needF64();
masm.convertFloat32ToDouble(rv, rw);
RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
-   if (!store(access, rp, omitBoundsCheck, AnyReg(rw), tmp1, tmp2))
+   if (!store(access, rp, omitBoundsCheck, AnyReg(rw), tmp1))
return false;
pushF32(rv);
freeI32(rp);
@ -7010,7 +6881,7 @@ BaseCompiler::emitTeeStoreWithCoercion(ValType resultType, Scalar::Type viewType
RegF32 rw = needF32();
masm.convertDoubleToFloat32(rv, rw);
RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
-   if (!store(access, rp, omitBoundsCheck, AnyReg(rw), tmp1, tmp2))
+   if (!store(access, rp, omitBoundsCheck, AnyReg(rw), tmp1))
return false;
pushF64(rv);
freeI32(rp);
@ -7021,8 +6892,6 @@ BaseCompiler::emitTeeStoreWithCoercion(ValType resultType, Scalar::Type viewType
if (temps >= 1)
freeI32(tmp1);
if (temps >= 2)
freeI32(tmp2);
return true;
}
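The TODO in store() above concerns preserving src for teeStore: the unaligned i32 path clobbers the register it stores from, so the value is first copied into the temp. A schematic standalone illustration (hypothetical helpers, not the real baseline-compiler API):

    #include <cstdint>

    // Pretend store primitive that destroys its value register, like the
    // byte-by-byte unaligned store does on ARM.
    static void ClobberingStore(uint8_t* p, uint32_t& v) {
      for (int i = 0; i < 4; i++) { p[i] = uint8_t(v); v >>= 8; }  // v ends up 0
    }

    // teeStore must leave the stored value as its result, so it spends a temp
    // on a copy instead of letting the store eat src.
    uint32_t TeeStoreI32(uint8_t* p, uint32_t src) {
      uint32_t tmp = src;       // moveI32(src.i32(), tmp)
      ClobberingStore(p, tmp);  // masm.wasmUnalignedStore(access, tmp, ptr, ptr)
      return src;               // src survives and is pushed as the result
    }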

View file

@ -0,0 +1,6 @@
<!DOCTYPE html>
<html style="direction: rtl; border-left: medium solid;">
<body onload="document.documentElement.style.borderWidth = '109472098330px';">
<div style="float: right;"><option style="display: -moz-box;">I</option>0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0</div>
</body>
</html>

View file

@ -480,6 +480,7 @@ load 737313-1.html
load 737313-2.html
load 737313-3.html
test-pref(font.size.inflation.emPerLine,15) load 740199-1.xhtml
test-pref(font.size.inflation.emPerLine,15) load 742602.html
load 747688.html
load 750066.html
load 757413.xhtml

View file

@ -0,0 +1 @@
<html xmlns="http://www.w3.org/1999/xhtml" dir="rtl"><tr><span><math xmlns="http://www.w3.org/1998/Math/MathML"/><tr>n</tr></span></tr></html>

View file

@ -58,6 +58,7 @@ load 654928-1.html
load 655451-1.xhtml
load 713606-1.html
load 716349-1.html
load 767251.xhtml
load 848725-1.html
load 848725-2.html
load 947557-1.html

View file

@ -1,3 +1,4 @@
<!DOCTYPE html>
<html>
<head>
<style>
@ -8,13 +9,13 @@ div {
position: fixed;
top: 50px;
left: 50px;
-  margin: 50;
-  padding: 50;
+  margin: 50px;
+  padding: 50px;
border: 50px solid red;
transform-origin: 0 0;
transform: translate(50px, 50px) scale(0.5);
background-color: green;
-  clip-path: polygon(0 0, 200px 0, 0 200px) content-box;*/
+  clip-path: polygon(0 0, 200px 0, 0 200px) content-box;
}
</style>
<title>clip-path with polygon() hit test</title>
@ -38,4 +39,4 @@ is(a, document.elementFromPoint(299, 200), "a should be found");
is(a, document.elementFromPoint(200, 299), "a should be found");
is(a, document.elementFromPoint(250, 250), "a should be found");
</script>
</html>

View file

@ -0,0 +1 @@
<html xmlns="http://www.w3.org/1999/xhtml" style="display: table; position: absolute; left: 2305843009213694000pc; bottom: 2452284pc; padding: 9931442138140%; border-bottom-right-radius: 1152921504606847000pc;">X</html>

View file

@ -0,0 +1,5 @@
<svg xmlns="http://www.w3.org/2000/svg" style="border-width: 51703084143745256mm; border-left-style: dashed; border-top-left-radius: 3%; border-top-style: dashed; border-right-style: solid; border-image-outset: 10;">
<script>
document.elementFromPoint(20, 20);
</script>
</svg>

After

Width:  |  Height:  |  Size: 278 B

View file

@ -105,6 +105,7 @@ load 587336-1.html
load 590291-1.svg
load 601999-1.html
load 605626-1.svg
asserts(2) load 606914.xhtml # bug 606914
asserts-if(stylo,2) load 610594-1.html # bug 1324669
load 610954-1.html
load 612662-1.svg
@ -133,6 +134,7 @@ load 725918-1.svg
load 732836-1.svg
load 740627-1.svg
load 740627-2.svg
load 743469.svg
load 757704-1.svg
load 757718-1.svg
load 757751-1.svg

View file

@ -21,6 +21,7 @@ import android.util.TypedValue;
import org.mozilla.gecko.R;
import org.mozilla.gecko.icons.IconRequest;
import org.mozilla.gecko.icons.IconResponse;
import org.mozilla.gecko.util.StringUtils;
/**
* This loader will generate an icon in case no icon could be loaded. In order to do so this needs
@ -43,14 +44,6 @@ public class IconGenerator implements IconLoader {
0xFFea385e,
};
// List of common prefixes of host names. Those prefixes will be stripped before a representative
// character for a URL is determined.
private static final String[] COMMON_PREFIXES = {
"www.",
"m.",
"mobile.",
};
private static final int TEXT_SIZE_DP = 12;
@Override
public IconResponse load(IconRequest request) {
@ -156,12 +149,8 @@ public class IconGenerator implements IconLoader {
return "?";
}
-       // Strip common prefixes that we do not want to use to determine the representative character
-       for (String prefix : COMMON_PREFIXES) {
-           if (snippet.startsWith(prefix)) {
-               snippet = snippet.substring(prefix.length());
-           }
-       }
+       // Strip common prefixes that we do not want to use to determine the representative character
+       snippet = StringUtils.stripCommonSubdomains(snippet);
return snippet;
}
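StringUtils.stripCommonSubdomains itself is not shown in this diff; judging from the COMMON_PREFIXES list it replaces, it presumably strips the same leading labels ("www.", "m.", "mobile."). A hypothetical sketch of that behavior, mirroring the removed Java loop:

    #include <string>

    std::string StripCommonSubdomains(std::string host) {
      // Each prefix is tested in turn against the (possibly already
      // shortened) string, just as the removed Java loop did.
      for (const char* p : { "www.", "m.", "mobile." }) {
        const std::string prefix(p);
        if (host.compare(0, prefix.size(), prefix) == 0)
          host.erase(0, prefix.size());
      }
      return host;
    }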