merge mozilla-central to autoland. r=merge a=merge

--HG--
rename : testing/marionette/wait.js => testing/marionette/sync.js
Sebastian Hengst 2017-10-09 23:56:28 +02:00
Parents: 0a185f15f7 d486c008dc
Commit: 7e36032551
111 changed files with 2637 additions and 1298 deletions

View file

@ -11,3 +11,4 @@ skip-if = (os == 'win' && bits == 32) # Bug 1352791
[browser_notification_remove_permission.js] [browser_notification_remove_permission.js]
[browser_notification_replace.js] [browser_notification_replace.js]
[browser_notification_tab_switching.js] [browser_notification_tab_switching.js]
skip-if = os == "win" || os == "linux" # Bug 1243263

View file

@ -25,4 +25,7 @@ add_task(async function test_remove_history() {
await BrowserTestUtils.waitForCondition( await BrowserTestUtils.waitForCondition(
() => !gURLBar.popup.richlistbox.children.some(c => !c.collapsed && c.getAttribute("ac-value") == TEST_URL), () => !gURLBar.popup.richlistbox.children.some(c => !c.collapsed && c.getAttribute("ac-value") == TEST_URL),
"Waiting for the result to disappear"); "Waiting for the result to disappear");
gURLBar.popup.hidePopup();
await promisePopupHidden(gURLBar.popup);
}); });
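The two added lines make the task close the autocomplete panel and actually wait for it to be hidden before finishing. As a rough illustration only (the real promisePopupHidden helper lives in the suite's shared head file and may differ), such a helper typically just waits for the popup's "popuphidden" event:

    // Hypothetical sketch of a promisePopupHidden-style helper.
    function promisePopupHidden(popup) {
      // BrowserTestUtils.waitForEvent resolves once the named event fires.
      return BrowserTestUtils.waitForEvent(popup, "popuphidden");
    }

    // Usage, mirroring the added lines:
    gURLBar.popup.hidePopup();
    await promisePopupHidden(gURLBar.popup);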

View file

@ -132,6 +132,7 @@ skip-if = !e10s || debug || asan
[browser_ext_tabs_audio.js] [browser_ext_tabs_audio.js]
[browser_ext_tabs_captureVisibleTab.js] [browser_ext_tabs_captureVisibleTab.js]
[browser_ext_tabs_create.js] [browser_ext_tabs_create.js]
skip-if = os == "linux" && debug && bits == 32 # Bug 1350189
[browser_ext_tabs_create_invalid_url.js] [browser_ext_tabs_create_invalid_url.js]
[browser_ext_tabs_detectLanguage.js] [browser_ext_tabs_detectLanguage.js]
[browser_ext_tabs_discarded.js] [browser_ext_tabs_discarded.js]

View file

@ -1040,11 +1040,12 @@ Experiments.Experiments.prototype = {
let result = await loadJSONAsync(path, { compression: "lz4" }); let result = await loadJSONAsync(path, { compression: "lz4" });
this._populateFromCache(result); this._populateFromCache(result);
} catch (e) { } catch (e) {
this._experiments = new Map();
if (e instanceof OS.File.Error && e.becauseNoSuchFile) { if (e instanceof OS.File.Error && e.becauseNoSuchFile) {
// No cached manifest yet. // No cached manifest yet.
this._experiments = new Map(); this._log.trace("_loadFromCache - no cached manifest yet");
} else { } else {
throw e; this._log.error("_loadFromCache - caught error", e);
} }
} }
}, },
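Reassembled from the right-hand side of the hunk, the new catch block reads as below: it always resets to an empty experiments map, treats a missing cache file as the expected first-run case, and logs any other load error instead of rethrowing it (the enclosing method signature is shown only for context):

    async _loadFromCache(path) {
      try {
        let result = await loadJSONAsync(path, { compression: "lz4" });
        this._populateFromCache(result);
      } catch (e) {
        // Always fall back to a consistent empty state.
        this._experiments = new Map();
        if (e instanceof OS.File.Error && e.becauseNoSuchFile) {
          // No cached manifest yet.
          this._log.trace("_loadFromCache - no cached manifest yet");
        } else {
          // Corrupt or unreadable cache: log and recover rather than throw.
          this._log.error("_loadFromCache - caught error", e);
        }
      }
    }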

View file

@ -393,3 +393,19 @@ add_task(async function test_expiration() {
await promiseRestartManager(); await promiseRestartManager();
await removeCacheFile(); await removeCacheFile();
}); });
add_task(async function test_invalid_cache() {
// Save uncompressed data to the cache file to trigger a loading error.
let encoder = new TextEncoder();
let data = encoder.encode("foo");
let path = OS.Path.join(OS.Constants.Path.profileDir, "experiments.json");
let options = { tmpPath: path + ".tmp" };
await OS.File.writeAtomic(path, data, options);
// Trigger loading from the cache. This should not throw and gracefully recover.
let experiments = new Experiments.Experiments(gPolicy);
let list = await experiments.getExperiments();
Assert.deepEqual(list, [], "The experiments cache should be empty.");
});

View file

@ -814,10 +814,8 @@ NetworkMonitor.prototype = {
this.interceptedChannels.add(subject); this.interceptedChannels.add(subject);
// On e10s, we never receive http-on-examine-cached-response, so fake one. // Service workers never fire http-on-examine-cached-response, so fake one.
if (Services.appinfo.processType == Ci.nsIXULRuntime.PROCESS_TYPE_CONTENT) {
this._httpResponseExaminer(channel, "http-on-examine-cached-response"); this._httpResponseExaminer(channel, "http-on-examine-cached-response");
}
}, },
/** /**

View file

@ -97,11 +97,6 @@ let expectedConsoleCalls = [
filename: /helper_serviceworker/, filename: /helper_serviceworker/,
arguments: ['fetch event: ' + SCOPE_FRAME_URL2], arguments: ['fetch event: ' + SCOPE_FRAME_URL2],
}, },
{
level: "log",
filename: /helper_serviceworker/,
arguments: ['message event: ' + MESSAGE],
},
]; ];
let consoleCalls = []; let consoleCalls = [];
@ -169,10 +164,9 @@ let onAttach = Task.async(function*(state, response) {
// Now postMessage() the service worker to trigger its message event // Now postMessage() the service worker to trigger its message event
// handler. This will generate 1 or 2 to console.log() statements // handler. This will generate 1 or 2 to console.log() statements
// depending on if the worker thread needs to spin up again. Although we // depending on if the worker thread needs to spin up again. In either
// don't have a controlled or registering document in both cases, we still // case, though, we should not get any console calls because we don't
// could get console calls since we only flush reports when the channel is // have a controlled or registering document.
// finally destroyed.
info("Completed force refresh. Messaging service worker."); info("Completed force refresh. Messaging service worker.");
yield messageServiceWorker(currentFrame.contentWindow, SCOPE, MESSAGE); yield messageServiceWorker(currentFrame.contentWindow, SCOPE, MESSAGE);
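The messageServiceWorker helper used above is defined elsewhere in the test's helper files; as a hedged sketch, messaging an already-registered service worker from a window generally amounts to:

    // Assumption: not the actual helper, just the standard API sequence it wraps.
    async function messageServiceWorker(win, scope, message) {
      let registration = await win.navigator.serviceWorker.getRegistration(scope);
      // Posting to the active worker fires its "message" event, which is what
      // produces the console.log calls discussed in the comment above.
      registration.active.postMessage(message);
    }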

View file

@ -15066,7 +15066,7 @@ nsDocShell::ChannelIntercepted(nsIInterceptedChannel* aChannel)
{ {
RefPtr<ServiceWorkerManager> swm = ServiceWorkerManager::GetInstance(); RefPtr<ServiceWorkerManager> swm = ServiceWorkerManager::GetInstance();
if (!swm) { if (!swm) {
aChannel->Cancel(NS_ERROR_INTERCEPTION_FAILED); aChannel->CancelInterception(NS_ERROR_INTERCEPTION_FAILED);
return NS_OK; return NS_OK;
} }

View file

@ -513,10 +513,10 @@ WebGLContext::InitAndValidateGL(FailureReason* const out_failReason)
// Note: GL_MAX_TEXTURE_UNITS is fixed at 4 for most desktop hardware, // Note: GL_MAX_TEXTURE_UNITS is fixed at 4 for most desktop hardware,
// even though the hardware supports much more. The // even though the hardware supports much more. The
// GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS value is the accurate value. // GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS value is the accurate value.
gl->GetUIntegerv(LOCAL_GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &mGLMaxTextureUnits); mGLMaxCombinedTextureImageUnits = gl->GetIntAs<GLuint>(LOCAL_GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS);
mGLMaxCombinedTextureImageUnits = mGLMaxTextureUnits; mGLMaxTextureUnits = mGLMaxCombinedTextureImageUnits;
if (mGLMaxTextureUnits < 8) { if (mGLMaxCombinedTextureImageUnits < 8) {
const nsPrintfCString reason("GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS: %u is < 8!", const nsPrintfCString reason("GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS: %u is < 8!",
mGLMaxTextureUnits); mGLMaxTextureUnits);
*out_failReason = { "FEATURE_FAILURE_WEBGL_T_UNIT", reason }; *out_failReason = { "FEATURE_FAILURE_WEBGL_T_UNIT", reason };

View file

@ -1295,8 +1295,9 @@ EventStateManager::DispatchCrossProcessEvent(WidgetEvent* aEvent,
return; return;
} }
case eDragEventClass: { case eDragEventClass: {
if (remote->Manager()->IsContentParent()) { RefPtr<TabParent> tabParent = remote;
remote->Manager()->AsContentParent()->MaybeInvokeDragSession(remote); if (tabParent->Manager()->IsContentParent()) {
tabParent->Manager()->AsContentParent()->MaybeInvokeDragSession(tabParent);
} }
nsCOMPtr<nsIDragSession> dragSession = nsContentUtils::GetDragSession(); nsCOMPtr<nsIDragSession> dragSession = nsContentUtils::GetDragSession();
@ -1312,7 +1313,7 @@ EventStateManager::DispatchCrossProcessEvent(WidgetEvent* aEvent,
} }
} }
remote->SendRealDragEvent(*aEvent->AsDragEvent(), action, dropEffect); tabParent->SendRealDragEvent(*aEvent->AsDragEvent(), action, dropEffect);
return; return;
} }
case ePluginEventClass: { case ePluginEventClass: {

View file

@ -872,6 +872,10 @@ FetchDriver::AsyncOnChannelRedirect(nsIChannel* aOldChannel,
} }
// "HTTP-redirect fetch": step 14 "Append locationURL to request's URL list." // "HTTP-redirect fetch": step 14 "Append locationURL to request's URL list."
// However, ignore internal redirects here. We don't want to flip
// Response.redirected to true if an internal redirect occurs. These
// should be transparent to script.
if (!(aFlags & nsIChannelEventSink::REDIRECT_INTERNAL)) {
nsCOMPtr<nsIURI> uri; nsCOMPtr<nsIURI> uri;
MOZ_ALWAYS_SUCCEEDS(aNewChannel->GetURI(getter_AddRefs(uri))); MOZ_ALWAYS_SUCCEEDS(aNewChannel->GetURI(getter_AddRefs(uri)));
@ -892,6 +896,8 @@ FetchDriver::AsyncOnChannelRedirect(nsIChannel* aOldChannel,
} }
mRequest->AddURL(spec, fragment); mRequest->AddURL(spec, fragment);
}
NS_ConvertUTF8toUTF16 tRPHeaderValue(tRPHeaderCValue); NS_ConvertUTF8toUTF16 tRPHeaderValue(tRPHeaderCValue);
// updates requests associated referrer policy according to the // updates requests associated referrer policy according to the
// Referrer-Policy header (if any). // Referrer-Policy header (if any).
@ -902,7 +908,7 @@ FetchDriver::AsyncOnChannelRedirect(nsIChannel* aOldChannel,
mRequest->SetReferrerPolicy(net_referrerPolicy); mRequest->SetReferrerPolicy(net_referrerPolicy);
// Should update channel's referrer policy // Should update channel's referrer policy
if (httpChannel) { if (httpChannel) {
rv = FetchUtil::SetRequestReferrer(mPrincipal, nsresult rv = FetchUtil::SetRequestReferrer(mPrincipal,
mDocument, mDocument,
httpChannel, httpChannel,
mRequest); mRequest);
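The new REDIRECT_INTERNAL guard keeps internal channel redirects out of the request's URL list, so only real HTTP redirects become visible to script through Response.redirected. Illustrative fetch() behaviour, with placeholder URLs:

    // "/old" responds with a 301 to "/new"; "/data" responds directly.
    let a = await fetch("/old");
    console.log(a.redirected, a.url);  // true, URL ends with "/new"
    let b = await fetch("/data");
    console.log(b.redirected, b.url);  // false, URL ends with "/data"
    // Internal redirects (e.g. interception by a service worker) are meant to be
    // transparent to script, so they must not flip redirected to true.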

View file

@ -201,6 +201,10 @@ public:
{ {
GetHTMLAttr(nsGkAtoms::name, aValue); GetHTMLAttr(nsGkAtoms::name, aValue);
} }
void GetName(nsAString& aValue)
{
GetHTMLAttr(nsGkAtoms::name, aValue);
}
void SetName(const nsAString& aValue, mozilla::ErrorResult& rv) void SetName(const nsAString& aValue, mozilla::ErrorResult& rv)
{ {
SetHTMLAttr(nsGkAtoms::name, aValue, rv); SetHTMLAttr(nsGkAtoms::name, aValue, rv);

View file

@ -32,6 +32,8 @@ public:
// nsISupports // nsISupports
NS_DECL_ISUPPORTS_INHERITED NS_DECL_ISUPPORTS_INHERITED
NS_IMPL_FROMCONTENT_HTML_WITH_TAG(HTMLBodyElement, body);
// Event listener stuff; we need to declare only the ones we need to // Event listener stuff; we need to declare only the ones we need to
// forward to window that don't come from nsIDOMHTMLBodyElement. // forward to window that don't come from nsIDOMHTMLBodyElement.
#define EVENT(name_, id_, type_, struct_) /* nothing; handled by the shim */ #define EVENT(name_, id_, type_, struct_) /* nothing; handled by the shim */
@ -112,6 +114,10 @@ public:
{ {
GetHTMLAttr(nsGkAtoms::background, aBackground); GetHTMLAttr(nsGkAtoms::background, aBackground);
} }
void GetBackground(nsAString& aBackground)
{
GetHTMLAttr(nsGkAtoms::background, aBackground);
}
void SetBackground(const nsAString& aBackground, ErrorResult& aError) void SetBackground(const nsAString& aBackground, ErrorResult& aError)
{ {
SetHTMLAttr(nsGkAtoms::background, aBackground, aError); SetHTMLAttr(nsGkAtoms::background, aBackground, aError);

View file

@ -1528,6 +1528,11 @@ CSPReportRedirectSink::AsyncOnChannelRedirect(nsIChannel* aOldChannel,
uint32_t aRedirFlags, uint32_t aRedirFlags,
nsIAsyncVerifyRedirectCallback* aCallback) nsIAsyncVerifyRedirectCallback* aCallback)
{ {
if (aRedirFlags & nsIChannelEventSink::REDIRECT_INTERNAL) {
aCallback->OnRedirectVerifyCallback(NS_OK);
return NS_OK;
}
// cancel the old channel so XHR failure callback happens // cancel the old channel so XHR failure callback happens
nsresult rv = aOldChannel->Cancel(NS_ERROR_ABORT); nsresult rv = aOldChannel->Cancel(NS_ERROR_ABORT);
NS_ENSURE_SUCCESS(rv, rv); NS_ENSURE_SUCCESS(rv, rv);

View file

@ -34,9 +34,9 @@ public:
: mKey(*aKey), : mKey(*aKey),
mForceCompositing(false) mForceCompositing(false)
{ } { }
nsSMILCompositor(const nsSMILCompositor& toCopy) nsSMILCompositor(nsSMILCompositor&& toMove)
: mKey(toCopy.mKey), : mKey(mozilla::Move(toMove.mKey)),
mAnimationFunctions(toCopy.mAnimationFunctions), mAnimationFunctions(mozilla::Move(toMove.mAnimationFunctions)),
mForceCompositing(false) mForceCompositing(false)
{ } { }
~nsSMILCompositor() { } ~nsSMILCompositor() { }

View file

@ -8,6 +8,13 @@
#include "nsSMILValue.h" #include "nsSMILValue.h"
#include "nsDebug.h" #include "nsDebug.h"
/*static*/ nsSMILNullType*
nsSMILNullType::Singleton()
{
static nsSMILNullType sSingleton;
return &sSingleton;
}
nsresult nsresult
nsSMILNullType::Assign(nsSMILValue& aDest, const nsSMILValue& aSrc) const nsSMILNullType::Assign(nsSMILValue& aDest, const nsSMILValue& aSrc) const
{ {

View file

@ -14,12 +14,7 @@ class nsSMILNullType : public nsISMILType
{ {
public: public:
// Singleton for nsSMILValue objects to hold onto. // Singleton for nsSMILValue objects to hold onto.
static nsSMILNullType* static nsSMILNullType* Singleton();
Singleton()
{
static nsSMILNullType sSingleton;
return &sSingleton;
}
protected: protected:
// nsISMILType Methods // nsISMILType Methods

View file

@ -9,11 +9,13 @@ interface XPathEvaluator {
// Based on nsIDOMXPathEvaluator // Based on nsIDOMXPathEvaluator
[NewObject, Throws] [NewObject, Throws]
XPathExpression createExpression(DOMString expression, XPathExpression createExpression(DOMString expression,
XPathNSResolver? resolver); optional XPathNSResolver? resolver = null);
[Pure] [Pure]
Node createNSResolver(Node nodeResolver); Node createNSResolver(Node nodeResolver);
[Throws] [Throws]
XPathResult evaluate(DOMString expression, Node contextNode, XPathResult evaluate(DOMString expression,
XPathNSResolver? resolver, unsigned short type, Node contextNode,
object? result); optional XPathNSResolver? resolver = null,
optional unsigned short type = 0 /* XPathResult.ANY_TYPE */,
optional object? result = null);
}; };
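With resolver, type and result now optional (type defaulting to XPathResult.ANY_TYPE), callers no longer need to spell out the trailing nulls. For example, from page script:

    // Previously required: document.evaluate("//option", document, null,
    //                                        XPathResult.ANY_TYPE, null);
    let result = document.evaluate("//option", document);
    let node = result.iterateNext();
    while (node) {
      console.log(node.textContent);
      node = result.iterateNext();
    }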

View file

@ -9,7 +9,9 @@ interface XPathExpression {
// returned by this method. If this is specified as null or it's not an // returned by this method. If this is specified as null or it's not an
// XPathResult object, a new result object will be constructed and returned. // XPathResult object, a new result object will be constructed and returned.
[Throws] [Throws]
XPathResult evaluate(Node contextNode, unsigned short type, object? result); XPathResult evaluate(Node contextNode,
optional unsigned short type = 0 /* XPathResult.ANY_TYPE */,
optional object? result = null);
// The result specifies a specific result object which may be reused and // The result specifies a specific result object which may be reused and
// returned by this method. If this is specified as null or it's not an // returned by this method. If this is specified as null or it's not an
@ -18,5 +20,6 @@ interface XPathExpression {
XPathResult evaluateWithContext(Node contextNode, XPathResult evaluateWithContext(Node contextNode,
unsigned long contextPosition, unsigned long contextPosition,
unsigned long contextSize, unsigned long contextSize,
unsigned short type, object? result); optional unsigned short type = 0 /* XPathResult.ANY_TYPE */,
optional object? result = null);
}; };

View file

@ -115,7 +115,7 @@ CancelChannelRunnable::Run()
mChannel->SetHandleFetchEventEnd(TimeStamp::Now()); mChannel->SetHandleFetchEventEnd(TimeStamp::Now());
mChannel->SaveTimeStamps(); mChannel->SaveTimeStamps();
mChannel->Cancel(mStatus); mChannel->CancelInterception(mStatus);
mRegistration->MaybeScheduleUpdate(); mRegistration->MaybeScheduleUpdate();
return NS_OK; return NS_OK;
} }
@ -199,7 +199,7 @@ public:
nsCOMPtr<nsILoadInfo> loadInfo = underlyingChannel->GetLoadInfo(); nsCOMPtr<nsILoadInfo> loadInfo = underlyingChannel->GetLoadInfo();
if (!loadInfo || !CSPPermitsResponse(loadInfo)) { if (!loadInfo || !CSPPermitsResponse(loadInfo)) {
mChannel->Cancel(NS_ERROR_CONTENT_BLOCKED); mChannel->CancelInterception(NS_ERROR_CONTENT_BLOCKED);
return NS_OK; return NS_OK;
} }
@ -213,14 +213,14 @@ public:
} }
rv = mChannel->SetChannelInfo(&channelInfo); rv = mChannel->SetChannelInfo(&channelInfo);
if (NS_WARN_IF(NS_FAILED(rv))) { if (NS_WARN_IF(NS_FAILED(rv))) {
mChannel->Cancel(NS_ERROR_INTERCEPTION_FAILED); mChannel->CancelInterception(NS_ERROR_INTERCEPTION_FAILED);
return NS_OK; return NS_OK;
} }
rv = mChannel->SynthesizeStatus(mInternalResponse->GetUnfilteredStatus(), rv = mChannel->SynthesizeStatus(mInternalResponse->GetUnfilteredStatus(),
mInternalResponse->GetUnfilteredStatusText()); mInternalResponse->GetUnfilteredStatusText());
if (NS_WARN_IF(NS_FAILED(rv))) { if (NS_WARN_IF(NS_FAILED(rv))) {
mChannel->Cancel(NS_ERROR_INTERCEPTION_FAILED); mChannel->CancelInterception(NS_ERROR_INTERCEPTION_FAILED);
return NS_OK; return NS_OK;
} }
@ -235,7 +235,7 @@ public:
rv = mChannel->FinishSynthesizedResponse(mResponseURLSpec); rv = mChannel->FinishSynthesizedResponse(mResponseURLSpec);
if (NS_WARN_IF(NS_FAILED(rv))) { if (NS_WARN_IF(NS_FAILED(rv))) {
mChannel->Cancel(NS_ERROR_INTERCEPTION_FAILED); mChannel->CancelInterception(NS_ERROR_INTERCEPTION_FAILED);
return NS_OK; return NS_OK;
} }

View file

@ -0,0 +1,11 @@
<html xmlns="http://www.w3.org/1999/xhtml">
<body>
<select>
<script>document.documentElement.offsetHeight</script>
<option>Hello there</option>
</select>
<script>
document.querySelector("body").style.display = "inline";
</script>
</body>
</html>

View file

@ -10,3 +10,4 @@ load 453278.html
load 803586.xhtml load 803586.xhtml
load 994740-1.xhtml load 994740-1.xhtml
load 1038887.xhtml load 1038887.xhtml
load 1405878.xml

View file

@ -1070,6 +1070,17 @@ nsXMLContentSink::HandleEndElement(const char16_t *aName,
isTemplateElement, "Wrong element being closed"); isTemplateElement, "Wrong element being closed");
#endif #endif
// Make sure to notify on our kids before we call out to any other code that
// might reenter us and call FlushTags, in a state in which we've already
// popped "content" from the stack but haven't notified on its kids yet.
int32_t stackLen = mContentStack.Length();
if (mNotifyLevel >= stackLen) {
if (numFlushed < content->GetChildCount()) {
NotifyAppend(content, numFlushed);
}
mNotifyLevel = stackLen - 1;
}
result = CloseElement(content); result = CloseElement(content);
if (mCurrentHead == content) { if (mCurrentHead == content) {
@ -1085,13 +1096,6 @@ nsXMLContentSink::HandleEndElement(const char16_t *aName,
MaybeStartLayout(false); MaybeStartLayout(false);
} }
int32_t stackLen = mContentStack.Length();
if (mNotifyLevel >= stackLen) {
if (numFlushed < content->GetChildCount()) {
NotifyAppend(content, numFlushed);
}
mNotifyLevel = stackLen - 1;
}
DidAddContent(); DidAddContent();
if (content->IsSVGElement(nsGkAtoms::svg)) { if (content->IsSVGElement(nsGkAtoms::svg)) {

View file

@ -112,9 +112,9 @@ public:
: nsRefPtrHashKey<PermissionKey>(aPermissionKey) : nsRefPtrHashKey<PermissionKey>(aPermissionKey)
{} {}
PermissionHashKey(const PermissionHashKey& toCopy) PermissionHashKey(PermissionHashKey&& toCopy)
: nsRefPtrHashKey<PermissionKey>(toCopy) : nsRefPtrHashKey<PermissionKey>(mozilla::Move(toCopy))
, mPermissions(toCopy.mPermissions) , mPermissions(mozilla::Move(toCopy.mPermissions))
{} {}
bool KeyEquals(const PermissionKey* aKey) const bool KeyEquals(const PermissionKey* aKey) const

View file

@ -166,12 +166,12 @@ static gfx::IntRect ContainerVisibleRect(ContainerT* aContainer)
/* all of the per-layer prepared data we need to maintain */ /* all of the per-layer prepared data we need to maintain */
struct PreparedLayer struct PreparedLayer
{ {
PreparedLayer(LayerComposite *aLayer, PreparedLayer(Layer *aLayer,
RenderTargetIntRect aClipRect, RenderTargetIntRect aClipRect,
Maybe<gfx::Polygon>&& aGeometry) Maybe<gfx::Polygon>&& aGeometry)
: mLayer(aLayer), mClipRect(aClipRect), mGeometry(Move(aGeometry)) {} : mLayer(aLayer), mClipRect(aClipRect), mGeometry(Move(aGeometry)) {}
LayerComposite* mLayer; RefPtr<Layer> mLayer;
RenderTargetIntRect mClipRect; RenderTargetIntRect mClipRect;
Maybe<Polygon> mGeometry; Maybe<Polygon> mGeometry;
}; };
@ -235,7 +235,8 @@ ContainerPrepare(ContainerT* aContainer,
CULLING_LOG("Preparing sublayer %p\n", layerToRender->GetLayer()); CULLING_LOG("Preparing sublayer %p\n", layerToRender->GetLayer());
layerToRender->Prepare(clipRect); layerToRender->Prepare(clipRect);
aContainer->mPrepared->mLayers.AppendElement(PreparedLayer(layerToRender, clipRect, aContainer->mPrepared->mLayers.AppendElement(PreparedLayer(layerToRender->GetLayer(),
clipRect,
Move(layer.geometry))); Move(layer.geometry)));
} }
@ -413,7 +414,7 @@ RenderLayers(ContainerT* aContainer, LayerManagerComposite* aManager,
PreparedLayer& preparedData = aContainer->mPrepared->mLayers[i]; PreparedLayer& preparedData = aContainer->mPrepared->mLayers[i];
const gfx::IntRect clipRect = preparedData.mClipRect.ToUnknownRect(); const gfx::IntRect clipRect = preparedData.mClipRect.ToUnknownRect();
LayerComposite* layerToRender = preparedData.mLayer; LayerComposite* layerToRender = static_cast<LayerComposite*>(preparedData.mLayer->ImplData());
const Maybe<gfx::Polygon>& childGeometry = preparedData.mGeometry; const Maybe<gfx::Polygon>& childGeometry = preparedData.mGeometry;
Layer* layer = layerToRender->GetLayer(); Layer* layer = layerToRender->GetLayer();

View file

@ -20,15 +20,12 @@ StackingContextHelper::StackingContextHelper()
StackingContextHelper::StackingContextHelper(const StackingContextHelper& aParentSC, StackingContextHelper::StackingContextHelper(const StackingContextHelper& aParentSC,
wr::DisplayListBuilder& aBuilder, wr::DisplayListBuilder& aBuilder,
nsDisplayListBuilder* aDisplayListBuilder, const nsTArray<wr::WrFilterOp>& aFilters,
nsDisplayItem* aItem,
nsDisplayList* aDisplayList,
const gfx::Matrix4x4* aBoundTransform, const gfx::Matrix4x4* aBoundTransform,
uint64_t aAnimationsId, uint64_t aAnimationsId,
float* aOpacityPtr, float* aOpacityPtr,
gfx::Matrix4x4* aTransformPtr, gfx::Matrix4x4* aTransformPtr,
gfx::Matrix4x4* aPerspectivePtr, gfx::Matrix4x4* aPerspectivePtr,
const nsTArray<wr::WrFilterOp>& aFilters,
const gfx::CompositionOp& aMixBlendMode, const gfx::CompositionOp& aMixBlendMode,
bool aBackfaceVisible) bool aBackfaceVisible)
: mBuilder(&aBuilder) : mBuilder(&aBuilder)

View file

@ -30,15 +30,12 @@ class MOZ_RAII StackingContextHelper
public: public:
StackingContextHelper(const StackingContextHelper& aParentSC, StackingContextHelper(const StackingContextHelper& aParentSC,
wr::DisplayListBuilder& aBuilder, wr::DisplayListBuilder& aBuilder,
nsDisplayListBuilder* aDisplayListBuilder,
nsDisplayItem* aItem,
nsDisplayList* aDisplayList,
const gfx::Matrix4x4* aBoundTransform,
uint64_t aAnimationsId,
float* aOpacityPtr,
gfx::Matrix4x4* aTransformPtr,
gfx::Matrix4x4* aPerspectivePtr = nullptr,
const nsTArray<wr::WrFilterOp>& aFilters = nsTArray<wr::WrFilterOp>(), const nsTArray<wr::WrFilterOp>& aFilters = nsTArray<wr::WrFilterOp>(),
const gfx::Matrix4x4* aBoundTransform = nullptr,
uint64_t aAnimationsId = 0,
float* aOpacityPtr = nullptr,
gfx::Matrix4x4* aTransformPtr = nullptr,
gfx::Matrix4x4* aPerspectivePtr = nullptr,
const gfx::CompositionOp& aMixBlendMode = gfx::CompositionOp::OP_OVER, const gfx::CompositionOp& aMixBlendMode = gfx::CompositionOp::OP_OVER,
bool aBackfaceVisible = true); bool aBackfaceVisible = true);
// This version of the constructor should only be used at the root level // This version of the constructor should only be used at the root level

View file

@ -46,8 +46,11 @@ WebRenderCommandBuilder::BuildWebRenderCommands(wr::DisplayListBuilder& aBuilder
mLastCanvasDatas.Clear(); mLastCanvasDatas.Clear();
mLastAsr = nullptr; mLastAsr = nullptr;
CreateWebRenderCommandsFromDisplayList(aDisplayList, aDisplayListBuilder, sc, {
aBuilder, aResourceUpdates); StackingContextHelper pageRootSc(sc, aBuilder);
CreateWebRenderCommandsFromDisplayList(aDisplayList, aDisplayListBuilder,
pageRootSc, aBuilder, aResourceUpdates);
}
// Make a "root" layer data that has everything else as descendants // Make a "root" layer data that has everything else as descendants
mLayerScrollData.emplace_back(); mLayerScrollData.emplace_back();

View file

@ -237,7 +237,7 @@ AssertGCThingIsNotAnObjectSubclass(js::gc::Cell* cell) {}
* Type T must be a public GC pointer type. * Type T must be a public GC pointer type.
*/ */
template <typename T> template <typename T>
class Heap : public js::HeapBase<T, Heap<T>> class MOZ_NON_MEMMOVABLE Heap : public js::HeapBase<T, Heap<T>>
{ {
// Please note: this can actually also be used by nsXBLMaybeCompiled<T>, for legacy reasons. // Please note: this can actually also be used by nsXBLMaybeCompiled<T>, for legacy reasons.
static_assert(js::IsHeapConstructibleType<T>::value, static_assert(js::IsHeapConstructibleType<T>::value,
@ -1173,6 +1173,14 @@ class JS_PUBLIC_API(ObjectPtr)
explicit ObjectPtr(JSObject* obj) : value(obj) {} explicit ObjectPtr(JSObject* obj) : value(obj) {}
ObjectPtr(const ObjectPtr& other) : value(other.value) {}
ObjectPtr(ObjectPtr&& other)
: value(other.value)
{
other.value = nullptr;
}
/* Always call finalize before the destructor. */ /* Always call finalize before the destructor. */
~ObjectPtr() { MOZ_ASSERT(!value); } ~ObjectPtr() { MOZ_ASSERT(!value); }

View file

@ -1428,14 +1428,25 @@ auto
DispatchTyped(F f, const JS::Value& val, Args&&... args) DispatchTyped(F f, const JS::Value& val, Args&&... args)
-> decltype(f(static_cast<JSObject*>(nullptr), mozilla::Forward<Args>(args)...)) -> decltype(f(static_cast<JSObject*>(nullptr), mozilla::Forward<Args>(args)...))
{ {
if (val.isString()) if (val.isString()) {
return f(val.toString(), mozilla::Forward<Args>(args)...); JSString* str = val.toString();
if (val.isObject()) MOZ_ASSERT(gc::IsCellPointerValid(str));
return f(&val.toObject(), mozilla::Forward<Args>(args)...); return f(str, mozilla::Forward<Args>(args)...);
if (val.isSymbol()) }
return f(val.toSymbol(), mozilla::Forward<Args>(args)...); if (val.isObject()) {
if (MOZ_UNLIKELY(val.isPrivateGCThing())) JSObject* obj = &val.toObject();
MOZ_ASSERT(gc::IsCellPointerValid(obj));
return f(obj, mozilla::Forward<Args>(args)...);
}
if (val.isSymbol()) {
JS::Symbol* sym = val.toSymbol();
MOZ_ASSERT(gc::IsCellPointerValid(sym));
return f(sym, mozilla::Forward<Args>(args)...);
}
if (MOZ_UNLIKELY(val.isPrivateGCThing())) {
MOZ_ASSERT(gc::IsCellPointerValid(val.toGCThing()));
return DispatchTyped(f, val.toGCCellPtr(), mozilla::Forward<Args>(args)...); return DispatchTyped(f, val.toGCCellPtr(), mozilla::Forward<Args>(args)...);
}
MOZ_ASSERT(!val.isGCThing()); MOZ_ASSERT(!val.isGCThing());
return F::defaultValue(val); return F::defaultValue(val);
} }

View file

@ -472,30 +472,30 @@ static bool ResolvePromiseFunction(JSContext* cx, unsigned argc, Value* vp);
static bool RejectPromiseFunction(JSContext* cx, unsigned argc, Value* vp); static bool RejectPromiseFunction(JSContext* cx, unsigned argc, Value* vp);
// ES2016, 25.4.1.3. // ES2016, 25.4.1.3.
static MOZ_MUST_USE bool static MOZ_MUST_USE MOZ_ALWAYS_INLINE bool
CreateResolvingFunctions(JSContext* cx, HandleObject promise, CreateResolvingFunctions(JSContext* cx, HandleObject promise,
MutableHandleObject resolveFn, MutableHandleObject resolveFn,
MutableHandleObject rejectFn) MutableHandleObject rejectFn)
{ {
RootedAtom funName(cx, cx->names().empty); HandlePropertyName funName = cx->names().empty;
RootedFunction resolve(cx, NewNativeFunction(cx, ResolvePromiseFunction, 1, funName, resolveFn.set(NewNativeFunction(cx, ResolvePromiseFunction, 1, funName,
gc::AllocKind::FUNCTION_EXTENDED, GenericObject)); gc::AllocKind::FUNCTION_EXTENDED, GenericObject));
if (!resolve) if (!resolveFn)
return false; return false;
RootedFunction reject(cx, NewNativeFunction(cx, RejectPromiseFunction, 1, funName, rejectFn.set(NewNativeFunction(cx, RejectPromiseFunction, 1, funName,
gc::AllocKind::FUNCTION_EXTENDED, GenericObject)); gc::AllocKind::FUNCTION_EXTENDED, GenericObject));
if (!reject) if (!rejectFn)
return false; return false;
resolve->setExtendedSlot(ResolveFunctionSlot_Promise, ObjectValue(*promise)); JSFunction* resolveFun = &resolveFn->as<JSFunction>();
resolve->setExtendedSlot(ResolveFunctionSlot_RejectFunction, ObjectValue(*reject)); JSFunction* rejectFun = &rejectFn->as<JSFunction>();
reject->setExtendedSlot(RejectFunctionSlot_Promise, ObjectValue(*promise)); resolveFun->initExtendedSlot(ResolveFunctionSlot_Promise, ObjectValue(*promise));
reject->setExtendedSlot(RejectFunctionSlot_ResolveFunction, ObjectValue(*resolve)); resolveFun->initExtendedSlot(ResolveFunctionSlot_RejectFunction, ObjectValue(*rejectFun));
resolveFn.set(resolve); rejectFun->initExtendedSlot(RejectFunctionSlot_Promise, ObjectValue(*promise));
rejectFn.set(reject); rejectFun->initExtendedSlot(RejectFunctionSlot_ResolveFunction, ObjectValue(*resolveFun));
return true; return true;
} }
@ -1465,12 +1465,11 @@ ClearResolutionFunctionSlots(JSFunction* resolutionFun)
} }
// ES2016, 25.4.3.1. steps 3-7. // ES2016, 25.4.3.1. steps 3-7.
static MOZ_MUST_USE PromiseObject* static MOZ_MUST_USE MOZ_ALWAYS_INLINE PromiseObject*
CreatePromiseObjectInternal(JSContext* cx, HandleObject proto /* = nullptr */, CreatePromiseObjectInternal(JSContext* cx, HandleObject proto /* = nullptr */,
bool protoIsWrapped /* = false */, bool informDebugger /* = true */) bool protoIsWrapped /* = false */, bool informDebugger /* = true */)
{ {
// Step 3. // Step 3.
Rooted<PromiseObject*> promise(cx);
// Enter the unwrapped proto's compartment, if that's different from // Enter the unwrapped proto's compartment, if that's different from
// the current one. // the current one.
// All state stored in a Promise's fixed slots must be created in the // All state stored in a Promise's fixed slots must be created in the
@ -1480,12 +1479,12 @@ CreatePromiseObjectInternal(JSContext* cx, HandleObject proto /* = nullptr */,
if (protoIsWrapped) if (protoIsWrapped)
ac.emplace(cx, proto); ac.emplace(cx, proto);
promise = NewObjectWithClassProto<PromiseObject>(cx, proto); PromiseObject* promise = NewObjectWithClassProto<PromiseObject>(cx, proto);
if (!promise) if (!promise)
return nullptr; return nullptr;
// Step 4. // Step 4.
promise->setFixedSlot(PromiseSlot_Flags, Int32Value(0)); promise->initFixedSlot(PromiseSlot_Flags, Int32Value(0));
// Steps 5-6. // Steps 5-6.
// Omitted, we allocate our single list of reaction records lazily. // Omitted, we allocate our single list of reaction records lazily.
@ -1493,20 +1492,24 @@ CreatePromiseObjectInternal(JSContext* cx, HandleObject proto /* = nullptr */,
// Step 7. // Step 7.
// Implicit, the handled flag is unset by default. // Implicit, the handled flag is unset by default.
if (MOZ_LIKELY(!ShouldCaptureDebugInfo(cx)))
return promise;
// Store an allocation stack so we can later figure out what the // Store an allocation stack so we can later figure out what the
// control flow was for some unexpected results. Frightfully expensive, // control flow was for some unexpected results. Frightfully expensive,
// but oh well. // but oh well.
if (ShouldCaptureDebugInfo(cx)) {
PromiseDebugInfo* debugInfo = PromiseDebugInfo::create(cx, promise); Rooted<PromiseObject*> promiseRoot(cx, promise);
PromiseDebugInfo* debugInfo = PromiseDebugInfo::create(cx, promiseRoot);
if (!debugInfo) if (!debugInfo)
return nullptr; return nullptr;
}
// Let the Debugger know about this Promise. // Let the Debugger know about this Promise.
if (informDebugger) if (informDebugger)
Debugger::onNewPromise(cx, promise); Debugger::onNewPromise(cx, promiseRoot);
return promise; return promiseRoot;
} }
// ES2016, 25.4.3.1. // ES2016, 25.4.3.1.
@ -1588,7 +1591,7 @@ PromiseConstructor(JSContext* cx, unsigned argc, Value* vp)
if (!GetPrototypeFromBuiltinConstructor(cx, args, &proto)) if (!GetPrototypeFromBuiltinConstructor(cx, args, &proto))
return false; return false;
} }
Rooted<PromiseObject*> promise(cx, PromiseObject::create(cx, executor, proto, needsWrapping)); PromiseObject* promise = PromiseObject::create(cx, executor, proto, needsWrapping);
if (!promise) if (!promise)
return false; return false;
@ -1639,14 +1642,16 @@ PromiseObject::create(JSContext* cx, HandleObject executor, HandleObject proto /
return nullptr; return nullptr;
// Need to wrap the resolution functions before storing them on the Promise. // Need to wrap the resolution functions before storing them on the Promise.
MOZ_ASSERT(promise->getFixedSlot(PromiseSlot_RejectFunction).isUndefined(),
"Slot must be undefined so initFixedSlot can be used");
if (needsWrapping) { if (needsWrapping) {
AutoCompartment ac(cx, promise); AutoCompartment ac(cx, promise);
RootedObject wrappedRejectFn(cx, rejectFn); RootedObject wrappedRejectFn(cx, rejectFn);
if (!cx->compartment()->wrap(cx, &wrappedRejectFn)) if (!cx->compartment()->wrap(cx, &wrappedRejectFn))
return nullptr; return nullptr;
promise->setFixedSlot(PromiseSlot_RejectFunction, ObjectValue(*wrappedRejectFn)); promise->initFixedSlot(PromiseSlot_RejectFunction, ObjectValue(*wrappedRejectFn));
} else { } else {
promise->setFixedSlot(PromiseSlot_RejectFunction, ObjectValue(*rejectFn)); promise->initFixedSlot(PromiseSlot_RejectFunction, ObjectValue(*rejectFn));
} }
// Step 9. // Step 9.
@ -2401,6 +2406,15 @@ NewReactionRecord(JSContext* cx, HandleObject resultPromise, HandleValue onFulfi
HandleValue onRejected, HandleObject resolve, HandleObject reject, HandleValue onRejected, HandleObject resolve, HandleObject reject,
HandleObject incumbentGlobalObject) HandleObject incumbentGlobalObject)
{ {
// Either of the following conditions must be met:
// * resultPromise is a PromiseObject
// * resolve and reject are callable
// except for Async Generator, there resultPromise can be nullptr.
MOZ_ASSERT_IF(resultPromise && !resultPromise->is<PromiseObject>(), resolve);
MOZ_ASSERT_IF(resultPromise && !resultPromise->is<PromiseObject>(), IsCallable(resolve));
MOZ_ASSERT_IF(resultPromise && !resultPromise->is<PromiseObject>(), reject);
MOZ_ASSERT_IF(resultPromise && !resultPromise->is<PromiseObject>(), IsCallable(reject));
Rooted<PromiseReactionRecord*> reaction(cx, NewObjectWithClassProto<PromiseReactionRecord>(cx)); Rooted<PromiseReactionRecord*> reaction(cx, NewObjectWithClassProto<PromiseReactionRecord>(cx));
if (!reaction) if (!reaction)
return nullptr; return nullptr;
@ -3108,7 +3122,7 @@ BlockOnPromise(JSContext* cx, HandleValue promiseVal, HandleObject blockedPromis
// rejected promises list. // rejected promises list.
bool addToDependent = true; bool addToDependent = true;
if (C == PromiseCtor) { if (C == PromiseCtor && resultPromise->is<PromiseObject>()) {
addToDependent = false; addToDependent = false;
} else { } else {
// 25.4.5.3., step 4. // 25.4.5.3., step 4.
@ -3167,12 +3181,14 @@ BlockOnPromise(JSContext* cx, HandleValue promiseVal, HandleObject blockedPromis
return false; return false;
} }
// If the object to depend on isn't a, maybe-wrapped, Promise instance, // If either the object to depend on or the object that gets blocked isn't
// we ignore it. All this does is lose some small amount of debug // a, maybe-wrapped, Promise instance, we ignore it. All this does is lose
// information in scenarios that are highly unlikely to occur in useful // some small amount of debug information in scenarios that are highly
// code. // unlikely to occur in useful code.
if (!unwrappedPromiseObj->is<PromiseObject>()) if (!unwrappedPromiseObj->is<PromiseObject>())
return true; return true;
if (!blockedPromise_->is<PromiseObject>())
return true;
Rooted<PromiseObject*> promise(cx, &unwrappedPromiseObj->as<PromiseObject>()); Rooted<PromiseObject*> promise(cx, &unwrappedPromiseObj->as<PromiseObject>());
return AddPromiseReaction(cx, promise, UndefinedHandleValue, UndefinedHandleValue, return AddPromiseReaction(cx, promise, UndefinedHandleValue, UndefinedHandleValue,

View file

@ -283,7 +283,7 @@ struct InternalBarrierMethods<Value>
DispatchTyped(PreBarrierFunctor<Value>(), v); DispatchTyped(PreBarrierFunctor<Value>(), v);
} }
static void postBarrier(Value* vp, const Value& prev, const Value& next) { static MOZ_ALWAYS_INLINE void postBarrier(Value* vp, const Value& prev, const Value& next) {
MOZ_ASSERT(!CurrentThreadIsIonCompiling()); MOZ_ASSERT(!CurrentThreadIsIonCompiling());
MOZ_ASSERT(vp); MOZ_ASSERT(vp);
@ -318,8 +318,11 @@ struct InternalBarrierMethods<jsid>
}; };
// Base class of all barrier types. // Base class of all barrier types.
//
// This is marked non-memmovable since post barriers added by derived classes
// can add pointers to class instances to the store buffer.
template <typename T> template <typename T>
class BarrieredBase class MOZ_NON_MEMMOVABLE BarrieredBase
{ {
protected: protected:
// BarrieredBase is not directly instantiable. // BarrieredBase is not directly instantiable.
@ -369,7 +372,7 @@ class WriteBarrieredBase : public BarrieredBase<T>,
protected: protected:
void pre() { InternalBarrierMethods<T>::preBarrier(this->value); } void pre() { InternalBarrierMethods<T>::preBarrier(this->value); }
void post(const T& prev, const T& next) { MOZ_ALWAYS_INLINE void post(const T& prev, const T& next) {
InternalBarrierMethods<T>::postBarrier(&this->value, prev, next); InternalBarrierMethods<T>::postBarrier(&this->value, prev, next);
} }
}; };

View file

@ -2716,6 +2716,7 @@ void
js::gc::StoreBuffer::SlotsEdge::trace(TenuringTracer& mover) const js::gc::StoreBuffer::SlotsEdge::trace(TenuringTracer& mover) const
{ {
NativeObject* obj = object(); NativeObject* obj = object();
MOZ_ASSERT(IsCellPointerValid(obj));
// Beware JSObject::swap exchanging a native object for a non-native one. // Beware JSObject::swap exchanging a native object for a non-native one.
if (!obj->isNative()) if (!obj->isNative())
@ -2788,6 +2789,7 @@ js::gc::StoreBuffer::traceWholeCells(TenuringTracer& mover)
{ {
for (ArenaCellSet* cells = bufferWholeCell; cells; cells = cells->next) { for (ArenaCellSet* cells = bufferWholeCell; cells; cells = cells->next) {
Arena* arena = cells->arena; Arena* arena = cells->arena;
MOZ_ASSERT(IsCellPointerValid(arena));
MOZ_ASSERT(arena->bufferedCells() == cells); MOZ_ASSERT(arena->bufferedCells() == cells);
arena->bufferedCells() = &ArenaCellSet::Empty; arena->bufferedCells() = &ArenaCellSet::Empty;
@ -2817,6 +2819,7 @@ js::gc::StoreBuffer::CellPtrEdge::trace(TenuringTracer& mover) const
if (!*edge) if (!*edge)
return; return;
MOZ_ASSERT(IsCellPointerValid(*edge));
MOZ_ASSERT((*edge)->getTraceKind() == JS::TraceKind::Object); MOZ_ASSERT((*edge)->getTraceKind() == JS::TraceKind::Object);
mover.traverse(reinterpret_cast<JSObject**>(edge)); mover.traverse(reinterpret_cast<JSObject**>(edge));
} }

View file

@ -82,9 +82,12 @@ Zone::~Zone()
js_delete(jitZone_.ref()); js_delete(jitZone_.ref());
#ifdef DEBUG #ifdef DEBUG
// Avoid assertion destroying the weak map list if the embedding leaked GC things. // Avoid assertions failures warning that not everything has been destroyed
if (!rt->gc.shutdownCollectedEverything()) // if the embedding leaked GC things.
if (!rt->gc.shutdownCollectedEverything()) {
gcWeakMapList().clear(); gcWeakMapList().clear();
regExps.clear();
}
#endif #endif
} }

View file

@ -619,26 +619,71 @@ assertEq(e.call(), 1090);
let valueToConvert = 0; let valueToConvert = 0;
function ffi(n) { if (n == 1337) { return valueToConvert }; return 42; } function ffi(n) { if (n == 1337) { return valueToConvert }; return 42; }
// Baseline compile ffi. function sum(a, b, c) {
for (let i = baselineTrigger + 1; i --> 0;) if (a === 1337)
ffi(i); return valueToConvert;
return (a|0) + (b|0) + (c|0) | 0;
}
let imports = { a: { ffi }}; // Baseline compile ffis.
for (let i = baselineTrigger + 1; i --> 0;) {
ffi(i);
sum((i%2)?i:undefined,
(i%3)?i:undefined,
(i%4)?i:undefined);
}
let imports = {
a: {
ffi,
sum
}
};
i = wasmEvalText(`(module i = wasmEvalText(`(module
(import $ffi "a" "ffi" (param i32) (result i32)) (import $ffi "a" "ffi" (param i32) (result i32))
(func $foo (export "foo") (param i32) (result i32)
(import $missingOneArg "a" "sum" (param i32) (param i32) (result i32))
(import $missingTwoArgs "a" "sum" (param i32) (result i32))
(import $missingThreeArgs "a" "sum" (result i32))
(func (export "foo") (param i32) (result i32)
get_local 0 get_local 0
call $ffi) call $ffi
)
(func (export "missThree") (result i32)
call $missingThreeArgs
)
(func (export "missTwo") (param i32) (result i32)
get_local 0
call $missingTwoArgs
)
(func (export "missOne") (param i32) (param i32) (result i32)
get_local 0
get_local 1
call $missingOneArg
)
)`, imports).exports; )`, imports).exports;
// Enable the jit exit. // Enable the jit exit for each JS callee.
assertEq(i.foo(0), 42); assertEq(i.foo(0), 42);
// Test the jit exit. assertEq(i.missThree(), 0);
assertEq(i.missTwo(42), 42);
assertEq(i.missOne(13, 37), 50);
// Test the jit exit under normal conditions.
assertEq(i.foo(0), 42); assertEq(i.foo(0), 42);
assertEq(i.foo(1337), 0); assertEq(i.foo(1337), 0);
// Test the arguments rectifier.
assertEq(i.missThree(), 0);
assertEq(i.missTwo(-1), -1);
assertEq(i.missOne(23, 10), 33);
// Test OOL coercion. // Test OOL coercion.
valueToConvert = 2**31; valueToConvert = 2**31;
assertEq(i.foo(1337), -(2**31)); assertEq(i.foo(1337), -(2**31));
@ -649,5 +694,7 @@ assertEq(e.call(), 1090);
valueToConvert = { toString() { throw new Error('a FFI to believe in'); } } valueToConvert = { toString() { throw new Error('a FFI to believe in'); } }
assertErrorMessage(() => i.foo(1337), Error, "a FFI to believe in"); assertErrorMessage(() => i.foo(1337), Error, "a FFI to believe in");
})();
// Test the error path in the arguments rectifier.
assertErrorMessage(() => i.missTwo(1337), Error, "a FFI to believe in");
})();
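The missOne/missTwo/missThree exports call the imported JS sum with fewer arguments than its declared arity, which is exactly the case the JIT's arguments rectifier has to handle. In plain JS terms the expected results follow from ordinary undefined-padding (the valueToConvert branch in the real sum is ignored here):

    function sum(a, b, c) {
      // Missing arguments arrive as undefined; "|0" coerces them to 0.
      return (a|0) + (b|0) + (c|0) | 0;
    }
    sum();        // 0  - cf. assertEq(i.missThree(), 0)
    sum(42);      // 42 - cf. assertEq(i.missTwo(42), 42)
    sum(13, 37);  // 50 - cf. assertEq(i.missOne(13, 37), 50)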

View file

@ -331,9 +331,17 @@ for (let type of ['f32', 'f64']) {
var m = new Module(wasmTextToBinary(`(module var m = new Module(wasmTextToBinary(`(module
(import $ffi "a" "ffi" (param i32) (result i32)) (import $ffi "a" "ffi" (param i32) (result i32))
(func $foo (export "foo") (param i32) (result i32)
(import $missingOneArg "a" "sumTwo" (param i32) (result i32))
(func (export "foo") (param i32) (result i32)
get_local 0 get_local 0
call $ffi) call $ffi)
(func (export "id") (param i32) (result i32)
get_local 0
call $missingOneArg
)
)`)); )`));
var valueToConvert = 0; var valueToConvert = 0;
@ -343,45 +351,71 @@ for (let type of ['f32', 'f64']) {
return 42; return 42;
} }
// Baseline compile ffi. function sumTwo(a, b) {
for (var i = 20; i --> 0;) return (a|0)+(b|0)|0;
ffi(i); }
var imports = { a: { ffi }}; // Baseline compile ffi.
for (var i = 20; i --> 0;) {
ffi(i);
sumTwo(i-1, i+1);
}
var imports = {
a: {
ffi,
sumTwo
}
};
var i = new Instance(m, imports).exports; var i = new Instance(m, imports).exports;
// Enable the jit exit. // Enable the jit exit.
assertEq(i.foo(0), 42); assertEq(i.foo(0), 42);
assertEq(i.id(13), 13);
// Test normal conditions.
enableSingleStepProfiling(); enableSingleStepProfiling();
assertEq(i.foo(0), 42); assertEq(i.foo(0), 42);
assertEqStacks(disableSingleStepProfiling(), ["", ">", "1,>", "<,1,>", assertEqStacks(disableSingleStepProfiling(), ["", ">", "2,>", "<,2,>",
// Losing stack information while the JIT func prologue sets profiler // Losing stack information while the JIT func prologue sets profiler
// virtual FP. // virtual FP.
"", "",
// Callee time. // Callee time.
"<,1,>", "<,2,>",
// Losing stack information while we're exiting JIT func epilogue and // Losing stack information while we're exiting JIT func epilogue and
// recovering wasm FP. // recovering wasm FP.
"", "",
// Back into the jit exit (frame info has been recovered). // Back into the jit exit (frame info has been recovered).
"<,1,>", "<,2,>",
// Normal unwinding. // Normal unwinding.
"1,>", ">", ""]); "2,>", ">", ""]);
// Test rectifier frame.
enableSingleStepProfiling();
assertEq(i.id(100), 100);
assertEqStacks(disableSingleStepProfiling(), ["", ">", "3,>", "<,3,>",
// Rectifier frame time is spent here (lastProfilingFrame has not been
// set).
"",
"<,3,>",
// Rectifier frame unwinding time is spent here.
"",
"<,3,>",
"3,>", ">", ""]);
// Test OOL coercion path. // Test OOL coercion path.
valueToConvert = 2**31; valueToConvert = 2**31;
enableSingleStepProfiling(); enableSingleStepProfiling();
assertEq(i.foo(1337), -(2**31)); assertEq(i.foo(1337), -(2**31));
assertEqStacks(disableSingleStepProfiling(), ["", ">", "1,>", "<,1,>", "", "<,1,>", "", assertEqStacks(disableSingleStepProfiling(), ["", ">", "2,>", "<,2,>", "", "<,2,>", "",
// Back into the jit exit (frame info has been recovered). // Back into the jit exit (frame info has been recovered).
// Inline conversion fails, we skip to the OOL path, call from there // Inline conversion fails, we skip to the OOL path, call from there
// and get back to the jit exit. // and get back to the jit exit.
"<,1,>", "<,2,>",
// Normal unwinding. // Normal unwinding.
"1,>", ">", ""]); "2,>", ">", ""]);
disableGeckoProfiling(); disableGeckoProfiling();
setJitCompilerOption("baseline.warmup.trigger", prevOptions["baseline.warmup.trigger"]); setJitCompilerOption("baseline.warmup.trigger", prevOptions["baseline.warmup.trigger"]);

View file

@ -394,11 +394,13 @@ struct BaselineStackBuilder
BufferPointer<RectifierFrameLayout> priorFrame = BufferPointer<RectifierFrameLayout> priorFrame =
pointerAtStackOffset<RectifierFrameLayout>(priorOffset); pointerAtStackOffset<RectifierFrameLayout>(priorOffset);
FrameType priorType = priorFrame->prevType(); FrameType priorType = priorFrame->prevType();
MOZ_ASSERT(priorType == JitFrame_IonJS || priorType == JitFrame_BaselineStub); MOZ_ASSERT(priorType == JitFrame_WasmToJSJit ||
priorType == JitFrame_IonJS ||
priorType == JitFrame_BaselineStub);
// If the frame preceding the rectifier is an IonJS frame, then once again // If the frame preceding the rectifier is an IonJS or WasmToJSJit
// the frame pointer does not matter. // entry frame, then once again the frame pointer does not matter.
if (priorType == JitFrame_IonJS) if (priorType == JitFrame_IonJS || priorType == JitFrame_WasmToJSJit)
return nullptr; return nullptr;
// Otherwise, the frame preceding the rectifier is a BaselineStub frame. // Otherwise, the frame preceding the rectifier is a BaselineStub frame.

View file

@ -280,6 +280,15 @@ JitRuntime::initialize(JSContext* cx, AutoLockForExclusiveAccess& lock)
return false; return false;
} }
// The arguments rectifier has to use the same frame layout as the function
// frames it rectifies.
static_assert(mozilla::IsBaseOf<JitFrameLayout, RectifierFrameLayout>::value,
"a rectifier frame can be used with jit frame");
static_assert(mozilla::IsBaseOf<JitFrameLayout, WasmToJSJitFrameLayout>::value,
"wasm frames simply are jit frames");
static_assert(sizeof(JitFrameLayout) == sizeof(WasmToJSJitFrameLayout),
"thus a rectifier frame can be used with a wasm frame");
JitSpew(JitSpew_Codegen, "# Emitting sequential arguments rectifier"); JitSpew(JitSpew_Codegen, "# Emitting sequential arguments rectifier");
argumentsRectifier_ = generateArgumentsRectifier(cx, &argumentsRectifierReturnAddr_.writeRef()); argumentsRectifier_ = generateArgumentsRectifier(cx, &argumentsRectifierReturnAddr_.writeRef());
if (!argumentsRectifier_) if (!argumentsRectifier_)

View file

@ -643,6 +643,17 @@ JSJitProfilingFrameIterator::operator++()
moveToNextFrame(frame); moveToNextFrame(frame);
} }
void
JSJitProfilingFrameIterator::moveToWasmFrame(CommonFrameLayout* frame)
{
// No previous js jit frame, this is a transition frame, used to
// pass a wasm iterator the correct value of FP.
returnAddressToFp_ = nullptr;
fp_ = GetPreviousRawFrame<uint8_t*>(frame);
type_ = JitFrame_WasmToJSJit;
MOZ_ASSERT(!done());
}
void void
JSJitProfilingFrameIterator::moveToNextFrame(CommonFrameLayout* frame) JSJitProfilingFrameIterator::moveToNextFrame(CommonFrameLayout* frame)
{ {
@ -666,6 +677,8 @@ JSJitProfilingFrameIterator::moveToNextFrame(CommonFrameLayout* frame)
* | ^--- Ion * | ^--- Ion
* | | * | |
* | ^--- Baseline Stub <---- Baseline * | ^--- Baseline Stub <---- Baseline
* | |
* | ^--- WasmToJSJit <--- (other wasm frames)
* | * |
* ^--- Entry Frame (From C++) * ^--- Entry Frame (From C++)
* Exit Frame (From previous JitActivation) * Exit Frame (From previous JitActivation)
@ -726,6 +739,11 @@ JSJitProfilingFrameIterator::moveToNextFrame(CommonFrameLayout* frame)
return; return;
} }
if (rectPrevType == JitFrame_WasmToJSJit) {
moveToWasmFrame(rectFrame);
return;
}
MOZ_CRASH("Bad frame type prior to rectifier frame."); MOZ_CRASH("Bad frame type prior to rectifier frame.");
} }
@ -742,11 +760,7 @@ JSJitProfilingFrameIterator::moveToNextFrame(CommonFrameLayout* frame)
} }
if (prevType == JitFrame_WasmToJSJit) { if (prevType == JitFrame_WasmToJSJit) {
// No previous js jit frame, this is a transition frame, used to pass moveToWasmFrame(frame);
// a wasm iterator the correct value of FP.
returnAddressToFp_ = nullptr;
fp_ = GetPreviousRawFrame<uint8_t*>(frame);
type_ = JitFrame_WasmToJSJit;
return; return;
} }

View file

@ -297,6 +297,7 @@ class JSJitProfilingFrameIterator
bool forLastCallSite); bool forLastCallSite);
void fixBaselineReturnAddress(); void fixBaselineReturnAddress();
void moveToWasmFrame(CommonFrameLayout* frame);
void moveToNextFrame(CommonFrameLayout* frame); void moveToNextFrame(CommonFrameLayout* frame);
public: public:

View file

@ -445,11 +445,11 @@ class RectifierFrameLayout : public JitFrameLayout
} }
}; };
class WasmFrameLayout : public JitFrameLayout class WasmToJSJitFrameLayout : public JitFrameLayout
{ {
public: public:
static inline size_t Size() { static inline size_t Size() {
return sizeof(WasmFrameLayout); return sizeof(WasmToJSJitFrameLayout);
} }
}; };

View file

@ -1654,6 +1654,22 @@ MacroAssembler::generateBailoutTail(Register scratch, Register bailoutInfo)
} }
} }
void
MacroAssembler::assertRectifierFrameParentType(Register frameType)
{
#ifdef DEBUG
{
// Check the possible previous frame types here.
Label checkOk;
branch32(Assembler::Equal, frameType, Imm32(JitFrame_IonJS), &checkOk);
branch32(Assembler::Equal, frameType, Imm32(JitFrame_BaselineStub), &checkOk);
branch32(Assembler::Equal, frameType, Imm32(JitFrame_WasmToJSJit), &checkOk);
assumeUnreachable("Unrecognized frame type preceding RectifierFrame.");
bind(&checkOk);
}
#endif
}
void void
MacroAssembler::loadBaselineOrIonRaw(Register script, Register dest, Label* failure) MacroAssembler::loadBaselineOrIonRaw(Register script, Register dest, Label* failure)
{ {

View file

@ -1837,6 +1837,8 @@ class MacroAssembler : public MacroAssemblerSpecific
// Generates code used to complete a bailout. // Generates code used to complete a bailout.
void generateBailoutTail(Register scratch, Register bailoutInfo); void generateBailoutTail(Register scratch, Register bailoutInfo);
void assertRectifierFrameParentType(Register frameType);
public: public:
#ifndef JS_CODEGEN_ARM64 #ifndef JS_CODEGEN_ARM64
// StackPointer manipulation functions. // StackPointer manipulation functions.

View file

@ -1287,10 +1287,10 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
// //
// JitFrame_Rectifier // JitFrame_Rectifier
// //
// The rectifier frame can be preceded by either an IonJS or a // The rectifier frame can be preceded by either an IonJS, a WasmToJSJit or
// BaselineStub frame. // a BaselineStub frame.
// //
// Stack layout if caller of rectifier was Ion: // Stack layout if caller of rectifier was Ion or WasmToJSJit:
// //
// Ion-Descriptor // Ion-Descriptor
// Ion-ReturnAddr // Ion-ReturnAddr
@ -1335,10 +1335,11 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
// and |scratch2| points to Rectifier frame // and |scratch2| points to Rectifier frame
// and |scratch3| contains Rect-Descriptor.Type // and |scratch3| contains Rect-Descriptor.Type
masm.assertRectifierFrameParentType(scratch3);
// Check for either Ion or BaselineStub frame. // Check for either Ion or BaselineStub frame.
Label handle_Rectifier_BaselineStub; Label notIonFrame;
masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS), masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS), &notIonFrame);
&handle_Rectifier_BaselineStub);
// Handle Rectifier <- IonJS // Handle Rectifier <- IonJS
// scratch3 := RectFrame[ReturnAddr] // scratch3 := RectFrame[ReturnAddr]
@ -1351,16 +1352,13 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
masm.storePtr(scratch3, lastProfilingFrame); masm.storePtr(scratch3, lastProfilingFrame);
masm.ret(); masm.ret();
masm.bind(&notIonFrame);
// Check for either BaselineStub or WasmToJSJit: since WasmToJSJit is
// just an entry, jump there if we see it.
masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_BaselineStub), &handle_Entry);
// Handle Rectifier <- BaselineStub <- BaselineJS // Handle Rectifier <- BaselineStub <- BaselineJS
masm.bind(&handle_Rectifier_BaselineStub);
#ifdef DEBUG
{
Label checkOk;
masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
masm.bind(&checkOk);
}
#endif
masm.ma_add(scratch2, scratch1, scratch3); masm.ma_add(scratch2, scratch1, scratch3);
Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() + Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() +
BaselineStubFrameLayout::offsetOfReturnAddress()); BaselineStubFrameLayout::offsetOfReturnAddress());
@ -1424,7 +1422,7 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
} }
// //
// JitFrame_CppToJSJit / JitFrame_WasmJSToJit // JitFrame_CppToJSJit / JitFrame_WasmToJSJit
// //
// If at an entry frame, store null into both fields. // If at an entry frame, store null into both fields.
// A fast-path wasm->jit transition frame is an entry frame from the point // A fast-path wasm->jit transition frame is an entry frame from the point

View file

@ -1128,10 +1128,11 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
// and |scratch2| points to Rectifier frame // and |scratch2| points to Rectifier frame
// and |scratch3| contains Rect-Descriptor.Type // and |scratch3| contains Rect-Descriptor.Type
masm.assertRectifierFrameParentType(scratch3);
// Check for either Ion or BaselineStub frame. // Check for either Ion or BaselineStub frame.
Label handle_Rectifier_BaselineStub; Label notIonFrame;
masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS), masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS), &notIonFrame);
&handle_Rectifier_BaselineStub);
// Handle Rectifier <- IonJS // Handle Rectifier <- IonJS
// scratch3 := RectFrame[ReturnAddr] // scratch3 := RectFrame[ReturnAddr]
@ -1144,16 +1145,13 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
masm.storePtr(scratch3, lastProfilingFrame); masm.storePtr(scratch3, lastProfilingFrame);
masm.ret(); masm.ret();
masm.bind(&notIonFrame);
// Check for either BaselineStub or WasmToJSJit: since WasmToJSJit is
// just an entry, jump there if we see it.
masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_BaselineStub), &handle_Entry);
// Handle Rectifier <- BaselineStub <- BaselineJS // Handle Rectifier <- BaselineStub <- BaselineJS
masm.bind(&handle_Rectifier_BaselineStub);
#ifdef DEBUG
{
Label checkOk;
masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
masm.bind(&checkOk);
}
#endif
masm.addPtr(scratch2, scratch1, scratch3); masm.addPtr(scratch2, scratch1, scratch3);
Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() + Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() +
BaselineStubFrameLayout::offsetOfReturnAddress()); BaselineStubFrameLayout::offsetOfReturnAddress());
@ -1218,7 +1216,7 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
} }
// //
// JitFrame_CppToJSJit / JitFrame_WasmJSToJit // JitFrame_CppToJSJit / JitFrame_WasmToJSJit
// //
// If at an entry frame, store null into both fields. // If at an entry frame, store null into both fields.
// A fast-path wasm->jit transition frame is an entry frame from the point // A fast-path wasm->jit transition frame is an entry frame from the point

View file

@ -13,6 +13,7 @@
#define HWCAP_MIPS (1 << 28) #define HWCAP_MIPS (1 << 28)
#define HWCAP_LOONGSON (1 << 27) #define HWCAP_LOONGSON (1 << 27)
#define HWCAP_R2 (1 << 26)
#define HWCAP_FPU (1 << 0) #define HWCAP_FPU (1 << 0)
namespace js { namespace js {
@ -25,6 +26,7 @@ get_mips_flags()
#if defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64) #if defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
flags |= HWCAP_FPU; flags |= HWCAP_FPU;
flags |= HWCAP_R2;
#else #else
# ifdef __linux__ # ifdef __linux__
FILE* fp = fopen("/proc/cpuinfo", "r"); FILE* fp = fopen("/proc/cpuinfo", "r");
@ -39,6 +41,8 @@ get_mips_flags()
flags |= HWCAP_FPU; flags |= HWCAP_FPU;
if (strstr(buf, "Loongson")) if (strstr(buf, "Loongson"))
flags |= HWCAP_LOONGSON; flags |= HWCAP_LOONGSON;
if (strstr(buf, "mips32r2") || strstr(buf, "mips64r2"))
flags |= HWCAP_R2;
# endif # endif
#endif // JS_SIMULATOR_MIPS32 || JS_SIMULATOR_MIPS64 #endif // JS_SIMULATOR_MIPS32 || JS_SIMULATOR_MIPS64
return flags; return flags;
@ -54,11 +58,17 @@ static bool check_loongson()
return mips_private::Flags & HWCAP_LOONGSON; return mips_private::Flags & HWCAP_LOONGSON;
} }
static bool check_r2()
{
return mips_private::Flags & HWCAP_R2;
}
namespace mips_private { namespace mips_private {
// Cache a local copy so we only have to read /proc/cpuinfo once. // Cache a local copy so we only have to read /proc/cpuinfo once.
uint32_t Flags = get_mips_flags(); uint32_t Flags = get_mips_flags();
bool hasFPU = check_fpu();; bool hasFPU = check_fpu();;
bool isLoongson = check_loongson(); bool isLoongson = check_loongson();
bool hasR2 = check_r2();
} }
Registers::Code Registers::Code
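
The /proc/cpuinfo probe added above can be exercised on its own. A minimal standalone sketch of the same idea (Linux-only, bit values copied from the #defines in this hunk, error handling reduced to the bare minimum):

#include <cstdint>
#include <cstdio>
#include <cstring>

static const uint32_t HWCAP_LOONGSON = 1u << 27;
static const uint32_t HWCAP_R2       = 1u << 26;

static uint32_t ProbeMipsFlags() {
    uint32_t flags = 0;
    if (FILE* fp = fopen("/proc/cpuinfo", "r")) {
        char buf[1024] = {};
        fread(buf, 1, sizeof(buf) - 1, fp);
        fclose(fp);
        if (strstr(buf, "Loongson"))
            flags |= HWCAP_LOONGSON;
        if (strstr(buf, "mips32r2") || strstr(buf, "mips64r2"))
            flags |= HWCAP_R2;
    }
    return flags;
}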

View file

@ -314,11 +314,13 @@ namespace mips_private {
extern uint32_t Flags; extern uint32_t Flags;
extern bool hasFPU; extern bool hasFPU;
extern bool isLoongson; extern bool isLoongson;
extern bool hasR2;
} }
inline uint32_t GetMIPSFlags() { return mips_private::Flags; } inline uint32_t GetMIPSFlags() { return mips_private::Flags; }
inline bool hasFPU() { return mips_private::hasFPU; } inline bool hasFPU() { return mips_private::hasFPU; }
inline bool isLoongson() { return mips_private::isLoongson; } inline bool isLoongson() { return mips_private::isLoongson; }
inline bool hasR2() { return mips_private::hasR2; }
// MIPS doesn't have double registers that can NOT be treated as float32. // MIPS doesn't have double registers that can NOT be treated as float32.
inline bool inline bool

View file

@ -714,6 +714,7 @@ AssemblerMIPSShared::as_rotr(Register rd, Register rt, uint16_t sa)
{ {
MOZ_ASSERT(sa < 32); MOZ_ASSERT(sa < 32);
spew("rotr %3s,%3s, 0x%x", rd.name(), rt.name(), sa); spew("rotr %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_special, rs_one, rt, rd, sa, ff_srl).encode()); return writeInst(InstReg(op_special, rs_one, rt, rd, sa, ff_srl).encode());
} }
@ -722,6 +723,7 @@ AssemblerMIPSShared::as_drotr(Register rd, Register rt, uint16_t sa)
{ {
MOZ_ASSERT(sa < 32); MOZ_ASSERT(sa < 32);
spew("drotr %3s,%3s, 0x%x", rd.name(), rt.name(), sa); spew("drotr %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_special, rs_one, rt, rd, sa, ff_dsrl).encode()); return writeInst(InstReg(op_special, rs_one, rt, rd, sa, ff_dsrl).encode());
} }
@ -730,6 +732,7 @@ AssemblerMIPSShared::as_drotr32(Register rd, Register rt, uint16_t sa)
{ {
MOZ_ASSERT(31 < sa && sa < 64); MOZ_ASSERT(31 < sa && sa < 64);
spew("drotr32%3s,%3s, 0x%x", rd.name(), rt.name(), sa - 32); spew("drotr32%3s,%3s, 0x%x", rd.name(), rt.name(), sa - 32);
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_special, rs_one, rt, rd, sa - 32, ff_dsrl32).encode()); return writeInst(InstReg(op_special, rs_one, rt, rd, sa - 32, ff_dsrl32).encode());
} }
@ -737,6 +740,7 @@ BufferOffset
AssemblerMIPSShared::as_rotrv(Register rd, Register rt, Register rs) AssemblerMIPSShared::as_rotrv(Register rd, Register rt, Register rs)
{ {
spew("rotrv %3s,%3s,%3s", rd.name(), rt.name(), rs.name()); spew("rotrv %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_special, rs, rt, rd, 1, ff_srlv).encode()); return writeInst(InstReg(op_special, rs, rt, rd, 1, ff_srlv).encode());
} }
@ -744,6 +748,7 @@ BufferOffset
AssemblerMIPSShared::as_drotrv(Register rd, Register rt, Register rs) AssemblerMIPSShared::as_drotrv(Register rd, Register rt, Register rs)
{ {
spew("drotrv %3s,%3s,%3s", rd.name(), rt.name(), rs.name()); spew("drotrv %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_special, rs, rt, rd, 1, ff_dsrlv).encode()); return writeInst(InstReg(op_special, rs, rt, rd, 1, ff_dsrlv).encode());
} }
@ -1076,6 +1081,7 @@ AssemblerMIPSShared::as_ins(Register rt, Register rs, uint16_t pos, uint16_t siz
Register rd; Register rd;
rd = Register::FromCode(pos + size - 1); rd = Register::FromCode(pos + size - 1);
spew("ins %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size); spew("ins %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ins).encode()); return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ins).encode());
} }
@ -1086,6 +1092,7 @@ AssemblerMIPSShared::as_dins(Register rt, Register rs, uint16_t pos, uint16_t si
Register rd; Register rd;
rd = Register::FromCode(pos + size - 1); rd = Register::FromCode(pos + size - 1);
spew("dins %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size); spew("dins %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dins).encode()); return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dins).encode());
} }
@ -1096,6 +1103,7 @@ AssemblerMIPSShared::as_dinsm(Register rt, Register rs, uint16_t pos, uint16_t s
Register rd; Register rd;
rd = Register::FromCode(pos + size - 1 - 32); rd = Register::FromCode(pos + size - 1 - 32);
spew("dinsm %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size); spew("dinsm %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dinsm).encode()); return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dinsm).encode());
} }
@ -1106,6 +1114,7 @@ AssemblerMIPSShared::as_dinsu(Register rt, Register rs, uint16_t pos, uint16_t s
Register rd; Register rd;
rd = Register::FromCode(pos + size - 1 - 32); rd = Register::FromCode(pos + size - 1 - 32);
spew("dinsu %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size); spew("dinsu %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_special3, rs, rt, rd, pos - 32, ff_dinsu).encode()); return writeInst(InstReg(op_special3, rs, rt, rd, pos - 32, ff_dinsu).encode());
} }
@ -1116,6 +1125,7 @@ AssemblerMIPSShared::as_ext(Register rt, Register rs, uint16_t pos, uint16_t siz
Register rd; Register rd;
rd = Register::FromCode(size - 1); rd = Register::FromCode(size - 1);
spew("ext %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size); spew("ext %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ext).encode()); return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ext).encode());
} }
@ -1124,6 +1134,7 @@ BufferOffset
AssemblerMIPSShared::as_seb(Register rd, Register rt) AssemblerMIPSShared::as_seb(Register rd, Register rt)
{ {
spew("seb %3s,%3s", rd.name(), rt.name()); spew("seb %3s,%3s", rd.name(), rt.name());
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_special3, zero, rt, rd, 16, ff_bshfl).encode()); return writeInst(InstReg(op_special3, zero, rt, rd, 16, ff_bshfl).encode());
} }
@ -1131,6 +1142,7 @@ BufferOffset
AssemblerMIPSShared::as_seh(Register rd, Register rt) AssemblerMIPSShared::as_seh(Register rd, Register rt)
{ {
spew("seh %3s,%3s", rd.name(), rt.name()); spew("seh %3s,%3s", rd.name(), rt.name());
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_special3, zero, rt, rd, 24, ff_bshfl).encode()); return writeInst(InstReg(op_special3, zero, rt, rd, 24, ff_bshfl).encode());
} }
@ -1141,6 +1153,7 @@ AssemblerMIPSShared::as_dext(Register rt, Register rs, uint16_t pos, uint16_t si
Register rd; Register rd;
rd = Register::FromCode(size - 1); rd = Register::FromCode(size - 1);
spew("dext %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size); spew("dext %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dext).encode()); return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dext).encode());
} }
@ -1151,6 +1164,7 @@ AssemblerMIPSShared::as_dextm(Register rt, Register rs, uint16_t pos, uint16_t s
Register rd; Register rd;
rd = Register::FromCode(size - 1 - 32); rd = Register::FromCode(size - 1 - 32);
spew("dextm %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size); spew("dextm %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dextm).encode()); return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dextm).encode());
} }
@ -1161,6 +1175,7 @@ AssemblerMIPSShared::as_dextu(Register rt, Register rs, uint16_t pos, uint16_t s
Register rd; Register rd;
rd = Register::FromCode(size - 1); rd = Register::FromCode(size - 1);
spew("dextu %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size); spew("dextu %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_special3, rs, rt, rd, pos - 32, ff_dextu).encode()); return writeInst(InstReg(op_special3, rs, rt, rd, pos - 32, ff_dextu).encode());
} }
@ -1412,6 +1427,7 @@ BufferOffset
AssemblerMIPSShared::as_truncls(FloatRegister fd, FloatRegister fs) AssemblerMIPSShared::as_truncls(FloatRegister fd, FloatRegister fs)
{ {
spew("trunc.l.s%3s,%3s", fd.name(), fs.name()); spew("trunc.l.s%3s,%3s", fd.name(), fs.name());
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_trunc_l_fmt).encode()); return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_trunc_l_fmt).encode());
} }
@ -1447,6 +1463,7 @@ BufferOffset
AssemblerMIPSShared::as_truncld(FloatRegister fd, FloatRegister fs) AssemblerMIPSShared::as_truncld(FloatRegister fd, FloatRegister fs)
{ {
spew("trunc.l.d%3s,%3s", fd.name(), fs.name()); spew("trunc.l.d%3s,%3s", fd.name(), fs.name());
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_trunc_l_fmt).encode()); return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_trunc_l_fmt).encode());
} }
@ -1454,6 +1471,7 @@ BufferOffset
AssemblerMIPSShared::as_cvtdl(FloatRegister fd, FloatRegister fs) AssemblerMIPSShared::as_cvtdl(FloatRegister fd, FloatRegister fs)
{ {
spew("cvt.d.l%3s,%3s", fd.name(), fs.name()); spew("cvt.d.l%3s,%3s", fd.name(), fs.name());
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_cop1, rs_l, zero, fs, fd, ff_cvt_d_fmt).encode()); return writeInst(InstReg(op_cop1, rs_l, zero, fs, fd, ff_cvt_d_fmt).encode());
} }
@ -1482,6 +1500,7 @@ BufferOffset
AssemblerMIPSShared::as_cvtsl(FloatRegister fd, FloatRegister fs) AssemblerMIPSShared::as_cvtsl(FloatRegister fd, FloatRegister fs)
{ {
spew("cvt.s.l%3s,%3s", fd.name(), fs.name()); spew("cvt.s.l%3s,%3s", fd.name(), fs.name());
MOZ_ASSERT(hasR2());
return writeInst(InstReg(op_cop1, rs_l, zero, fs, fd, ff_cvt_s_fmt).encode()); return writeInst(InstReg(op_cop1, rs_l, zero, fs, fd, ff_cvt_s_fmt).encode());
} }

View file

@ -1580,7 +1580,7 @@ CodeGeneratorMIPSShared::visitCopySignF(LCopySignF* ins)
masm.moveFromFloat32(rhs, rhsi); masm.moveFromFloat32(rhs, rhsi);
// Combine. // Combine.
masm.as_ins(rhsi, lhsi, 0, 31); masm.ma_ins(rhsi, lhsi, 0, 31);
masm.moveToFloat32(rhsi, output); masm.moveToFloat32(rhsi, output);
} }
@ -1600,7 +1600,7 @@ CodeGeneratorMIPSShared::visitCopySignD(LCopySignD* ins)
masm.moveFromDoubleHi(rhs, rhsi); masm.moveFromDoubleHi(rhs, rhsi);
// Combine. // Combine.
masm.as_ins(rhsi, lhsi, 0, 31); masm.ma_ins(rhsi, lhsi, 0, 31);
masm.moveToDoubleHi(rhsi, output); masm.moveToDoubleHi(rhsi, output);
} }
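
Switching from as_ins to ma_ins above does not change what CopySign computes: the combined value still takes its low 31 bits from the lhs and its sign bit from the rhs. The same combination in plain C++, as an illustrative helper rather than anything in the patch:

#include <cstdint>
#include <cstring>

// copysign(lhs, rhs) for float, done the way the MIPS lowering does it:
// keep rhs's sign bit, take the remaining 31 bits from lhs.
static float CopySignViaBitInsert(float lhs, float rhs) {
    uint32_t lhsBits, rhsBits;
    std::memcpy(&lhsBits, &lhs, sizeof lhsBits);
    std::memcpy(&rhsBits, &rhs, sizeof rhsBits);
    uint32_t combined = (rhsBits & 0x80000000u) | (lhsBits & 0x7fffffffu);
    float out;
    std::memcpy(&out, &combined, sizeof out);
    return out;
}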

View file

@ -29,13 +29,13 @@ MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest)
void void
MacroAssembler::move8SignExtend(Register src, Register dest) MacroAssembler::move8SignExtend(Register src, Register dest)
{ {
as_seb(dest, src); ma_seb(dest, src);
} }
void void
MacroAssembler::move16SignExtend(Register src, Register dest) MacroAssembler::move16SignExtend(Register src, Register dest)
{ {
as_seh(dest, src); ma_seh(dest, src);
} }
// =============================================================== // ===============================================================

View file

@ -71,13 +71,27 @@ MacroAssemblerMIPSShared::ma_sra(Register rd, Register rt, Imm32 shift)
void void
MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt, Imm32 shift) MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt, Imm32 shift)
{ {
if (hasR2()) {
as_rotr(rd, rt, shift.value % 32); as_rotr(rd, rt, shift.value % 32);
} else {
ScratchRegisterScope scratch(asMasm());
as_srl(scratch, rt, shift.value % 32);
as_sll(rd, rt, (32 - (shift.value % 32)) % 32);
as_or(rd, rd, scratch);
}
} }
void void
MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt, Imm32 shift) MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt, Imm32 shift)
{ {
as_rotr(rd, rt, 32 - (shift.value % 32)); if (hasR2()) {
as_rotr(rd, rt, (32 - (shift.value % 32)) % 32);
} else {
ScratchRegisterScope scratch(asMasm());
as_srl(scratch, rt, (32 - (shift.value % 32)) % 32);
as_sll(rd, rt, shift.value % 32);
as_or(rd, rd, scratch);
}
} }
void void
@ -101,14 +115,29 @@ MacroAssemblerMIPSShared::ma_sra(Register rd, Register rt, Register shift)
void void
MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt, Register shift) MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt, Register shift)
{ {
if (hasR2()) {
as_rotrv(rd, rt, shift); as_rotrv(rd, rt, shift);
} else {
ScratchRegisterScope scratch(asMasm());
ma_negu(scratch, shift);
as_sllv(scratch, rt, scratch);
as_srlv(rd, rt, shift);
as_or(rd, rd, scratch);
}
} }
void void
MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt, Register shift) MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt, Register shift)
{ {
ma_negu(ScratchRegister, shift); ScratchRegisterScope scratch(asMasm());
as_rotrv(rd, rt, ScratchRegister); ma_negu(scratch, shift);
if (hasR2()) {
as_rotrv(rd, rt, scratch);
} else {
as_srlv(rd, rt, scratch);
as_sllv(scratch, rt, shift);
as_or(rd, rd, scratch);
}
} }
void void
@ -123,6 +152,69 @@ MacroAssemblerMIPSShared::ma_not(Register rd, Register rs)
as_nor(rd, rs, zero); as_nor(rd, rs, zero);
} }
// Bit extract/insert
void
MacroAssemblerMIPSShared::ma_ext(Register rt, Register rs, uint16_t pos, uint16_t size) {
MOZ_ASSERT(pos < 32);
MOZ_ASSERT(pos + size < 33);
if (hasR2()) {
as_ext(rt, rs, pos, size);
} else {
int shift_left = 32 - (pos + size);
as_sll(rt, rs, shift_left);
int shift_right = 32 - size;
if (shift_right > 0) {
as_srl(rt, rt, shift_right);
}
}
}
void
MacroAssemblerMIPSShared::ma_ins(Register rt, Register rs, uint16_t pos, uint16_t size) {
MOZ_ASSERT(pos < 32);
MOZ_ASSERT(pos + size <= 32);
MOZ_ASSERT(size != 0);
if (hasR2()) {
as_ins(rt, rs, pos, size);
} else {
ScratchRegisterScope scratch(asMasm());
SecondScratchRegisterScope scratch2(asMasm());
ma_subu(scratch, zero, Imm32(1));
as_srl(scratch, scratch, 32 - size);
as_and(scratch2, rs, scratch);
as_sll(scratch2, scratch2, pos);
as_sll(scratch, scratch, pos);
as_nor(scratch, scratch, zero);
as_and(scratch, rt, scratch);
as_or(rt, scratch2, scratch);
}
}
// Sign extend
void
MacroAssemblerMIPSShared::ma_seb(Register rd, Register rt)
{
if (hasR2()) {
as_seb(rd, rt);
} else {
as_sll(rd, rt, 24);
as_sra(rd, rd, 24);
}
}
void
MacroAssemblerMIPSShared::ma_seh(Register rd, Register rt)
{
if (hasR2()) {
as_seh(rd, rt);
} else {
as_sll(rd, rt, 16);
as_sra(rd, rd, 16);
}
}
// And. // And.
void void
MacroAssemblerMIPSShared::ma_and(Register rd, Register rs) MacroAssemblerMIPSShared::ma_and(Register rd, Register rs)
@ -484,7 +576,7 @@ MacroAssemblerMIPSShared::ma_load_unaligned(Register dest, const BaseIndex& src,
as_lbu(temp, base, hiOffset); as_lbu(temp, base, hiOffset);
else else
as_lb(temp, base, hiOffset); as_lb(temp, base, hiOffset);
as_ins(dest, temp, 8, 24); ma_ins(dest, temp, 8, 24);
break; break;
case SizeWord: case SizeWord:
as_lwl(dest, base, hiOffset); as_lwl(dest, base, hiOffset);
@ -627,7 +719,7 @@ MacroAssemblerMIPSShared::ma_store_unaligned(Register data, const BaseIndex& des
switch (size) { switch (size) {
case SizeHalfWord: case SizeHalfWord:
as_sb(data, base, lowOffset); as_sb(data, base, lowOffset);
as_ext(temp, data, 8, 8); ma_ext(temp, data, 8, 8);
as_sb(temp, base, hiOffset); as_sb(temp, base, hiOffset);
break; break;
case SizeWord: case SizeWord:
@ -1243,10 +1335,10 @@ MacroAssemblerMIPSShared::atomicFetchOpMIPSr2(int nbytes, bool signExtend, Atomi
if (signExtend) { if (signExtend) {
switch (nbytes) { switch (nbytes) {
case 1: case 1:
as_seb(output, output); ma_seb(output, output);
break; break;
case 2: case 2:
as_seh(output, output); ma_seh(output, output);
break; break;
case 4: case 4:
break; break;
@ -1418,10 +1510,10 @@ MacroAssemblerMIPSShared::compareExchangeMIPSr2(int nbytes, bool signExtend, con
if (signExtend) { if (signExtend) {
switch (nbytes) { switch (nbytes) {
case 1: case 1:
as_seb(output, output); ma_seb(output, output);
break; break;
case 2: case 2:
as_seh(output, output); ma_seh(output, output);
break; break;
case 4: case 4:
break; break;
@ -1769,7 +1861,7 @@ MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input, Register output,
as_truncwd(ScratchFloat32Reg, input); as_truncwd(ScratchFloat32Reg, input);
as_cfc1(ScratchRegister, Assembler::FCSR); as_cfc1(ScratchRegister, Assembler::FCSR);
moveFromFloat32(ScratchFloat32Reg, output); moveFromFloat32(ScratchFloat32Reg, output);
as_ext(ScratchRegister, ScratchRegister, 6, 1); ma_ext(ScratchRegister, ScratchRegister, 6, 1);
ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual); ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
} }
@ -1780,7 +1872,7 @@ MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input, Register output,
as_truncws(ScratchFloat32Reg, input); as_truncws(ScratchFloat32Reg, input);
as_cfc1(ScratchRegister, Assembler::FCSR); as_cfc1(ScratchRegister, Assembler::FCSR);
moveFromFloat32(ScratchFloat32Reg, output); moveFromFloat32(ScratchFloat32Reg, output);
as_ext(ScratchRegister, ScratchRegister, 6, 1); ma_ext(ScratchRegister, ScratchRegister, 6, 1);
ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual); ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
} }
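
The pre-R2 fallbacks added in this file all rely on the same handful of shift-and-mask identities. A compact standalone C++ version of those identities for 32-bit values (helper names are mine, not the assembler's):

#include <cassert>
#include <cstdint>

// rotr: rotate right by n -- what as_rotr does in a single MIPS32R2 instruction.
static uint32_t RotateRight32(uint32_t x, uint32_t n) {
    n %= 32;
    return n == 0 ? x : (x >> n) | (x << (32 - n));
}

// ext: extract |size| bits starting at |pos| -- shift left to drop the high bits,
// then shift right logically to drop the low bits, as in ma_ext's fallback.
static uint32_t ExtractBits32(uint32_t x, unsigned pos, unsigned size) {
    assert(pos < 32 && size != 0 && pos + size <= 32);
    x <<= 32 - (pos + size);
    return size == 32 ? x : x >> (32 - size);
}

// ins: insert the low |size| bits of |src| into |dst| at |pos|, preserving the
// rest of |dst| -- the mask construction ma_ins performs with scratch registers.
static uint32_t InsertBits32(uint32_t dst, uint32_t src, unsigned pos, unsigned size) {
    assert(pos < 32 && size != 0 && pos + size <= 32);
    uint32_t mask = (size == 32 ? 0xffffffffu : ((1u << size) - 1)) << pos;
    return (dst & ~mask) | ((src << pos) & mask);
}

// seb/seh: sign-extend a byte/halfword by shifting it to the top and back down.
static int32_t SignExtend8(uint32_t x)  { return int32_t(x << 24) >> 24; }
static int32_t SignExtend16(uint32_t x) { return int32_t(x << 16) >> 16; }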

View file

@ -85,6 +85,14 @@ class MacroAssemblerMIPSShared : public Assembler
void ma_not(Register rd, Register rs); void ma_not(Register rd, Register rs);
// Bit extract/insert
void ma_ext(Register rt, Register rs, uint16_t pos, uint16_t size);
void ma_ins(Register rt, Register rs, uint16_t pos, uint16_t size);
// Sign extend
void ma_seb(Register rd, Register rt);
void ma_seh(Register rd, Register rt);
// and // and
void ma_and(Register rd, Register rs); void ma_and(Register rd, Register rs);
void ma_and(Register rd, Imm32 imm); void ma_and(Register rd, Imm32 imm);

View file

@ -427,21 +427,21 @@ JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
MacroAssembler masm(cx); MacroAssembler masm(cx);
masm.pushReturnAddress(); masm.pushReturnAddress();
// ArgumentsRectifierReg contains the |nargs| pushed onto the current
// frame. Including |this|, there are (|nargs| + 1) arguments to copy.
MOZ_ASSERT(ArgumentsRectifierReg == s3);
Register numActArgsReg = t6; Register numActArgsReg = t6;
Register calleeTokenReg = t7; Register calleeTokenReg = t7;
Register numArgsReg = t5; Register numArgsReg = t5;
// Copy number of actual arguments into numActArgsReg // Load the number of actual arguments into numActArgsReg
masm.loadPtr(Address(StackPointer, RectifierFrameLayout::offsetOfNumActualArgs()), masm.loadPtr(Address(StackPointer, RectifierFrameLayout::offsetOfNumActualArgs()),
numActArgsReg); numActArgsReg);
// Load the number of |undefined|s to push into t1. // Load the number of |undefined|s to push into t1.
masm.loadPtr(Address(StackPointer, RectifierFrameLayout::offsetOfCalleeToken()), masm.loadPtr(Address(StackPointer, RectifierFrameLayout::offsetOfCalleeToken()),
calleeTokenReg); calleeTokenReg);
// Copy the number of actual arguments into s3.
masm.mov(numActArgsReg, s3);
masm.mov(calleeTokenReg, numArgsReg); masm.mov(calleeTokenReg, numArgsReg);
masm.andPtr(Imm32(CalleeTokenMask), numArgsReg); masm.andPtr(Imm32(CalleeTokenMask), numArgsReg);
masm.load16ZeroExtend(Address(numArgsReg, JSFunction::offsetOfNargs()), numArgsReg); masm.load16ZeroExtend(Address(numArgsReg, JSFunction::offsetOfNargs()), numArgsReg);
@ -1266,10 +1266,10 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
// //
// JitFrame_Rectifier // JitFrame_Rectifier
// //
// The rectifier frame can be preceded by either an IonJS or a // The rectifier frame can be preceded by either an IonJS, a WasmToJSJit or
// BaselineStub frame. // a BaselineStub frame.
// //
// Stack layout if caller of rectifier was Ion: // Stack layout if caller of rectifier was Ion or WasmToJSJit:
// //
// Ion-Descriptor // Ion-Descriptor
// Ion-ReturnAddr // Ion-ReturnAddr
@ -1314,10 +1314,11 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
// and |scratch2| points to Rectifier frame // and |scratch2| points to Rectifier frame
// and |scratch3| contains Rect-Descriptor.Type // and |scratch3| contains Rect-Descriptor.Type
masm.assertRectifierFrameParentType(scratch3);
// Check for either Ion or BaselineStub frame. // Check for either Ion or BaselineStub frame.
Label handle_Rectifier_BaselineStub; Label notIonFrame;
masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS), masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS), &notIonFrame);
&handle_Rectifier_BaselineStub);
// Handle Rectifier <- IonJS // Handle Rectifier <- IonJS
// scratch3 := RectFrame[ReturnAddr] // scratch3 := RectFrame[ReturnAddr]
@ -1330,16 +1331,13 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
masm.storePtr(scratch3, lastProfilingFrame); masm.storePtr(scratch3, lastProfilingFrame);
masm.ret(); masm.ret();
masm.bind(&notIonFrame);
// Check for either BaselineStub or WasmToJSJit: since WasmToJSJit is
// just an entry, jump there if we see it.
masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_BaselineStub), &handle_Entry);
// Handle Rectifier <- BaselineStub <- BaselineJS // Handle Rectifier <- BaselineStub <- BaselineJS
masm.bind(&handle_Rectifier_BaselineStub);
#ifdef DEBUG
{
Label checkOk;
masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
masm.bind(&checkOk);
}
#endif
masm.as_addu(scratch3, scratch2, scratch1); masm.as_addu(scratch3, scratch2, scratch1);
Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() + Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() +
BaselineStubFrameLayout::offsetOfReturnAddress()); BaselineStubFrameLayout::offsetOfReturnAddress());
@ -1403,7 +1401,7 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
} }
// //
// JitFrame_CppToJSJit / JitFrame_WasmJSToJit // JitFrame_CppToJSJit / JitFrame_WasmToJSJit
// //
// If at an entry frame, store null into both fields. // If at an entry frame, store null into both fields.
// A fast-path wasm->jit transition frame is an entry frame from the point // A fast-path wasm->jit transition frame is an entry frame from the point

View file

@ -683,7 +683,7 @@ CodeGeneratorMIPS64::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir)
masm.moveFromDouble(ScratchDoubleReg, output); masm.moveFromDouble(ScratchDoubleReg, output);
masm.as_cfc1(ScratchRegister, Assembler::FCSR); masm.as_cfc1(ScratchRegister, Assembler::FCSR);
// extract invalid operation flag (bit 6) from FCSR // extract invalid operation flag (bit 6) from FCSR
masm.as_ext(ScratchRegister, ScratchRegister, 6, 1); masm.ma_ext(ScratchRegister, ScratchRegister, 6, 1);
masm.ma_dsrl(SecondScratchReg, output, Imm32(63)); masm.ma_dsrl(SecondScratchReg, output, Imm32(63));
masm.ma_or(SecondScratchReg, ScratchRegister); masm.ma_or(SecondScratchReg, ScratchRegister);
masm.ma_b(SecondScratchReg, Imm32(0), ool->entry(), Assembler::NotEqual); masm.ma_b(SecondScratchReg, Imm32(0), ool->entry(), Assembler::NotEqual);
@ -703,7 +703,7 @@ CodeGeneratorMIPS64::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir)
// Check that the result is in the uint64_t range. // Check that the result is in the uint64_t range.
masm.moveFromDouble(ScratchDoubleReg, output); masm.moveFromDouble(ScratchDoubleReg, output);
masm.as_cfc1(ScratchRegister, Assembler::FCSR); masm.as_cfc1(ScratchRegister, Assembler::FCSR);
masm.as_ext(ScratchRegister, ScratchRegister, 6, 1); masm.ma_ext(ScratchRegister, ScratchRegister, 6, 1);
masm.ma_dsrl(SecondScratchReg, output, Imm32(63)); masm.ma_dsrl(SecondScratchReg, output, Imm32(63));
masm.ma_or(SecondScratchReg, ScratchRegister); masm.ma_or(SecondScratchReg, ScratchRegister);
masm.ma_b(SecondScratchReg, Imm32(0), ool->entry(), Assembler::NotEqual); masm.ma_b(SecondScratchReg, Imm32(0), ool->entry(), Assembler::NotEqual);
@ -724,7 +724,7 @@ CodeGeneratorMIPS64::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir)
// Check that the result is in the int64_t range. // Check that the result is in the int64_t range.
masm.as_cfc1(output, Assembler::FCSR); masm.as_cfc1(output, Assembler::FCSR);
masm.as_ext(output, output, 6, 1); masm.ma_ext(output, output, 6, 1);
masm.ma_b(output, Imm32(0), ool->entry(), Assembler::NotEqual); masm.ma_b(output, Imm32(0), ool->entry(), Assembler::NotEqual);
masm.bind(ool->rejoin()); masm.bind(ool->rejoin());

View file

@ -2578,7 +2578,7 @@ MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input, Register output,
as_truncld(ScratchDoubleReg, input); as_truncld(ScratchDoubleReg, input);
moveFromDoubleHi(ScratchDoubleReg, output); moveFromDoubleHi(ScratchDoubleReg, output);
as_cfc1(ScratchRegister, Assembler::FCSR); as_cfc1(ScratchRegister, Assembler::FCSR);
as_ext(ScratchRegister, ScratchRegister, 6, 1); ma_ext(ScratchRegister, ScratchRegister, 6, 1);
ma_or(ScratchRegister, output); ma_or(ScratchRegister, output);
moveFromFloat32(ScratchDoubleReg, output); moveFromFloat32(ScratchDoubleReg, output);
ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual); ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
@ -2592,7 +2592,7 @@ MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input, Register output
as_truncls(ScratchDoubleReg, input); as_truncls(ScratchDoubleReg, input);
moveFromDoubleHi(ScratchDoubleReg, output); moveFromDoubleHi(ScratchDoubleReg, output);
as_cfc1(ScratchRegister, Assembler::FCSR); as_cfc1(ScratchRegister, Assembler::FCSR);
as_ext(ScratchRegister, ScratchRegister, 6, 1); ma_ext(ScratchRegister, ScratchRegister, 6, 1);
ma_or(ScratchRegister, output); ma_or(ScratchRegister, output);
moveFromFloat32(ScratchDoubleReg, output); moveFromFloat32(ScratchDoubleReg, output);
ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual); ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);

View file

@ -428,14 +428,10 @@ JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
masm.pushReturnAddress(); masm.pushReturnAddress();
// Caller: // Caller:
// [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- sp // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- sp
// '--- s3 ---'
// ArgumentsRectifierReg contains the |nargs| pushed onto the current
// frame. Including |this|, there are (|nargs| + 1) arguments to copy.
MOZ_ASSERT(ArgumentsRectifierReg == s3);
// Add |this|, in the counter of known arguments. // Add |this|, in the counter of known arguments.
masm.addPtr(Imm32(1), ArgumentsRectifierReg); masm.loadPtr(Address(StackPointer, RectifierFrameLayout::offsetOfNumActualArgs()), s3);
masm.addPtr(Imm32(1), s3);
Register numActArgsReg = a6; Register numActArgsReg = a6;
Register calleeTokenReg = a7; Register calleeTokenReg = a7;
@ -1211,10 +1207,10 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
// //
// JitFrame_Rectifier // JitFrame_Rectifier
// //
// The rectifier frame can be preceded by either an IonJS or a // The rectifier frame can be preceded by either an IonJS, a WasmToJSJit or
// BaselineStub frame. // a BaselineStub frame.
// //
// Stack layout if caller of rectifier was Ion: // Stack layout if caller of rectifier was Ion or WasmToJSJit:
// //
// Ion-Descriptor // Ion-Descriptor
// Ion-ReturnAddr // Ion-ReturnAddr
@ -1259,10 +1255,11 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
// and |scratch2| points to Rectifier frame // and |scratch2| points to Rectifier frame
// and |scratch3| contains Rect-Descriptor.Type // and |scratch3| contains Rect-Descriptor.Type
masm.assertRectifierFrameParentType(scratch3);
// Check for either Ion or BaselineStub frame. // Check for either Ion or BaselineStub frame.
Label handle_Rectifier_BaselineStub; Label notIonFrame;
masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS), masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS), &notIonFrame);
&handle_Rectifier_BaselineStub);
// Handle Rectifier <- IonJS // Handle Rectifier <- IonJS
// scratch3 := RectFrame[ReturnAddr] // scratch3 := RectFrame[ReturnAddr]
@ -1275,16 +1272,13 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
masm.storePtr(scratch3, lastProfilingFrame); masm.storePtr(scratch3, lastProfilingFrame);
masm.ret(); masm.ret();
masm.bind(&notIonFrame);
// Check for either BaselineStub or WasmToJSJit: since WasmToJSJit is
// just an entry, jump there if we see it.
masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_BaselineStub), &handle_Entry);
// Handle Rectifier <- BaselineStub <- BaselineJS // Handle Rectifier <- BaselineStub <- BaselineJS
masm.bind(&handle_Rectifier_BaselineStub);
#ifdef DEBUG
{
Label checkOk;
masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
masm.bind(&checkOk);
}
#endif
masm.as_daddu(scratch3, scratch2, scratch1); masm.as_daddu(scratch3, scratch2, scratch1);
Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() + Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() +
BaselineStubFrameLayout::offsetOfReturnAddress()); BaselineStubFrameLayout::offsetOfReturnAddress());
@ -1348,7 +1342,7 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
} }
// //
// JitFrame_CppToJSJit / JitFrame_WasmJSToJit // JitFrame_CppToJSJit / JitFrame_WasmToJSJit
// //
// If at an entry frame, store null into both fields. // If at an entry frame, store null into both fields.
// A fast-path wasm->jit transition frame is an entry frame from the point // A fast-path wasm->jit transition frame is an entry frame from the point

View file

@ -1180,10 +1180,10 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
// //
// JitFrame_Rectifier // JitFrame_Rectifier
// //
// The rectifier frame can be preceded by either an IonJS or a // The rectifier frame can be preceded by either an IonJS, a WasmToJSJit or
// BaselineStub frame. // a BaselineStub frame.
// //
// Stack layout if caller of rectifier was Ion: // Stack layout if caller of rectifier was Ion or WasmToJSJit:
// //
// Ion-Descriptor // Ion-Descriptor
// Ion-ReturnAddr // Ion-ReturnAddr
@ -1228,10 +1228,11 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
// and |scratch2| points to Rectifier frame // and |scratch2| points to Rectifier frame
// and |scratch3| contains Rect-Descriptor.Type // and |scratch3| contains Rect-Descriptor.Type
 masm.assertRectifierFrameParentType(scratch3);
// Check for either Ion or BaselineStub frame. // Check for either Ion or something else frame.
Label handle_Rectifier_BaselineStub; Label notIonFrame;
masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS), masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS), &notIonFrame);
&handle_Rectifier_BaselineStub);
// Handle Rectifier <- IonJS // Handle Rectifier <- IonJS
// scratch3 := RectFrame[ReturnAddr] // scratch3 := RectFrame[ReturnAddr]
@ -1243,16 +1244,13 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
masm.storePtr(scratch3, lastProfilingFrame); masm.storePtr(scratch3, lastProfilingFrame);
masm.ret(); masm.ret();
masm.bind(&notIonFrame);
// Check for either BaselineStub or WasmToJSJit: since WasmToJSJit is
// just an entry, jump there if we see it.
masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_BaselineStub), &handle_Entry);
// Handle Rectifier <- BaselineStub <- BaselineJS // Handle Rectifier <- BaselineStub <- BaselineJS
masm.bind(&handle_Rectifier_BaselineStub);
#ifdef DEBUG
{
Label checkOk;
masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
masm.bind(&checkOk);
}
#endif
BaseIndex stubFrameReturnAddr(scratch2, scratch1, TimesOne, BaseIndex stubFrameReturnAddr(scratch2, scratch1, TimesOne,
RectifierFrameLayout::Size() + RectifierFrameLayout::Size() +
BaselineStubFrameLayout::offsetOfReturnAddress()); BaselineStubFrameLayout::offsetOfReturnAddress());
@ -1314,7 +1312,7 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
} }
// //
// JitFrame_CppToJSJit / JitFrame_WasmJSToJit // JitFrame_CppToJSJit / JitFrame_WasmToJSJit
// //
// If at an entry frame, store null into both fields. // If at an entry frame, store null into both fields.
// A fast-path wasm->jit transition frame is an entry frame from the point // A fast-path wasm->jit transition frame is an entry frame from the point

View file

@ -1214,10 +1214,10 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
// //
// JitFrame_Rectifier // JitFrame_Rectifier
// //
// The rectifier frame can be preceded by either an IonJS or a // The rectifier frame can be preceded by either an IonJS, a WasmToJSJit or
// BaselineStub frame. // a BaselineStub frame.
// //
// Stack layout if caller of rectifier was Ion: // Stack layout if caller of rectifier was Ion or WasmToJSJit:
// //
// Ion-Descriptor // Ion-Descriptor
// Ion-ReturnAddr // Ion-ReturnAddr
@ -1262,10 +1262,11 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
// and |scratch2| points to Rectifier frame // and |scratch2| points to Rectifier frame
// and |scratch3| contains Rect-Descriptor.Type // and |scratch3| contains Rect-Descriptor.Type
masm.assertRectifierFrameParentType(scratch3);
// Check for either Ion or BaselineStub frame. // Check for either Ion or BaselineStub frame.
Label handle_Rectifier_BaselineStub; Label notIonFrame;
masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS), masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS), &notIonFrame);
&handle_Rectifier_BaselineStub);
// Handle Rectifier <- IonJS // Handle Rectifier <- IonJS
// scratch3 := RectFrame[ReturnAddr] // scratch3 := RectFrame[ReturnAddr]
@ -1277,16 +1278,13 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
masm.storePtr(scratch3, lastProfilingFrame); masm.storePtr(scratch3, lastProfilingFrame);
masm.ret(); masm.ret();
masm.bind(&notIonFrame);
// Check for either BaselineStub or WasmToJSJit: since WasmToJSJit is
// just an entry, jump there if we see it.
masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_BaselineStub), &handle_Entry);
// Handle Rectifier <- BaselineStub <- BaselineJS // Handle Rectifier <- BaselineStub <- BaselineJS
masm.bind(&handle_Rectifier_BaselineStub);
#ifdef DEBUG
{
Label checkOk;
masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
masm.bind(&checkOk);
}
#endif
BaseIndex stubFrameReturnAddr(scratch2, scratch1, TimesOne, BaseIndex stubFrameReturnAddr(scratch2, scratch1, TimesOne,
RectifierFrameLayout::Size() + RectifierFrameLayout::Size() +
BaselineStubFrameLayout::offsetOfReturnAddress()); BaselineStubFrameLayout::offsetOfReturnAddress());
@ -1348,7 +1346,7 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
} }
// //
// JitFrame_CppToJSJit / JitFrame_WasmJSToJit // JitFrame_CppToJSJit / JitFrame_WasmToJSJit
// //
// If at an entry frame, store null into both fields. // If at an entry frame, store null into both fields.
// A fast-path wasm->jit transition frame is an entry frame from the point // A fast-path wasm->jit transition frame is an entry frame from the point

View file

@ -7525,19 +7525,6 @@ UnhideScriptedCaller(JSContext* cx)
} /* namespace JS */ } /* namespace JS */
AutoGCRooter::AutoGCRooter(JSContext* cx, ptrdiff_t tag)
: AutoGCRooter(JS::RootingContext::get(cx), tag)
{}
AutoGCRooter::AutoGCRooter(JS::RootingContext* cx, ptrdiff_t tag)
: down(cx->autoGCRooters_),
tag_(tag),
stackTop(&cx->autoGCRooters_)
{
MOZ_ASSERT(this != *stackTop);
*stackTop = this;
}
#ifdef JS_DEBUG #ifdef JS_DEBUG
JS_PUBLIC_API(void) JS_PUBLIC_API(void)
JS::detail::AssertArgumentsAreSane(JSContext* cx, HandleValue value) JS::detail::AssertArgumentsAreSane(JSContext* cx, HandleValue value)

View file

@ -2036,28 +2036,6 @@ JSFunction::needsNamedLambdaEnvironment() const
return scope->hasEnvironment(); return scope->hasEnvironment();
} }
JSFunction*
js::NewNativeFunction(JSContext* cx, Native native, unsigned nargs, HandleAtom atom,
gc::AllocKind allocKind /* = AllocKind::FUNCTION */,
NewObjectKind newKind /* = SingletonObject */)
{
MOZ_ASSERT(native);
return NewFunctionWithProto(cx, native, nargs, JSFunction::NATIVE_FUN,
nullptr, atom, nullptr, allocKind, newKind);
}
JSFunction*
js::NewNativeConstructor(JSContext* cx, Native native, unsigned nargs, HandleAtom atom,
gc::AllocKind allocKind /* = AllocKind::FUNCTION */,
NewObjectKind newKind /* = SingletonObject */,
JSFunction::Flags flags /* = JSFunction::NATIVE_CTOR */)
{
MOZ_ASSERT(native);
MOZ_ASSERT(flags & JSFunction::NATIVE_CTOR);
return NewFunctionWithProto(cx, native, nargs, flags, nullptr, atom,
nullptr, allocKind, newKind);
}
JSFunction* JSFunction*
js::NewScriptedFunction(JSContext* cx, unsigned nargs, js::NewScriptedFunction(JSContext* cx, unsigned nargs,
JSFunction::Flags flags, HandleAtom atom, JSFunction::Flags flags, HandleAtom atom,
@ -2092,26 +2070,16 @@ js::NewFunctionWithProto(JSContext* cx, Native native,
unsigned nargs, JSFunction::Flags flags, HandleObject enclosingEnv, unsigned nargs, JSFunction::Flags flags, HandleObject enclosingEnv,
HandleAtom atom, HandleObject proto, HandleAtom atom, HandleObject proto,
gc::AllocKind allocKind /* = AllocKind::FUNCTION */, gc::AllocKind allocKind /* = AllocKind::FUNCTION */,
NewObjectKind newKind /* = GenericObject */, NewObjectKind newKind /* = GenericObject */)
NewFunctionProtoHandling protoHandling /* = NewFunctionClassProto */)
{ {
MOZ_ASSERT(allocKind == AllocKind::FUNCTION || allocKind == AllocKind::FUNCTION_EXTENDED); MOZ_ASSERT(allocKind == AllocKind::FUNCTION || allocKind == AllocKind::FUNCTION_EXTENDED);
MOZ_ASSERT_IF(native, !enclosingEnv); MOZ_ASSERT_IF(native, !enclosingEnv);
MOZ_ASSERT(NewFunctionEnvironmentIsWellFormed(cx, enclosingEnv)); MOZ_ASSERT(NewFunctionEnvironmentIsWellFormed(cx, enclosingEnv));
RootedObject funobj(cx); JSFunction* fun = NewObjectWithClassProto<JSFunction>(cx, proto, allocKind, newKind);
if (protoHandling == NewFunctionClassProto) { if (!fun)
funobj = NewObjectWithClassProto(cx, &JSFunction::class_, proto, allocKind,
newKind);
} else {
funobj = NewObjectWithGivenTaggedProto(cx, &JSFunction::class_, AsTaggedProto(proto),
allocKind, newKind);
}
if (!funobj)
return nullptr; return nullptr;
RootedFunction fun(cx, &funobj->as<JSFunction>());
if (allocKind == AllocKind::FUNCTION_EXTENDED) if (allocKind == AllocKind::FUNCTION_EXTENDED)
flags = JSFunction::Flags(flags | JSFunction::EXTENDED); flags = JSFunction::Flags(flags | JSFunction::EXTENDED);

View file

@ -687,20 +687,41 @@ AsyncFunctionConstructor(JSContext* cx, unsigned argc, Value* vp);
extern bool extern bool
AsyncGeneratorConstructor(JSContext* cx, unsigned argc, Value* vp); AsyncGeneratorConstructor(JSContext* cx, unsigned argc, Value* vp);
// If enclosingEnv is null, the function will have a null environment()
// (yes, null, not the global). In all cases, the global will be used as the
// parent.
extern JSFunction*
NewFunctionWithProto(JSContext* cx, JSNative native, unsigned nargs,
JSFunction::Flags flags, HandleObject enclosingEnv, HandleAtom atom,
HandleObject proto, gc::AllocKind allocKind = gc::AllocKind::FUNCTION,
NewObjectKind newKind = GenericObject);
// Allocate a new function backed by a JSNative. Note that by default this // Allocate a new function backed by a JSNative. Note that by default this
// creates a singleton object. // creates a singleton object.
extern JSFunction* inline JSFunction*
NewNativeFunction(JSContext* cx, JSNative native, unsigned nargs, HandleAtom atom, NewNativeFunction(JSContext* cx, JSNative native, unsigned nargs, HandleAtom atom,
gc::AllocKind allocKind = gc::AllocKind::FUNCTION, gc::AllocKind allocKind = gc::AllocKind::FUNCTION,
NewObjectKind newKind = SingletonObject); NewObjectKind newKind = SingletonObject)
{
MOZ_ASSERT(native);
return NewFunctionWithProto(cx, native, nargs, JSFunction::NATIVE_FUN,
nullptr, atom, nullptr, allocKind, newKind);
}
// Allocate a new constructor backed by a JSNative. Note that by default this // Allocate a new constructor backed by a JSNative. Note that by default this
// creates a singleton object. // creates a singleton object.
extern JSFunction* inline JSFunction*
NewNativeConstructor(JSContext* cx, JSNative native, unsigned nargs, HandleAtom atom, NewNativeConstructor(JSContext* cx, JSNative native, unsigned nargs, HandleAtom atom,
gc::AllocKind allocKind = gc::AllocKind::FUNCTION, gc::AllocKind allocKind = gc::AllocKind::FUNCTION,
NewObjectKind newKind = SingletonObject, NewObjectKind newKind = SingletonObject,
JSFunction::Flags flags = JSFunction::NATIVE_CTOR); JSFunction::Flags flags = JSFunction::NATIVE_CTOR)
{
MOZ_ASSERT(native);
MOZ_ASSERT(flags & JSFunction::NATIVE_CTOR);
return NewFunctionWithProto(cx, native, nargs, flags, nullptr, atom,
nullptr, allocKind, newKind);
}
// Allocate a new scripted function. If enclosingEnv is null, the // Allocate a new scripted function. If enclosingEnv is null, the
// global will be used. In all cases the parent of the resulting object will be // global will be used. In all cases the parent of the resulting object will be
@ -711,25 +732,6 @@ NewScriptedFunction(JSContext* cx, unsigned nargs, JSFunction::Flags flags,
gc::AllocKind allocKind = gc::AllocKind::FUNCTION, gc::AllocKind allocKind = gc::AllocKind::FUNCTION,
NewObjectKind newKind = GenericObject, NewObjectKind newKind = GenericObject,
HandleObject enclosingEnv = nullptr); HandleObject enclosingEnv = nullptr);
// By default, if proto is nullptr, Function.prototype is used instead.i
// If protoHandling is NewFunctionExactProto, and proto is nullptr, the created
// function will use nullptr as its [[Prototype]] instead. If
// enclosingEnv is null, the function will have a null environment()
// (yes, null, not the global). In all cases, the global will be used as the
// parent.
enum NewFunctionProtoHandling {
NewFunctionClassProto,
NewFunctionGivenProto
};
extern JSFunction*
NewFunctionWithProto(JSContext* cx, JSNative native, unsigned nargs,
JSFunction::Flags flags, HandleObject enclosingEnv, HandleAtom atom,
HandleObject proto, gc::AllocKind allocKind = gc::AllocKind::FUNCTION,
NewObjectKind newKind = GenericObject,
NewFunctionProtoHandling protoHandling = NewFunctionClassProto);
extern JSAtom* extern JSAtom*
IdToFunctionName(JSContext* cx, HandleId id, IdToFunctionName(JSContext* cx, HandleId id,
FunctionPrefixKind prefixKind = FunctionPrefixKind::None); FunctionPrefixKind prefixKind = FunctionPrefixKind::None);
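
Since NewNativeFunction and NewNativeConstructor are now inline wrappers around NewFunctionWithProto, existing call sites keep working unchanged. For orientation, a typical call site might look like the sketch below; the native, its name, and the helper are hypothetical and assume the usual in-tree headers:

// Hypothetical embedder/native code, not part of this patch.
static bool MyNative(JSContext* cx, unsigned argc, JS::Value* vp) {
    JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
    args.rval().setInt32(42);
    return true;
}

static JSFunction* MakeMyNative(JSContext* cx) {
    JS::Rooted<JSAtom*> name(cx, js::Atomize(cx, "myNative", 8));
    if (!name)
        return nullptr;
    // Expands to NewFunctionWithProto with NATIVE_FUN flags and a null proto.
    return js::NewNativeFunction(cx, MyNative, /* nargs = */ 0, name);
}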

View file

@ -210,11 +210,90 @@ class MOZ_STACK_CLASS JS_PUBLIC_API(AutoEnterCycleCollection)
class RootingContext; class RootingContext;
// Our instantiations of Rooted<void*> and PersistentRooted<void*> require an
// instantiation of MapTypeToRootKind.
template <>
struct MapTypeToRootKind<void*> {
static const RootKind kind = RootKind::Traceable;
};
using RootedListHeads = mozilla::EnumeratedArray<RootKind, RootKind::Limit,
Rooted<void*>*>;
/*
* This list enumerates the different types of conceptual stacks we have in
* SpiderMonkey. In reality, they all share the C stack, but we allow different
* stack limits depending on the type of code running.
*/
enum StackKind
{
StackForSystemCode, // C++, such as the GC, running on behalf of the VM.
StackForTrustedScript, // Script running with trusted principals.
StackForUntrustedScript, // Script running with untrusted principals.
StackKindCount
};
class JS_PUBLIC_API(AutoGCRooter);
// Superclass of JSContext which can be used for rooting data in use by the
// current thread but that does not provide all the functions of a JSContext.
class RootingContext
{
// Stack GC roots for Rooted GC heap pointers.
RootedListHeads stackRoots_;
template <typename T> friend class JS::Rooted;
// Stack GC roots for AutoFooRooter classes.
JS::AutoGCRooter* autoGCRooters_;
friend class JS::AutoGCRooter;
public:
RootingContext();
void traceStackRoots(JSTracer* trc);
void checkNoGCRooters();
protected:
// The remaining members in this class should only be accessed through
// JSContext pointers. They are unrelated to rooting and are in place so
// that inlined API functions can directly access the data.
/* The current compartment. */
JSCompartment* compartment_;
/* The current zone. */
JS::Zone* zone_;
public:
/* Limit pointer for checking native stack consumption. */
uintptr_t nativeStackLimit[StackKindCount];
static const RootingContext* get(const JSContext* cx) {
return reinterpret_cast<const RootingContext*>(cx);
}
static RootingContext* get(JSContext* cx) {
return reinterpret_cast<RootingContext*>(cx);
}
friend JSCompartment* js::GetContextCompartment(const JSContext* cx);
friend JS::Zone* js::GetContextZone(const JSContext* cx);
};
class JS_PUBLIC_API(AutoGCRooter) class JS_PUBLIC_API(AutoGCRooter)
{ {
public: public:
AutoGCRooter(JSContext* cx, ptrdiff_t tag); AutoGCRooter(JSContext* cx, ptrdiff_t tag)
AutoGCRooter(RootingContext* cx, ptrdiff_t tag); : AutoGCRooter(JS::RootingContext::get(cx), tag)
{}
AutoGCRooter(JS::RootingContext* cx, ptrdiff_t tag)
: down(cx->autoGCRooters_),
tag_(tag),
stackTop(&cx->autoGCRooters_)
{
MOZ_ASSERT(this != *stackTop);
*stackTop = this;
}
~AutoGCRooter() { ~AutoGCRooter() {
MOZ_ASSERT(this == *stackTop); MOZ_ASSERT(this == *stackTop);
@ -262,74 +341,6 @@ class JS_PUBLIC_API(AutoGCRooter)
void operator=(AutoGCRooter& ida) = delete; void operator=(AutoGCRooter& ida) = delete;
}; };
// Our instantiations of Rooted<void*> and PersistentRooted<void*> require an
// instantiation of MapTypeToRootKind.
template <>
struct MapTypeToRootKind<void*> {
static const RootKind kind = RootKind::Traceable;
};
using RootedListHeads = mozilla::EnumeratedArray<RootKind, RootKind::Limit,
Rooted<void*>*>;
/*
* This list enumerates the different types of conceptual stacks we have in
* SpiderMonkey. In reality, they all share the C stack, but we allow different
* stack limits depending on the type of code running.
*/
enum StackKind
{
StackForSystemCode, // C++, such as the GC, running on behalf of the VM.
StackForTrustedScript, // Script running with trusted principals.
StackForUntrustedScript, // Script running with untrusted principals.
StackKindCount
};
// Superclass of JSContext which can be used for rooting data in use by the
// current thread but that does not provide all the functions of a JSContext.
class RootingContext
{
// Stack GC roots for Rooted GC heap pointers.
RootedListHeads stackRoots_;
template <typename T> friend class JS::Rooted;
// Stack GC roots for AutoFooRooter classes.
JS::AutoGCRooter* autoGCRooters_;
friend class JS::AutoGCRooter;
public:
RootingContext();
void traceStackRoots(JSTracer* trc);
void checkNoGCRooters();
protected:
// The remaining members in this class should only be accessed through
// JSContext pointers. They are unrelated to rooting and are in place so
// that inlined API functions can directly access the data.
/* The current compartment. */
JSCompartment* compartment_;
/* The current zone. */
JS::Zone* zone_;
public:
/* Limit pointer for checking native stack consumption. */
uintptr_t nativeStackLimit[StackKindCount];
static const RootingContext* get(const JSContext* cx) {
return reinterpret_cast<const RootingContext*>(cx);
}
static RootingContext* get(JSContext* cx) {
return reinterpret_cast<RootingContext*>(cx);
}
friend JSCompartment* js::GetContextCompartment(const JSContext* cx);
friend JS::Zone* js::GetContextZone(const JSContext* cx);
};
} /* namespace JS */ } /* namespace JS */
namespace js { namespace js {

View file

@ -279,6 +279,10 @@ class RegExpZone
/* Like 'get', but compile 'maybeOpt' (if non-null). */ /* Like 'get', but compile 'maybeOpt' (if non-null). */
RegExpShared* get(JSContext* cx, HandleAtom source, JSString* maybeOpt); RegExpShared* get(JSContext* cx, HandleAtom source, JSString* maybeOpt);
#ifdef DEBUG
void clear() { set_.clear(); }
#endif
size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf); size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
}; };

View file

@ -538,6 +538,7 @@ JitFrameIter::skipNonScriptedJSFrames()
jit::JSJitFrameIter& frames = asJSJit(); jit::JSJitFrameIter& frames = asJSJit();
while (!frames.isScripted() && !frames.done()) while (!frames.isScripted() && !frames.done())
++frames; ++frames;
settle();
} }
} }

View file

@ -215,10 +215,6 @@ Instance::callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc, con
if (script->baselineScript()->hasPendingIonBuilder()) if (script->baselineScript()->hasPendingIonBuilder())
return true; return true;
// Currently we can't rectify arguments. Therefore disable if argc is too low.
if (importFun->nargs() > fi.sig().args().length())
return true;
// Ensure the argument types are included in the argument TypeSets stored in // Ensure the argument types are included in the argument TypeSets stored in
// the TypeScript. This is necessary for Ion, because the import will use // the TypeScript. This is necessary for Ion, because the import will use
// the skip-arg-checks entry point. // the skip-arg-checks entry point.
@ -229,9 +225,13 @@ Instance::callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc, con
// patched back. // patched back.
if (!TypeScript::ThisTypes(script)->hasType(TypeSet::UndefinedType())) if (!TypeScript::ThisTypes(script)->hasType(TypeSet::UndefinedType()))
return true; return true;
for (uint32_t i = 0; i < importFun->nargs(); i++) {
const ValTypeVector& importArgs = fi.sig().args();
size_t numKnownArgs = Min(importArgs.length(), importFun->nargs());
for (uint32_t i = 0; i < numKnownArgs; i++) {
TypeSet::Type type = TypeSet::UnknownType(); TypeSet::Type type = TypeSet::UnknownType();
switch (fi.sig().args()[i]) { switch (importArgs[i]) {
case ValType::I32: type = TypeSet::Int32Type(); break; case ValType::I32: type = TypeSet::Int32Type(); break;
case ValType::I64: MOZ_CRASH("can't happen because of above guard"); case ValType::I64: MOZ_CRASH("can't happen because of above guard");
case ValType::F32: type = TypeSet::DoubleType(); break; case ValType::F32: type = TypeSet::DoubleType(); break;
@ -248,6 +248,14 @@ Instance::callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc, con
return true; return true;
} }
// These arguments will be filled with undefined at runtime by the
// arguments rectifier: check that the imported function can handle
// undefined there.
for (uint32_t i = importArgs.length(); i < importFun->nargs(); i++) {
if (!TypeScript::ArgTypes(script, i)->hasType(TypeSet::UndefinedType()))
return true;
}
// Let's optimize it! // Let's optimize it!
if (!script->baselineScript()->addDependentWasmImport(cx, *this, funcImportIndex)) if (!script->baselineScript()->addDependentWasmImport(cx, *this, funcImportIndex))
return false; return false;
@ -451,6 +459,13 @@ Instance::init(JSContext* cx)
} }
} }
if (!metadata(code_->bestTier()).funcImports.empty()) {
JitRuntime* jitRuntime = cx->runtime()->getJitRuntime(cx);
if (!jitRuntime)
return false;
jsJitArgsRectifier_ = jitRuntime->getArgumentsRectifier();
}
return true; return true;
} }
@ -508,6 +523,8 @@ Instance::tracePrivate(JSTracer* trc)
MOZ_ASSERT(!gc::IsAboutToBeFinalized(&object_)); MOZ_ASSERT(!gc::IsAboutToBeFinalized(&object_));
TraceEdge(trc, &object_, "wasm instance object"); TraceEdge(trc, &object_, "wasm instance object");
TraceNullableEdge(trc, &jsJitArgsRectifier_, "wasm jit args rectifier");
// OK to just do one tier here; though the tiers have different funcImports // OK to just do one tier here; though the tiers have different funcImports
// tables, they share the tls object. // tables, they share the tls object.
for (const FuncImport& fi : metadata(code().stableTier()).funcImports) for (const FuncImport& fi : metadata(code().stableTier()).funcImports)
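
To restate the new admissibility rule in callImport above: the optimized jit-exit path is only taken when every formal parameter beyond the wasm signature can observe |undefined|, because that is what the arguments rectifier will pass for them. A toy standalone check expressing the same condition (the boolean array stands in for the per-argument TypeSets):

#include <cstddef>

static bool ExtraFormalsAcceptUndefined(size_t wasmArgCount, size_t calleeNargs,
                                        const bool* argTypeHasUndefined) {
    for (size_t i = wasmArgCount; i < calleeNargs; i++) {
        if (!argTypeHasUndefined[i])
            return false;  // the rectifier would feed undefined into a TypeSet lacking it
    }
    return true;
}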

View file

@ -71,6 +71,7 @@ class Instance
{ {
JSCompartment* const compartment_; JSCompartment* const compartment_;
ReadBarrieredWasmInstanceObject object_; ReadBarrieredWasmInstanceObject object_;
GCPtrJitCode jsJitArgsRectifier_;
const SharedCode code_; const SharedCode code_;
const UniqueDebugState debug_; const UniqueDebugState debug_;
const UniqueGlobalSegment globals_; const UniqueGlobalSegment globals_;
@ -122,6 +123,8 @@ class Instance
bool memoryAccessInGuardRegion(uint8_t* addr, unsigned numBytes) const; bool memoryAccessInGuardRegion(uint8_t* addr, unsigned numBytes) const;
TlsData* tlsData() const { return globals_->tlsData(); } TlsData* tlsData() const { return globals_->tlsData(); }
static size_t offsetOfJSJitArgsRectifier() { return offsetof(Instance, jsJitArgsRectifier_); }
// This method returns a pointer to the GC object that owns this Instance. // This method returns a pointer to the GC object that owns this Instance.
// Instances may be reached via weak edges (e.g., Compartment::instances_) // Instances may be reached via weak edges (e.g., Compartment::instances_)
// so this perform a read-barrier on the returned object unless the barrier // so this perform a read-barrier on the returned object unless the barrier

View file

@ -23,6 +23,7 @@
#include "wasm/WasmCode.h" #include "wasm/WasmCode.h"
#include "wasm/WasmGenerator.h" #include "wasm/WasmGenerator.h"
#include "wasm/WasmInstance.h"
#include "jit/MacroAssembler-inl.h" #include "jit/MacroAssembler-inl.h"
@ -710,7 +711,7 @@ GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLa
// the return address. // the return address.
static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes"); static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
unsigned sizeOfRetAddr = sizeof(void*); unsigned sizeOfRetAddr = sizeof(void*);
unsigned sizeOfPreFrame = WasmFrameLayout::Size() - sizeOfRetAddr; unsigned sizeOfPreFrame = WasmToJSJitFrameLayout::Size() - sizeOfRetAddr;
unsigned sizeOfThisAndArgs = (1 + fi.sig().args().length()) * sizeof(Value); unsigned sizeOfThisAndArgs = (1 + fi.sig().args().length()) * sizeof(Value);
unsigned totalJitFrameBytes = sizeOfRetAddr + sizeOfPreFrame + sizeOfThisAndArgs; unsigned totalJitFrameBytes = sizeOfRetAddr + sizeOfPreFrame + sizeOfThisAndArgs;
unsigned jitFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) - unsigned jitFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) -
@ -722,7 +723,7 @@ GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLa
// 1. Descriptor // 1. Descriptor
size_t argOffset = 0; size_t argOffset = 0;
uint32_t descriptor = MakeFrameDescriptor(sizeOfThisAndArgsAndPadding, JitFrame_WasmToJSJit, uint32_t descriptor = MakeFrameDescriptor(sizeOfThisAndArgsAndPadding, JitFrame_WasmToJSJit,
WasmFrameLayout::Size()); WasmToJSJitFrameLayout::Size());
masm.storePtr(ImmWord(uintptr_t(descriptor)), Address(masm.getStackPointer(), argOffset)); masm.storePtr(ImmWord(uintptr_t(descriptor)), Address(masm.getStackPointer(), argOffset));
argOffset += sizeof(size_t); argOffset += sizeof(size_t);
@ -730,17 +731,13 @@ GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLa
Register callee = ABINonArgReturnReg0; // live until call Register callee = ABINonArgReturnReg0; // live until call
Register scratch = ABINonArgReturnReg1; // repeatedly clobbered Register scratch = ABINonArgReturnReg1; // repeatedly clobbered
// 2.1. Get callee // 2.1. Get JSFunction callee
masm.loadWasmGlobalPtr(fi.tlsDataOffset() + offsetof(FuncImportTls, obj), callee); masm.loadWasmGlobalPtr(fi.tlsDataOffset() + offsetof(FuncImportTls, obj), callee);
// 2.2. Save callee // 2.2. Save callee
masm.storePtr(callee, Address(masm.getStackPointer(), argOffset)); masm.storePtr(callee, Address(masm.getStackPointer(), argOffset));
argOffset += sizeof(size_t); argOffset += sizeof(size_t);
// 2.3. Load callee executable entry point
masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
masm.loadBaselineOrIonNoArgCheck(callee, callee, nullptr);
// 3. Argc // 3. Argc
unsigned argc = fi.sig().args().length(); unsigned argc = fi.sig().args().length();
masm.storePtr(ImmWord(uintptr_t(argc)), Address(masm.getStackPointer(), argOffset)); masm.storePtr(ImmWord(uintptr_t(argc)), Address(masm.getStackPointer(), argOffset));
@ -757,6 +754,19 @@ GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLa
argOffset += fi.sig().args().length() * sizeof(Value); argOffset += fi.sig().args().length() * sizeof(Value);
MOZ_ASSERT(argOffset == sizeOfThisAndArgs + sizeOfPreFrame); MOZ_ASSERT(argOffset == sizeOfThisAndArgs + sizeOfPreFrame);
// 6. Check if we need to rectify arguments
masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch);
Label rectify;
masm.branch32(Assembler::Above, scratch, Imm32(fi.sig().args().length()), &rectify);
// 7. If we haven't rectified arguments, load callee executable entry point
masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
masm.loadBaselineOrIonNoArgCheck(callee, callee, nullptr);
Label rejoinBeforeCall;
masm.bind(&rejoinBeforeCall);
AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr); AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr);
masm.callJitNoProfiler(callee); masm.callJitNoProfiler(callee);
@ -818,6 +828,15 @@ GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLa
GenerateJitExitEpilogue(masm, masm.framePushed(), offsets); GenerateJitExitEpilogue(masm, masm.framePushed(), offsets);
{
// Call the arguments rectifier.
masm.bind(&rectify);
masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, instance)), callee);
masm.loadPtr(Address(callee, Instance::offsetOfJSJitArgsRectifier()), callee);
masm.loadPtr(Address(callee, JitCode::offsetOfCode()), callee);
masm.jump(&rejoinBeforeCall);
}
if (oolConvert.used()) { if (oolConvert.used()) {
masm.bind(&oolConvert); masm.bind(&oolConvert);
masm.setFramePushed(nativeFramePushed); masm.setFramePushed(nativeFramePushed);
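
The hunk above teaches the wasm-to-JS JIT import exit to compare the wasm call's argument count against the JS callee's declared formal-parameter count and to detour through the JIT's arguments rectifier when the callee expects more. A minimal standalone sketch of that decision follows; FakeFunction, RectifyAndCall and the entry-point signature are simplified stand-ins for the real SpiderMonkey machinery, not its API.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified stand-in for a JSFunction: declared arg count plus a compiled
// entry point that assumes the argument count has already been checked.
struct FakeFunction {
  uint16_t nargs;
  void (*entry)(size_t argc);
};

// Hypothetical rectifier: pads the missing arguments (with undefined in the
// real engine) and then calls the regular entry point.
static void RectifyAndCall(const FakeFunction& callee, size_t argc) {
  std::printf("rectifying: padding %zu -> %u args\n", argc,
              static_cast<unsigned>(callee.nargs));
  callee.entry(callee.nargs);
}

// Mirrors the branch emitted by GenerateImportJitExit: call directly when
// enough arguments were passed, otherwise go through the rectifier.
static void CallImport(const FakeFunction& callee, size_t argc) {
  if (callee.nargs > argc) {
    RectifyAndCall(callee, argc);
  } else {
    callee.entry(argc);
  }
}

int main() {
  FakeFunction f{3, [](size_t argc) { std::printf("called with %zu args\n", argc); }};
  CallImport(f, 1);  // under-applied: takes the rectifier path
  CallImport(f, 3);  // exact arity: calls the entry point directly
}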

Просмотреть файл

@ -6279,17 +6279,8 @@ nsDisplayOpacity::CreateWebRenderCommands(mozilla::wr::DisplayListBuilder& aBuil
} }
nsTArray<mozilla::wr::WrFilterOp> filters; nsTArray<mozilla::wr::WrFilterOp> filters;
StackingContextHelper sc(aSc, StackingContextHelper sc(aSc, aBuilder, filters, nullptr, animationsId,
aBuilder, opacityForSC);
aDisplayListBuilder,
this,
&mList,
nullptr,
animationsId,
opacityForSC,
nullptr,
nullptr,
filters);
aManager->CommandBuilder().CreateWebRenderCommandsFromDisplayList(&mList, aManager->CommandBuilder().CreateWebRenderCommandsFromDisplayList(&mList,
aDisplayListBuilder, aDisplayListBuilder,
@ -6341,9 +6332,8 @@ nsDisplayBlendMode::CreateWebRenderCommands(mozilla::wr::DisplayListBuilder& aBu
nsDisplayListBuilder* aDisplayListBuilder) nsDisplayListBuilder* aDisplayListBuilder)
{ {
nsTArray<mozilla::wr::WrFilterOp> filters; nsTArray<mozilla::wr::WrFilterOp> filters;
StackingContextHelper sc(aSc, aBuilder, aDisplayListBuilder, this, StackingContextHelper sc(aSc, aBuilder, filters, nullptr, 0, nullptr, nullptr,
&mList, nullptr, 0, nullptr, nullptr, nullptr, nullptr, nsCSSRendering::GetGFXBlendMode(mBlendMode));
filters, nsCSSRendering::GetGFXBlendMode(mBlendMode));
return nsDisplayWrapList::CreateWebRenderCommands(aBuilder,aResources, sc, return nsDisplayWrapList::CreateWebRenderCommands(aBuilder,aResources, sc,
aManager, aDisplayListBuilder); aManager, aDisplayListBuilder);
@ -6471,8 +6461,7 @@ nsDisplayBlendContainer::CreateWebRenderCommands(mozilla::wr::DisplayListBuilder
mozilla::layers::WebRenderLayerManager* aManager, mozilla::layers::WebRenderLayerManager* aManager,
nsDisplayListBuilder* aDisplayListBuilder) nsDisplayListBuilder* aDisplayListBuilder)
{ {
StackingContextHelper sc(aSc, aBuilder, aDisplayListBuilder, this, StackingContextHelper sc(aSc, aBuilder);
&mList, nullptr, 0, nullptr, nullptr);
return nsDisplayWrapList::CreateWebRenderCommands(aBuilder, aResources, sc, return nsDisplayWrapList::CreateWebRenderCommands(aBuilder, aResources, sc,
aManager, aDisplayListBuilder); aManager, aDisplayListBuilder);
@ -6578,8 +6567,8 @@ nsDisplayOwnLayer::CreateWebRenderCommands(mozilla::wr::DisplayListBuilder& aBui
animationInfo.EnsureAnimationsId(); animationInfo.EnsureAnimationsId();
mWrAnimationId = animationInfo.GetCompositorAnimationsId(); mWrAnimationId = animationInfo.GetCompositorAnimationsId();
StackingContextHelper sc(aSc, aBuilder, aDisplayListBuilder, this, StackingContextHelper sc(aSc, aBuilder, nsTArray<wr::WrFilterOp>(), nullptr,
&mList, nullptr, mWrAnimationId, nullptr, nullptr); mWrAnimationId);
nsDisplayWrapList::CreateWebRenderCommands(aBuilder, aResources, sc, nsDisplayWrapList::CreateWebRenderCommands(aBuilder, aResources, sc,
aManager, aDisplayListBuilder); aManager, aDisplayListBuilder);
@ -7131,8 +7120,7 @@ nsDisplayStickyPosition::CreateWebRenderCommands(mozilla::wr::DisplayListBuilder
// adjusted origin and use that for the nested items. This way all the // adjusted origin and use that for the nested items. This way all the
// ToRelativeLayoutRect calls on this StackingContextHelper object will // ToRelativeLayoutRect calls on this StackingContextHelper object will
// include the necessary adjustment. // include the necessary adjustment.
StackingContextHelper sc(aSc, aBuilder, aDisplayListBuilder, this, StackingContextHelper sc(aSc, aBuilder);
&mList, nullptr, 0, nullptr, nullptr);
sc.AdjustOrigin(scTranslation); sc.AdjustOrigin(scTranslation);
// TODO: if, inside this nested command builder, we try to turn a gecko clip // TODO: if, inside this nested command builder, we try to turn a gecko clip
@ -8056,15 +8044,12 @@ nsDisplayTransform::CreateWebRenderCommands(mozilla::wr::DisplayListBuilder& aBu
nsTArray<mozilla::wr::WrFilterOp> filters; nsTArray<mozilla::wr::WrFilterOp> filters;
StackingContextHelper sc(aSc, StackingContextHelper sc(aSc,
aBuilder, aBuilder,
aDisplayListBuilder, filters,
this,
mStoredList.GetChildren(),
&newTransformMatrix, &newTransformMatrix,
animationsId, animationsId,
nullptr, nullptr,
transformForSC, transformForSC,
nullptr, nullptr,
filters,
gfx::CompositionOp::OP_OVER, gfx::CompositionOp::OP_OVER,
!BackfaceIsHidden()); !BackfaceIsHidden());
@ -8670,15 +8655,12 @@ nsDisplayPerspective::CreateWebRenderCommands(mozilla::wr::DisplayListBuilder& a
nsTArray<mozilla::wr::WrFilterOp> filters; nsTArray<mozilla::wr::WrFilterOp> filters;
StackingContextHelper sc(aSc, StackingContextHelper sc(aSc,
aBuilder, aBuilder,
aDisplayListBuilder, filters,
this,
mList.GetChildren(),
nullptr, nullptr,
0, 0,
nullptr, nullptr,
&transformForSC, &transformForSC,
&perspectiveMatrix, &perspectiveMatrix,
filters,
gfx::CompositionOp::OP_OVER, gfx::CompositionOp::OP_OVER,
!BackfaceIsHidden()); !BackfaceIsHidden());
@ -9409,17 +9391,7 @@ nsDisplayFilter::CreateWebRenderCommands(mozilla::wr::DisplayListBuilder& aBuild
wrFilters.AppendElement(wr::ToWrFilterOp(ToCSSFilter(filter))); wrFilters.AppendElement(wr::ToWrFilterOp(ToCSSFilter(filter)));
} }
StackingContextHelper sc(aSc, StackingContextHelper sc(aSc, aBuilder, wrFilters);
aBuilder,
aDisplayListBuilder,
this,
&mList,
nullptr,
0,
nullptr,
nullptr,
nullptr,
wrFilters);
nsDisplaySVGEffects::CreateWebRenderCommands(aBuilder, aResources, sc, aManager, aDisplayListBuilder); nsDisplaySVGEffects::CreateWebRenderCommands(aBuilder, aResources, sc, aManager, aDisplayListBuilder);
return true; return true;
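
The call-site churn in this file boils down to one signature change: StackingContextHelper parameters that almost every caller passed as nullptr or an empty array were folded away, so the common constructions shrink to a few arguments. A hedged illustration of that refactoring pattern is below; ContextHelper and its parameters are invented for the sketch and are not the real StackingContextHelper signature.

#include <cstdint>
#include <utility>
#include <vector>

struct FilterOp { int kind = 0; };
struct Matrix {};

// Before the change every optional knob was a required positional argument,
// so call sites were long runs of nullptr. Defaulting the rarely-used
// parameters lets callers spell out only what they actually override.
class ContextHelper {
public:
  ContextHelper(std::vector<FilterOp> aFilters = {},
                const Matrix* aTransform = nullptr,
                uint64_t aAnimationsId = 0,
                const float* aOpacity = nullptr)
    : mFilters(std::move(aFilters))
    , mTransform(aTransform)
    , mAnimationsId(aAnimationsId)
    , mOpacity(aOpacity) {}

private:
  std::vector<FilterOp> mFilters;
  const Matrix* mTransform;
  uint64_t mAnimationsId;
  const float* mOpacity;
};

int main() {
  ContextHelper plain;                                 // the common "no extras" case
  float opacity = 0.5f;
  ContextHelper animated({}, nullptr, 42, &opacity);   // only what differs from defaults
}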

Просмотреть файл

@ -0,0 +1,7 @@
<html xmlns="http://www.w3.org/1999/xhtml">
<select>
<script>document.documentElement.offsetHeight</script>
<option>Hello there</option>
<script>document.documentElement.offsetHeight</script>
</select>
</html>

Просмотреть файл

@ -0,0 +1,6 @@
<html xmlns="http://www.w3.org/1999/xhtml">
<select>
<script>document.documentElement.offsetHeight</script>
<option>Hello there</option>
</select>
</html>

Просмотреть файл

@ -2042,5 +2042,6 @@ needs-focus != 1377447-1.html 1377447-2.html
== 1398500-1.html 1398500-1-ref.html == 1398500-1.html 1398500-1-ref.html
== 1401317.html 1401317-ref.html == 1401317.html 1401317-ref.html
== 1401992.html 1401992-ref.html == 1401992.html 1401992-ref.html
== 1405878-1.xml 1405878-1-ref.xml
== 1404057.html 1404057-ref.html == 1404057.html 1404057-ref.html
!= 1404057.html 1404057-noref.html != 1404057.html 1404057-noref.html

Просмотреть файл

@ -2,9 +2,9 @@ pref(layout.css.mix-blend-mode.enabled,true) == blend-canvas.html blend-canvas-r
pref(layout.css.mix-blend-mode.enabled,true) == blend-constant-background-color.html blend-constant-background-color-ref.html pref(layout.css.mix-blend-mode.enabled,true) == blend-constant-background-color.html blend-constant-background-color-ref.html
pref(layout.css.mix-blend-mode.enabled,true) fuzzy-if(webrender,1-1,7875-7875) == blend-gradient-background-color.html blend-gradient-background-color-ref.html pref(layout.css.mix-blend-mode.enabled,true) fuzzy-if(webrender,1-1,7875-7875) == blend-gradient-background-color.html blend-gradient-background-color-ref.html
pref(layout.css.mix-blend-mode.enabled,true) == blend-image.html blend-image-ref.html pref(layout.css.mix-blend-mode.enabled,true) == blend-image.html blend-image-ref.html
pref(layout.css.mix-blend-mode.enabled,true) fails-if(webrender) == blend-difference-stacking.html blend-difference-stacking-ref.html pref(layout.css.mix-blend-mode.enabled,true) == blend-difference-stacking.html blend-difference-stacking-ref.html
fuzzy-if(skiaContent,1,30000) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-alpha.html background-blending-alpha-ref.html fuzzy-if(skiaContent,1,30000) pref(layout.css.background-blend-mode.enabled,true) == background-blending-alpha.html background-blending-alpha-ref.html
pref(layout.css.background-blend-mode.enabled,true) fuzzy-if(webrender,1-1,7875-7875) == background-blending-gradient-color.html background-blending-gradient-color-ref.html pref(layout.css.background-blend-mode.enabled,true) fuzzy-if(webrender,1-1,7875-7875) == background-blending-gradient-color.html background-blending-gradient-color-ref.html
fuzzy-if(azureSkiaGL,3,7597) fuzzy-if(cocoaWidget,3,7597) fuzzy-if(d2d,1,3800) fuzzy-if(d3d11,1,4200) fuzzy-if(skiaContent,2,9450) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-gradient-gradient.html background-blending-gradient-gradient-ref.html fuzzy-if(azureSkiaGL,3,7597) fuzzy-if(cocoaWidget,3,7597) fuzzy-if(d2d,1,3800) fuzzy-if(d3d11,1,4200) fuzzy-if(skiaContent,2,9450) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-gradient-gradient.html background-blending-gradient-gradient-ref.html
fuzzy-if(azureSkiaGL,2,7174) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-gradient-image.html background-blending-gradient-color-ref.html fuzzy-if(azureSkiaGL,2,7174) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-gradient-image.html background-blending-gradient-color-ref.html
@ -12,21 +12,21 @@ fuzzy-if(azureSkia||d2d||gtkWidget,1,10000) pref(layout.css.background-blend-mod
pref(layout.css.background-blend-mode.enabled,true) == background-blending-image-color-png.html background-blending-image-color-ref.html pref(layout.css.background-blend-mode.enabled,true) == background-blending-image-color-png.html background-blending-image-color-ref.html
pref(layout.css.background-blend-mode.enabled,true) == background-blending-image-color-svg.html background-blending-image-color-ref.html pref(layout.css.background-blend-mode.enabled,true) == background-blending-image-color-svg.html background-blending-image-color-ref.html
fuzzy-if(azureSkiaGL,2,7174) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-image-gradient.html background-blending-gradient-color-ref.html fuzzy-if(azureSkiaGL,2,7174) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-image-gradient.html background-blending-gradient-color-ref.html
pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-image-image.html background-blending-image-color-ref.html pref(layout.css.background-blend-mode.enabled,true) == background-blending-image-image.html background-blending-image-color-ref.html
pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-isolation.html background-blending-isolation-ref.html pref(layout.css.background-blend-mode.enabled,true) == background-blending-isolation.html background-blending-isolation-ref.html
pref(layout.css.background-blend-mode.enabled,true) == background-blending-list-repeat.html background-blending-list-repeat-ref.html pref(layout.css.background-blend-mode.enabled,true) == background-blending-list-repeat.html background-blending-list-repeat-ref.html
pref(layout.css.background-blend-mode.enabled,true) == background-blending-multiple-images.html background-blending-multiple-images-ref.html pref(layout.css.background-blend-mode.enabled,true) == background-blending-multiple-images.html background-blending-multiple-images-ref.html
pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-color-burn.html background-blending-color-burn-ref.svg pref(layout.css.background-blend-mode.enabled,true) == background-blending-color-burn.html background-blending-color-burn-ref.svg
pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-color-dodge.html background-blending-color-dodge-ref.svg pref(layout.css.background-blend-mode.enabled,true) == background-blending-color-dodge.html background-blending-color-dodge-ref.svg
# need to investigate why these tests are fuzzy - first suspect is a possible color space conversion on some platforms; same for mix-blend-mode tests # need to investigate why these tests are fuzzy - first suspect is a possible color space conversion on some platforms; same for mix-blend-mode tests
fuzzy-if(azureSkia||gtkWidget,2,9600) fuzzy-if(d2d,1,8000) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-color.html background-blending-color-ref.svg fuzzy-if(azureSkia||gtkWidget,2,9600) fuzzy-if(d2d,1,8000) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-color.html background-blending-color-ref.svg
pref(layout.css.background-blend-mode.enabled,true) == background-blending-darken.html background-blending-darken-ref.svg pref(layout.css.background-blend-mode.enabled,true) == background-blending-darken.html background-blending-darken-ref.svg
pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-difference.html background-blending-difference-ref.svg pref(layout.css.background-blend-mode.enabled,true) == background-blending-difference.html background-blending-difference-ref.svg
fuzzy-if(skiaContent,1,1600) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-exclusion.html background-blending-exclusion-ref.svg fuzzy-if(skiaContent,1,1600) pref(layout.css.background-blend-mode.enabled,true) == background-blending-exclusion.html background-blending-exclusion-ref.svg
fuzzy-if(cocoaWidget||d2d,1,1600) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-hard-light.html background-blending-hard-light-ref.svg fuzzy-if(cocoaWidget||d2d,1,1600) pref(layout.css.background-blend-mode.enabled,true) == background-blending-hard-light.html background-blending-hard-light-ref.svg
fuzzy-if(d2d,1,9600) fuzzy-if(azureSkia||gtkWidget,2,9600) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-hue.html background-blending-hue-ref.svg fuzzy-if(d2d,1,9600) fuzzy-if(azureSkia||gtkWidget,2,9600) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-hue.html background-blending-hue-ref.svg
pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-lighten.html background-blending-lighten-ref.svg pref(layout.css.background-blend-mode.enabled,true) == background-blending-lighten.html background-blending-lighten-ref.svg
fuzzy-if(d2d,1,8000) fuzzy-if(azureSkia||gtkWidget,2,9600) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-luminosity.html background-blending-luminosity-ref.svg fuzzy-if(d2d,1,8000) fuzzy-if(azureSkia||gtkWidget,2,9600) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-luminosity.html background-blending-luminosity-ref.svg
pref(layout.css.background-blend-mode.enabled,true) == background-blending-multiply.html background-blending-multiply-ref.svg pref(layout.css.background-blend-mode.enabled,true) == background-blending-multiply.html background-blending-multiply-ref.svg
pref(layout.css.background-blend-mode.enabled,true) == background-blending-normal.html background-blending-normal-ref.svg pref(layout.css.background-blend-mode.enabled,true) == background-blending-normal.html background-blending-normal-ref.svg
@ -60,7 +60,7 @@ pref(layout.css.background-blend-mode.enabled,true) == background-blending-image
# Test plan 5.3.2 Background layers do not blend with content outside the background (or behind the element) - tests 2 and 3 # Test plan 5.3.2 Background layers do not blend with content outside the background (or behind the element) - tests 2 and 3
pref(layout.css.background-blend-mode.enabled,true) == background-blending-isolation-parent-child-color.html background-blending-isolation-parent-child-ref.html pref(layout.css.background-blend-mode.enabled,true) == background-blending-isolation-parent-child-color.html background-blending-isolation-parent-child-ref.html
pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-isolation-parent-child-image.html background-blending-isolation-parent-child-ref.html pref(layout.css.background-blend-mode.enabled,true) == background-blending-isolation-parent-child-image.html background-blending-isolation-parent-child-ref.html
# Test plan 5.3.6 background-blend-mode for an element with background-position # Test plan 5.3.6 background-blend-mode for an element with background-position
pref(layout.css.background-blend-mode.enabled,true) == background-blending-background-position-percentage.html background-blending-background-position-percentage-ref.html pref(layout.css.background-blend-mode.enabled,true) == background-blending-background-position-percentage.html background-blending-background-position-percentage-ref.html
@ -85,16 +85,16 @@ pref(layout.css.background-blend-mode.enabled,true) == background-blending-backg
# Test plan 5.3.11 background-blend-mode for an element with background-attachement # Test plan 5.3.11 background-blend-mode for an element with background-attachement
pref(layout.css.background-blend-mode.enabled,true) == background-blending-background-attachement-fixed.html background-blending-background-attachement-fixed-ref.html pref(layout.css.background-blend-mode.enabled,true) == background-blending-background-attachement-fixed.html background-blending-background-attachement-fixed-ref.html
pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-background-attachement-fixed-scroll.html background-blending-background-attachement-fixed-scroll-ref.html pref(layout.css.background-blend-mode.enabled,true) == background-blending-background-attachement-fixed-scroll.html background-blending-background-attachement-fixed-scroll-ref.html
pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blend-mode-body-image.html background-blend-mode-body-image-ref.html pref(layout.css.background-blend-mode.enabled,true) == background-blend-mode-body-image.html background-blend-mode-body-image-ref.html
fuzzy-if(Android,4,768) fuzzy-if(gtkWidget,1,132) fuzzy-if(skiaContent,1,800) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blend-mode-body-transparent-image.html background-blend-mode-body-transparent-image-ref.html fuzzy-if(Android,4,768) fuzzy-if(gtkWidget,1,132) fuzzy-if(skiaContent,1,800) pref(layout.css.background-blend-mode.enabled,true) == background-blend-mode-body-transparent-image.html background-blend-mode-body-transparent-image-ref.html
pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-moz-element.html background-blending-moz-element-ref.html pref(layout.css.background-blend-mode.enabled,true) == background-blending-moz-element.html background-blending-moz-element-ref.html
fuzzy(1,40000) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == mix-blend-mode-soft-light.html mix-blend-mode-soft-light-ref.html fuzzy(1,40000) pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == mix-blend-mode-soft-light.html mix-blend-mode-soft-light-ref.html
# Test plan 4.4.2 element with isolation:isolate creates an isolated group for blended children # Test plan 4.4.2 element with isolation:isolate creates an isolated group for blended children
pref(layout.css.isolation.enabled,true) fails-if(webrender) == blend-isolation.html blend-isolation-ref.html pref(layout.css.isolation.enabled,true) == blend-isolation.html blend-isolation-ref.html
pref(layout.css.background-blend-mode.enabled,true) == bug1281593.html bug1281593-ref.html pref(layout.css.background-blend-mode.enabled,true) == bug1281593.html bug1281593-ref.html

Просмотреть файл

@ -178,7 +178,7 @@ HTTP(..) == reflow-sanity-1.html reflow-sanity-1-data.html
HTTP(..) == reflow-sanity-delay-1a.html reflow-sanity-1-ref.html HTTP(..) == reflow-sanity-delay-1a.html reflow-sanity-1-ref.html
HTTP(..) == reflow-sanity-delay-1b.html reflow-sanity-1-ref.html HTTP(..) == reflow-sanity-delay-1b.html reflow-sanity-1-ref.html
HTTP(..) == reflow-sanity-delay-1c.html reflow-sanity-1-ref.html HTTP(..) == reflow-sanity-delay-1c.html reflow-sanity-1-ref.html
HTTP(..) == reflow-sanity-delay-1-metrics.html reflow-sanity-1-ref.html skip-if(winWidget&&!isDebugBuild) HTTP(..) == reflow-sanity-delay-1-metrics.html reflow-sanity-1-ref.html
# font-display # font-display
skip-if(/^Linux\x20i686/.test(http.oscpu)) HTTP(..) == font-display-1.html font-display-1-ref.html # normal font load (~500ms) skip-if(/^Linux\x20i686/.test(http.oscpu)) HTTP(..) == font-display-1.html font-display-1-ref.html # normal font load (~500ms)

Просмотреть файл

@ -194,14 +194,25 @@ public:
mozilla::LogicalSide aOwner, mozilla::LogicalSide aOwner,
bool aBevel); bool aBevel);
bool IsIStartStart() const; inline bool IsIStartStart() const
{
return (bool)mIStartStart;
}
void SetIStartStart(bool aValue); inline void SetIStartStart(bool aValue)
{
mIStartStart = aValue;
}
bool IsBStartStart() const; inline bool IsBStartStart() const
{
void SetBStartStart(bool aValue); return (bool)mBStartStart;
}
inline void SetBStartStart(bool aValue)
{
mBStartStart = aValue;
}
protected: protected:
BCPixelSize mIStartSize; // size in pixels of iStart border BCPixelSize mIStartSize; // size in pixels of iStart border
@ -371,7 +382,8 @@ inline void CellData::SetOverlap(bool aOverlap)
inline BCData::BCData() inline BCData::BCData()
{ {
mIStartOwner = mBStartOwner = eCellOwner; mIStartOwner = mBStartOwner = eCellOwner;
mIStartStart = mBStartStart = 1; SetBStartStart(true);
SetIStartStart(true);
mIStartSize = mCornerSubSize = mBStartSize = 0; mIStartSize = mCornerSubSize = mBStartSize = 0;
mCornerSide = mozilla::eLogicalSideBStart; mCornerSide = mozilla::eLogicalSideBStart;
mCornerBevel = false; mCornerBevel = false;
@ -385,7 +397,7 @@ inline nscoord BCData::GetIStartEdge(BCBorderOwner& aOwner,
bool& aStart) const bool& aStart) const
{ {
aOwner = (BCBorderOwner)mIStartOwner; aOwner = (BCBorderOwner)mIStartOwner;
aStart = (bool)mIStartStart; aStart = IsIStartStart();
return (nscoord)mIStartSize; return (nscoord)mIStartSize;
} }
@ -396,14 +408,14 @@ inline void BCData::SetIStartEdge(BCBorderOwner aOwner,
{ {
mIStartOwner = aOwner; mIStartOwner = aOwner;
mIStartSize = (aSize > MAX_BORDER_WIDTH) ? MAX_BORDER_WIDTH : aSize; mIStartSize = (aSize > MAX_BORDER_WIDTH) ? MAX_BORDER_WIDTH : aSize;
mIStartStart = aStart; SetIStartStart(aStart);
} }
inline nscoord BCData::GetBStartEdge(BCBorderOwner& aOwner, inline nscoord BCData::GetBStartEdge(BCBorderOwner& aOwner,
bool& aStart) const bool& aStart) const
{ {
aOwner = (BCBorderOwner)mBStartOwner; aOwner = (BCBorderOwner)mBStartOwner;
aStart = (bool)mBStartStart; aStart = IsBStartStart();
return (nscoord)mBStartSize; return (nscoord)mBStartSize;
} }
@ -414,7 +426,7 @@ inline void BCData::SetBStartEdge(BCBorderOwner aOwner,
{ {
mBStartOwner = aOwner; mBStartOwner = aOwner;
mBStartSize = (aSize > MAX_BORDER_WIDTH) ? MAX_BORDER_WIDTH : aSize; mBStartSize = (aSize > MAX_BORDER_WIDTH) ? MAX_BORDER_WIDTH : aSize;
mBStartStart = aStart; SetBStartStart(aStart);
} }
inline BCPixelSize BCData::GetCorner(mozilla::LogicalSide& aOwnerSide, inline BCPixelSize BCData::GetCorner(mozilla::LogicalSide& aOwnerSide,
@ -434,24 +446,4 @@ inline void BCData::SetCorner(BCPixelSize aSubSize,
mCornerBevel = aBevel; mCornerBevel = aBevel;
} }
inline bool BCData::IsIStartStart() const
{
return (bool)mIStartStart;
}
inline void BCData::SetIStartStart(bool aValue)
{
mIStartStart = aValue;
}
inline bool BCData::IsBStartStart() const
{
return (bool)mBStartStart;
}
inline void BCData::SetBStartStart(bool aValue)
{
mBStartStart = aValue;
}
#endif #endif
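
The BCData hunk replaces direct writes to the mIStartStart/mBStartStart bitfields with inline getters and setters defined in the class body, so every reader and writer goes through one accessor instead of re-casting raw bits. A small self-contained sketch of the same pattern; field names and widths are illustrative only.

#include <cassert>
#include <cstdint>

// Packed border state, mirroring the accessor style BCData now uses.
class BorderBits {
public:
  BorderBits() {
    // Prefer the setters over raw bitfield assignments like "a = b = 1".
    SetIStartStart(true);
    SetBStartStart(true);
  }

  inline bool IsIStartStart() const { return (bool)mIStartStart; }
  inline void SetIStartStart(bool aValue) { mIStartStart = aValue; }

  inline bool IsBStartStart() const { return (bool)mBStartStart; }
  inline void SetBStartStart(bool aValue) { mBStartStart = aValue; }

private:
  uint32_t mIStartStart : 1;
  uint32_t mBStartStart : 1;
};

int main() {
  BorderBits b;
  assert(b.IsIStartStart() && b.IsBStartStart());
  b.SetBStartStart(false);
  assert(!b.IsBStartStart());
}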

Просмотреть файл

@ -1312,8 +1312,8 @@ pref("dom.timeout.foreground_budget_regeneration_rate", 1);
pref("dom.timeout.foreground_throttling_max_budget", -1); pref("dom.timeout.foreground_throttling_max_budget", -1);
// The maximum amount a timeout can be delayed by budget throttling // The maximum amount a timeout can be delayed by budget throttling
pref("dom.timeout.budget_throttling_max_delay", 15000); pref("dom.timeout.budget_throttling_max_delay", 15000);
// Turn off budget throttling by default // Turn on budget throttling by default
pref("dom.timeout.enable_budget_timer_throttling", false); pref("dom.timeout.enable_budget_timer_throttling", true);
// Don't use new input types // Don't use new input types
pref("dom.experimental_forms", false); pref("dom.experimental_forms", false);

Просмотреть файл

@ -106,6 +106,28 @@ nsBaseChannel::Redirect(nsIChannel *newChannel, uint32_t redirectFlags,
new nsRedirectHistoryEntry(uriPrincipal, nullptr, EmptyCString()); new nsRedirectHistoryEntry(uriPrincipal, nullptr, EmptyCString());
newLoadInfo->AppendRedirectHistoryEntry(entry, isInternalRedirect); newLoadInfo->AppendRedirectHistoryEntry(entry, isInternalRedirect);
// Ensure the channel's loadInfo's result principal URI is set, so that it's
// either non-null or updated to the redirect target URI.
// We must do this because if the loadInfo's result principal URI is null,
// it would be taken from the channel's OriginalURI. But we overwrite that
// with the first URI of the whole redirect chain before opening the target
// channel, so the information would otherwise be lost.
// If the protocol handler that created the channel wants to use
// the originalURI of the channel as the principal URI, it has left
// the result principal URI on the load info null.
nsCOMPtr<nsIURI> resultPrincipalURI;
nsCOMPtr<nsILoadInfo> existingLoadInfo = newChannel->GetLoadInfo();
if (existingLoadInfo) {
existingLoadInfo->GetResultPrincipalURI(getter_AddRefs(resultPrincipalURI));
}
if (!resultPrincipalURI) {
newChannel->GetOriginalURI(getter_AddRefs(resultPrincipalURI));
}
newLoadInfo->SetResultPrincipalURI(resultPrincipalURI);
newChannel->SetLoadInfo(newLoadInfo); newChannel->SetLoadInfo(newLoadInfo);
} }
else { else {
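
The added block encodes a simple fallback: keep whatever result principal URI the new channel's load info already carries, and only when that is null substitute the channel's original URI, so the pre-redirect principal information survives later rewrites of OriginalURI. A standalone sketch of that fallback; plain shared_ptr and structs stand in for nsCOMPtr and the XPCOM getters.

#include <iostream>
#include <memory>
#include <string>

struct URI { std::string spec; };

struct LoadInfo {
  std::shared_ptr<URI> resultPrincipalURI;  // may be null
};

struct Channel {
  std::shared_ptr<URI> originalURI;
  LoadInfo loadInfo;
};

// Mirrors the redirect logic: prefer an explicit result principal URI,
// otherwise fall back to the channel's original URI before it is later
// overwritten with the head of the redirect chain.
void EnsureResultPrincipalURI(Channel& newChannel, LoadInfo& newLoadInfo) {
  std::shared_ptr<URI> resultPrincipalURI = newChannel.loadInfo.resultPrincipalURI;
  if (!resultPrincipalURI) {
    resultPrincipalURI = newChannel.originalURI;
  }
  newLoadInfo.resultPrincipalURI = resultPrincipalURI;
}

int main() {
  Channel c{std::make_shared<URI>(URI{"https://example.com/original"}), {}};
  LoadInfo redirected;
  EnsureResultPrincipalURI(c, redirected);
  std::cout << redirected.resultPrincipalURI->spec << "\n";  // falls back to the original URI
}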

Просмотреть файл

@ -68,7 +68,7 @@ interface nsIInterceptedChannel : nsISupports
* @return NS_ERROR_FAILURE if the response has already been synthesized or * @return NS_ERROR_FAILURE if the response has already been synthesized or
* the original request has been instructed to continue. * the original request has been instructed to continue.
*/ */
void cancel(in nsresult status); void cancelInterception(in nsresult status);
/** /**
* The synthesized response body to be produced. * The synthesized response body to be produced.

Просмотреть файл

@ -780,7 +780,6 @@ bool nsIDNService::isLabelSafe(const nsAString &label)
if (illegalScriptCombo(script, savedScript)) { if (illegalScriptCombo(script, savedScript)) {
return false; return false;
} }
lastScript = script;
} }
// Check for mixed numbering systems // Check for mixed numbering systems
@ -833,6 +832,10 @@ bool nsIDNService::isLabelSafe(const nsAString &label)
} }
} }
if (script != Script::COMMON && script != Script::INHERITED) {
lastScript = script;
}
// Simplified/Traditional Chinese check temporarily disabled -- bug 857481 // Simplified/Traditional Chinese check temporarily disabled -- bug 857481
#if 0 #if 0
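
The point of moving the lastScript update is that characters in the COMMON or INHERITED scripts (digits, punctuation, combining marks) must not reset the script that later letters are compared against; only "real" scripts take part in the mixing check. A reduced sketch of that loop follows, with a toy classifier standing in for the ICU script data the real isLabelSafe consults (COMMON only; INHERITED is handled the same way).

#include <string>

enum class Script { Common, Latin, Cyrillic };

// Toy classifier: Cyrillic block, ASCII letters, everything else Common.
// The real code asks ICU for the Unicode script property.
static Script Classify(char32_t ch) {
  if (ch >= 0x0400 && ch <= 0x04FF) return Script::Cyrillic;
  if ((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z')) return Script::Latin;
  return Script::Common;
}

// Returns false when two different "real" scripts appear in one label.
static bool IsLabelSafe(const std::u32string& label) {
  Script lastScript = Script::Common;
  for (char32_t ch : label) {
    Script script = Classify(ch);
    if (script != Script::Common && lastScript != Script::Common &&
        script != lastScript) {
      return false;  // illegal script combination
    }
    // Only remember real scripts; Common characters must not clear the state.
    if (script != Script::Common) {
      lastScript = script;
    }
  }
  return true;
}

int main() {
  std::u32string latinOnly = U"example-123";  // Latin letters plus Common digits
  std::u32string mixed = U"ex";
  mixed += char32_t(0x0430);                  // Cyrillic 'a' spoofing the Latin letter
  mixed += U"mple";
  return (IsLabelSafe(latinOnly) && !IsLabelSafe(mixed)) ? 0 : 1;
}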

Просмотреть файл

@ -371,6 +371,36 @@ public: /* Necko internal use only... */
return mChannelId; return mChannelId;
} }
void InternalSetUploadStream(nsIInputStream *uploadStream)
{
mUploadStream = uploadStream;
}
void SetUploadStreamHasHeaders(bool hasHeaders)
{
mUploadStreamHasHeaders = hasHeaders;
}
MOZ_MUST_USE nsresult
SetReferrerWithPolicyInternal(nsIURI *referrer, uint32_t referrerPolicy)
{
nsAutoCString spec;
nsresult rv = referrer->GetAsciiSpec(spec);
if (NS_FAILED(rv)) {
return rv;
}
mReferrer = referrer;
mReferrerPolicy = referrerPolicy;
rv = mRequestHead.SetHeader(nsHttp::Referer, spec);
return rv;
}
MOZ_MUST_USE nsresult SetTopWindowURI(nsIURI* aTopWindowURI)
{
mTopWindowURI = aTopWindowURI;
return NS_OK;
}
protected: protected:
// Handle notifying listener, removing from loadgroup if request failed. // Handle notifying listener, removing from loadgroup if request failed.
void DoNotifyListener(); void DoNotifyListener();

Просмотреть файл

@ -3570,12 +3570,6 @@ HttpChannelChild::ForceIntercepted(bool aPostRedirectChannelShouldIntercept,
return NS_OK; return NS_OK;
} }
NS_IMETHODIMP
HttpChannelChild::ForceIntercepted(uint64_t aInterceptionID)
{
return NS_ERROR_NOT_IMPLEMENTED;
}
void void
HttpChannelChild::ForceIntercepted(nsIInputStream* aSynthesizedInput) HttpChannelChild::ForceIntercepted(nsIInputStream* aSynthesizedInput)
{ {

Просмотреть файл

@ -98,7 +98,6 @@ public:
NS_IMETHOD GetProtocolVersion(nsACString& aProtocolVersion) override; NS_IMETHOD GetProtocolVersion(nsACString& aProtocolVersion) override;
// nsIHttpChannelInternal // nsIHttpChannelInternal
NS_IMETHOD SetupFallbackChannel(const char *aFallbackKey) override; NS_IMETHOD SetupFallbackChannel(const char *aFallbackKey) override;
NS_IMETHOD ForceIntercepted(uint64_t aInterceptionID) override;
// nsISupportsPriority // nsISupportsPriority
NS_IMETHOD SetPriority(int32_t value) override; NS_IMETHOD SetPriority(int32_t value) override;
// nsIClassOfService // nsIClassOfService

Просмотреть файл

@ -113,7 +113,7 @@ HttpChannelParent::ActorDestroy(ActorDestroyReason why)
// If this is an intercepted channel, we need to make sure that any resources are // If this is an intercepted channel, we need to make sure that any resources are
// cleaned up to avoid leaks. // cleaned up to avoid leaks.
if (mParentListener) { if (mParentListener) {
mParentListener->ClearInterceptedChannel(); mParentListener->ClearInterceptedChannel(this);
} }
CleanupBackgroundChannel(); CleanupBackgroundChannel();
@ -235,8 +235,9 @@ HttpChannelParent::CleanupBackgroundChannel()
// The nsHttpChannel may have a reference to this parent, release it // The nsHttpChannel may have a reference to this parent, release it
// to avoid circular references. // to avoid circular references.
if (mChannel) { RefPtr<nsHttpChannel> httpChannelImpl = do_QueryObject(mChannel);
mChannel->SetWarningReporter(nullptr); if (httpChannelImpl) {
httpChannelImpl->SetWarningReporter(nullptr);
} }
if (!mPromise.IsEmpty()) { if (!mPromise.IsEmpty()) {
@ -521,7 +522,7 @@ HttpChannelParent::DoAsyncOpen( const URIParams& aURI,
return SendFailedAsyncOpen(rv); return SendFailedAsyncOpen(rv);
} }
RefPtr<nsHttpChannel> httpChannel = do_QueryObject(channel, &rv); RefPtr<HttpBaseChannel> httpChannel = do_QueryObject(channel, &rv);
if (NS_FAILED(rv)) { if (NS_FAILED(rv)) {
return SendFailedAsyncOpen(rv); return SendFailedAsyncOpen(rv);
} }
@ -531,7 +532,10 @@ HttpChannelParent::DoAsyncOpen( const URIParams& aURI,
httpChannel->SetTopLevelContentWindowId(aContentWindowId); httpChannel->SetTopLevelContentWindowId(aContentWindowId);
httpChannel->SetTopLevelOuterContentWindowId(aTopLevelOuterContentWindowId); httpChannel->SetTopLevelOuterContentWindowId(aTopLevelOuterContentWindowId);
httpChannel->SetWarningReporter(this); RefPtr<nsHttpChannel> httpChannelImpl = do_QueryObject(httpChannel);
if (httpChannelImpl) {
httpChannelImpl->SetWarningReporter(this);
}
httpChannel->SetTimingEnabled(true); httpChannel->SetTimingEnabled(true);
if (mPBOverride != kPBOverride_Unset) { if (mPBOverride != kPBOverride_Unset) {
httpChannel->SetPrivate(mPBOverride == kPBOverride_Private ? true : false); httpChannel->SetPrivate(mPBOverride == kPBOverride_Private ? true : false);
@ -645,7 +649,7 @@ HttpChannelParent::DoAsyncOpen( const URIParams& aURI,
if (aSynthesizedResponseHead.type() == OptionalHttpResponseHead::TnsHttpResponseHead) { if (aSynthesizedResponseHead.type() == OptionalHttpResponseHead::TnsHttpResponseHead) {
parentListener->SetupInterception(aSynthesizedResponseHead.get_nsHttpResponseHead()); parentListener->SetupInterception(aSynthesizedResponseHead.get_nsHttpResponseHead());
mWillSynthesizeResponse = true; mWillSynthesizeResponse = true;
httpChannel->SetCouldBeSynthesized(); httpChannelImpl->SetCouldBeSynthesized();
if (!aSecurityInfoSerialization.IsEmpty()) { if (!aSecurityInfoSerialization.IsEmpty()) {
nsCOMPtr<nsISupports> secInfo; nsCOMPtr<nsISupports> secInfo;
@ -671,10 +675,14 @@ HttpChannelParent::DoAsyncOpen( const URIParams& aURI,
return SendFailedAsyncOpen(rv); return SendFailedAsyncOpen(rv);
} }
httpChannel->SetCacheKey(cacheKey); nsCOMPtr<nsICacheInfoChannel> cacheChannel =
httpChannel->PreferAlternativeDataType(aPreferredAlternativeType); do_QueryInterface(static_cast<nsIChannel*>(httpChannel.get()));
if (cacheChannel) {
cacheChannel->SetCacheKey(cacheKey);
cacheChannel->PreferAlternativeDataType(aPreferredAlternativeType);
httpChannel->SetAllowStaleCacheContent(aAllowStaleCacheContent); cacheChannel->SetAllowStaleCacheContent(aAllowStaleCacheContent);
}
httpChannel->SetContentType(aContentTypeHint); httpChannel->SetContentType(aContentTypeHint);
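
Most of this file's changes follow a single pattern: HttpChannelParent now stores the channel as the HttpBaseChannel base class and only downcasts, via do_QueryObject or a QueryInterface to nsICacheInfoChannel, at the few spots that genuinely need nsHttpChannel-specific behaviour, so intercepted channels that are not nsHttpChannel keep working. A plain-C++ analogy of that pattern, using dynamic_cast in place of the XPCOM query machinery; the class names are stand-ins.

#include <iostream>

// Stand-ins for HttpBaseChannel and nsHttpChannel.
class BaseChannel {
public:
  virtual ~BaseChannel() = default;
  void SetContentType(const char* type) { std::cout << "content type: " << type << "\n"; }
};

class RealHttpChannel : public BaseChannel {
public:
  void SetWarningReporter(void* reporter) {
    std::cout << "warning reporter " << (reporter ? "set" : "cleared") << "\n";
  }
};

class SyntheticChannel : public BaseChannel {};  // e.g. a synthesized/intercepted channel

// The caller assumes only the base interface; subclass-only calls are guarded.
void Configure(BaseChannel* channel) {
  channel->SetContentType("text/html");
  if (auto* impl = dynamic_cast<RealHttpChannel*>(channel)) {
    impl->SetWarningReporter(nullptr);  // only meaningful for the concrete HTTP channel
  }
}

int main() {
  RealHttpChannel real;
  SyntheticChannel synthetic;
  Configure(&real);       // both calls run
  Configure(&synthetic);  // the downcast fails and the extra call is skipped
}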
@ -814,14 +822,17 @@ HttpChannelParent::ConnectChannel(const uint32_t& registrarId, const bool& shoul
LOG((" found channel %p, rv=%08" PRIx32, channel.get(), static_cast<uint32_t>(rv))); LOG((" found channel %p, rv=%08" PRIx32, channel.get(), static_cast<uint32_t>(rv)));
mChannel = do_QueryObject(channel); mChannel = do_QueryObject(channel);
if (!mChannel) { if (!mChannel) {
LOG((" but it's not nsHttpChannel")); LOG((" but it's not HttpBaseChannel"));
Delete(); Delete();
return true; return true;
} }
LOG((" and it is nsHttpChannel %p", mChannel.get())); LOG((" and it is HttpBaseChannel %p", mChannel.get()));
mChannel->SetWarningReporter(this); RefPtr<nsHttpChannel> httpChannelImpl = do_QueryObject(mChannel);
if (httpChannelImpl) {
httpChannelImpl->SetWarningReporter(this);
}
nsCOMPtr<nsINetworkInterceptController> controller; nsCOMPtr<nsINetworkInterceptController> controller;
NS_QueryNotificationCallbacks(channel, controller); NS_QueryNotificationCallbacks(channel, controller);
@ -1409,15 +1420,15 @@ HttpChannelParent::OnStartRequest(nsIRequest *aRequest, nsISupports *aContext)
MOZ_RELEASE_ASSERT(!mDivertingFromChild, MOZ_RELEASE_ASSERT(!mDivertingFromChild,
"Cannot call OnStartRequest if diverting is set!"); "Cannot call OnStartRequest if diverting is set!");
RefPtr<nsHttpChannel> chan = do_QueryObject(aRequest); RefPtr<HttpBaseChannel> chan = do_QueryObject(aRequest);
if (!chan) { if (!chan) {
LOG((" aRequest is not nsHttpChannel")); LOG((" aRequest is not HttpBaseChannel"));
NS_ERROR("Expecting only nsHttpChannel as aRequest in HttpChannelParent::OnStartRequest"); NS_ERROR("Expecting only HttpBaseChannel as aRequest in HttpChannelParent::OnStartRequest");
return NS_ERROR_UNEXPECTED; return NS_ERROR_UNEXPECTED;
} }
MOZ_ASSERT(mChannel == chan, MOZ_ASSERT(mChannel == chan,
"HttpChannelParent getting OnStartRequest from a different nsHttpChannel instance"); "HttpChannelParent getting OnStartRequest from a different HttpBaseChannel instance");
// Send down any permissions which are relevant to this URL if we are // Send down any permissions which are relevant to this URL if we are
// performing a document load. We can't do that if mIPCClosed is set. // performing a document load. We can't do that if mIPCClosed is set.
@ -1432,20 +1443,27 @@ HttpChannelParent::OnStartRequest(nsIRequest *aRequest, nsISupports *aContext)
nsHttpResponseHead *responseHead = chan->GetResponseHead(); nsHttpResponseHead *responseHead = chan->GetResponseHead();
nsHttpRequestHead *requestHead = chan->GetRequestHead(); nsHttpRequestHead *requestHead = chan->GetRequestHead();
bool isFromCache = false; bool isFromCache = false;
chan->IsFromCache(&isFromCache);
int32_t fetchCount = 0; int32_t fetchCount = 0;
chan->GetCacheTokenFetchCount(&fetchCount);
uint32_t expirationTime = nsICacheEntry::NO_EXPIRATION_TIME; uint32_t expirationTime = nsICacheEntry::NO_EXPIRATION_TIME;
chan->GetCacheTokenExpirationTime(&expirationTime);
nsCString cachedCharset; nsCString cachedCharset;
chan->GetCacheTokenCachedCharset(cachedCharset);
bool loadedFromApplicationCache; RefPtr<nsHttpChannel> httpChannelImpl = do_QueryObject(chan);
chan->GetLoadedFromApplicationCache(&loadedFromApplicationCache);
if (httpChannelImpl) {
httpChannelImpl->IsFromCache(&isFromCache);
httpChannelImpl->GetCacheTokenFetchCount(&fetchCount);
httpChannelImpl->GetCacheTokenExpirationTime(&expirationTime);
httpChannelImpl->GetCacheTokenCachedCharset(cachedCharset);
}
bool loadedFromApplicationCache = false;
if (httpChannelImpl) {
httpChannelImpl->GetLoadedFromApplicationCache(&loadedFromApplicationCache);
if (loadedFromApplicationCache) { if (loadedFromApplicationCache) {
mOfflineForeignMarker = chan->GetOfflineCacheEntryAsForeignMarker(); mOfflineForeignMarker = httpChannelImpl->GetOfflineCacheEntryAsForeignMarker();
nsCOMPtr<nsIApplicationCache> appCache; nsCOMPtr<nsIApplicationCache> appCache;
chan->GetApplicationCache(getter_AddRefs(appCache)); httpChannelImpl->GetApplicationCache(getter_AddRefs(appCache));
nsCString appCacheGroupId; nsCString appCacheGroupId;
nsCString appCacheClientId; nsCString appCacheClientId;
appCache->GetGroupID(appCacheGroupId); appCache->GetGroupID(appCacheGroupId);
@ -1456,6 +1474,7 @@ HttpChannelParent::OnStartRequest(nsIRequest *aRequest, nsISupports *aContext)
return NS_ERROR_UNEXPECTED; return NS_ERROR_UNEXPECTED;
} }
} }
}
nsCOMPtr<nsIEncodedChannel> encodedChannel = do_QueryInterface(aRequest); nsCOMPtr<nsIEncodedChannel> encodedChannel = do_QueryInterface(aRequest);
if (encodedChannel) if (encodedChannel)
@ -1464,21 +1483,18 @@ HttpChannelParent::OnStartRequest(nsIRequest *aRequest, nsISupports *aContext)
// Keep the cache entry for future use in RecvSetCacheTokenCachedCharset(). // Keep the cache entry for future use in RecvSetCacheTokenCachedCharset().
// It could be already released by nsHttpChannel at that time. // It could be already released by nsHttpChannel at that time.
nsCOMPtr<nsISupports> cacheEntry; nsCOMPtr<nsISupports> cacheEntry;
chan->GetCacheToken(getter_AddRefs(cacheEntry)); nsresult channelStatus = NS_OK;
uint32_t cacheKeyValue = 0;
nsAutoCString altDataType;
if (httpChannelImpl) {
httpChannelImpl->GetCacheToken(getter_AddRefs(cacheEntry));
mCacheEntry = do_QueryInterface(cacheEntry); mCacheEntry = do_QueryInterface(cacheEntry);
nsresult channelStatus = NS_OK; httpChannelImpl->GetStatus(&channelStatus);
chan->GetStatus(&channelStatus);
nsCString secInfoSerialization;
UpdateAndSerializeSecurityInfo(secInfoSerialization);
uint8_t redirectCount = 0;
chan->GetRedirectCount(&redirectCount);
nsCOMPtr<nsISupports> cacheKey; nsCOMPtr<nsISupports> cacheKey;
chan->GetCacheKey(getter_AddRefs(cacheKey)); httpChannelImpl->GetCacheKey(getter_AddRefs(cacheKey));
uint32_t cacheKeyValue = 0;
if (cacheKey) { if (cacheKey) {
nsCOMPtr<nsISupportsPRUint32> container = do_QueryInterface(cacheKey); nsCOMPtr<nsISupportsPRUint32> container = do_QueryInterface(cacheKey);
if (!container) { if (!container) {
@ -1491,8 +1507,15 @@ HttpChannelParent::OnStartRequest(nsIRequest *aRequest, nsISupports *aContext)
} }
} }
nsAutoCString altDataType; httpChannelImpl->GetAlternativeDataType(altDataType);
chan->GetAlternativeDataType(altDataType); }
nsCString secInfoSerialization;
UpdateAndSerializeSecurityInfo(secInfoSerialization);
uint8_t redirectCount = 0;
chan->GetRedirectCount(&redirectCount);
int64_t altDataLen = chan->GetAltDataLength(); int64_t altDataLen = chan->GetAltDataLength();
// !!! We need to lock headers and please don't forget to unlock them !!! // !!! We need to lock headers and please don't forget to unlock them !!!
@ -1562,7 +1585,10 @@ HttpChannelParent::OnStopRequest(nsIRequest *aRequest,
mChannel->GetCacheReadStart(&timing.cacheReadStart); mChannel->GetCacheReadStart(&timing.cacheReadStart);
mChannel->GetCacheReadEnd(&timing.cacheReadEnd); mChannel->GetCacheReadEnd(&timing.cacheReadEnd);
mChannel->SetWarningReporter(nullptr); RefPtr<nsHttpChannel> httpChannelImpl = do_QueryObject(mChannel);
if (httpChannelImpl) {
httpChannelImpl->SetWarningReporter(nullptr);
}
// Either IPC channel is closed or background channel // Either IPC channel is closed or background channel
// is ready to send OnStopRequest. // is ready to send OnStopRequest.
@ -1597,9 +1623,11 @@ HttpChannelParent::OnDataAvailable(nsIRequest *aRequest,
nsresult channelStatus = NS_OK; nsresult channelStatus = NS_OK;
mChannel->GetStatus(&channelStatus); mChannel->GetStatus(&channelStatus);
nsresult transportStatus = nsresult transportStatus = NS_NET_STATUS_RECEIVING_FROM;
(mChannel->IsReadingFromCache()) ? NS_NET_STATUS_READING RefPtr<nsHttpChannel> httpChannelImpl = do_QueryObject(mChannel);
: NS_NET_STATUS_RECEIVING_FROM; if (httpChannelImpl && httpChannelImpl->IsReadingFromCache()) {
transportStatus = NS_NET_STATUS_READING;
}
static uint32_t const kCopyChunkSize = 128 * 1024; static uint32_t const kCopyChunkSize = 128 * 1024;
uint32_t toRead = std::min<uint32_t>(aCount, kCopyChunkSize); uint32_t toRead = std::min<uint32_t>(aCount, kCopyChunkSize);
@ -1778,8 +1806,40 @@ HttpChannelParent::StartRedirect(uint32_t registrarId,
"newChannel=%p callback=%p]\n", this, registrarId, newChannel, "newChannel=%p callback=%p]\n", this, registrarId, newChannel,
callback)); callback));
if (mIPCClosed) if (mIPCClosed) {
return NS_BINDING_ABORTED; return NS_BINDING_ABORTED;
}
// If this is an internal redirect for service worker interception, then
// hide it from the child process. The original e10s interception code
// was not designed with this in mind and it's not necessary to replace
// the HttpChannelChild/Parent objects in this case.
if (redirectFlags & nsIChannelEventSink::REDIRECT_INTERNAL) {
nsCOMPtr<nsIInterceptedChannel> newIntercepted = do_QueryInterface(newChannel);
if (newIntercepted) {
#ifdef DEBUG
// Note, InterceptedHttpChannel can also do an internal redirect
// for opaque response interception. This should not actually
// happen here in e10s mode.
nsCOMPtr<nsIInterceptedChannel> oldIntercepted =
do_QueryInterface(static_cast<nsIChannel*>(mChannel.get()));
MOZ_ASSERT(!oldIntercepted);
#endif
// Re-link the HttpChannelParent to the new InterceptedHttpChannel.
nsCOMPtr<nsIChannel> linkedChannel;
rv = NS_LinkRedirectChannels(registrarId, this, getter_AddRefs(linkedChannel));
NS_ENSURE_SUCCESS(rv, rv);
MOZ_ASSERT(linkedChannel == newChannel);
// We immediately store the InterceptedHttpChannel as our nested
// mChannel. None of the redirect IPC messaging takes place.
mChannel = do_QueryObject(newChannel);
callback->OnRedirectVerifyCallback(NS_OK);
return NS_OK;
}
}
// Sending down the original URI, because that is the URI we have // Sending down the original URI, because that is the URI we have
// to construct the channel from - this is the URI we've been actually // to construct the channel from - this is the URI we've been actually
@ -1798,7 +1858,7 @@ HttpChannelParent::StartRedirect(uint32_t registrarId,
// If the channel is an HTTP channel, we also want to inform the child // If the channel is an HTTP channel, we also want to inform the child
// about the parent's channelId attribute, so that both parent and child // about the parent's channelId attribute, so that both parent and child
// share the same ID. Useful for monitoring channel activity in devtools. // share the same ID. Useful for monitoring channel activity in devtools.
uint64_t channelId; uint64_t channelId = 0;
nsCOMPtr<nsIHttpChannel> httpChannel = do_QueryInterface(newChannel); nsCOMPtr<nsIHttpChannel> httpChannel = do_QueryInterface(newChannel);
if (httpChannel) { if (httpChannel) {
rv = httpChannel->GetChannelId(&channelId); rv = httpChannel->GetChannelId(&channelId);
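
The comment block above spells out the shortcut: when the redirect carries REDIRECT_INTERNAL and the target is an intercepted channel, the parent simply relinks itself to the new channel and confirms the redirect, never replaying the redirect over IPC to the child. A condensed sketch of that control flow; the flag value, channel types and callback are simplified placeholders rather than the real Necko interfaces.

#include <cstdint>
#include <functional>
#include <iostream>

constexpr uint32_t REDIRECT_INTERNAL = 1 << 2;  // placeholder flag value

struct Channel { bool intercepted = false; };

struct RedirectCallback { std::function<void(int)> verify; };

// Returns true when the redirect was swallowed parent-side; otherwise the
// caller continues with the normal child-visible redirect protocol.
bool MaybeHideInternalRedirect(Channel*& current, Channel* target,
                               uint32_t redirectFlags, RedirectCallback& cb) {
  if ((redirectFlags & REDIRECT_INTERNAL) && target->intercepted) {
    current = target;         // re-link to the intercepted channel in place
    cb.verify(0 /* NS_OK */); // confirm immediately; no IPC round trip
    return true;
  }
  return false;
}

int main() {
  Channel original, interceptedTarget{true};
  Channel* current = &original;
  RedirectCallback cb{[](int rv) { std::cout << "redirect verified, rv=" << rv << "\n"; }};
  bool hidden = MaybeHideInternalRedirect(current, &interceptedTarget,
                                          REDIRECT_INTERNAL, cb);
  std::cout << (hidden ? "redirect hidden from the child\n" : "normal redirect path\n");
}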
@ -1839,6 +1899,13 @@ HttpChannelParent::CompleteRedirect(bool succeeded)
LOG(("HttpChannelParent::CompleteRedirect [this=%p succeeded=%d]\n", LOG(("HttpChannelParent::CompleteRedirect [this=%p succeeded=%d]\n",
this, succeeded)); this, succeeded));
// If this was an internal redirect for a service worker interception then
// we will not have a redirecting channel here. Hide this redirect from
// the child.
if (!mRedirectChannel) {
return NS_OK;
}
if (succeeded && !mIPCClosed) { if (succeeded && !mIPCClosed) {
// TODO: check return value: assume child dead if failed // TODO: check return value: assume child dead if failed
Unused << SendRedirect3Complete(); Unused << SendRedirect3Complete();
@ -1873,7 +1940,9 @@ HttpChannelParent::SuspendForDiversion()
// MessageDiversionStarted call will suspend mEventQ as many times as the // MessageDiversionStarted call will suspend mEventQ as many times as the
// channel has been suspended, so that channel and this queue are in sync. // channel has been suspended, so that channel and this queue are in sync.
mChannel->MessageDiversionStarted(this); nsCOMPtr<nsIChannelWithDivertableParentListener> divertChannel =
do_QueryInterface(static_cast<nsIChannel*>(mChannel.get()));
divertChannel->MessageDiversionStarted(this);
nsresult rv = NS_OK; nsresult rv = NS_OK;
@ -1889,7 +1958,7 @@ HttpChannelParent::SuspendForDiversion()
// OnDataAvailable until diversion is over. At the same time we should // OnDataAvailable until diversion is over. At the same time we should
// send the diverted OnDataAvailable-s to the listeners and not queue them // send the diverted OnDataAvailable-s to the listeners and not queue them
// in mEventQ. // in mEventQ.
rv = mChannel->SuspendInternal(); rv = divertChannel->SuspendInternal();
MOZ_ASSERT(NS_SUCCEEDED(rv) || rv == NS_ERROR_NOT_AVAILABLE); MOZ_ASSERT(NS_SUCCEEDED(rv) || rv == NS_ERROR_NOT_AVAILABLE);
mSuspendedForDiversion = NS_SUCCEEDED(rv); mSuspendedForDiversion = NS_SUCCEEDED(rv);
} else { } else {
@ -1944,11 +2013,13 @@ HttpChannelParent::ResumeForDiversion()
return NS_ERROR_UNEXPECTED; return NS_ERROR_UNEXPECTED;
} }
mChannel->MessageDiversionStop(); nsCOMPtr<nsIChannelWithDivertableParentListener> divertChannel =
do_QueryInterface(static_cast<nsIChannel*>(mChannel.get()));
divertChannel->MessageDiversionStop();
if (mSuspendedForDiversion) { if (mSuspendedForDiversion) {
// The nsHttpChannel will deliver remaining OnData/OnStop for the transfer. // The nsHttpChannel will deliver remaining OnData/OnStop for the transfer.
nsresult rv = mChannel->ResumeInternal(); nsresult rv = divertChannel->ResumeInternal();
if (NS_WARN_IF(NS_FAILED(rv))) { if (NS_WARN_IF(NS_FAILED(rv))) {
return rv; return rv;
} }
@ -2104,7 +2175,9 @@ HttpChannelParent::NotifyDiversionFailed(nsresult aErrorCode)
// Resume only if we suspended earlier. // Resume only if we suspended earlier.
if (mSuspendedForDiversion) { if (mSuspendedForDiversion) {
mChannel->ResumeInternal(); nsCOMPtr<nsIChannelWithDivertableParentListener> divertChannel =
do_QueryInterface(static_cast<nsIChannel*>(mChannel.get()));
divertChannel->ResumeInternal();
} }
// Channel has already sent OnStartRequest to the child, so ensure that we // Channel has already sent OnStartRequest to the child, so ensure that we
// call it here if it hasn't already been called. // call it here if it hasn't already been called.

Просмотреть файл

@ -260,7 +260,7 @@ private:
friend class DivertStopRequestEvent; friend class DivertStopRequestEvent;
friend class DivertCompleteEvent; friend class DivertCompleteEvent;
RefPtr<nsHttpChannel> mChannel; RefPtr<HttpBaseChannel> mChannel;
nsCOMPtr<nsICacheEntry> mCacheEntry; nsCOMPtr<nsICacheEntry> mCacheEntry;
nsCOMPtr<nsIAssociatedContentSecurity> mAssociatedContentSecurity; nsCOMPtr<nsIAssociatedContentSecurity> mAssociatedContentSecurity;
bool mIPCClosed; // PHttpChannel actor has been Closed() bool mIPCClosed; // PHttpChannel actor has been Closed()

Просмотреть файл

@ -29,6 +29,7 @@ HttpChannelParentListener::HttpChannelParentListener(HttpChannelParent* aInitial
, mSuspendedForDiversion(false) , mSuspendedForDiversion(false)
, mShouldIntercept(false) , mShouldIntercept(false)
, mShouldSuspendIntercept(false) , mShouldSuspendIntercept(false)
, mInterceptCanceled(false)
{ {
LOG(("HttpChannelParentListener::HttpChannelParentListener [this=%p, next=%p]", LOG(("HttpChannelParentListener::HttpChannelParentListener [this=%p, next=%p]",
this, aInitialChannel)); this, aInitialChannel));
@ -247,14 +248,19 @@ HttpChannelParentListener::OnRedirectResult(bool succeeded)
} }
if (succeeded) { if (succeeded) {
// Switch to redirect channel and delete the old one. // Switch to redirect channel and delete the old one. Only do this
// if we are actually changing channels. During a service worker
// interception internal redirect we preserve the same HttpChannelParent.
if (!SameCOMIdentity(redirectChannel, mNextListener)) {
nsCOMPtr<nsIParentChannel> parent; nsCOMPtr<nsIParentChannel> parent;
parent = do_QueryInterface(mNextListener); parent = do_QueryInterface(mNextListener);
MOZ_ASSERT(parent); MOZ_ASSERT(parent);
parent->Delete(); parent->Delete();
mInterceptCanceled = false;
mNextListener = do_QueryInterface(redirectChannel); mNextListener = do_QueryInterface(redirectChannel);
MOZ_ASSERT(mNextListener); MOZ_ASSERT(mNextListener);
redirectChannel->SetParentListener(this); redirectChannel->SetParentListener(this);
}
} else if (redirectChannel) { } else if (redirectChannel) {
// Delete the redirect target channel: continue using old channel // Delete the redirect target channel: continue using old channel
redirectChannel->Delete(); redirectChannel->Delete();
@ -320,6 +326,23 @@ public:
NS_IMETHODIMP NS_IMETHODIMP
HttpChannelParentListener::ChannelIntercepted(nsIInterceptedChannel* aChannel) HttpChannelParentListener::ChannelIntercepted(nsIInterceptedChannel* aChannel)
{ {
// It's possible for the child-side interception to complete and tear down
// the actor before we even get this parent-side interception notification.
// In this case we want to let the interception succeed, but then immediately
// cancel it. If we return an error code from here then it might get
// propagated back to the child process where the interception did not encounter
// an error. Therefore cancel the new channel asynchronously from a runnable.
if (mInterceptCanceled) {
nsCOMPtr<nsIRunnable> r =
NewRunnableMethod<nsresult>("HttpChannelParentListener::CancelInterception",
aChannel,
&nsIInterceptedChannel::CancelInterception,
NS_BINDING_ABORTED);
MOZ_ALWAYS_SUCCEEDS(
SystemGroup::Dispatch(TaskCategory::Other, r.forget()));
return NS_OK;
}
if (mShouldSuspendIntercept) { if (mShouldSuspendIntercept) {
mInterceptedChannel = aChannel; mInterceptedChannel = aChannel;
return NS_OK; return NS_OK;
@ -381,6 +404,11 @@ HttpChannelParentListener::DivertTo(nsIStreamListener* aListener)
MOZ_ASSERT(aListener); MOZ_ASSERT(aListener);
MOZ_RELEASE_ASSERT(mSuspendedForDiversion, "Must already be suspended!"); MOZ_RELEASE_ASSERT(mSuspendedForDiversion, "Must already be suspended!");
// Reset mInterceptCanceled back to false every time a new listener is set.
// We only want to cancel the interception if our current listener has
// signaled it's cleaning up.
mInterceptCanceled = false;
mNextListener = aListener; mNextListener = aListener;
return ResumeForDiversion(); return ResumeForDiversion();
@ -405,12 +433,21 @@ HttpChannelParentListener::SetupInterceptionAfterRedirect(bool aShouldIntercept)
} }
void void
HttpChannelParentListener::ClearInterceptedChannel() HttpChannelParentListener::ClearInterceptedChannel(nsIStreamListener* aListener)
{ {
// Only cancel the interception if this is from our current listener. We
// can get spurious calls here from other HttpChannelParent instances being
// destroyed asynchronously.
if (!SameCOMIdentity(mNextListener, aListener)) {
return;
}
if (mInterceptedChannel) { if (mInterceptedChannel) {
mInterceptedChannel->Cancel(NS_ERROR_INTERCEPTION_FAILED); mInterceptedChannel->CancelInterception(NS_ERROR_INTERCEPTION_FAILED);
mInterceptedChannel = nullptr; mInterceptedChannel = nullptr;
} }
// Note that channel interception has been canceled. If we got this before
// the interception even occurred, we will trigger the cancel later.
mInterceptCanceled = true;
} }
} // namespace net } // namespace net
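
The listener now remembers a cancellation that arrives before the interception has happened and applies it later: ChannelIntercepted checks mInterceptCanceled and, if set, lets the interception succeed but schedules an asynchronous CancelInterception instead of returning an error that could leak back to the child. A small sketch of that "record the cancel, apply it when the event finally arrives" state machine; a queued std::function stands in for the dispatched runnable.

#include <functional>
#include <iostream>
#include <queue>

struct InterceptedChannel {
  void CancelInterception(int status) { std::cout << "canceled, status=" << status << "\n"; }
};

class ParentListener {
public:
  // Called when the child side tears down before the parent saw the interception.
  void ClearInterceptedChannel() { mInterceptCanceled = true; }

  // Called when the interception finally shows up: either proceed, or succeed
  // and immediately queue an asynchronous cancel.
  void ChannelIntercepted(InterceptedChannel* channel,
                          std::queue<std::function<void()>>& events) {
    if (mInterceptCanceled) {
      events.push([channel] { channel->CancelInterception(-1 /* e.g. NS_BINDING_ABORTED */); });
      return;
    }
    std::cout << "interception proceeds\n";
  }

private:
  bool mInterceptCanceled = false;
};

int main() {
  std::queue<std::function<void()>> events;  // stand-in for the runnable dispatch queue
  ParentListener listener;
  InterceptedChannel channel;

  listener.ClearInterceptedChannel();            // the cancel arrives first
  listener.ChannelIntercepted(&channel, events); // the interception arrives later

  while (!events.empty()) { events.front()(); events.pop(); }  // drain the "event loop"
}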

Просмотреть файл

@ -52,7 +52,7 @@ public:
void SetupInterception(const nsHttpResponseHead& aResponseHead); void SetupInterception(const nsHttpResponseHead& aResponseHead);
void SetupInterceptionAfterRedirect(bool aShouldIntercept); void SetupInterceptionAfterRedirect(bool aShouldIntercept);
void ClearInterceptedChannel(); void ClearInterceptedChannel(nsIStreamListener* aListener);
private: private:
virtual ~HttpChannelParentListener(); virtual ~HttpChannelParentListener();
@ -73,6 +73,10 @@ private:
bool mShouldIntercept; bool mShouldIntercept;
// Set if this channel should suspend on interception. // Set if this channel should suspend on interception.
bool mShouldSuspendIntercept; bool mShouldSuspendIntercept;
// Set if the channel interception has been canceled. Can be set before
// interception first occurs. In this case cancellation is deferred until
// the interception takes place.
bool mInterceptCanceled;
nsAutoPtr<nsHttpResponseHead> mSynthesizedResponseHead; nsAutoPtr<nsHttpResponseHead> mSynthesizedResponseHead;

Просмотреть файл

@ -213,215 +213,6 @@ InterceptedChannelBase::SecureUpgradeChannelURI(nsIChannel* aChannel)
return upgradedURI.forget(); return upgradedURI.forget();
} }
InterceptedChannelChrome::InterceptedChannelChrome(nsHttpChannel* aChannel,
nsINetworkInterceptController* aController,
nsICacheEntry* aEntry)
: InterceptedChannelBase(aController)
, mChannel(aChannel)
, mSynthesizedCacheEntry(aEntry)
{
nsresult rv = mChannel->GetApplyConversion(&mOldApplyConversion);
if (NS_WARN_IF(NS_FAILED(rv))) {
mOldApplyConversion = false;
}
}
void
InterceptedChannelChrome::NotifyController()
{
// Intercepted responses should already be decoded.
mChannel->SetApplyConversion(false);
nsresult rv = mSynthesizedCacheEntry->OpenOutputStream(0, getter_AddRefs(mResponseBody));
NS_ENSURE_SUCCESS_VOID(rv);
DoNotifyController();
}
NS_IMETHODIMP
InterceptedChannelChrome::GetChannel(nsIChannel** aChannel)
{
NS_IF_ADDREF(*aChannel = mChannel);
return NS_OK;
}
NS_IMETHODIMP
InterceptedChannelChrome::ResetInterception()
{
if (mClosed) {
return NS_ERROR_NOT_AVAILABLE;
}
mReportCollector->FlushConsoleReports(mChannel);
mSynthesizedCacheEntry->AsyncDoom(nullptr);
mSynthesizedCacheEntry = nullptr;
mChannel->SetApplyConversion(mOldApplyConversion);
nsCOMPtr<nsIURI> uri;
mChannel->GetURI(getter_AddRefs(uri));
nsresult rv = mChannel->StartRedirectChannelToURI(uri, nsIChannelEventSink::REDIRECT_INTERNAL);
NS_ENSURE_SUCCESS(rv, rv);
mResponseBody->Close();
mResponseBody = nullptr;
mClosed = true;
return NS_OK;
}
NS_IMETHODIMP
InterceptedChannelChrome::SynthesizeStatus(uint16_t aStatus, const nsACString& aReason)
{
if (!mSynthesizedCacheEntry) {
return NS_ERROR_NOT_AVAILABLE;
}
return DoSynthesizeStatus(aStatus, aReason);
}
NS_IMETHODIMP
InterceptedChannelChrome::SynthesizeHeader(const nsACString& aName, const nsACString& aValue)
{
if (!mSynthesizedCacheEntry) {
return NS_ERROR_NOT_AVAILABLE;
}
return DoSynthesizeHeader(aName, aValue);
}
NS_IMETHODIMP
InterceptedChannelChrome::FinishSynthesizedResponse(const nsACString& aFinalURLSpec)
{
if (mClosed) {
return NS_ERROR_NOT_AVAILABLE;
}
// Make sure the cache entry's output stream is always closed. If the
// channel was intercepted with a null-body response then it's possible
// the synthesis completed without a stream copy operation.
mResponseBody->Close();
mReportCollector->FlushConsoleReports(mChannel);
EnsureSynthesizedResponse();
// If the synthesized response is a redirect, then we want to respect
// the encoding of whatever is loaded as a result.
if (nsHttpChannel::WillRedirect(mSynthesizedResponseHead.ref())) {
nsresult rv = mChannel->SetApplyConversion(mOldApplyConversion);
NS_ENSURE_SUCCESS(rv, rv);
}
mChannel->MarkIntercepted();
// First we ensure the appropriate metadata is set on the synthesized cache entry
// (i.e. the flattened response head)
nsCOMPtr<nsISupports> securityInfo;
nsresult rv = mChannel->GetSecurityInfo(getter_AddRefs(securityInfo));
NS_ENSURE_SUCCESS(rv, rv);
uint32_t expirationTime = 0;
rv = DoUpdateExpirationTime(mChannel, mSynthesizedCacheEntry,
mSynthesizedResponseHead.ref(),
expirationTime);
rv = DoAddCacheEntryHeaders(mChannel, mSynthesizedCacheEntry,
mChannel->GetRequestHead(),
mSynthesizedResponseHead.ref(), securityInfo);
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<nsIURI> originalURI;
mChannel->GetURI(getter_AddRefs(originalURI));
nsCOMPtr<nsIURI> responseURI;
if (!aFinalURLSpec.IsEmpty()) {
rv = NS_NewURI(getter_AddRefs(responseURI), aFinalURLSpec);
NS_ENSURE_SUCCESS(rv, rv);
} else {
responseURI = originalURI;
}
bool equal = false;
originalURI->Equals(responseURI, &equal);
if (!equal) {
rv =
mChannel->StartRedirectChannelToURI(responseURI, nsIChannelEventSink::REDIRECT_INTERNAL);
NS_ENSURE_SUCCESS(rv, rv);
} else {
bool usingSSL = false;
responseURI->SchemeIs("https", &usingSSL);
// Then we open a real cache entry to read the synthesized response from.
rv = mChannel->OpenCacheEntry(usingSSL);
NS_ENSURE_SUCCESS(rv, rv);
mSynthesizedCacheEntry = nullptr;
if (!mChannel->AwaitingCacheCallbacks()) {
rv = mChannel->ContinueConnect();
NS_ENSURE_SUCCESS(rv, rv);
}
}
mClosed = true;
return NS_OK;
}
NS_IMETHODIMP
InterceptedChannelChrome::Cancel(nsresult aStatus)
{
MOZ_ASSERT(NS_FAILED(aStatus));
if (mClosed) {
return NS_ERROR_FAILURE;
}
mReportCollector->FlushConsoleReports(mChannel);
// we need to use AsyncAbort instead of Cancel since there's no active pump
// to cancel which will provide OnStart/OnStopRequest to the channel.
nsresult rv = mChannel->AsyncAbort(aStatus);
NS_ENSURE_SUCCESS(rv, rv);
mClosed = true;
return NS_OK;
}
NS_IMETHODIMP
InterceptedChannelChrome::SetChannelInfo(dom::ChannelInfo* aChannelInfo)
{
if (mClosed) {
return NS_ERROR_FAILURE;
}
return aChannelInfo->ResurrectInfoOnChannel(mChannel);
}
NS_IMETHODIMP
InterceptedChannelChrome::GetInternalContentPolicyType(nsContentPolicyType* aPolicyType)
{
NS_ENSURE_ARG(aPolicyType);
nsCOMPtr<nsILoadInfo> loadInfo;
nsresult rv = mChannel->GetLoadInfo(getter_AddRefs(loadInfo));
NS_ENSURE_SUCCESS(rv, rv);
if (loadInfo) {
*aPolicyType = loadInfo->InternalContentPolicyType();
}
return NS_OK;
}
NS_IMETHODIMP
InterceptedChannelChrome::GetSecureUpgradedChannelURI(nsIURI** aURI)
{
return mChannel->GetURI(aURI);
}
InterceptedChannelContent::InterceptedChannelContent(HttpChannelChild* aChannel, InterceptedChannelContent::InterceptedChannelContent(HttpChannelChild* aChannel,
nsINetworkInterceptController* aController, nsINetworkInterceptController* aController,
InterceptStreamListener* aListener, InterceptStreamListener* aListener,
@ -541,7 +332,7 @@ InterceptedChannelContent::FinishSynthesizedResponse(const nsACString& aFinalURL
} }
NS_IMETHODIMP NS_IMETHODIMP
InterceptedChannelContent::Cancel(nsresult aStatus) InterceptedChannelContent::CancelInterception(nsresult aStatus)
{ {
MOZ_ASSERT(NS_FAILED(aStatus)); MOZ_ASSERT(NS_FAILED(aStatus));

View file

@ -166,37 +166,6 @@ public:
SecureUpgradeChannelURI(nsIChannel* aChannel); SecureUpgradeChannelURI(nsIChannel* aChannel);
}; };
class InterceptedChannelChrome : public InterceptedChannelBase
{
// The actual channel being intercepted.
RefPtr<nsHttpChannel> mChannel;
// Writeable cache entry for use when synthesizing a response in a parent process
nsCOMPtr<nsICacheEntry> mSynthesizedCacheEntry;
// When a channel is intercepted, content decoding is disabled since the
// ServiceWorker will have already extracted the decoded data. For parent
// process channels we need to preserve the earlier value in case
// ResetInterception is called.
bool mOldApplyConversion;
public:
InterceptedChannelChrome(nsHttpChannel* aChannel,
nsINetworkInterceptController* aController,
nsICacheEntry* aEntry);
NS_IMETHOD ResetInterception() override;
NS_IMETHOD FinishSynthesizedResponse(const nsACString& aFinalURLSpec) override;
NS_IMETHOD GetChannel(nsIChannel** aChannel) override;
NS_IMETHOD GetSecureUpgradedChannelURI(nsIURI** aURI) override;
NS_IMETHOD SynthesizeStatus(uint16_t aStatus, const nsACString& aReason) override;
NS_IMETHOD SynthesizeHeader(const nsACString& aName, const nsACString& aValue) override;
NS_IMETHOD Cancel(nsresult aStatus) override;
NS_IMETHOD SetChannelInfo(mozilla::dom::ChannelInfo* aChannelInfo) override;
NS_IMETHOD GetInternalContentPolicyType(nsContentPolicyType *aInternalContentPolicyType) override;
virtual void NotifyController() override;
};
class InterceptedChannelContent : public InterceptedChannelBase class InterceptedChannelContent : public InterceptedChannelBase
{ {
// The actual channel being intercepted. // The actual channel being intercepted.
@ -223,7 +192,7 @@ public:
NS_IMETHOD GetSecureUpgradedChannelURI(nsIURI** aURI) override; NS_IMETHOD GetSecureUpgradedChannelURI(nsIURI** aURI) override;
NS_IMETHOD SynthesizeStatus(uint16_t aStatus, const nsACString& aReason) override; NS_IMETHOD SynthesizeStatus(uint16_t aStatus, const nsACString& aReason) override;
NS_IMETHOD SynthesizeHeader(const nsACString& aName, const nsACString& aValue) override; NS_IMETHOD SynthesizeHeader(const nsACString& aName, const nsACString& aValue) override;
NS_IMETHOD Cancel(nsresult aStatus) override; NS_IMETHOD CancelInterception(nsresult aStatus) override;
NS_IMETHOD SetChannelInfo(mozilla::dom::ChannelInfo* aChannelInfo) override; NS_IMETHOD SetChannelInfo(mozilla::dom::ChannelInfo* aChannelInfo) override;
NS_IMETHOD GetInternalContentPolicyType(nsContentPolicyType *aInternalContentPolicyType) override; NS_IMETHOD GetInternalContentPolicyType(nsContentPolicyType *aInternalContentPolicyType) override;

File diff not shown because of its large size.

View file

@ -0,0 +1,185 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set sw=2 ts=8 et tw=80 : */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_net_InterceptedHttpChannel_h
#define mozilla_net_InterceptedHttpChannel_h
#include "HttpBaseChannel.h"
#include "nsINetworkInterceptController.h"
#include "nsIInputStream.h"
#include "nsIChannelWithDivertableParentListener.h"
#include "nsIThreadRetargetableRequest.h"
namespace mozilla {
namespace net {
// This class represents an http channel that is being intercepted by a
// ServiceWorker. This means that when the channel is opened a FetchEvent
// will be fired on the ServiceWorker thread. The channel will complete
// depending on what the worker does. The options are:
//
// 1. If the ServiceWorker does not handle the FetchEvent or does not call
// FetchEvent.respondWith(), then the channel needs to fall back to a
// normal request. When this happens ResetInterception() is called and
// the channel will perform an internal redirect back to an nsHttpChannel.
//
// 2. If the ServiceWorker provides a Response to FetchEvent.respondWith()
// then the status, headers, and body must be synthesized. When
// FinishSynthesizedResponse() is called the synthesized data must be
// reported back to the channel listener. This is handled in a few
// different ways:
// a. If a redirect was synthesized, then we perform the redirect to
// a new nsHttpChannel. This new channel might trigger yet another
// interception.
// b. If a same-origin or CORS Response was synthesized, then we simply
// create an nsInputStreamPump to process it and call back to the
// listener.
// c. If an opaque Response was synthesized, then we perform an internal
// redirect to a new InterceptedHttpChannel using the cross-origin URL.
// When this new channel is opened, it then creates a pump as in case
// (b). The extra redirect here is to make sure the various listeners
// treat the result as unsafe cross-origin data.
//
// 3. If an error occurs, such as the ServiceWorker passing garbage to
// FetchEvent.respondWith(), then CancelInterception() is called. This is
// handled the same as a normal nsIChannel::Cancel() call. We abort the
// channel and end up calling OnStopRequest() with an error code.
class InterceptedHttpChannel final : public HttpBaseChannel
, public HttpAsyncAborter<InterceptedHttpChannel>
, public nsIInterceptedChannel
, public nsIAsyncVerifyRedirectCallback
, public nsIStreamListener
, public nsIChannelWithDivertableParentListener
, public nsIThreadRetargetableRequest
, public nsIThreadRetargetableStreamListener
{
NS_DECL_ISUPPORTS_INHERITED
NS_DECL_NSIINTERCEPTEDCHANNEL
NS_DECL_NSIASYNCVERIFYREDIRECTCALLBACK
NS_DECL_NSIREQUESTOBSERVER
NS_DECL_NSISTREAMLISTENER
NS_DECL_NSICHANNELWITHDIVERTABLEPARENTLISTENER
NS_DECL_NSITHREADRETARGETABLEREQUEST
NS_DECL_NSITHREADRETARGETABLESTREAMLISTENER
private:
friend class HttpAsyncAborter<InterceptedHttpChannel>;
UniquePtr<nsHttpResponseHead> mSynthesizedResponseHead;
nsCOMPtr<nsIChannel> mRedirectChannel;
nsCOMPtr<nsIInputStream> mBodyReader;
nsCOMPtr<nsIOutputStream> mBodyWriter;
nsCOMPtr<nsISupports> mReleaseHandle;
nsCOMPtr<nsIProgressEventSink> mProgressSink;
RefPtr<nsInputStreamPump> mPump;
RefPtr<ADivertableParentChannel> mParentChannel;
TimeStamp mFinishResponseStart;
TimeStamp mFinishResponseEnd;
Atomic<int64_t> mProgress;
int64_t mProgressReported;
int64_t mSynthesizedStreamLength;
uint64_t mResumeStartPos;
nsCString mResumeEntityId;
nsString mStatusHost;
enum {
Invalid = 0,
Synthesized,
Reset
} mSynthesizedOrReset;
Atomic<bool> mCallingStatusAndProgress;
InterceptedHttpChannel(PRTime aCreationTime,
const TimeStamp& aCreationTimestamp,
const TimeStamp& aAsyncOpenTimestamp);
~InterceptedHttpChannel() = default;
virtual void
ReleaseListeners() override;
virtual MOZ_MUST_USE nsresult
SetupReplacementChannel(nsIURI *aURI, nsIChannel *aChannel,
bool aPreserveMethod,
uint32_t aRedirectFlags) override;
bool
ShouldRedirect() const;
nsresult
FollowSyntheticRedirect();
nsresult
RedirectForOpaqueResponse(nsIURI* aResponseURI);
nsresult
StartPump();
nsresult
OpenRedirectChannel();
void
MaybeCallStatusAndProgress();
public:
static already_AddRefed<InterceptedHttpChannel>
CreateForInterception(PRTime aCreationTime, const TimeStamp& aCreationTimestamp,
const TimeStamp& aAsyncOpenTimestamp);
static already_AddRefed<InterceptedHttpChannel>
CreateForSynthesis(const nsHttpResponseHead* aHead, nsIInputStream* aBody,
PRTime aCreationTime,
const TimeStamp& aCreationTimestamp,
const TimeStamp& aAsyncOpenTimestamp);
NS_IMETHOD
Cancel(nsresult aStatus) override;
NS_IMETHOD
Suspend(void) override;
NS_IMETHOD
Resume(void) override;
NS_IMETHOD
GetSecurityInfo(nsISupports * *aSecurityInfo) override;
NS_IMETHOD
AsyncOpen(nsIStreamListener *aListener, nsISupports *aContext) override;
NS_IMETHOD
AsyncOpen2(nsIStreamListener *aListener) override;
NS_IMETHOD
LogBlockedCORSRequest(const nsAString & aMessage) override;
NS_IMETHOD
SetupFallbackChannel(const char * aFallbackKey) override;
NS_IMETHOD
GetResponseSynthesized(bool *aResponseSynthesized) override;
NS_IMETHOD
SetPriority(int32_t aPriority) override;
NS_IMETHOD
SetClassFlags(uint32_t aClassFlags) override;
NS_IMETHOD
ClearClassFlags(uint32_t flags) override;
NS_IMETHOD
AddClassFlags(uint32_t flags) override;
NS_IMETHOD
ResumeAt(uint64_t startPos, const nsACString & entityID) override;
void
DoNotifyListenerCleanup() override;
};
} // namespace net
} // namespace mozilla
#endif // mozilla_net_InterceptedHttpChannel_h
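
The comment block at the top of this new header enumerates the three ways an interception can complete. A hedged sketch of a consumer mapping those outcomes onto the nsIInterceptedChannel methods declared in this patch (the CompleteInterception helper and the FetchDecision enum are assumptions, not part of the tree):

#include "nsError.h"
#include "nsIInterceptedChannel.h"
#include "nsString.h"

// Which of the three outcomes described in the header comment the
// ServiceWorker produced for this request.
enum class FetchDecision { FallBackToNetwork, SynthesizeResponse, Fail };

static nsresult
CompleteInterception(nsIInterceptedChannel* aChannel, FetchDecision aDecision)
{
  switch (aDecision) {
    case FetchDecision::FallBackToNetwork:
      // Case 1: respondWith() was never called; redirect back to a plain
      // nsHttpChannel.
      return aChannel->ResetInterception();
    case FetchDecision::SynthesizeResponse:
      // Case 2: report the synthesized status and headers, then let the
      // channel deliver the body to its listener.
      aChannel->SynthesizeStatus(200, NS_LITERAL_CSTRING("OK"));
      aChannel->SynthesizeHeader(NS_LITERAL_CSTRING("Content-Type"),
                                 NS_LITERAL_CSTRING("text/plain"));
      return aChannel->FinishSynthesizedResponse(EmptyCString());
    case FetchDecision::Fail:
    default:
      // Case 3: abort the channel, equivalent to nsIChannel::Cancel().
      return aChannel->CancelInterception(NS_BINDING_ABORTED);
  }
}

In practice the decision comes from how the ServiceWorker handled the FetchEvent; the sketch only shows which channel call corresponds to each outcome.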

View file

@ -77,6 +77,7 @@ UNIFIED_SOURCES += [
'HttpChannelParentListener.cpp', 'HttpChannelParentListener.cpp',
'HttpInfo.cpp', 'HttpInfo.cpp',
'InterceptedChannel.cpp', 'InterceptedChannel.cpp',
'InterceptedHttpChannel.cpp',
'nsCORSListenerProxy.cpp', 'nsCORSListenerProxy.cpp',
'nsHttp.cpp', 'nsHttp.cpp',
'nsHttpActivityDistributor.cpp', 'nsHttpActivityDistributor.cpp',

View file

@ -107,6 +107,7 @@
#include "HSTSPrimerListener.h" #include "HSTSPrimerListener.h"
#include "CacheStorageService.h" #include "CacheStorageService.h"
#include "HttpChannelParent.h" #include "HttpChannelParent.h"
#include "InterceptedHttpChannel.h"
#include "nsIBufferedStreams.h" #include "nsIBufferedStreams.h"
#include "nsIFileStreams.h" #include "nsIFileStreams.h"
#include "nsIMIMEInputStream.h" #include "nsIMIMEInputStream.h"
@ -121,9 +122,6 @@ namespace mozilla { namespace net {
namespace { namespace {
// Monotonically increasing ID for generating unique cache entries per
// intercepted channel.
static uint64_t gNumIntercepted = 0;
static bool sRCWNEnabled = false; static bool sRCWNEnabled = false;
static uint32_t sRCWNQueueSizeNormal = 50; static uint32_t sRCWNQueueSizeNormal = 50;
static uint32_t sRCWNQueueSizePriority = 10; static uint32_t sRCWNQueueSizePriority = 10;
@ -308,8 +306,6 @@ nsHttpChannel::nsHttpChannel()
, mRequestTime(0) , mRequestTime(0)
, mOfflineCacheLastModifiedTime(0) , mOfflineCacheLastModifiedTime(0)
, mSuspendTotalTime(0) , mSuspendTotalTime(0)
, mInterceptCache(DO_NOT_INTERCEPT)
, mInterceptionID(gNumIntercepted++)
, mCacheOpenWithPriority(false) , mCacheOpenWithPriority(false)
, mCacheQueueSizeWhenOpen(0) , mCacheQueueSizeWhenOpen(0)
, mCachedContentIsValid(false) , mCachedContentIsValid(false)
@ -545,6 +541,10 @@ nsHttpChannel::Connect()
return NS_ERROR_DOCUMENT_NOT_CACHED; return NS_ERROR_DOCUMENT_NOT_CACHED;
} }
if (ShouldIntercept()) {
return RedirectToInterceptedChannel();
}
bool isTrackingResource = mIsTrackingResource; // is atomic bool isTrackingResource = mIsTrackingResource; // is atomic
LOG(("nsHttpChannel %p tracking resource=%d, local blocklist=%d, cos=%u", LOG(("nsHttpChannel %p tracking resource=%d, local blocklist=%d, cos=%u",
this, isTrackingResource, mLocalBlocklist, mClassOfService)); this, isTrackingResource, mLocalBlocklist, mClassOfService));
@ -648,8 +648,7 @@ nsHttpChannel::TryHSTSPriming()
mLoadInfo->GetForceHSTSPriming(); mLoadInfo->GetForceHSTSPriming();
if (requireHSTSPriming && if (requireHSTSPriming &&
nsMixedContentBlocker::sSendHSTSPriming && nsMixedContentBlocker::sSendHSTSPriming) {
mInterceptCache == DO_NOT_INTERCEPT) {
if (!isHttpsScheme) { if (!isHttpsScheme) {
rv = HSTSPrimingListener::StartHSTSPriming(this, this); rv = HSTSPrimingListener::StartHSTSPriming(this, this);
@ -808,8 +807,7 @@ nsHttpChannel::ContinueConnect()
// a CORS preflight. Bug: 1272440 // a CORS preflight. Bug: 1272440
// If we need to start a CORS preflight, do it now! // If we need to start a CORS preflight, do it now!
// Note that it is important to do this before the early returns below. // Note that it is important to do this before the early returns below.
if (!mIsCorsPreflightDone && mRequireCORSPreflight && if (!mIsCorsPreflightDone && mRequireCORSPreflight) {
mInterceptCache != INTERCEPTED) {
MOZ_ASSERT(!mPreflightChannel); MOZ_ASSERT(!mPreflightChannel);
nsresult rv = nsresult rv =
nsCORSListenerProxy::StartCORSPreflight(this, this, nsCORSListenerProxy::StartCORSPreflight(this, this,
@ -818,9 +816,7 @@ nsHttpChannel::ContinueConnect()
return rv; return rv;
} }
MOZ_RELEASE_ASSERT(!(mRequireCORSPreflight && MOZ_RELEASE_ASSERT(!mRequireCORSPreflight || mIsCorsPreflightDone,
mInterceptCache != INTERCEPTED) ||
mIsCorsPreflightDone,
"CORS preflight must have been finished by the time we " "CORS preflight must have been finished by the time we "
"do the rest of ContinueConnect"); "do the rest of ContinueConnect");
@ -843,10 +839,7 @@ nsHttpChannel::ContinueConnect()
event->Revoke(); event->Revoke();
} }
// Don't accumulate the cache hit telemetry for intercepted channels.
if (mInterceptCache != INTERCEPTED) {
AccumulateCacheHitTelemetry(kCacheHit); AccumulateCacheHitTelemetry(kCacheHit);
}
return rv; return rv;
} }
@ -1573,9 +1566,7 @@ nsHttpChannel::CallOnStartRequest()
{ {
LOG(("nsHttpChannel::CallOnStartRequest [this=%p]", this)); LOG(("nsHttpChannel::CallOnStartRequest [this=%p]", this));
MOZ_RELEASE_ASSERT(!(mRequireCORSPreflight && MOZ_RELEASE_ASSERT(!mRequireCORSPreflight || mIsCorsPreflightDone,
mInterceptCache != INTERCEPTED) ||
mIsCorsPreflightDone,
"CORS preflight must have been finished by the time we " "CORS preflight must have been finished by the time we "
"call OnStartRequest"); "call OnStartRequest");
@ -2991,16 +2982,6 @@ nsHttpChannel::StartRedirectChannelToURI(nsIURI *upgradedURI, uint32_t flags)
// Inform consumers about this fake redirect // Inform consumers about this fake redirect
mRedirectChannel = newChannel; mRedirectChannel = newChannel;
if (!(flags & nsIChannelEventSink::REDIRECT_STS_UPGRADE) &&
mInterceptCache == INTERCEPTED) {
// Mark the channel as intercepted in order to propagate the response URL.
nsCOMPtr<nsIHttpChannelInternal> httpRedirect = do_QueryInterface(mRedirectChannel);
if (httpRedirect) {
rv = httpRedirect->ForceIntercepted(mInterceptionID);
MOZ_ASSERT(NS_SUCCEEDED(rv));
}
}
PushRedirectAsyncFunc( PushRedirectAsyncFunc(
&nsHttpChannel::ContinueAsyncRedirectChannelToURI); &nsHttpChannel::ContinueAsyncRedirectChannelToURI);
rv = gHttpHandler->AsyncOnChannelRedirect(this, newChannel, flags); rv = gHttpHandler->AsyncOnChannelRedirect(this, newChannel, flags);
@ -3796,7 +3777,7 @@ nsHttpChannel::OpenCacheEntry(bool isHttps)
if (mPostID == 0) if (mPostID == 0)
mPostID = gHttpHandler->GenerateUniqueID(); mPostID = gHttpHandler->GenerateUniqueID();
} }
else if (!PossiblyIntercepted() && !mRequestHead.IsGet() && !mRequestHead.IsHead()) { else if (!mRequestHead.IsGet() && !mRequestHead.IsHead()) {
// don't use the cache for other types of requests // don't use the cache for other types of requests
return NS_OK; return NS_OK;
} }
@ -3814,7 +3795,7 @@ nsHttpChannel::OpenCacheEntry(bool isHttps)
// Pick up an application cache from the notification // Pick up an application cache from the notification
// callbacks if available and if we are not an intercepted channel. // callbacks if available and if we are not an intercepted channel.
if (!PossiblyIntercepted() && !mApplicationCache && if (!mApplicationCache &&
mInheritApplicationCache) { mInheritApplicationCache) {
nsCOMPtr<nsIApplicationCacheContainer> appCacheContainer; nsCOMPtr<nsIApplicationCacheContainer> appCacheContainer;
GetCallback(appCacheContainer); GetCallback(appCacheContainer);
@ -3837,16 +3818,8 @@ nsHttpChannel::OpenCacheEntry(bool isHttps)
NS_ENSURE_SUCCESS(rv, rv); NS_ENSURE_SUCCESS(rv, rv);
} }
else { else {
// In the case of intercepted channels, we need to construct the cache
// entry key based on the original URI, so that in case the intercepted
// channel is redirected, the cache entry key before and after the
// redirect is the same.
if (PossiblyIntercepted()) {
openURI = mOriginalURI;
} else {
openURI = mURI; openURI = mURI;
} }
}
RefPtr<LoadContextInfo> info = GetLoadContextInfo(this); RefPtr<LoadContextInfo> info = GetLoadContextInfo(this);
if (!info) { if (!info) {
@ -3861,12 +3834,12 @@ nsHttpChannel::OpenCacheEntry(bool isHttps)
nsAutoCString cacheControlRequestHeader; nsAutoCString cacheControlRequestHeader;
Unused << mRequestHead.GetHeader(nsHttp::Cache_Control, cacheControlRequestHeader); Unused << mRequestHead.GetHeader(nsHttp::Cache_Control, cacheControlRequestHeader);
CacheControlParser cacheControlRequest(cacheControlRequestHeader); CacheControlParser cacheControlRequest(cacheControlRequestHeader);
if (cacheControlRequest.NoStore() && !PossiblyIntercepted()) { if (cacheControlRequest.NoStore()) {
goto bypassCacheEntryOpen; goto bypassCacheEntryOpen;
} }
if (offline || (mLoadFlags & INHIBIT_CACHING)) { if (offline || (mLoadFlags & INHIBIT_CACHING)) {
if (BYPASS_LOCAL_CACHE(mLoadFlags) && !offline && !PossiblyIntercepted()) { if (BYPASS_LOCAL_CACHE(mLoadFlags) && !offline) {
goto bypassCacheEntryOpen; goto bypassCacheEntryOpen;
} }
cacheEntryOpenFlags = nsICacheStorage::OPEN_READONLY; cacheEntryOpenFlags = nsICacheStorage::OPEN_READONLY;
@ -3893,10 +3866,6 @@ nsHttpChannel::OpenCacheEntry(bool isHttps)
rv = cacheStorageService->AppCacheStorage(info, rv = cacheStorageService->AppCacheStorage(info,
mApplicationCache, mApplicationCache,
getter_AddRefs(cacheStorage)); getter_AddRefs(cacheStorage));
} else if (PossiblyIntercepted()) {
// The synthesized cache has less restrictions on file size and so on.
rv = cacheStorageService->SynthesizedCacheStorage(info,
getter_AddRefs(cacheStorage));
} else if (mLoadFlags & INHIBIT_PERSISTENT_CACHING) { } else if (mLoadFlags & INHIBIT_PERSISTENT_CACHING) {
rv = cacheStorageService->MemoryCacheStorage(info, // ? choose app cache as well... rv = cacheStorageService->MemoryCacheStorage(info, // ? choose app cache as well...
getter_AddRefs(cacheStorage)); getter_AddRefs(cacheStorage));
@ -3925,46 +3894,14 @@ nsHttpChannel::OpenCacheEntry(bool isHttps)
if (mLoadFlags & LOAD_BYPASS_LOCAL_CACHE_IF_BUSY) if (mLoadFlags & LOAD_BYPASS_LOCAL_CACHE_IF_BUSY)
cacheEntryOpenFlags |= nsICacheStorage::OPEN_BYPASS_IF_BUSY; cacheEntryOpenFlags |= nsICacheStorage::OPEN_BYPASS_IF_BUSY;
if (PossiblyIntercepted()) { if (mPostID) {
extension.Append(nsPrintfCString("u%" PRIu64, mInterceptionID));
} else if (mPostID) {
extension.Append(nsPrintfCString("%d", mPostID)); extension.Append(nsPrintfCString("%d", mPostID));
} }
// If this channel should be intercepted, we do not open a cache entry for this channel
// until the interception process is complete and the consumer decides what to do with it.
if (mInterceptCache == MAYBE_INTERCEPT) {
DebugOnly<bool> exists;
MOZ_ASSERT(NS_FAILED(cacheStorage->Exists(openURI, extension, &exists)) || !exists,
"The entry must not exist in the cache before we create it here");
nsCOMPtr<nsICacheEntry> entry;
rv = cacheStorage->OpenTruncate(openURI, extension, getter_AddRefs(entry));
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<nsINetworkInterceptController> controller;
GetCallback(controller);
RefPtr<InterceptedChannelChrome> intercepted =
new InterceptedChannelChrome(this, controller, entry);
intercepted->NotifyController();
} else {
if (mInterceptCache == INTERCEPTED) {
cacheEntryOpenFlags |= nsICacheStorage::OPEN_INTERCEPTED;
// Clear OPEN_TRUNCATE for the fake cache entry, since otherwise
// cache storage will close the current entry which breaks the
// response synthesis.
cacheEntryOpenFlags &= ~nsICacheStorage::OPEN_TRUNCATE;
DebugOnly<bool> exists;
MOZ_ASSERT(NS_SUCCEEDED(cacheStorage->Exists(openURI, extension, &exists)) && exists,
"The entry must exist in the cache after we create it here");
}
mCacheOpenWithPriority = cacheEntryOpenFlags & nsICacheStorage::OPEN_PRIORITY; mCacheOpenWithPriority = cacheEntryOpenFlags & nsICacheStorage::OPEN_PRIORITY;
mCacheQueueSizeWhenOpen = CacheStorageService::CacheQueueSize(mCacheOpenWithPriority); mCacheQueueSizeWhenOpen = CacheStorageService::CacheQueueSize(mCacheOpenWithPriority);
if (sRCWNEnabled && maybeRCWN && !mApplicationCacheForWrite && if (sRCWNEnabled && maybeRCWN && !mApplicationCacheForWrite) {
mInterceptCache != INTERCEPTED) {
bool hasAltData = false; bool hasAltData = false;
uint32_t sizeInKb = 0; uint32_t sizeInKb = 0;
rv = cacheStorage->GetCacheIndexEntryAttrs(openURI, extension, rv = cacheStorage->GetCacheIndexEntryAttrs(openURI, extension,
@ -4002,7 +3939,6 @@ nsHttpChannel::OpenCacheEntry(bool isHttps)
} }
NS_ENSURE_SUCCESS(rv, rv); NS_ENSURE_SUCCESS(rv, rv);
}
waitFlags.Keep(WAIT_FOR_CACHE_ENTRY); waitFlags.Keep(WAIT_FOR_CACHE_ENTRY);
@ -4224,7 +4160,7 @@ nsHttpChannel::OnCacheEntryCheck(nsICacheEntry* entry, nsIApplicationCache* appC
// want to proceed since the LOAD_ONLY_IF_MODIFIED flag is // want to proceed since the LOAD_ONLY_IF_MODIFIED flag is
// also set. // also set.
MOZ_ASSERT(mLoadFlags & LOAD_ONLY_IF_MODIFIED); MOZ_ASSERT(mLoadFlags & LOAD_ONLY_IF_MODIFIED);
} else if (mInterceptCache != INTERCEPTED) { } else {
return rv; return rv;
} }
} }
@ -4325,10 +4261,6 @@ nsHttpChannel::OnCacheEntryCheck(nsICacheEntry* entry, nsIApplicationCache* appC
mRedirectedCachekeys->AppendElement(cacheKey); mRedirectedCachekeys->AppendElement(cacheKey);
} }
if (doValidation && mInterceptCache == INTERCEPTED) {
doValidation = false;
}
mCachedContentIsValid = !doValidation; mCachedContentIsValid = !doValidation;
if (doValidation) { if (doValidation) {
@ -5652,17 +5584,6 @@ nsHttpChannel::SetupReplacementChannel(nsIURI *newURI,
resumableChannel->ResumeAt(mStartPos, mEntityID); resumableChannel->ResumeAt(mStartPos, mEntityID);
} }
if (!(redirectFlags & nsIChannelEventSink::REDIRECT_STS_UPGRADE) &&
mInterceptCache != INTERCEPTED &&
mRedirectMode != nsIHttpChannelInternal::REDIRECT_MODE_MANUAL) {
nsLoadFlags loadFlags = nsIRequest::LOAD_NORMAL;
rv = newChannel->GetLoadFlags(&loadFlags);
NS_ENSURE_SUCCESS(rv, rv);
loadFlags |= nsIChannel::LOAD_BYPASS_SERVICE_WORKER;
rv = newChannel->SetLoadFlags(loadFlags);
NS_ENSURE_SUCCESS(rv, rv);
}
return NS_OK; return NS_OK;
} }
@ -6164,11 +6085,6 @@ nsHttpChannel::AsyncOpen(nsIStreamListener *listener, nsISupports *context)
return NS_OK; return NS_OK;
} }
if (mInterceptCache != INTERCEPTED && ShouldIntercept()) {
mInterceptCache = MAYBE_INTERCEPT;
SetCouldBeSynthesized();
}
// Remember the cookie header that was set, if any // Remember the cookie header that was set, if any
nsAutoCString cookieHeader; nsAutoCString cookieHeader;
if (NS_SUCCEEDED(mRequestHead.GetHeader(nsHttp::Cookie, cookieHeader))) { if (NS_SUCCEEDED(mRequestHead.GetHeader(nsHttp::Cookie, cookieHeader))) {
@ -6692,21 +6608,6 @@ nsHttpChannel::SetupFallbackChannel(const char *aFallbackKey)
return NS_OK; return NS_OK;
} }
NS_IMETHODIMP
nsHttpChannel::ForceIntercepted(uint64_t aInterceptionID)
{
ENSURE_CALLED_BEFORE_ASYNC_OPEN();
if (NS_WARN_IF(mLoadFlags & LOAD_BYPASS_SERVICE_WORKER)) {
return NS_ERROR_NOT_AVAILABLE;
}
MarkIntercepted();
mResponseCouldBeSynthesized = true;
mInterceptionID = aInterceptionID;
return NS_OK;
}
NS_IMETHODIMP NS_IMETHODIMP
nsHttpChannel::SetChannelIsForDownload(bool aChannelIsForDownload) nsHttpChannel::SetChannelIsForDownload(bool aChannelIsForDownload)
{ {
@ -8680,17 +8581,11 @@ nsHttpChannel::SetNotificationCallbacks(nsIInterfaceRequestor *aCallbacks)
return rv; return rv;
} }
void
nsHttpChannel::MarkIntercepted()
{
mInterceptCache = INTERCEPTED;
}
NS_IMETHODIMP NS_IMETHODIMP
nsHttpChannel::GetResponseSynthesized(bool* aSynthesized) nsHttpChannel::GetResponseSynthesized(bool* aSynthesized)
{ {
NS_ENSURE_ARG_POINTER(aSynthesized); NS_ENSURE_ARG_POINTER(aSynthesized);
*aSynthesized = (mInterceptCache == INTERCEPTED); *aSynthesized = false;
return NS_OK; return NS_OK;
} }
@ -9577,5 +9472,50 @@ nsHttpChannel::GetWarningReporter()
return mWarningReporter.get(); return mWarningReporter.get();
} }
nsresult
nsHttpChannel::RedirectToInterceptedChannel()
{
nsCOMPtr<nsINetworkInterceptController> controller;
GetCallback(controller);
RefPtr<InterceptedHttpChannel> intercepted =
InterceptedHttpChannel::CreateForInterception(mChannelCreationTime,
mChannelCreationTimestamp,
mAsyncOpenTime);
nsresult rv =
intercepted->Init(mURI, mCaps, static_cast<nsProxyInfo*>(mProxyInfo.get()),
mProxyResolveFlags, mProxyURI, mChannelId);
nsCOMPtr<nsILoadInfo> redirectLoadInfo =
CloneLoadInfoForRedirect(mURI, nsIChannelEventSink::REDIRECT_INTERNAL);
intercepted->SetLoadInfo(redirectLoadInfo);
rv = SetupReplacementChannel(mURI, intercepted, true,
nsIChannelEventSink::REDIRECT_INTERNAL);
NS_ENSURE_SUCCESS(rv, rv);
mRedirectChannel = intercepted;
PushRedirectAsyncFunc(
&nsHttpChannel::ContinueAsyncRedirectChannelToURI);
rv = gHttpHandler->AsyncOnChannelRedirect(this, intercepted,
nsIChannelEventSink::REDIRECT_INTERNAL);
if (NS_SUCCEEDED(rv)) {
rv = WaitForRedirectCallback();
}
if (NS_FAILED(rv)) {
AutoRedirectVetoNotifier notifier(this);
PopRedirectAsyncFunc(
&nsHttpChannel::ContinueAsyncRedirectChannelToURI);
}
return rv;
}
} // namespace net } // namespace net
} // namespace mozilla } // namespace mozilla

View file

@ -165,7 +165,6 @@ public:
NS_IMETHOD GetEncodedBodySize(uint64_t *aEncodedBodySize) override; NS_IMETHOD GetEncodedBodySize(uint64_t *aEncodedBodySize) override;
// nsIHttpChannelInternal // nsIHttpChannelInternal
NS_IMETHOD SetupFallbackChannel(const char *aFallbackKey) override; NS_IMETHOD SetupFallbackChannel(const char *aFallbackKey) override;
NS_IMETHOD ForceIntercepted(uint64_t aInterceptionID) override;
NS_IMETHOD SetChannelIsForDownload(bool aChannelIsForDownload) override; NS_IMETHOD SetChannelIsForDownload(bool aChannelIsForDownload) override;
// nsISupportsPriority // nsISupportsPriority
NS_IMETHOD SetPriority(int32_t value) override; NS_IMETHOD SetPriority(int32_t value) override;
@ -203,27 +202,6 @@ public: /* internal necko use only */
using InitLocalBlockListCallback = std::function<void(bool)>; using InitLocalBlockListCallback = std::function<void(bool)>;
void InternalSetUploadStream(nsIInputStream *uploadStream)
{ mUploadStream = uploadStream; }
void SetUploadStreamHasHeaders(bool hasHeaders)
{ mUploadStreamHasHeaders = hasHeaders; }
MOZ_MUST_USE nsresult
SetReferrerWithPolicyInternal(nsIURI *referrer, uint32_t referrerPolicy) {
nsAutoCString spec;
nsresult rv = referrer->GetAsciiSpec(spec);
if (NS_FAILED(rv)) return rv;
mReferrer = referrer;
mReferrerPolicy = referrerPolicy;
rv = mRequestHead.SetHeader(nsHttp::Referer, spec);
return rv;
}
MOZ_MUST_USE nsresult SetTopWindowURI(nsIURI* aTopWindowURI) {
mTopWindowURI = aTopWindowURI;
return NS_OK;
}
uint32_t GetRequestTime() const uint32_t GetRequestTime() const
{ {
return mRequestTime; return mRequestTime;
@ -285,7 +263,6 @@ public: /* internal necko use only */
uint32_t mKeep : 2; uint32_t mKeep : 2;
}; };
void MarkIntercepted();
NS_IMETHOD GetResponseSynthesized(bool* aSynthesized) override; NS_IMETHOD GetResponseSynthesized(bool* aSynthesized) override;
bool AwaitingCacheCallbacks(); bool AwaitingCacheCallbacks();
void SetCouldBeSynthesized(); void SetCouldBeSynthesized();
@ -520,6 +497,10 @@ private:
already_AddRefed<nsChannelClassifier> GetOrCreateChannelClassifier(); already_AddRefed<nsChannelClassifier> GetOrCreateChannelClassifier();
// Start an internal redirect to a new InterceptedHttpChannel which will
// result in firing a ServiceWorker FetchEvent.
MOZ_MUST_USE nsresult RedirectToInterceptedChannel();
private: private:
// this section is for main-thread-only object // this section is for main-thread-only object
// all the references need to be proxy released on main thread. // all the references need to be proxy released on main thread.
@ -575,20 +556,6 @@ private:
// telemetry in nsHttpChannel::OnStartRequest(). // telemetry in nsHttpChannel::OnStartRequest().
uint32_t mSuspendTotalTime; uint32_t mSuspendTotalTime;
// States of channel interception
enum {
DO_NOT_INTERCEPT, // no interception will occur
MAYBE_INTERCEPT, // interception in progress, but can be cancelled
INTERCEPTED, // a synthesized response has been provided
} mInterceptCache;
// ID of this channel for the interception purposes. Unique unless this
// channel is replacing an intercepted one via an redirection.
uint64_t mInterceptionID;
bool PossiblyIntercepted() {
return mInterceptCache != DO_NOT_INTERCEPT;
}
// If the channel is associated with a cache, and the URI matched // If the channel is associated with a cache, and the URI matched
// a fallback namespace, this will hold the key for the fallback // a fallback namespace, this will hold the key for the fallback
// cache entry. // cache entry.

View file

@ -225,13 +225,6 @@ interface nsIHttpChannelInternal : nsISupports
[must_use] readonly attribute PRTime lastModifiedTime; [must_use] readonly attribute PRTime lastModifiedTime;
/**
* Force a channel that has not been AsyncOpen'ed to skip any check for possible
* interception and proceed immediately to open a previously-synthesized cache
* entry using the provided ID.
*/
[must_use] void forceIntercepted(in uint64_t aInterceptionID);
[must_use] readonly attribute boolean responseSynthesized; [must_use] readonly attribute boolean responseSynthesized;
/** /**

View file

@ -10,11 +10,14 @@
#include "plstr.h" #include "plstr.h"
#include "nsDebug.h" #include "nsDebug.h"
#include "prprf.h" #include "prprf.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/IntegerPrintfMacros.h" #include "mozilla/IntegerPrintfMacros.h"
#include "mozilla/Sprintf.h" #include "mozilla/Sprintf.h"
/* ==================================================================== */ /* ==================================================================== */
using mozilla::CheckedInt;
static inline int ParsingFailed(struct list_state *state) static inline int ParsingFailed(struct list_state *state)
{ {
if (state->parsed_one || state->lstyle) /* junk if we fail to parse */ if (state->parsed_one || state->lstyle) /* junk if we fail to parse */
@ -1192,14 +1195,17 @@ int ParseFTPList(const char *line, struct list_state *state,
{ {
/* First try to use result->fe_size to find " -> " sequence. /* First try to use result->fe_size to find " -> " sequence.
This can give proper result for cases like "aaa -> bbb -> ccc". */ This can give proper result for cases like "aaa -> bbb -> ccc". */
uint32_t fe_size = atoi(result->fe_size); uintptr_t fe_size = atoi(result->fe_size);
CheckedInt<uintptr_t> arrow_start(result->fe_fnlen);
arrow_start -= fe_size;
arrow_start -= 4;
if (result->fe_fnlen > (fe_size + 4) && if (arrow_start.isValid() &&
PL_strncmp(result->fe_fname + result->fe_fnlen - fe_size - 4 , " -> ", 4) == 0) PL_strncmp(result->fe_fname + arrow_start.value(), " -> ", 4) == 0)
{ {
result->fe_lname = result->fe_fname + (result->fe_fnlen - fe_size); result->fe_lname = result->fe_fname + (result->fe_fnlen - fe_size);
result->fe_lnlen = (&(line[linelen])) - (result->fe_lname); result->fe_lnlen = (&(line[linelen])) - (result->fe_lname);
result->fe_fnlen -= fe_size + 4; result->fe_fnlen = arrow_start.value();
} }
else else
{ {
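
The hunk above replaces the old fe_fnlen > (fe_size + 4) comparison with CheckedInt arithmetic, so a bogus fe_size parsed from the listing cannot wrap the subtraction. A self-contained sketch of the same guard (the ArrowOffset helper and its parameters are assumptions for illustration):

#include <cstdint>
#include <cstdio>

#include "mozilla/CheckedInt.h"

using mozilla::CheckedInt;

// Returns the offset where a " -> " separator would start, or -1 if the
// arithmetic would underflow (the claimed link-target length cannot fit
// inside the file name).
static int64_t
ArrowOffset(uintptr_t aNameLen, uintptr_t aTargetLen)
{
  CheckedInt<uintptr_t> arrowStart(aNameLen);
  arrowStart -= aTargetLen; // may underflow for a garbage fe_size
  arrowStart -= 4;          // room for the " -> " separator itself
  return arrowStart.isValid() ? static_cast<int64_t>(arrowStart.value()) : -1;
}

int main()
{
  printf("%lld\n", (long long)ArrowOffset(20, 3));  // 13
  printf("%lld\n", (long long)ArrowOffset(5, 400)); // -1, underflow caught
  return 0;
}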

View file

@ -300,6 +300,9 @@ const testcases = [
["goo\u0650gle", "xn--google-yri", false, false, false], ["goo\u0650gle", "xn--google-yri", false, false, false],
// ...but Arabic diacritics are allowed on Arabic text // ...but Arabic diacritics are allowed on Arabic text
["العَرَبِي", "xn--mgbc0a5a6cxbzabt", false, true, true], ["العَرَبِي", "xn--mgbc0a5a6cxbzabt", false, true, true],
// Hebrew diacritic also not allowed in Latin text (bug 1404349)
["goo\u05b4gle", "xn--google-rvh", false, false, false],
]; ];
const profiles = ["ASCII", "high", "moderate"]; const profiles = ["ASCII", "high", "moderate"];

View file

@ -183,7 +183,7 @@ add_test(function() {
// ensure that the intercepted channel can be cancelled // ensure that the intercepted channel can be cancelled
add_test(function() { add_test(function() {
var chan = make_channel(URL + '/body', null, function(intercepted) { var chan = make_channel(URL + '/body', null, function(intercepted) {
intercepted.cancel(Cr.NS_BINDING_ABORTED); intercepted.cancelInterception(Cr.NS_BINDING_ABORTED);
}); });
chan.asyncOpen2(new ChannelListener(run_next_test, null, CL_EXPECT_FAILURE)); chan.asyncOpen2(new ChannelListener(run_next_test, null, CL_EXPECT_FAILURE));
}); });
@ -195,7 +195,7 @@ add_test(function() {
do_timeout(0, function() { do_timeout(0, function() {
var gotexception = false; var gotexception = false;
try { try {
chan.cancel(); chan.cancelInterception();
} catch (x) { } catch (x) {
gotexception = true; gotexception = true;
} }

View file

@ -370,6 +370,12 @@ ARCHIVE_FILES = {
'base': 'testing', 'base': 'testing',
'pattern': 'talos/**', 'pattern': 'talos/**',
}, },
{
'source': buildconfig.topsrcdir,
'base': 'third_party/speedometer',
'pattern': '**',
'dest': 'talos/talos/tests/speedometer/',
},
], ],
'awsy': [ 'awsy': [
{ {

View file

@ -1140,4 +1140,4 @@ static const TransportSecurityPreload kPublicKeyPinningPreloadList[] = {
static const int32_t kUnknownId = -1; static const int32_t kUnknownId = -1;
static const PRTime kPreloadPKPinsExpirationTime = INT64_C(1515995399924000); static const PRTime kPreloadPKPinsExpirationTime = INT64_C(1516038167454000);

View file

@ -7,6 +7,7 @@
#ifndef nsCertOverrideService_h #ifndef nsCertOverrideService_h
#define nsCertOverrideService_h #define nsCertOverrideService_h
#include "mozilla/Move.h"
#include "mozilla/Mutex.h" #include "mozilla/Mutex.h"
#include "mozilla/TypedEnumBits.h" #include "mozilla/TypedEnumBits.h"
#include "nsICertOverrideService.h" #include "nsICertOverrideService.h"
@ -79,10 +80,10 @@ class nsCertOverrideEntry final : public PLDHashEntryHdr
{ {
} }
nsCertOverrideEntry(const nsCertOverrideEntry& toCopy) nsCertOverrideEntry(nsCertOverrideEntry&& toMove)
: mSettings(mozilla::Move(toMove.mSettings))
, mHostWithPort(mozilla::Move(toMove.mHostWithPort))
{ {
mSettings = toCopy.mSettings;
mHostWithPort = toCopy.mHostWithPort;
} }
~nsCertOverrideEntry() ~nsCertOverrideEntry()

View file

@ -7,6 +7,7 @@
#ifndef __NSCLIENTAUTHREMEMBER_H__ #ifndef __NSCLIENTAUTHREMEMBER_H__
#define __NSCLIENTAUTHREMEMBER_H__ #define __NSCLIENTAUTHREMEMBER_H__
#include "mozilla/Move.h"
#include "mozilla/ReentrantMonitor.h" #include "mozilla/ReentrantMonitor.h"
#include "nsTHashtable.h" #include "nsTHashtable.h"
#include "nsIObserver.h" #include "nsIObserver.h"
@ -62,9 +63,10 @@ class nsClientAuthRememberEntry final : public PLDHashEntryHdr
{ {
} }
nsClientAuthRememberEntry(const nsClientAuthRememberEntry& aToCopy) nsClientAuthRememberEntry(nsClientAuthRememberEntry&& aToMove)
: mSettings(mozilla::Move(aToMove.mSettings))
, mEntryKey(mozilla::Move(aToMove.mEntryKey))
{ {
mSettings = aToCopy.mSettings;
} }
~nsClientAuthRememberEntry() ~nsClientAuthRememberEntry()
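
Both entry classes above trade a copy constructor for a move constructor, so the hash table can relocate entries by stealing their string buffers instead of duplicating them. A minimal sketch of the same change on an invented ExampleEntry type (not part of the patch):

#include "mozilla/Move.h"
#include "nsString.h"

// When the underlying table grows it relocates existing entries; a move
// constructor lets that relocation take ownership of the string buffer
// rather than copying it.
class ExampleEntry
{
public:
  explicit ExampleEntry(const nsACString& aHostWithPort)
    : mHostWithPort(aHostWithPort)
  {
  }

  ExampleEntry(ExampleEntry&& aToMove)
    : mHostWithPort(mozilla::Move(aToMove.mHostWithPort))
  {
  }

  // Copying is no longer needed once the table only ever moves entries.
  ExampleEntry(const ExampleEntry&) = delete;

private:
  nsCString mHostWithPort;
};

As a side effect the rewrite also fixes an inconsistency visible in the hunk above: the old nsClientAuthRememberEntry copy constructor only copied mSettings, while the new move constructor transfers mEntryKey as well.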

Some files were not shown because too many files changed in this diff.