Mirror of https://github.com/mozilla/gecko-dev.git
Merge latest green fx-team changeset and mozilla-central; a=merge
This commit is contained in:
Commit 6fc258bd19
@@ -19,7 +19,7 @@
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="f42ebc93554979501d3ac52bcf9e69cb4b310a4f"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="e0c637f14265291ed81934058ec1cc019612127c"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="dbb66e540194a187326cece28ae0b51cdd500184"/>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
<project name="platform_hardware_ril" path="hardware/ril" remote="b2g" revision="8e4420c0c5c8e8c8e58a000278a7129403769f96"/>

@@ -17,7 +17,7 @@
</project>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="f42ebc93554979501d3ac52bcf9e69cb4b310a4f"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="e0c637f14265291ed81934058ec1cc019612127c"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="dbb66e540194a187326cece28ae0b51cdd500184"/>
<project name="moztt" path="external/moztt" remote="b2g" revision="ce95d372e6d285725b96490afdaaf489ad8f9ca9"/>
<project name="apitrace" path="external/apitrace" remote="apitrace" revision="cabebb87fcd32f8596af08e6b5e80764ee0157dd"/>

@@ -15,7 +15,7 @@
<project name="platform_build" path="build" remote="b2g" revision="276ce45e78b09c4a4ee643646f691d22804754c1">
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="gaia" path="gaia" remote="mozillaorg" revision="f42ebc93554979501d3ac52bcf9e69cb4b310a4f"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="e0c637f14265291ed81934058ec1cc019612127c"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="dbb66e540194a187326cece28ae0b51cdd500184"/>
<project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>

@@ -19,7 +19,7 @@
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="f42ebc93554979501d3ac52bcf9e69cb4b310a4f"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="e0c637f14265291ed81934058ec1cc019612127c"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="dbb66e540194a187326cece28ae0b51cdd500184"/>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
<project name="platform_hardware_ril" path="hardware/ril" remote="b2g" revision="8e4420c0c5c8e8c8e58a000278a7129403769f96"/>

@@ -17,7 +17,7 @@
</project>
<project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="f42ebc93554979501d3ac52bcf9e69cb4b310a4f"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="e0c637f14265291ed81934058ec1cc019612127c"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="dbb66e540194a187326cece28ae0b51cdd500184"/>
<project name="moztt" path="external/moztt" remote="b2g" revision="ce95d372e6d285725b96490afdaaf489ad8f9ca9"/>
<project name="apitrace" path="external/apitrace" remote="apitrace" revision="cabebb87fcd32f8596af08e6b5e80764ee0157dd"/>

@@ -118,7 +118,7 @@
<!-- Flame specific things -->
<project name="device/generic/armv7-a-neon" path="device/generic/armv7-a-neon" revision="e8a318f7690092e639ba88891606f4183e846d3f"/>
<project name="device/qcom/common" path="device/qcom/common" revision="34ed8345250bb97262d70a052217a92e83444ede"/>
<project name="device-flame" path="device/t2m/flame" remote="b2g" revision="9971053a02fb1b2c574dbf9ddd9405f6b8ddc53a"/>
<project name="device-flame" path="device/t2m/flame" remote="b2g" revision="70c66225521828ad568c0c72859c6f17c3182f1b"/>
<project name="codeaurora_kernel_msm" path="kernel" remote="b2g" revision="6d29b672b039612c08c40e92d8051a4cfbd38162"/>
<project name="kernel_lk" path="bootable/bootloader/lk" remote="b2g" revision="2b1d8b5b7a760230f4c94c02e733e3929f44253a"/>
<project name="platform/bootable/recovery" path="bootable/recovery" revision="f2914eacee9120680a41463708bb6ee8291749fc"/>

@@ -4,6 +4,6 @@
"remote": "",
"branch": ""
},
"revision": "89087e33f8ad411f955b7918f33bb54a100a1413",
"revision": "37cd9a62b2cb570985d99a5519b136672614b980",
"repo_path": "/integration/gaia-central"
}

@@ -17,7 +17,7 @@
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="f42ebc93554979501d3ac52bcf9e69cb4b310a4f"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="e0c637f14265291ed81934058ec1cc019612127c"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="dbb66e540194a187326cece28ae0b51cdd500184"/>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
<project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>

@@ -15,7 +15,7 @@
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="f42ebc93554979501d3ac52bcf9e69cb4b310a4f"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="e0c637f14265291ed81934058ec1cc019612127c"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="dbb66e540194a187326cece28ae0b51cdd500184"/>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
<project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>

@@ -17,7 +17,7 @@
</project>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="f42ebc93554979501d3ac52bcf9e69cb4b310a4f"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="e0c637f14265291ed81934058ec1cc019612127c"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="dbb66e540194a187326cece28ae0b51cdd500184"/>
<project name="moztt" path="external/moztt" remote="b2g" revision="ce95d372e6d285725b96490afdaaf489ad8f9ca9"/>
<project name="apitrace" path="external/apitrace" remote="apitrace" revision="cabebb87fcd32f8596af08e6b5e80764ee0157dd"/>

@@ -17,7 +17,7 @@
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="f42ebc93554979501d3ac52bcf9e69cb4b310a4f"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="e0c637f14265291ed81934058ec1cc019612127c"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="dbb66e540194a187326cece28ae0b51cdd500184"/>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
<project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>
@@ -34,6 +34,12 @@ class nsDOMStringMap;
class nsINodeInfo;
class nsIURI;

namespace mozilla {
namespace dom {
class Element;
}
}

/**
* Class that implements the nsIDOMNodeList interface (a list of children of
* the content), by holding a reference to the content and delegating GetLength

@@ -230,6 +236,14 @@ public:
return Children()->Length();
}

/**
* Sets the IsElementInStyleScope flag on each element in the subtree rooted
* at this node, including any elements reachable through shadow trees.
*
* @param aInStyleScope The flag value to set.
*/
void SetIsElementInStyleScopeFlagOnSubtree(bool aInStyleScope);

public:
/**
* If there are listeners for DOMNodeInserted event, fires the event on all

@@ -415,6 +429,14 @@ protected:
return static_cast<nsDOMSlots*>(GetExistingSlots());
}

/**
* Calls SetIsElementInStyleScopeFlagOnSubtree for each shadow tree attached
* to this node, which is assumed to be an Element.
*
* @param aInStyleScope The IsElementInStyleScope flag value to set.
*/
void SetIsElementInStyleScopeFlagOnShadowTree(bool aInStyleScope);

friend class ::ContentUnbinder;
/**
* Array containing all attributes and children for this element

@@ -816,6 +816,12 @@ public:
return mParent && mParent->IsElement() ? mParent->AsElement() : nullptr;
}

/**
* Get the parent Element of this node, traversing over a ShadowRoot
* to its host if necessary.
*/
mozilla::dom::Element* GetParentElementCrossingShadowRoot() const;

/**
* Get the root of the subtree this node belongs to. This never returns
* null. It may return 'this' (e.g. for document nodes, and nodes that

@@ -1289,15 +1289,28 @@ Element::BindToTree(nsIDocument* aDocument, nsIContent* aParent,
NODE_NEEDS_FRAME | NODE_DESCENDANTS_NEED_FRAMES |
// And the restyle bits
ELEMENT_ALL_RESTYLE_FLAGS);

// Propagate scoped style sheet tracking bit.
SetIsElementInStyleScope(mParent->IsElementInStyleScope());
} else if (!HasFlag(NODE_IS_IN_SHADOW_TREE)) {
// If we're not in the doc and not in a shadow tree,
// update our subtree pointer.
SetSubtreeRootPointer(aParent->SubtreeRoot());
}

// Propagate scoped style sheet tracking bit.
if (mParent->IsContent()) {
nsIContent* parent;
ShadowRoot* shadowRootParent = ShadowRoot::FromNode(mParent);
if (shadowRootParent) {
parent = shadowRootParent->GetHost();
} else {
parent = mParent->AsContent();
}

bool inStyleScope = parent->IsElementInStyleScope();

SetIsElementInStyleScope(inStyleScope);
SetIsElementInStyleScopeFlagOnShadowTree(inStyleScope);
}

// This has to be here, rather than in nsGenericHTMLElement::BindToTree,
// because it has to happen after updating the parent pointer, but before
// recursively binding the kids.

@@ -2846,3 +2846,41 @@ FragmentOrElement::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const

return n;
}

void
FragmentOrElement::SetIsElementInStyleScopeFlagOnSubtree(bool aInStyleScope)
{
if (aInStyleScope && IsElementInStyleScope()) {
return;
}

if (IsElement()) {
SetIsElementInStyleScope(aInStyleScope);
SetIsElementInStyleScopeFlagOnShadowTree(aInStyleScope);
}

nsIContent* n = GetNextNode(this);
while (n) {
if (n->IsElementInStyleScope()) {
n = n->GetNextNonChildNode(this);
} else {
if (n->IsElement()) {
n->SetIsElementInStyleScope(aInStyleScope);
n->AsElement()->SetIsElementInStyleScopeFlagOnShadowTree(aInStyleScope);
}
n = n->GetNextNode(this);
}
}
}

void
FragmentOrElement::SetIsElementInStyleScopeFlagOnShadowTree(bool aInStyleScope)
{
NS_ASSERTION(IsElement(), "calling SetIsElementInStyleScopeFlagOnShadowTree "
"on a non-Element is useless");
ShadowRoot* shadowRoot = GetShadowRoot();
while (shadowRoot) {
shadowRoot->SetIsElementInStyleScopeFlagOnSubtree(aInStyleScope);
shadowRoot = shadowRoot->GetOlderShadow();
}
}
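For readers new to this traversal idiom, the following is a minimal standalone sketch of the idea behind the SetIsElementInStyleScopeFlagOnSubtree() implementation above: walk the subtree and skip any descendant whose subtree is already flagged. The Node type and recursive form are invented for illustration and only show the flag-setting (true) case; the real Gecko code is iterative via GetNextNode()/GetNextNonChildNode() and also handles clearing the flag and shadow trees.

#include <vector>

struct Node {
  bool inStyleScope = false;
  std::vector<Node*> children;
};

// Pre-order walk; an already-flagged node means its whole subtree is flagged,
// so it can be pruned without visiting its children.
void SetFlagOnSubtree(Node* aNode) {
  if (aNode->inStyleScope) {
    return;
  }
  aNode->inStyleScope = true;
  for (Node* child : aNode->children) {
    SetFlagOnSubtree(child);
  }
}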
@@ -2736,3 +2736,25 @@ EventTarget::DispatchEvent(Event& aEvent,
aRv = DispatchEvent(&aEvent, &result);
return result;
}

Element*
nsINode::GetParentElementCrossingShadowRoot() const
{
if (!mParent) {
return nullptr;
}

if (mParent->IsElement()) {
return mParent->AsElement();
}

ShadowRoot* shadowRoot = ShadowRoot::FromNode(mParent);
if (shadowRoot) {
nsIContent* host = shadowRoot->GetHost();
MOZ_ASSERT(host, "ShowRoots should always have a host");
MOZ_ASSERT(host->IsElement(), "ShadowRoot hosts should always be Elements");
return host->AsElement();
}

return nullptr;
}

@@ -14,6 +14,7 @@

#include "mozilla/css/Loader.h"
#include "mozilla/dom/Element.h"
#include "mozilla/dom/FragmentOrElement.h"
#include "mozilla/dom/ShadowRoot.h"
#include "mozilla/Preferences.h"
#include "nsCSSStyleSheet.h"

@@ -220,28 +221,6 @@ IsScopedStyleElement(nsIContent* aContent)
aContent->HasAttr(kNameSpaceID_None, nsGkAtoms::scoped);
}

static void
SetIsElementInStyleScopeFlagOnSubtree(Element* aElement)
{
if (aElement->IsElementInStyleScope()) {
return;
}

aElement->SetIsElementInStyleScope();

nsIContent* n = aElement->GetNextNode(aElement);
while (n) {
if (n->IsElementInStyleScope()) {
n = n->GetNextNonChildNode(aElement);
} else {
if (n->IsElement()) {
n->SetIsElementInStyleScope();
}
n = n->GetNextNode(aElement);
}
}
}

static bool
HasScopedStyleSheetChild(nsIContent* aContent)
{

@@ -396,7 +375,7 @@ nsStyleLinkElement::DoUpdateStyleSheet(nsIDocument* aOldDocument,
Element* scopeElement = isScoped ? thisContent->GetParentElement() : nullptr;
if (scopeElement) {
NS_ASSERTION(isInline, "non-inline style must not have scope element");
SetIsElementInStyleScopeFlagOnSubtree(scopeElement);
scopeElement->SetIsElementInStyleScopeFlagOnSubtree(true);
}

bool doneLoading = false;

@@ -488,6 +467,6 @@ nsStyleLinkElement::UpdateStyleSheetScopedness(bool aIsNowScoped)
UpdateIsElementInStyleScopeFlagOnSubtree(oldScopeElement);
}
if (newScopeElement) {
SetIsElementInStyleScopeFlagOnSubtree(newScopeElement);
newScopeElement->SetIsElementInStyleScopeFlagOnSubtree(true);
}
}
@@ -37,6 +37,7 @@ public:
, mOffset(aOffset)
, mTime(aTimestamp)
, mDuration(aDuration)
, mDiscontinuity(false)
{}

virtual ~MediaData() {}

@@ -53,6 +54,10 @@ public:
// Duration of sample, in microseconds.
const int64_t mDuration;

// True if this is the first sample after a gap or discontinuity in
// the stream. This is true for the first sample in a stream after a seek.
bool mDiscontinuity;

int64_t GetEndTime() const { return mTime + mDuration; }

};

@@ -0,0 +1,148 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef MediaDataDecodedListener_h_
#define MediaDataDecodedListener_h_

#include "mozilla/Monitor.h"
#include "MediaDecoderReader.h"

namespace mozilla {

class MediaDecoderStateMachine;
class MediaData;

// A RequestSampleCallback implementation that forwards samples onto the
// MediaDecoderStateMachine via tasks that run on the supplied task queue.
template<class Target>
class MediaDataDecodedListener : public RequestSampleCallback {
public:
MediaDataDecodedListener(Target* aTarget,
MediaTaskQueue* aTaskQueue)
: mMonitor("MediaDataDecodedListener")
, mTaskQueue(aTaskQueue)
, mTarget(aTarget)
{
MOZ_ASSERT(aTarget);
MOZ_ASSERT(aTaskQueue);
}

virtual void OnAudioDecoded(AudioData* aSample) MOZ_OVERRIDE {
MonitorAutoLock lock(mMonitor);
nsAutoPtr<AudioData> sample(aSample);
if (!mTarget || !mTaskQueue) {
// We've been shutdown, abort.
return;
}
RefPtr<nsIRunnable> task(new DeliverAudioTask(sample.forget(), mTarget));
mTaskQueue->Dispatch(task);
}

virtual void OnAudioEOS() MOZ_OVERRIDE {
MonitorAutoLock lock(mMonitor);
if (!mTarget || !mTaskQueue) {
// We've been shutdown, abort.
return;
}
RefPtr<nsIRunnable> task(NS_NewRunnableMethod(mTarget, &Target::OnAudioEOS));
if (NS_FAILED(mTaskQueue->Dispatch(task))) {
NS_WARNING("Failed to dispatch OnAudioEOS task");
}
}

virtual void OnVideoDecoded(VideoData* aSample) MOZ_OVERRIDE {
MonitorAutoLock lock(mMonitor);
nsAutoPtr<VideoData> sample(aSample);
if (!mTarget || !mTaskQueue) {
// We've been shutdown, abort.
return;
}
RefPtr<nsIRunnable> task(new DeliverVideoTask(sample.forget(), mTarget));
mTaskQueue->Dispatch(task);
}

virtual void OnVideoEOS() MOZ_OVERRIDE {
MonitorAutoLock lock(mMonitor);
if (!mTarget || !mTaskQueue) {
// We've been shutdown, abort.
return;
}
RefPtr<nsIRunnable> task(NS_NewRunnableMethod(mTarget, &Target::OnVideoEOS));
if (NS_FAILED(mTaskQueue->Dispatch(task))) {
NS_WARNING("Failed to dispatch OnVideoEOS task");
}
}

virtual void OnDecodeError() MOZ_OVERRIDE {
MonitorAutoLock lock(mMonitor);
if (!mTarget || !mTaskQueue) {
// We've been shutdown, abort.
return;
}
RefPtr<nsIRunnable> task(NS_NewRunnableMethod(mTarget, &Target::OnDecodeError));
if (NS_FAILED(mTaskQueue->Dispatch(task))) {
NS_WARNING("Failed to dispatch OnAudioDecoded task");
}
}

void BreakCycles() {
MonitorAutoLock lock(mMonitor);
mTarget = nullptr;
mTaskQueue = nullptr;
}

private:

class DeliverAudioTask : public nsRunnable {
public:
DeliverAudioTask(AudioData* aSample, Target* aTarget)
: mSample(aSample)
, mTarget(aTarget)
{
MOZ_COUNT_CTOR(DeliverAudioTask);
}
~DeliverAudioTask()
{
MOZ_COUNT_DTOR(DeliverAudioTask);
}
NS_METHOD Run() {
mTarget->OnAudioDecoded(mSample.forget());
return NS_OK;
}
private:
nsAutoPtr<AudioData> mSample;
RefPtr<Target> mTarget;
};

class DeliverVideoTask : public nsRunnable {
public:
DeliverVideoTask(VideoData* aSample, Target* aTarget)
: mSample(aSample)
, mTarget(aTarget)
{
MOZ_COUNT_CTOR(DeliverVideoTask);
}
~DeliverVideoTask()
{
MOZ_COUNT_DTOR(DeliverVideoTask);
}
NS_METHOD Run() {
mTarget->OnVideoDecoded(mSample.forget());
return NS_OK;
}
private:
nsAutoPtr<VideoData> mSample;
RefPtr<Target> mTarget;
};

Monitor mMonitor;
RefPtr<MediaTaskQueue> mTaskQueue;
RefPtr<Target> mTarget;
};

}

#endif // MediaDataDecodedListener_h_
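As an aside, the core pattern of the new MediaDataDecodedListener above (an asynchronous callback that hands each result to its consumer by posting a task onto the consumer's serial queue) can be sketched with nothing but the C++ standard library. The ToyTaskQueue, ToyStateMachine and ToyDecodedListener names below are invented for illustration; this is not the MediaTaskQueue or nsIRunnable machinery.

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

// Toy serial task queue: runs posted closures one at a time on its own worker thread.
class ToyTaskQueue {
public:
  ToyTaskQueue() : mWorker([this] { Run(); }) {}
  ~ToyTaskQueue() {
    {
      std::lock_guard<std::mutex> lock(mMutex);
      mDone = true;
    }
    mCond.notify_all();
    mWorker.join();
  }
  void Dispatch(std::function<void()> aTask) {
    {
      std::lock_guard<std::mutex> lock(mMutex);
      mTasks.push(std::move(aTask));
    }
    mCond.notify_one();
  }
private:
  void Run() {
    for (;;) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mMutex);
        mCond.wait(lock, [this] { return mDone || !mTasks.empty(); });
        if (mTasks.empty()) {
          return;  // shutting down and nothing left to run
        }
        task = std::move(mTasks.front());
        mTasks.pop();
      }
      task();
    }
  }
  std::mutex mMutex;
  std::condition_variable mCond;
  std::queue<std::function<void()>> mTasks;
  bool mDone = false;
  std::thread mWorker;  // declared last so the state above exists before the thread starts
};

// The listener idea reduced to its core: a decoder thread calls OnAudioDecoded(), and the
// listener forwards the sample to the consumer by posting a task onto the consumer's queue.
struct ToyStateMachine {
  void OnAudioDecoded(int /*aSample*/) { /* runs on the task queue's thread */ }
};

struct ToyDecodedListener {
  ToyStateMachine* mTarget;
  ToyTaskQueue* mQueue;
  void OnAudioDecoded(int aSample) {
    mQueue->Dispatch([target = mTarget, aSample] { target->OnAudioDecoded(aSample); });
  }
};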
@@ -1526,7 +1526,7 @@ int64_t MediaDecoder::GetEndMediaTime() const {
}

// Drop reference to state machine. Only called during shutdown dance.
void MediaDecoder::ReleaseStateMachine() {
void MediaDecoder::BreakCycles() {
mDecoderStateMachine = nullptr;
}

@@ -6,9 +6,9 @@
/*
Each video element based on MediaDecoder has a state machine to manage
its play state and keep the current frame up to date. All state machines
share time in a single shared thread. Each decoder also has one thread
dedicated to decoding audio and video data. This thread is shutdown when
playback is paused. Each decoder also has a thread to push decoded audio
share time in a single shared thread. Each decoder also has a MediaTaskQueue
running in a SharedThreadPool to decode audio and video data.
Each decoder also has a thread to push decoded audio
to the hardware. This thread is not created until playback starts, but
currently is not destroyed when paused, only when playback ends.

@@ -234,6 +234,11 @@ struct SeekTarget {
, mType(aType)
{
}
SeekTarget(const SeekTarget& aOther)
: mTime(aOther.mTime)
, mType(aOther.mType)
{
}
bool IsValid() const {
return mType != SeekTarget::Invalid;
}

@@ -823,7 +828,7 @@ public:
MediaDecoderStateMachine* GetStateMachine() const;

// Drop reference to state machine. Only called during shutdown dance.
virtual void ReleaseStateMachine();
virtual void BreakCycles();

// Notifies the element that decoding has failed.
virtual void DecodeError();

@@ -63,9 +63,11 @@ public:
};

MediaDecoderReader::MediaDecoderReader(AbstractMediaDecoder* aDecoder)
: mAudioCompactor(mAudioQueue),
mDecoder(aDecoder),
mIgnoreAudioOutputFormat(false)
: mAudioCompactor(mAudioQueue)
, mDecoder(aDecoder)
, mIgnoreAudioOutputFormat(false)
, mAudioDiscontinuity(false)
, mVideoDiscontinuity(false)
{
MOZ_COUNT_CTOR(MediaDecoderReader);
}

@@ -97,6 +99,9 @@ nsresult MediaDecoderReader::ResetDecode()
VideoQueue().Reset();
AudioQueue().Reset();

mAudioDiscontinuity = true;
mVideoDiscontinuity = true;

return res;
}

@@ -173,169 +178,6 @@ VideoData* MediaDecoderReader::FindStartTime(int64_t& aOutStartTime)
return videoData;
}

nsresult MediaDecoderReader::DecodeToTarget(int64_t aTarget)
{
DECODER_LOG(PR_LOG_DEBUG, ("MediaDecoderReader::DecodeToTarget(%lld) Begin", aTarget));

// Decode forward to the target frame. Start with video, if we have it.
if (HasVideo()) {
// Note: when decoding hits the end of stream we must keep the last frame
// in the video queue so that we'll have something to display after the
// seek completes. This makes our logic a bit messy.
bool eof = false;
nsAutoPtr<VideoData> video;
while (HasVideo() && !eof) {
while (VideoQueue().GetSize() == 0 && !eof) {
bool skip = false;
eof = !DecodeVideoFrame(skip, 0);
{
ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
if (mDecoder->IsShutdown()) {
return NS_ERROR_FAILURE;
}
}
}
if (eof) {
// Hit end of file, we want to display the last frame of the video.
if (video) {
DECODER_LOG(PR_LOG_DEBUG,
("MediaDecoderReader::DecodeToTarget(%lld) repushing video frame [%lld, %lld] at EOF",
aTarget, video->mTime, video->GetEndTime()));
VideoQueue().PushFront(video.forget());
}
VideoQueue().Finish();
break;
}
video = VideoQueue().PeekFront();
// If the frame end time is less than the seek target, we won't want
// to display this frame after the seek, so discard it.
if (video && video->GetEndTime() <= aTarget) {
DECODER_LOG(PR_LOG_DEBUG,
("MediaDecoderReader::DecodeToTarget(%lld) pop video frame [%lld, %lld]",
aTarget, video->mTime, video->GetEndTime()));
VideoQueue().PopFront();
} else {
// Found a frame after or encompasing the seek target.
if (aTarget >= video->mTime && video->GetEndTime() >= aTarget) {
// The seek target lies inside this frame's time slice. Adjust the frame's
// start time to match the seek target. We do this by replacing the
// first frame with a shallow copy which has the new timestamp.
VideoQueue().PopFront();
VideoData* temp = VideoData::ShallowCopyUpdateTimestamp(video, aTarget);
video = temp;
VideoQueue().PushFront(video);
}
DECODER_LOG(PR_LOG_DEBUG,
("MediaDecoderReader::DecodeToTarget(%lld) found target video frame [%lld,%lld]",
aTarget, video->mTime, video->GetEndTime()));

video.forget();
break;
}
}
{
ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
if (mDecoder->IsShutdown()) {
return NS_ERROR_FAILURE;
}
}
#ifdef PR_LOGGING
const VideoData* front = VideoQueue().PeekFront();
DECODER_LOG(PR_LOG_DEBUG, ("First video frame after decode is %lld",
front ? front->mTime : -1));
#endif
}

if (HasAudio()) {
// Decode audio forward to the seek target.
bool eof = false;
while (HasAudio() && !eof) {
while (!eof && AudioQueue().GetSize() == 0) {
eof = !DecodeAudioData();
{
ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
if (mDecoder->IsShutdown()) {
return NS_ERROR_FAILURE;
}
}
}
const AudioData* audio = AudioQueue().PeekFront();
if (!audio || eof) {
AudioQueue().Finish();
break;
}
CheckedInt64 startFrame = UsecsToFrames(audio->mTime, mInfo.mAudio.mRate);
CheckedInt64 targetFrame = UsecsToFrames(aTarget, mInfo.mAudio.mRate);
if (!startFrame.isValid() || !targetFrame.isValid()) {
return NS_ERROR_FAILURE;
}
if (startFrame.value() + audio->mFrames <= targetFrame.value()) {
// Our seek target lies after the frames in this AudioData. Pop it
// off the queue, and keep decoding forwards.
delete AudioQueue().PopFront();
audio = nullptr;
continue;
}
if (startFrame.value() > targetFrame.value()) {
// The seek target doesn't lie in the audio block just after the last
// audio frames we've seen which were before the seek target. This
// could have been the first audio data we've seen after seek, i.e. the
// seek terminated after the seek target in the audio stream. Just
// abort the audio decode-to-target, the state machine will play
// silence to cover the gap. Typically this happens in poorly muxed
// files.
NS_WARNING("Audio not synced after seek, maybe a poorly muxed file?");
break;
}

// The seek target lies somewhere in this AudioData's frames, strip off
// any frames which lie before the seek target, so we'll begin playback
// exactly at the seek target.
NS_ASSERTION(targetFrame.value() >= startFrame.value(),
"Target must at or be after data start.");
NS_ASSERTION(targetFrame.value() < startFrame.value() + audio->mFrames,
"Data must end after target.");

int64_t framesToPrune = targetFrame.value() - startFrame.value();
if (framesToPrune > audio->mFrames) {
// We've messed up somehow. Don't try to trim frames, the |frames|
// variable below will overflow.
NS_WARNING("Can't prune more frames that we have!");
break;
}
uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune);
uint32_t channels = audio->mChannels;
nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[frames * channels]);
memcpy(audioData.get(),
audio->mAudioData.get() + (framesToPrune * channels),
frames * channels * sizeof(AudioDataValue));
CheckedInt64 duration = FramesToUsecs(frames, mInfo.mAudio.mRate);
if (!duration.isValid()) {
return NS_ERROR_FAILURE;
}
nsAutoPtr<AudioData> data(new AudioData(audio->mOffset,
aTarget,
duration.value(),
frames,
audioData.forget(),
channels));
delete AudioQueue().PopFront();
AudioQueue().PushFront(data.forget());
break;
}
}

#ifdef PR_LOGGING
const VideoData* v = VideoQueue().PeekFront();
const AudioData* a = AudioQueue().PeekFront();
DECODER_LOG(PR_LOG_DEBUG,
("MediaDecoderReader::DecodeToTarget(%lld) finished v=%lld a=%lld",
aTarget, v ? v->mTime : -1, a ? a->mTime : -1));
#endif

return NS_OK;
}

nsresult
MediaDecoderReader::GetBuffered(mozilla::dom::TimeRanges* aBuffered,
int64_t aStartTime)

@@ -350,4 +192,174 @@ MediaDecoderReader::GetBuffered(mozilla::dom::TimeRanges* aBuffered,
return NS_OK;
}

class RequestVideoWithSkipTask : public nsRunnable {
public:
RequestVideoWithSkipTask(MediaDecoderReader* aReader,
int64_t aTimeThreshold)
: mReader(aReader)
, mTimeThreshold(aTimeThreshold)
{
}
NS_METHOD Run() {
bool skip = true;
mReader->RequestVideoData(skip, mTimeThreshold);
return NS_OK;
}
private:
nsRefPtr<MediaDecoderReader> mReader;
int64_t mTimeThreshold;
};

void
MediaDecoderReader::RequestVideoData(bool aSkipToNextKeyframe,
int64_t aTimeThreshold)
{
bool skip = aSkipToNextKeyframe;
while (VideoQueue().GetSize() == 0 &&
!VideoQueue().IsFinished()) {
if (!DecodeVideoFrame(skip, aTimeThreshold)) {
VideoQueue().Finish();
} else if (skip) {
// We still need to decode more data in order to skip to the next
// keyframe. Post another task to the decode task queue to decode
// again. We don't just decode straight in a loop here, as that
// would hog the decode task queue.
RefPtr<nsIRunnable> task(new RequestVideoWithSkipTask(this, aTimeThreshold));
mTaskQueue->Dispatch(task);
return;
}
}
if (VideoQueue().GetSize() > 0) {
VideoData* v = VideoQueue().PopFront();
if (v && mVideoDiscontinuity) {
v->mDiscontinuity = true;
mVideoDiscontinuity = false;
}
GetCallback()->OnVideoDecoded(v);
} else if (VideoQueue().IsFinished()) {
GetCallback()->OnVideoEOS();
}
}

void
MediaDecoderReader::RequestAudioData()
{
while (AudioQueue().GetSize() == 0 &&
!AudioQueue().IsFinished()) {
if (!DecodeAudioData()) {
AudioQueue().Finish();
}
}
if (AudioQueue().GetSize() > 0) {
AudioData* a = AudioQueue().PopFront();
if (mAudioDiscontinuity) {
a->mDiscontinuity = true;
mAudioDiscontinuity = false;
}
GetCallback()->OnAudioDecoded(a);
return;
} else if (AudioQueue().IsFinished()) {
GetCallback()->OnAudioEOS();
return;
}
}

void
MediaDecoderReader::SetCallback(RequestSampleCallback* aCallback)
{
mSampleDecodedCallback = aCallback;
}

void
MediaDecoderReader::SetTaskQueue(MediaTaskQueue* aTaskQueue)
{
mTaskQueue = aTaskQueue;
}

void
MediaDecoderReader::BreakCycles()
{
if (mSampleDecodedCallback) {
mSampleDecodedCallback->BreakCycles();
mSampleDecodedCallback = nullptr;
}
mTaskQueue = nullptr;
}

void
MediaDecoderReader::Shutdown()
{
ReleaseMediaResources();
}

AudioDecodeRendezvous::AudioDecodeRendezvous()
: mMonitor("AudioDecodeRendezvous")
, mHaveResult(false)
{
}

AudioDecodeRendezvous::~AudioDecodeRendezvous()
{
}

void
AudioDecodeRendezvous::OnAudioDecoded(AudioData* aSample)
{
MonitorAutoLock mon(mMonitor);
mSample = aSample;
mStatus = NS_OK;
mHaveResult = true;
mon.NotifyAll();
}

void
AudioDecodeRendezvous::OnAudioEOS()
{
MonitorAutoLock mon(mMonitor);
mSample = nullptr;
mStatus = NS_OK;
mHaveResult = true;
mon.NotifyAll();
}

void
AudioDecodeRendezvous::OnDecodeError()
{
MonitorAutoLock mon(mMonitor);
mSample = nullptr;
mStatus = NS_ERROR_FAILURE;
mHaveResult = true;
mon.NotifyAll();
}

void
AudioDecodeRendezvous::Reset()
{
MonitorAutoLock mon(mMonitor);
mHaveResult = false;
mStatus = NS_OK;
mSample = nullptr;
}

nsresult
AudioDecodeRendezvous::Await(nsAutoPtr<AudioData>& aSample)
{
MonitorAutoLock mon(mMonitor);
while (!mHaveResult) {
mon.Wait();
}
mHaveResult = false;
aSample = mSample;
return mStatus;
}

void
AudioDecodeRendezvous::Cancel()
{
MonitorAutoLock mon(mMonitor);
mStatus = NS_ERROR_ABORT;
mHaveResult = true;
mon.NotifyAll();
}

} // namespace mozilla
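A toy version of the trimming arithmetic used by the removed DecodeToTarget() code above (and by the seek path that replaces it): convert the audio block's start time and the seek target from microseconds to frame counts, and drop the leading frames that fall before the target so playback starts exactly at the target. The function names are invented; the real code uses CheckedInt64 for overflow safety and rebuilds a new AudioData from the remaining frames.

#include <cassert>
#include <cstdint>

int64_t UsecsToFramesApprox(int64_t aUsecs, int64_t aRate) {
  return aUsecs * aRate / 1000000;  // e.g. 500000 us at 44100 Hz -> 22050 frames
}

int64_t FramesToPrune(int64_t aBlockStartUsecs, int64_t aTargetUsecs,
                      int64_t aRate, int64_t aFramesInBlock) {
  int64_t startFrame = UsecsToFramesApprox(aBlockStartUsecs, aRate);
  int64_t targetFrame = UsecsToFramesApprox(aTargetUsecs, aRate);
  assert(targetFrame >= startFrame);                  // target is at or after the block start
  assert(targetFrame < startFrame + aFramesInBlock);  // and falls inside this block
  return targetFrame - startFrame;                    // leading frames to discard
}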
@@ -18,12 +18,19 @@ namespace dom {
class TimeRanges;
}

// Encapsulates the decoding and reading of media data. Reading can only be
// done on the decode thread. Never hold the decoder monitor when
// calling into this class. Unless otherwise specified, methods and fields of
// this class can only be accessed on the decode thread.
class RequestSampleCallback;

// Encapsulates the decoding and reading of media data. Reading can either
// synchronous and done on the calling "decode" thread, or asynchronous and
// performed on a background thread, with the result being returned by
// callback. Never hold the decoder monitor when calling into this class.
// Unless otherwise specified, methods and fields of this class can only
// be accessed on the decode task queue.
class MediaDecoderReader {
public:

NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDecoderReader)

MediaDecoderReader(AbstractMediaDecoder* aDecoder);
virtual ~MediaDecoderReader();

@@ -36,24 +43,48 @@ public:
// True when this reader need to become dormant state
virtual bool IsDormantNeeded() { return false; }
// Release media resources they should be released in dormant state
// The reader can be made usable again by calling ReadMetadata().
virtual void ReleaseMediaResources() {};
// Release the decoder during shutdown
virtual void ReleaseDecoder() {};
// Breaks reference-counted cycles. Called during shutdown.
// WARNING: If you override this, you must call the base implementation
// in your override.
virtual void BreakCycles();

// Destroys the decoding state. The reader cannot be made usable again.
// This is different from ReleaseMediaResources() as it is irreversable,
// whereas ReleaseMediaResources() is.
virtual void Shutdown();

virtual void SetCallback(RequestSampleCallback* aDecodedSampleCallback);
virtual void SetTaskQueue(MediaTaskQueue* aTaskQueue);

// Resets all state related to decoding, emptying all buffers etc.
// Cancels all pending Request*Data() request callbacks, and flushes the
// decode pipeline. The decoder must not call any of the callbacks for
// outstanding Request*Data() calls after this is called. Calls to
// Request*Data() made after this should be processed as usual.
// Normally this call preceedes a Seek() call, or shutdown.
// The first samples of every stream produced after a ResetDecode() call
// *must* be marked as "discontinuities". If it's not, seeking work won't
// properly!
virtual nsresult ResetDecode();

// Decodes an unspecified amount of audio data, enqueuing the audio data
// in mAudioQueue. Returns true when there's more audio to decode,
// false if the audio is finished, end of file has been reached,
// or an un-recoverable read error has occured.
virtual bool DecodeAudioData() = 0;
// Requests the Reader to call OnAudioDecoded() on aCallback with one
// audio sample. The decode should be performed asynchronously, and
// the callback can be performed on any thread. Don't hold the decoder
// monitor while calling this, as the implementation may try to wait
// on something that needs the monitor and deadlock.
virtual void RequestAudioData();

// Reads and decodes one video frame. Packets with a timestamp less
// than aTimeThreshold will be decoded (unless they're not keyframes
// and aKeyframeSkip is true), but will not be added to the queue.
virtual bool DecodeVideoFrame(bool &aKeyframeSkip,
int64_t aTimeThreshold) = 0;
// Requests the Reader to call OnVideoDecoded() on aCallback with one
// video sample. The decode should be performed asynchronously, and
// the callback can be performed on any thread. Don't hold the decoder
// monitor while calling this, as the implementation may try to wait
// on something that needs the monitor and deadlock.
// If aSkipToKeyframe is true, the decode should skip ahead to the
// the next keyframe at or after aTimeThreshold microseconds.
virtual void RequestVideoData(bool aSkipToNextKeyframe,
int64_t aTimeThreshold);

virtual bool HasAudio() = 0;
virtual bool HasVideo() = 0;

@@ -65,6 +96,7 @@ public:
virtual nsresult ReadMetadata(MediaInfo* aInfo,
MetadataTags** aTags) = 0;

// TODO: DEPRECATED. This uses synchronous decoding.
// Stores the presentation time of the first frame we'd be able to play if
// we started playback at the current position. Returns the first video
// frame, if we have video.

@@ -98,22 +130,6 @@ public:
mIgnoreAudioOutputFormat = true;
}

protected:
// Queue of audio frames. This queue is threadsafe, and is accessed from
// the audio, decoder, state machine, and main threads.
MediaQueue<AudioData> mAudioQueue;

// Queue of video frames. This queue is threadsafe, and is accessed from
// the decoder, state machine, and main threads.
MediaQueue<VideoData> mVideoQueue;

// An adapter to the audio queue which first copies data to buffers with
// minimal allocation slop and then pushes them to the queue. This is
// useful for decoders working with formats that give awkward numbers of
// frames such as mp3.
AudioCompactor mAudioCompactor;

public:
// Populates aBuffered with the time ranges which are buffered. aStartTime
// must be the presentation time of the first frame in the media, e.g.
// the media time corresponding to playback time/position 0. This function

@@ -156,15 +172,51 @@ public:
AudioData* DecodeToFirstAudioData();
VideoData* DecodeToFirstVideoData();

// Decodes samples until we reach frames required to play at time aTarget
// (usecs). This also trims the samples to start exactly at aTarget,
// by discarding audio samples and adjusting start times of video frames.
nsresult DecodeToTarget(int64_t aTarget);

MediaInfo GetMediaInfo() { return mInfo; }

protected:

// Overrides of this function should decodes an unspecified amount of
// audio data, enqueuing the audio data in mAudioQueue. Returns true
// when there's more audio to decode, false if the audio is finished,
// end of file has been reached, or an un-recoverable read error has
// occured. This function blocks until the decode is complete.
virtual bool DecodeAudioData() {
return false;
}

// Overrides of this function should read and decodes one video frame.
// Packets with a timestamp less than aTimeThreshold will be decoded
// (unless they're not keyframes and aKeyframeSkip is true), but will
// not be added to the queue. This function blocks until the decode
// is complete.
virtual bool DecodeVideoFrame(bool &aKeyframeSkip, int64_t aTimeThreshold) {
return false;
}

RequestSampleCallback* GetCallback() {
MOZ_ASSERT(mSampleDecodedCallback);
return mSampleDecodedCallback;
}

virtual MediaTaskQueue* GetTaskQueue() {
return mTaskQueue;
}

// Queue of audio frames. This queue is threadsafe, and is accessed from
// the audio, decoder, state machine, and main threads.
MediaQueue<AudioData> mAudioQueue;

// Queue of video frames. This queue is threadsafe, and is accessed from
// the decoder, state machine, and main threads.
MediaQueue<VideoData> mVideoQueue;

// An adapter to the audio queue which first copies data to buffers with
// minimal allocation slop and then pushes them to the queue. This is
// useful for decoders working with formats that give awkward numbers of
// frames such as mp3.
AudioCompactor mAudioCompactor;

// Reference to the owning decoder object.
AbstractMediaDecoder* mDecoder;

@@ -175,6 +227,82 @@ protected:
// directly, because they have a number of channel higher than
// what we support.
bool mIgnoreAudioOutputFormat;

private:

nsRefPtr<RequestSampleCallback> mSampleDecodedCallback;

nsRefPtr<MediaTaskQueue> mTaskQueue;

// Flags whether a the next audio/video sample comes after a "gap" or
// "discontinuity" in the stream. For example after a seek.
bool mAudioDiscontinuity;
bool mVideoDiscontinuity;
};

// Interface that callers to MediaDecoderReader::Request{Audio,Video}Data()
// must implement to receive the requested samples asynchronously.
// This object is refcounted, and cycles must be broken by calling
// BreakCycles() during shutdown.
class RequestSampleCallback {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RequestSampleCallback)

// Receives the result of a RequestAudioData() call.
virtual void OnAudioDecoded(AudioData* aSample) = 0;

// Called when a RequestAudioData() call can't be fulfiled as we've
// reached the end of stream.
virtual void OnAudioEOS() = 0;

// Receives the result of a RequestVideoData() call.
virtual void OnVideoDecoded(VideoData* aSample) = 0;

// Called when a RequestVideoData() call can't be fulfiled as we've
// reached the end of stream.
virtual void OnVideoEOS() = 0;

// Called when there's a decode error. No more sample requests
// will succeed.
virtual void OnDecodeError() = 0;

// Called during shutdown to break any reference cycles.
virtual void BreakCycles() = 0;

virtual ~RequestSampleCallback() {}
};

// A RequestSampleCallback implementation that can be passed to the
// MediaDecoderReader to block the thread requesting an audio sample until
// the audio decode is complete. This is used to adapt the asynchronous
// model of the MediaDecoderReader to a synchronous model.
class AudioDecodeRendezvous : public RequestSampleCallback {
public:
AudioDecodeRendezvous();
~AudioDecodeRendezvous();

// RequestSampleCallback implementation. Called when decode is complete.
// Note: aSample is null at end of stream.
virtual void OnAudioDecoded(AudioData* aSample) MOZ_OVERRIDE;
virtual void OnAudioEOS() MOZ_OVERRIDE;
virtual void OnVideoDecoded(VideoData* aSample) MOZ_OVERRIDE {}
virtual void OnVideoEOS() MOZ_OVERRIDE {}
virtual void OnDecodeError() MOZ_OVERRIDE;
virtual void BreakCycles() MOZ_OVERRIDE {};
void Reset();

// Returns failure on error, or NS_OK.
// If *aSample is null, EOS has been reached.
nsresult Await(nsAutoPtr<AudioData>& aSample);

// Interrupts a call to Wait().
void Cancel();

private:
Monitor mMonitor;
nsresult mStatus;
nsAutoPtr<AudioData> mSample;
bool mHaveResult;
};

} // namespace mozilla
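The AudioDecodeRendezvous declared above adapts the asynchronous callbacks back into a blocking call. A standard-library analogue of that await pattern, with an invented ToyRendezvous type and std::condition_variable in place of mozilla::Monitor, looks roughly like this:

#include <condition_variable>
#include <mutex>

class ToyRendezvous {
public:
  // Called from the decode callback (any thread): store the result and wake the waiter.
  void OnResult(int aStatus) {
    std::lock_guard<std::mutex> lock(mMutex);
    mStatus = aStatus;
    mHaveResult = true;
    mCond.notify_all();
  }
  // Called from the thread that wants to block until a result, EOS or cancel arrives.
  int Await() {
    std::unique_lock<std::mutex> lock(mMutex);
    mCond.wait(lock, [this] { return mHaveResult; });
    mHaveResult = false;  // ready for the next request
    return mStatus;
  }
private:
  std::mutex mMutex;
  std::condition_variable mCond;
  bool mHaveResult = false;
  int mStatus = 0;
};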
The diff of one file is not shown because it is too large.
@@ -4,29 +4,36 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
Each video element for a media file has two threads:

1) The Audio thread writes the decoded audio data to the audio
hardware. This is done in a separate thread to ensure that the
audio hardware gets a constant stream of data without
interruption due to decoding or display. At some point
AudioStream will be refactored to have a callback interface
where it asks for data and an extra thread will no longer be
needed.
Each media element for a media file has one thread called the "audio thread".

2) The decode thread. This thread reads from the media stream and
decodes the Theora and Vorbis data. It places the decoded data into
queues for the other threads to pull from.
The audio thread writes the decoded audio data to the audio
hardware. This is done in a separate thread to ensure that the
audio hardware gets a constant stream of data without
interruption due to decoding or display. At some point
AudioStream will be refactored to have a callback interface
where it asks for data and this thread will no longer be
needed.

The element/state machine also has a MediaTaskQueue which runs in a
SharedThreadPool that is shared with all other elements/decoders. The state
machine dispatches tasks to this to call into the MediaDecoderReader to
request decoded audio or video data. The Reader will callback with decoded
sampled when it has them available, and the state machine places the decoded
samples into its queues for the consuming threads to pull from.

The MediaDecoderReader can choose to decode asynchronously, or synchronously
and return requested samples synchronously inside it's Request*Data()
functions via callback. Asynchronous decoding is preferred, and should be
used for any new readers.

All file reads, seeks, and all decoding must occur on the decode thread.
Synchronisation of state between the thread is done via a monitor owned
by MediaDecoder.

The lifetime of the decode and audio threads is controlled by the state
machine when it runs on the shared state machine thread. When playback
needs to occur they are created and events dispatched to them to run
them. These events exit when decoding/audio playback is completed or
no longer required.
The lifetime of the audio thread is controlled by the state machine when
it runs on the shared state machine thread. When playback needs to occur
the audio thread is created and an event dispatched to run it. The audio
thread exits when audio playback is completed or no longer required.

A/V synchronisation is handled by the state machine. It examines the audio
playback time and compares this to the next frame in the queue of video

@@ -39,7 +46,7 @@ Frame skipping is done in the following ways:
display time is less than the current audio time. This ensures
the correct frame for the current time is always displayed.

2) The decode thread will stop decoding interframes and read to the
2) The decode tasks will stop decoding interframes and read to the
next keyframe if it determines that decoding the remaining
interframes will cause playback issues. It detects this by:
a) If the amount of audio data in the audio queue drops

@@ -47,11 +54,13 @@ Frame skipping is done in the following ways:
b) If the video queue drops below a threshold where it
will be decoding video data that won't be displayed due
to the decode thread dropping the frame immediately.
TODO: In future we should only do this when the Reader is decoding
synchronously.

When hardware accelerated graphics is not available, YCbCr conversion
is done on the decode thread when video frames are decoded.
is done on the decode task queue when video frames are decoded.

The decode thread pushes decoded audio and videos frames into two
The decode task queue pushes decoded audio and videos frames into two
separate queues - one for audio and one for video. These are kept
separate to make it easy to constantly feed audio data to the audio
hardware while allowing frame skipping of video data. These queues are

@@ -59,13 +68,10 @@ threadsafe, and neither the decode, audio, or state machine should
be able to monopolize them, and cause starvation of the other threads.

Both queues are bounded by a maximum size. When this size is reached
the decode thread will no longer decode video or audio depending on the
queue that has reached the threshold. If both queues are full, the decode
thread will wait on the decoder monitor.

When the decode queues are full (they've reaced their maximum size) and
the decoder is not in PLAYING play state, the state machine may opt
to shut down the decode thread in order to conserve resources.
the decode tasks will no longer request video or audio depending on the
queue that has reached the threshold. If both queues are full, no more
decode tasks will be dispatched to the decode task queue, so other
decoders will have an opportunity to run.

During playback the audio thread will be idle (via a Wait() on the
monitor) if the audio queue is empty. Otherwise it constantly pops
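A rough sketch of the backpressure rule described in the comment above: only request more decoded data while the corresponding sample queue is below its threshold, so a full queue stops further decode tasks from being dispatched. The type, field names and threshold values below are invented for illustration and are not the real state machine's logic.

#include <cstddef>

struct ToyDecodeScheduler {
  std::size_t mAudioQueueLength = 0;
  std::size_t mVideoQueueLength = 0;
  static constexpr std::size_t kAudioThreshold = 20;  // invented numbers
  static constexpr std::size_t kVideoThreshold = 10;

  bool NeedMoreAudio() const { return mAudioQueueLength < kAudioThreshold; }
  bool NeedMoreVideo() const { return mVideoQueueLength < kVideoThreshold; }

  // Called whenever samples are pushed or popped: request more work only for the
  // stream whose queue is below its threshold; a full queue dispatches nothing.
  template <typename AudioFn, typename VideoFn>
  void DispatchDecodeTasksIfNeeded(AudioFn aRequestAudio, VideoFn aRequestVideo) {
    if (NeedMoreAudio()) {
      aRequestAudio();
    }
    if (NeedMoreVideo()) {
      aRequestVideo();
    }
  }
};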
@ -83,6 +89,7 @@ hardware (via AudioStream).
|
|||
#include "MediaDecoderReader.h"
|
||||
#include "MediaDecoderOwner.h"
|
||||
#include "MediaMetadataManager.h"
|
||||
#include "MediaDataDecodedListener.h"
|
||||
|
||||
class nsITimer;
|
||||
|
||||
|
@ -102,7 +109,7 @@ class SharedThreadPool;
|
|||
|
||||
/*
|
||||
The state machine class. This manages the decoding and seeking in the
|
||||
MediaDecoderReader on the decode thread, and A/V sync on the shared
|
||||
MediaDecoderReader on the decode task queue, and A/V sync on the shared
|
||||
state machine thread, and controls the audio "push" thread.
|
||||
|
||||
All internal state is synchronised via the decoder monitor. State changes
|
||||
|
@ -312,10 +319,9 @@ public:
|
|||
void SetFragmentEndTime(int64_t aEndTime);
|
||||
|
||||
// Drop reference to decoder. Only called during shutdown dance.
|
||||
void ReleaseDecoder() {
|
||||
MOZ_ASSERT(mReader);
|
||||
void BreakCycles() {
|
||||
if (mReader) {
|
||||
mReader->ReleaseDecoder();
|
||||
mReader->BreakCycles();
|
||||
}
|
||||
mDecoder = nullptr;
|
||||
}
|
||||
|
@ -357,11 +363,22 @@ public:
|
|||
// samples in advance of when they're needed for playback.
|
||||
void SetMinimizePrerollUntilPlaybackStarts();
|
||||
|
||||
void OnAudioDecoded(AudioData* aSample);
|
||||
void OnAudioEOS();
|
||||
void OnVideoDecoded(VideoData* aSample);
|
||||
void OnVideoEOS();
|
||||
void OnDecodeError();
|
||||
|
||||
protected:
|
||||
virtual ~MediaDecoderStateMachine();
|
||||
|
||||
void AssertCurrentThreadInMonitor() const { mDecoder->GetReentrantMonitor().AssertCurrentThreadIn(); }
|
||||
|
||||
// Inserts MediaData* samples into their respective MediaQueues.
|
||||
// aSample must not be null.
|
||||
void Push(AudioData* aSample);
|
||||
void Push(VideoData* aSample);
|
||||
|
||||
class WakeDecoderRunnable : public nsRunnable {
|
||||
public:
|
||||
WakeDecoderRunnable(MediaDecoderStateMachine* aSM)
|
||||
|
@ -397,8 +414,14 @@ protected:
|
|||
};
|
||||
WakeDecoderRunnable* GetWakeDecoderRunnable();
|
||||
|
||||
MediaQueue<AudioData>& AudioQueue() { return mReader->AudioQueue(); }
|
||||
MediaQueue<VideoData>& VideoQueue() { return mReader->VideoQueue(); }
|
||||
MediaQueue<AudioData>& AudioQueue() { return mAudioQueue; }
|
||||
MediaQueue<VideoData>& VideoQueue() { return mVideoQueue; }
|
||||
|
||||
nsresult FinishDecodeMetadata();
|
||||
|
||||
RefPtr<MediaDataDecodedListener<MediaDecoderStateMachine>> mMediaDecodedListener;
|
||||
|
||||
nsAutoPtr<MetadataTags> mMetadataTags;
|
||||
|
||||
// True if our buffers of decoded audio are not full, and we should
|
||||
// decode more.
|
||||
|
@ -468,11 +491,10 @@ protected:
|
|||
// Called on the state machine thread.
|
||||
int64_t GetClock();
|
||||
|
||||
// Returns the presentation time of the first audio or video frame in the
|
||||
// media. If the media has video, it returns the first video frame. The
|
||||
// decoder monitor must be held with exactly one lock count. Called on the
|
||||
// state machine thread.
|
||||
VideoData* FindStartTime();
|
||||
nsresult DropAudioUpToSeekTarget(AudioData* aSample);
|
||||
nsresult DropVideoUpToSeekTarget(VideoData* aSample);
|
||||
|
||||
void SetStartTime(int64_t aStartTimeUsecs);
|
||||
|
||||
// Update only the state machine's current playback position (and duration,
|
||||
// if unknown). Does not update the playback position on the decoder or
|
||||
|
@ -544,6 +566,10 @@ protected:
|
|||
// The decoder monitor must be held.
|
||||
nsresult EnqueueDecodeMetadataTask();
|
||||
|
||||
// Dispatches a task to the decode task queue to seek the decoder.
|
||||
// The decoder monitor must be held.
|
||||
nsresult EnqueueDecodeSeekTask();
|
||||
|
||||
nsresult DispatchAudioDecodeTaskIfNeeded();
|
||||
|
||||
// Ensures a to decode audio has been dispatched to the decode task queue.
|
||||
|
@@ -561,10 +587,6 @@ protected:
// The decoder monitor must be held.
nsresult EnsureVideoDecodeTaskQueued();
// Dispatches a task to the decode task queue to seek the decoder.
// The decoder monitor must be held.
nsresult EnqueueDecodeSeekTask();
// Calls the reader's SetIdle(). This is only called in a task dispatched to
// the decode task queue, don't call it directly.
void SetReaderIdle();

@@ -575,12 +597,6 @@ protected:
// The decoder monitor must be held.
void DispatchDecodeTasksIfNeeded();
// Queries our state to see whether the decode has finished for all streams.
// If so, we move into DECODER_STATE_COMPLETED and schedule the state machine
// to run.
// The decoder monitor must be held.
void CheckIfDecodeComplete();
// Returns the "media time". This is the absolute time which the media
// playback has reached. i.e. this returns values in the range
// [mStartTime, mEndTime], and mStartTime will not be 0 if the media does

@@ -604,15 +620,29 @@ protected:
// must be held with exactly one lock count.
nsresult DecodeMetadata();
// Wraps the call to DecodeMetadata(), signals a DecodeError() on failure.
void CallDecodeMetadata();
// Checks whether we're finished decoding metadata, and switches to DECODING
// state if so.
void MaybeFinishDecodeMetadata();
// Seeks to mSeekTarget. Called on the decode thread. The decoder monitor
// must be held with exactly one lock count.
void DecodeSeek();
// Decode loop, decodes data until EOF or shutdown.
// Called on the decode thread.
void DecodeLoop();
void CheckIfSeekComplete();
bool IsAudioSeekComplete();
bool IsVideoSeekComplete();
void CallDecodeMetadata();
// Completes the seek operation, moves onto the next appropriate state.
void SeekCompleted();
// Queries our state to see whether the decode has finished for all streams.
// If so, we move into DECODER_STATE_COMPLETED and schedule the state machine
// to run.
// The decoder monitor must be held.
void CheckIfDecodeComplete();
// Copy audio from an AudioData packet to aOutput. This may require
// inserting silence depending on the timing of the audio packet.

@@ -637,6 +667,11 @@ protected:
// case as it may not be needed again.
bool IsPausedAndDecoderWaiting();
// These return true if the respective stream's decode has not yet reached
// the end of stream.
bool IsAudioDecoding();
bool IsVideoDecoding();
// The decoder object that created this state machine. The state machine
// holds a strong reference to the decoder to ensure that the decoder stays
// alive once media element has started the decoder shutdown process, and has

@@ -648,6 +683,19 @@ protected:
// state machine, audio and main threads.
nsRefPtr<MediaDecoder> mDecoder;
// Time at which the last video sample was requested. If it takes too long
// before the sample arrives, we will increase the amount of audio we buffer.
// This is necessary for legacy synchronous decoders to prevent underruns.
TimeStamp mVideoDecodeStartTime;
// Queue of audio frames. This queue is threadsafe, and is accessed from
// the audio, decoder, state machine, and main threads.
MediaQueue<AudioData> mAudioQueue;
// Queue of video frames. This queue is threadsafe, and is accessed from
// the decoder, state machine, and main threads.
MediaQueue<VideoData> mVideoQueue;
// The decoder monitor must be obtained before modifying this state.
// NotifyAll on the monitor must be called when the state is changed so
// that interested threads can wake up and alter behaviour if appropriate

@@ -719,6 +767,14 @@ protected:
// this value. Accessed on main and decode thread.
SeekTarget mSeekTarget;
// The position that we're currently seeking to. This differs from
// mSeekTarget, as mSeekTarget is the target we'll seek to next, whereas
// mCurrentSeekTarget is the position that the decode is in the process
// of seeking to.
// The decoder monitor lock must be obtained before reading or writing
// this value.
SeekTarget mCurrentSeekTarget;
// Media Fragment end time in microseconds. Access controlled by decoder monitor.
int64_t mFragmentEndTime;

@@ -729,9 +785,8 @@ protected:
RefPtr<AudioStream> mAudioStream;
// The reader, don't call its methods with the decoder monitor held.
// This is created in the play state machine's constructor, and destroyed
// in the play state machine's destructor.
nsAutoPtr<MediaDecoderReader> mReader;
// This is created in the state machine's constructor.
nsRefPtr<MediaDecoderReader> mReader;
// Accessed only on the state machine thread.
// Not an nsRevocableEventPtr since we must Revoke() it well before

@@ -817,6 +872,12 @@ protected:
uint32_t mAudioPrerollUsecs;
uint32_t mVideoPrerollFrames;
// This temporarily stores the first frame we decode after we seek.
// This is so that if we hit end of stream while we're decoding to reach
// the seek target, we will still have a frame that we can display as the
// last frame in the media.
nsAutoPtr<VideoData> mFirstVideoFrameAfterSeek;
// When we start decoding (either for the first time, or after a pause)
// we may be low on decoded data. We don't want our "low data" logic to
// kick in and decide that we're low on decoded data because the download

@@ -836,19 +897,11 @@ protected:
// yet decoded to end of stream.
bool mIsVideoDecoding;
// True when we have dispatched a task to the decode task queue to run
// the audio decode.
bool mDispatchedAudioDecodeTask;
// True when we have dispatched a task to the decode task queue to run
// the video decode.
bool mDispatchedVideoDecodeTask;
// If the video decode is falling behind the audio, we'll start dropping the
// inter-frames up until the next keyframe which is at or before the current
// playback position. skipToNextKeyframe is true if we're currently
// skipping up to the next keyframe.
bool mSkipToNextKeyFrame;
// True when we have dispatched a task to the decode task queue to request
// decoded audio/video, and/or we are waiting for the requested sample to be
// returned by callback from the Reader.
bool mAudioRequestPending;
bool mVideoRequestPending;
// True if we shouldn't play our audio (but still write it to any capturing
// streams). When this is true, mStopAudioThread is always true and

@@ -924,10 +977,16 @@ protected:
// dispatch multiple tasks to re-do the metadata loading.
bool mDispatchedDecodeMetadataTask;
// True if we've dispatched a task to the decode task queue to call
// Seek on the reader. We maintain a flag to ensure that we don't
// dispatch multiple tasks to re-do the seek.
bool mDispatchedDecodeSeekTask;
// These two flags are true when we need to drop decoded samples that
// we receive up to the next discontinuity. We do this when we seek;
// the first sample in each stream after the seek is marked as being
// a "discontinuity".
bool mDropAudioUntilNextDiscontinuity;
bool mDropVideoUntilNextDiscontinuity;
// True if we need to decode forwards to the seek target inside
// mCurrentSeekTarget.
bool mDecodeToSeekTarget;
// Stores presentation info required for playback. The decoder monitor
// must be held when accessing this.
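Illustrative sketch (not part of the patch): the hunks above replace the old pull-style decode loop with callback notifications (OnAudioDecoded, OnVideoDecoded, OnDecodeError) plus per-stream MediaQueues and "request pending" flags owned by the state machine. A minimal, hypothetical consumer of that pattern might look like the following; the class and member names are invented for illustration only.

// Hypothetical sketch of the callback-driven flow; not Gecko code.
#include <cstdint>
#include <mutex>
#include <queue>

struct AudioSample { int64_t mTime; };

class DecodeConsumer {
public:
  // Called by the reader (on the decode task queue) when a sample is ready.
  void OnAudioDecoded(AudioSample* aSample) {
    std::lock_guard<std::mutex> lock(mMonitor);
    mAudioQueue.push(aSample);      // queue owned by the consumer, as in the patch
    mAudioRequestPending = false;   // mirrors mAudioRequestPending in the diff
  }
  void OnAudioEOS() { /* mark the stream complete, check if decode is done */ }
  void OnDecodeError() { /* transition to an error state */ }

  // Ask for more data only if no request is already outstanding.
  void DispatchAudioDecodeTaskIfNeeded() {
    std::lock_guard<std::mutex> lock(mMonitor);
    if (!mAudioRequestPending) {
      mAudioRequestPending = true;
      // A RequestAudioData() task would be dispatched to the decode task queue here.
    }
  }

private:
  std::mutex mMonitor;
  std::queue<AudioSample*> mAudioQueue;
  bool mAudioRequestPending = false;
};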
@@ -43,11 +43,13 @@ template <class T> class MediaQueue : private nsDeque {
inline void Push(T* aItem) {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
MOZ_ASSERT(aItem);
nsDeque::Push(aItem);
}
inline void PushFront(T* aItem) {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
MOZ_ASSERT(aItem);
nsDeque::PushFront(aItem);
}

@@ -75,11 +77,6 @@ template <class T> class MediaQueue : private nsDeque {
nsDeque::Empty();
}
inline void Erase() {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
nsDeque::Erase();
}
void Reset() {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
while (GetSize() > 0) {
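The MediaQueue hunks above add an assertion that queued samples are never null; every operation runs under the reentrant monitor. As a rough stand-in for readers unfamiliar with that shape, here is a minimal monitor-protected queue in plain C++ (names and types are hypothetical, not the Gecko class).

// Minimal stand-in for a monitor-protected media queue; illustrative only.
#include <cassert>
#include <deque>
#include <mutex>

template <class T>
class SimpleMediaQueue {
public:
  void Push(T* aItem) {
    std::lock_guard<std::mutex> lock(mMutex);   // plays the role of ReentrantMonitorAutoEnter
    assert(aItem && "null samples must never be queued");
    mDeque.push_back(aItem);
  }
  T* PopFront() {
    std::lock_guard<std::mutex> lock(mMutex);
    if (mDeque.empty())
      return nullptr;
    T* item = mDeque.front();
    mDeque.pop_front();
    return item;
  }
private:
  std::mutex mMutex;
  std::deque<T*> mDeque;
};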
@@ -9,6 +9,8 @@
#include "nsSize.h"
#include "VorbisUtils.h"
#include "ImageContainer.h"
#include "SharedThreadPool.h"
#include "mozilla/Preferences.h"
#include <stdint.h>

@@ -190,4 +192,10 @@ IsValidVideoRegion(const nsIntSize& aFrame, const nsIntRect& aPicture,
aDisplay.width * aDisplay.height != 0;
}
TemporaryRef<SharedThreadPool> GetMediaDecodeThreadPool()
{
return SharedThreadPool::Get(NS_LITERAL_CSTRING("Media Decode"),
Preferences::GetUint("media.num-decode-threads", 25));
}
} // end namespace mozilla
@@ -19,6 +19,7 @@
#include "nsThreadUtils.h"
#include "prtime.h"
#include "AudioSampleFormat.h"
#include "mozilla/RefPtr.h"
using mozilla::CheckedInt64;
using mozilla::CheckedUint64;

@@ -186,6 +187,12 @@ private:
const T mValue;
};
class SharedThreadPool;
// Returns the thread pool that is shared amongst all decoder state machines
// for decoding streams.
TemporaryRef<SharedThreadPool> GetMediaDecodeThreadPool();
} // end namespace mozilla
#endif
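The VideoUtils hunks above introduce a shared "Media Decode" thread pool sized by the media.num-decode-threads pref (default 25). As a hedged sketch of the lazily-created, name-keyed pool idea, in plain C++ rather than Gecko APIs (the ThreadPool type and pref reader below are invented for illustration):

// Illustrative stand-in for a lazily shared, pref-sized decode pool.
#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <string>

struct ThreadPool {
  explicit ThreadPool(uint32_t aThreads) : mThreads(aThreads) {}
  uint32_t mThreads;
};

// Hypothetical pref reader standing in for Preferences::GetUint().
static uint32_t GetUintPref(const std::string&, uint32_t aDefault) { return aDefault; }

// Returns one pool per name, creating it on first use; mirrors the
// SharedThreadPool::Get("Media Decode", media.num-decode-threads) call above.
std::shared_ptr<ThreadPool> GetMediaDecodePool() {
  static std::mutex sLock;
  static std::map<std::string, std::shared_ptr<ThreadPool>> sPools;
  std::lock_guard<std::mutex> lock(sLock);
  auto& pool = sPools["Media Decode"];
  if (!pool)
    pool = std::make_shared<ThreadPool>(GetUintPref("media.num-decode-threads", 25));
  return pool;
}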
@@ -43,6 +43,8 @@ class MediaSourceReader : public MediaDecoderReader
public:
MediaSourceReader(MediaSourceDecoder* aDecoder, dom::MediaSource* aSource)
: MediaDecoderReader(aDecoder)
, mTimeThreshold(-1)
, mDropVideoBeforeThreshold(false)
, mActiveVideoDecoder(-1)
, mActiveAudioDecoder(-1)
, mMediaSource(aSource)

@@ -62,53 +64,72 @@ public:
return mDecoders.IsEmpty() && mPendingDecoders.IsEmpty();
}
bool DecodeAudioData() MOZ_OVERRIDE
void RequestAudioData() MOZ_OVERRIDE
{
if (!GetAudioReader()) {
MSE_DEBUG("%p DecodeAudioFrame called with no audio reader", this);
MOZ_ASSERT(mPendingDecoders.IsEmpty());
return false;
GetCallback()->OnDecodeError();
return;
}
bool rv = GetAudioReader()->DecodeAudioData();
nsAutoTArray<AudioData*, 10> audio;
GetAudioReader()->AudioQueue().GetElementsAfter(-1, &audio);
for (uint32_t i = 0; i < audio.Length(); ++i) {
AudioQueue().Push(audio[i]);
}
GetAudioReader()->AudioQueue().Empty();
return rv;
GetAudioReader()->RequestAudioData();
}
bool DecodeVideoFrame(bool& aKeyFrameSkip, int64_t aTimeThreshold) MOZ_OVERRIDE
void OnAudioDecoded(AudioData* aSample)
{
GetCallback()->OnAudioDecoded(aSample);
}
void OnAudioEOS()
{
GetCallback()->OnAudioEOS();
}
void RequestVideoData(bool aSkipToNextKeyframe, int64_t aTimeThreshold) MOZ_OVERRIDE
{
if (!GetVideoReader()) {
MSE_DEBUG("%p DecodeVideoFrame called with no video reader", this);
MOZ_ASSERT(mPendingDecoders.IsEmpty());
return false;
GetCallback()->OnDecodeError();
return;
}
mTimeThreshold = aTimeThreshold;
GetVideoReader()->RequestVideoData(aSkipToNextKeyframe, aTimeThreshold);
}
if (MaybeSwitchVideoReaders(aTimeThreshold)) {
GetVideoReader()->DecodeToTarget(aTimeThreshold);
}
bool rv = GetVideoReader()->DecodeVideoFrame(aKeyFrameSkip, aTimeThreshold);
nsAutoTArray<VideoData*, 10> video;
GetVideoReader()->VideoQueue().GetElementsAfter(-1, &video);
for (uint32_t i = 0; i < video.Length(); ++i) {
VideoQueue().Push(video[i]);
}
GetVideoReader()->VideoQueue().Empty();
if (rv) {
return true;
void OnVideoDecoded(VideoData* aSample)
{
if (mDropVideoBeforeThreshold) {
if (aSample->mTime < mTimeThreshold) {
delete aSample;
GetVideoReader()->RequestVideoData(false, mTimeThreshold);
} else {
mDropVideoBeforeThreshold = false;
GetCallback()->OnVideoDecoded(aSample);
}
} else {
GetCallback()->OnVideoDecoded(aSample);
}
}
void OnVideoEOS()
{
// End of stream. See if we can switch to another video decoder.
MSE_DEBUG("%p MSR::DecodeVF %d (%p) returned false (readers=%u)",
this, mActiveVideoDecoder, mDecoders[mActiveVideoDecoder].get(), mDecoders.Length());
return rv;
if (MaybeSwitchVideoReaders()) {
// Success! Resume decoding with next video decoder.
RequestVideoData(false, mTimeThreshold);
} else {
// End of stream.
MSE_DEBUG("%p MSR::DecodeVF %d (%p) EOS (readers=%u)",
this, mActiveVideoDecoder, mDecoders[mActiveVideoDecoder].get(), mDecoders.Length());
GetCallback()->OnVideoEOS();
}
}
void OnDecodeError() {
GetCallback()->OnDecodeError();
}
bool HasVideo() MOZ_OVERRIDE

@@ -126,7 +147,22 @@ public:
int64_t aCurrentTime) MOZ_OVERRIDE;
nsresult GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime) MOZ_OVERRIDE;
already_AddRefed<SubBufferDecoder> CreateSubDecoder(const nsACString& aType,
MediaSourceDecoder* aParentDecoder);
MediaSourceDecoder* aParentDecoder,
MediaTaskQueue* aTaskQueue);
void Shutdown() MOZ_OVERRIDE {
MediaDecoderReader::Shutdown();
for (uint32_t i = 0; i < mDecoders.Length(); ++i) {
mDecoders[i]->GetReader()->Shutdown();
}
}
virtual void BreakCycles() MOZ_OVERRIDE {
MediaDecoderReader::BreakCycles();
for (uint32_t i = 0; i < mDecoders.Length(); ++i) {
mDecoders[i]->GetReader()->BreakCycles();
}
}
void InitializePendingDecoders();

@@ -136,7 +172,12 @@ public:
}
private:
bool MaybeSwitchVideoReaders(int64_t aTimeThreshold) {
// These are read and written on the decode task queue threads.
int64_t mTimeThreshold;
bool mDropVideoBeforeThreshold;
bool MaybeSwitchVideoReaders() {
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
MOZ_ASSERT(mActiveVideoDecoder != -1);

@@ -146,7 +187,7 @@ private:
if (!mDecoders[i]->GetReader()->GetMediaInfo().HasVideo()) {
continue;
}
if (aTimeThreshold >= mDecoders[i]->GetMediaStartTime()) {
if (mTimeThreshold >= mDecoders[i]->GetMediaStartTime()) {
GetVideoReader()->SetIdle();
mActiveVideoDecoder = i;

@@ -196,7 +237,7 @@ public:
if (!mReader) {
return nullptr;
}
return static_cast<MediaSourceReader*>(mReader.get())->CreateSubDecoder(aType, aParentDecoder);
return static_cast<MediaSourceReader*>(mReader.get())->CreateSubDecoder(aType, aParentDecoder, mDecodeTaskQueue);
}
nsresult EnqueueDecoderInitialization() {

@@ -366,7 +407,9 @@ MediaSourceReader::InitializePendingDecoders()
}
already_AddRefed<SubBufferDecoder>
MediaSourceReader::CreateSubDecoder(const nsACString& aType, MediaSourceDecoder* aParentDecoder)
MediaSourceReader::CreateSubDecoder(const nsACString& aType,
MediaSourceDecoder* aParentDecoder,
MediaTaskQueue* aTaskQueue)
{
// XXX: Why/when is mDecoder null here, since it should be equal to aParentDecoder?!
nsRefPtr<SubBufferDecoder> decoder =

@@ -375,6 +418,13 @@ MediaSourceReader::CreateSubDecoder(const nsACString& aType, MediaSourceDecoder*
if (!reader) {
return nullptr;
}
// Set a callback on the subreader that forwards calls to this reader.
// This reader will then forward them onto the state machine via this
// reader's callback.
RefPtr<MediaDataDecodedListener<MediaSourceReader>> callback =
new MediaDataDecodedListener<MediaSourceReader>(this, aTaskQueue);
reader->SetCallback(callback);
reader->SetTaskQueue(aTaskQueue);
reader->Init(nullptr);
ReentrantMonitorAutoEnter mon(aParentDecoder->GetReentrantMonitor());
MSE_DEBUG("Registered subdecoder %p subreader %p", decoder.get(), reader.get());

@@ -424,7 +474,7 @@ MediaSourceReader::Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime,
while (!mMediaSource->ActiveSourceBuffers()->AllContainsTime (aTime / USECS_PER_S)
&& !IsShutdown()) {
mMediaSource->WaitForData();
MaybeSwitchVideoReaders(aTime);
MaybeSwitchVideoReaders();
}
if (IsShutdown()) {
@@ -78,6 +78,7 @@ EXPORTS += [
'Latency.h',
'MediaCache.h',
'MediaData.h',
'MediaDataDecodedListener.h',
'MediaDecoder.h',
'MediaDecoderOwner.h',
'MediaDecoderReader.h',
@@ -59,9 +59,6 @@ MediaOmxReader::MediaOmxReader(AbstractMediaDecoder *aDecoder)
MediaOmxReader::~MediaOmxReader()
{
ReleaseMediaResources();
ReleaseDecoder();
mOmxDecoder.clear();
}
nsresult MediaOmxReader::Init(MediaDecoderReader* aCloneDonor)

@@ -69,6 +66,15 @@ nsresult MediaOmxReader::Init(MediaDecoderReader* aCloneDonor)
return NS_OK;
}
void MediaOmxReader::Shutdown()
{
ReleaseMediaResources();
if (mOmxDecoder.get()) {
mOmxDecoder->ReleaseDecoder();
}
mOmxDecoder.clear();
}
bool MediaOmxReader::IsWaitingMediaResources()
{
if (!mOmxDecoder.get()) {

@@ -99,13 +105,6 @@ void MediaOmxReader::ReleaseMediaResources()
}
}
void MediaOmxReader::ReleaseDecoder()
{
if (mOmxDecoder.get()) {
mOmxDecoder->ReleaseDecoder();
}
}
nsresult MediaOmxReader::InitOmxDecoder()
{
if (!mOmxDecoder.get()) {

@@ -375,7 +374,6 @@ nsresult MediaOmxReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndT
NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
EnsureActive();
ResetDecode();
VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
if (container && container->GetImageContainer()) {
container->GetImageContainer()->ClearAllImagesExceptFront();

@@ -80,14 +80,14 @@ public:
virtual bool IsDormantNeeded();
virtual void ReleaseMediaResources();
virtual void ReleaseDecoder() MOZ_OVERRIDE;
virtual nsresult ReadMetadata(MediaInfo* aInfo,
MetadataTags** aTags);
virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime);
virtual void SetIdle() MOZ_OVERRIDE;
virtual void Shutdown() MOZ_OVERRIDE;
void SetAudioChannel(dom::AudioChannel aAudioChannel) {
mAudioChannel = aAudioChannel;
}
@@ -35,11 +35,6 @@ MediaPluginReader::MediaPluginReader(AbstractMediaDecoder *aDecoder,
{
}
MediaPluginReader::~MediaPluginReader()
{
ResetDecode();
}
nsresult MediaPluginReader::Init(MediaDecoderReader* aCloneDonor)
{
return NS_OK;

@@ -104,18 +99,22 @@ nsresult MediaPluginReader::ReadMetadata(MediaInfo* aInfo,
return NS_OK;
}
void MediaPluginReader::Shutdown()
{
ResetDecode();
if (mPlugin) {
GetMediaPluginHost()->DestroyDecoder(mPlugin);
mPlugin = nullptr;
}
}
// Resets all state related to decoding, emptying all buffers etc.
nsresult MediaPluginReader::ResetDecode()
{
if (mLastVideoFrame) {
mLastVideoFrame = nullptr;
}
if (mPlugin) {
GetMediaPluginHost()->DestroyDecoder(mPlugin);
mPlugin = nullptr;
}
return NS_OK;
return MediaDecoderReader::ResetDecode();
}
bool MediaPluginReader::DecodeVideoFrame(bool &aKeyframeSkip,

@@ -321,9 +320,6 @@ nsresult MediaPluginReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aE
{
NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
mVideoQueue.Reset();
mAudioQueue.Reset();
if (mHasAudio && mHasVideo) {
// The decoder seeks/demuxes audio and video streams separately. So if
// we seek both audio and video to aTarget, the audio stream can typically

@@ -43,7 +43,6 @@ class MediaPluginReader : public MediaDecoderReader
public:
MediaPluginReader(AbstractMediaDecoder* aDecoder,
const nsACString& aContentType);
~MediaPluginReader();
virtual nsresult Init(MediaDecoderReader* aCloneDonor);
virtual nsresult ResetDecode();

@@ -66,6 +65,8 @@ public:
MetadataTags** aTags);
virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime);
virtual void Shutdown() MOZ_OVERRIDE;
class ImageBufferCallback : public MPAPI::BufferCallback {
typedef mozilla::layers::Image Image;
@@ -389,7 +389,7 @@ var gUnseekableTests = [
{ name:"bogus.duh", type:"bogus/duh"}
];
// Unfortunately big-buck-bunny-unseekable.mp4 doesn't play on Windows 7, so
// only include it in the unseekable tests if we're on later versions of Windows.
// This test actually only passes on win8 at the moment.
if (navigator.userAgent.indexOf("Windows") != -1 && IsWindows8OrLater()) {
gUnseekableTests = gUnseekableTests.concat([

@@ -677,6 +677,14 @@ function MediaTestManager() {
is(this.numTestsRunning, this.tokens.length, "[started " + token + "] Length of array should match number of running tests");
}
this.watchdog = null;
this.watchdogFn = function() {
if (this.tokens.length > 0) {
info("Watchdog remaining tests= " + this.tokens);
}
}
// Registers that the test corresponding to 'token' has finished. Call when
// you've finished your test. If all tests are complete this will finish the
// run, otherwise it may start up the next run. It's ok to call multiple times

@@ -687,10 +695,18 @@ function MediaTestManager() {
// Remove the element from the list of running tests.
this.tokens.splice(i, 1);
}
if (this.watchdog) {
clearTimeout(this.watchdog);
this.watchdog = null;
}
info("[finished " + token + "] remaining= " + this.tokens);
this.numTestsRunning--;
is(this.numTestsRunning, this.tokens.length, "[finished " + token + "] Length of array should match number of running tests");
if (this.tokens.length < PARALLEL_TESTS) {
this.nextTest();
this.watchdog = setTimeout(this.watchdogFn.bind(this), 10000);
}
}
@@ -14,11 +14,14 @@
var manager = new MediaTestManager;
function startTest(e) {
var v = e.target;
info(v._name + " loadedmetadata");
e.target.play();
}
function playbackEnded(e) {
var v = e.target;
info(v._name + " ended");
if (v._finished)
return;
ok(v.currentTime >= v.duration - 0.1 && v.currentTime <= v.duration + 0.1,

@@ -32,6 +35,7 @@ function playbackEnded(e) {
function seekEnded(e) {
var v = e.target;
info(v._name + " seeked");
if (v._finished)
return;
ok(v.currentTime == 0, "Checking currentTime after seek: " +

@@ -42,6 +46,11 @@ function seekEnded(e) {
manager.finished(v.token);
}
function seeking(e) {
var v = e.target;
info(v._name + " seeking");
}
function initTest(test, token) {
var type = getMajorMimeType(test.type);
var v = document.createElement(type);

@@ -62,6 +71,7 @@ function initTest(test, token) {
v.addEventListener("loadedmetadata", startTest, false);
v.addEventListener("ended", playbackEnded, false);
v.addEventListener("seeked", seekEnded, false);
v.addEventListener("seeking", seeking, false);
document.body.appendChild(v);
}
@@ -20,17 +20,22 @@ SimpleTest.expectAssertions(0, 2);
var manager = new MediaTestManager;
function start(e) {
var v = e.target;
info("[" + v._name + "] start");
e.target.currentTime = e.target.duration / 4;
}
function startSeeking(e) {
var v = e.target;
info("[" + v._name + "] seeking");
e.target._seeked = true;
}
function canPlayThrough(e) {
var v = e.target;
info("[" + v._name + "] canPlayThrough");
if (v._seeked && !v._finished) {
ok(true, "Got canplaythrough after seek for " + v._name);
ok(true, "[" + v._name + "] got canplaythrough after seek");
v._finished = true;
v.parentNode.removeChild(v);
v.src = "";

@@ -38,6 +43,16 @@ function canPlayThrough(e) {
}
}
function seeked(e) {
var v = e.target;
info("[" + v._name + "] seeked");
}
function error(e) {
var v = e.target;
info("[" + v._name + "] error");
}
function startTest(test, token) {
// TODO: Bug 568402, there's a bug in the WAV backend where we sometimes
// don't send canplaythrough events after seeking. Once that is fixed,

@@ -58,6 +73,8 @@ function startTest(test, token) {
v.addEventListener("loadedmetadata", start, false);
v.addEventListener("canplaythrough", canPlayThrough, false);
v.addEventListener("seeking", startSeeking, false);
v.addEventListener("seeked", seeked, false);
v.addEventListener("error", error, false);
document.body.appendChild(v);
}
@@ -61,10 +61,10 @@ function createTestArray() {
function startTest(test, token) {
var v = document.createElement('video');
manager.started(token);
v.token = token += "-seek" + test.number + ".js";
manager.started(v.token);
v.src = test.name;
v.preload = "metadata";
v.token = token;
document.body.appendChild(v);
var name = test.name + " seek test " + test.number;
var localIs = function(name) { return function(a, b, msg) {

@@ -76,7 +76,7 @@ function startTest(test, token) {
var localFinish = function(v, manager) { return function() {
v.onerror = null;
removeNodeAndSource(v);
dump("SEEK-TEST: Finished " + name + "\n");
dump("SEEK-TEST: Finished " + name + " token: " + v.token + "\n");
manager.finished(v.token);
}}(v, manager);
dump("SEEK-TEST: Started " + name + "\n");
@@ -252,12 +252,25 @@ MediaDecodeTask::Decode()
return;
}
while (mDecoderReader->DecodeAudioData()) {
// consume all of the buffer
continue;
MediaQueue<AudioData> audioQueue;
nsRefPtr<AudioDecodeRendezvous> barrier(new AudioDecodeRendezvous());
mDecoderReader->SetCallback(barrier);
while (1) {
mDecoderReader->RequestAudioData();
nsAutoPtr<AudioData> audio;
if (NS_FAILED(barrier->Await(audio))) {
ReportFailureOnMainThread(WebAudioDecodeJob::InvalidContent);
return;
}
if (!audio) {
// End of stream.
break;
}
audioQueue.Push(audio.forget());
}
mDecoderReader->Shutdown();
mDecoderReader->BreakCycles();
MediaQueue<AudioData>& audioQueue = mDecoderReader->AudioQueue();
uint32_t frameCount = audioQueue.FrameCount();
uint32_t channelCount = mediaInfo.mAudio.mChannels;
uint32_t sampleRate = mediaInfo.mAudio.mRate;
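The WebAudio hunk above replaces the synchronous DecodeAudioData() loop with RequestAudioData() plus a rendezvous object whose Await() blocks until the reader's callback delivers a sample or signals end of stream. AudioDecodeRendezvous itself is not shown in the hunk, so the barrier below is a hedged sketch of that shape using standard C++ primitives, not the real class.

// Hedged sketch of an Await()-style decode barrier; not the real AudioDecodeRendezvous.
#include <condition_variable>
#include <memory>
#include <mutex>

struct AudioData { /* decoded frames */ };

class DecodeBarrier {
public:
  // Callback side: deliver a sample (nullptr stands for end of stream).
  void OnAudioDecoded(std::unique_ptr<AudioData> aSample) {
    std::lock_guard<std::mutex> lock(mMutex);
    mSample = std::move(aSample);
    mHaveResult = true;
    mCondVar.notify_one();
  }
  void OnAudioEOS() { OnAudioDecoded(nullptr); }

  // Caller side: block until the next result arrives, then hand it out.
  std::unique_ptr<AudioData> Await() {
    std::unique_lock<std::mutex> lock(mMutex);
    mCondVar.wait(lock, [this] { return mHaveResult; });
    mHaveResult = false;
    return std::move(mSample);
  }

private:
  std::mutex mMutex;
  std::condition_variable mCondVar;
  std::unique_ptr<AudioData> mSample;
  bool mHaveResult = false;
};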
@@ -207,13 +207,14 @@ let Activities = {
startActivity: function activities_startActivity(aMsg) {
debug("StartActivity: " + JSON.stringify(aMsg));
let self = this;
let successCb = function successCb(aResults) {
debug(JSON.stringify(aResults));
function getActivityChoice(aResultType, aResult) {
switch(aResultType) {
case Ci.nsIActivityUIGlueCallback.NATIVE_ACTIVITY: {
Activities.callers[aMsg.id].mm.sendAsyncMessage("Activity:FireSuccess", {
self.callers[aMsg.id].mm.sendAsyncMessage("Activity:FireSuccess", {
"id": aMsg.id,
"result": aResult
});

@@ -226,21 +227,19 @@ let Activities = {
// Don't do this check until we have passed to UIGlue so the glue can choose to launch
// its own activity if needed.
if (aResults.options.length === 0) {
Activities.callers[aMsg.id].mm.sendAsyncMessage("Activity:FireError", {
self.trySendAndCleanup(aMsg.id, "Activity:FireError", {
"id": aMsg.id,
"error": "NO_PROVIDER"
});
delete Activities.callers[aMsg.id];
return;
}
// The user has cancelled the choice, fire an error.
if (aResult === -1) {
Activities.callers[aMsg.id].mm.sendAsyncMessage("Activity:FireError", {
self.trySendAndCleanup(aMsg.id, "Activity:FireError", {
"id": aMsg.id,
"error": "ActivityCanceled"
});
delete Activities.callers[aMsg.id];
return;
}

@@ -248,7 +247,7 @@ let Activities = {
.getService(Ci.nsISystemMessagesInternal);
if (!sysmm) {
// System message is not present, what should we do?
delete Activities.callers[aMsg.id];
delete self.callers[aMsg.id];
return;
}

@@ -262,18 +261,17 @@ let Activities = {
Services.io.newURI(result.description.href, null, null),
Services.io.newURI(result.manifest, null, null),
{
"manifestURL": Activities.callers[aMsg.id].manifestURL,
"pageURL": Activities.callers[aMsg.id].pageURL
"manifestURL": self.callers[aMsg.id].manifestURL,
"pageURL": self.callers[aMsg.id].pageURL
});
if (!result.description.returnValue) {
Activities.callers[aMsg.id].mm.sendAsyncMessage("Activity:FireSuccess", {
// No need to notify observers, since we don't want the caller
// to be raised on the foreground that quick.
self.trySendAndCleanup(aMsg.id, "Activity:FireSuccess", {
"id": aMsg.id,
"result": null
});
// No need to notify observers, since we don't want the caller
// to be raised on the foreground that quick.
delete Activities.callers[aMsg.id];
}
break;
}

@@ -332,6 +330,14 @@ let Activities = {
this.db.find(aMsg, successCb, errorCb, matchFunc);
},
trySendAndCleanup: function activities_trySendAndCleanup(aId, aName, aPayload) {
try {
this.callers[aId].mm.sendAsyncMessage(aName, aPayload);
} finally {
delete this.callers[aId];
}
},
receiveMessage: function activities_receiveMessage(aMessage) {
let mm = aMessage.target;
let msg = aMessage.json;

@@ -365,12 +371,10 @@ let Activities = {
break;
case "Activity:PostResult":
caller.mm.sendAsyncMessage("Activity:FireSuccess", msg);
delete this.callers[msg.id];
this.trySendAndCleanup(msg.id, "Activity:FireSuccess", msg);
break;
case "Activity:PostError":
caller.mm.sendAsyncMessage("Activity:FireError", msg);
delete this.callers[msg.id];
this.trySendAndCleanup(msg.id, "Activity:FireError", msg);
break;
case "Activities:Register":

@@ -398,11 +402,10 @@ let Activities = {
case "child-process-shutdown":
for (let id in this.callers) {
if (this.callers[id].childMM == mm) {
this.callers[id].mm.sendAsyncMessage("Activity:FireError", {
this.trySendAndCleanup(id, "Activity:FireError", {
"id": id,
"error": "ActivityCanceled"
});
delete this.callers[id];
break;
}
}
@@ -171,7 +171,21 @@ function next() {
SimpleTest.waitForExplicitFinish();
SpecialPowers.addPermission("networkstats-manage", true, document);
SpecialPowers.pushPrefEnv({'set': [["dom.mozNetworkStats.enabled", true]]}, test);
SpecialPowers.pushPrefEnv({'set': [["dom.mozNetworkStats.enabled", true]]},
function() {
ok(SpecialPowers.hasPermission("networkstats-manage", document),
"Has permission 'networkstats-manage'.");
ok(SpecialPowers.getBoolPref("dom.mozNetworkStats.enabled"),
"Preference 'dom.mozNetworkStats.enabled' is true.");
ok('mozNetworkStats' in navigator, "navigator.mozNetworkStats should exist");
ok(navigator.mozNetworkStats instanceof SpecialPowers.Ci.nsIDOMMozNetworkStatsManager,
"navigator.mozNetworkStats should be a nsIDOMMozNetworkStatsManager object");
test();
});
</script>
</pre>
@@ -323,7 +323,21 @@ function next() {
SimpleTest.waitForExplicitFinish();
SpecialPowers.addPermission("networkstats-manage", true, document);
SpecialPowers.pushPrefEnv({'set': [["dom.mozNetworkStats.enabled", true]]}, test);
SpecialPowers.pushPrefEnv({'set': [["dom.mozNetworkStats.enabled", true]]},
function() {
ok(SpecialPowers.hasPermission("networkstats-manage", document),
"Has permission 'networkstats-manage'.");
ok(SpecialPowers.getBoolPref("dom.mozNetworkStats.enabled"),
"Preference 'dom.mozNetworkStats.enabled' is true.");
ok('mozNetworkStats' in navigator, "navigator.mozNetworkStats should exist");
ok(navigator.mozNetworkStats instanceof SpecialPowers.Ci.nsIDOMMozNetworkStatsManager,
"navigator.mozNetworkStats should be a nsIDOMMozNetworkStatsManager object");
test();
});
</script>
</pre>
@@ -17,10 +17,15 @@
SimpleTest.waitForExplicitFinish();
// Test to ensure NetworkStats is not accessible when it is disabled
SpecialPowers.pushPrefEnv({'set': [["dom.mozNetworkStats.enabled", false]]}, function(){
SpecialPowers.pushPrefEnv({'set': [["dom.mozNetworkStats.enabled", false]]},
function() {
ok(!SpecialPowers.getBoolPref("dom.mozNetworkStats.enabled"),
"Preference 'dom.mozNetworkStats.enabled' is false.");
ok('mozNetworkStats' in navigator, "navigator.mozNetworkStats should exist");
is(navigator.mozNetworkStats, null, "mozNetworkStats should be null when not enabled.");
is(navigator.mozNetworkStats, null,
"mozNetworkStats should be null when not enabled.");
SimpleTest.finish();
});
@@ -13,21 +13,34 @@
<script type="application/javascript">
// Test to ensure NetworkStats is enabled but mozNetworkStats.getAvailableNetworks
// does not work in content.
// does not work in content because mozNetworkStats is null when no permission.
SpecialPowers.setBoolPref("dom.mozNetworkStats.enabled", true);
SpecialPowers.removePermission("networkstats-manage", document);
ok('mozNetworkStats' in navigator, "navigator.mozNetworkStats should be accessible if dom.mozNetworkStats.enabled is true");
ok(SpecialPowers.getBoolPref("dom.mozNetworkStats.enabled"),
"Preference 'dom.mozNetworkStats.enabled' is true.");
ok(!SpecialPowers.hasPermission("networkstats-manage", document),
"Has no permission 'networkstats-manage'.");
ok('mozNetworkStats' in navigator, "navigator.mozNetworkStats should exist");
is(navigator.mozNetworkStats, null,
"mozNetworkStats should be null when no permission.");
var error;
try {
navigator.mozNetworkStats.getAvailableNetworks;
ok(false, "Accessing navigator.mozNetworkStats.getAvailableNetworks should have thrown!");
ok(false,
"Accessing navigator.mozNetworkStats.getAvailableNetworks should throw!");
} catch (ex) {
error = ex;
}
ok(error, "Got an exception accessing navigator.mozNetworkStats.getAvailableNetworks");
ok(error,
"Got an exception accessing navigator.mozNetworkStats.getAvailableNetworks");
</script>
</pre>
@@ -18,11 +18,18 @@ SimpleTest.waitForExplicitFinish();
// Test to ensure NetworkStats is not accessible when it is disabled
SpecialPowers.addPermission("networkstats-manage", true, document);
SpecialPowers.pushPrefEnv({'set': [["dom.mozNetworkStats.enabled", true]]}, function(){
SpecialPowers.pushPrefEnv({'set': [["dom.mozNetworkStats.enabled", true]]},
function(){
ok(SpecialPowers.hasPermission("networkstats-manage", document),
"Has permission 'networkstats-manage'.");
ok(SpecialPowers.getBoolPref("dom.mozNetworkStats.enabled"),
"Preference 'dom.mozNetworkStats.enabled' is true.");
ok('mozNetworkStats' in navigator, "navigator.mozNetworkStats should exist");
ok(navigator.mozNetworkStats instanceof SpecialPowers.Ci.nsIDOMMozNetworkStatsManager,
"navigator.mozNetworkStats should be a nsIDOMMozNetworkStatsManager object");
SpecialPowers.removePermission("networkstats-manage", document);
SimpleTest.finish();
@@ -59,6 +59,17 @@ function toggleNFC(enabled) {
return deferred.promise;
}
function clearPendingMessages(type) {
if (!window.navigator.mozHasPendingMessage(type)) {
return;
}
// setting a handler removes all messages from queue
window.navigator.mozSetMessageHandler(type, function() {
window.navigator.mozSetMessageHandler(type, null);
});
}
function enableRE0() {
let deferred = Promise.defer();
let cmd = 'nfc nci rf_intf_activated_ntf 0';

@@ -83,6 +94,9 @@ function cleanUp() {
}
function runNextTest() {
clearPendingMessages('nfc-manager-tech-discovered');
clearPendingMessages('nfc-manager-tech-lost');
let test = tests.shift();
if (!test) {
cleanUp();
@@ -1556,10 +1556,16 @@ bool NetworkUtils::setWifiTethering(NetworkParams& aOptions)
getIFProperties(GET_CHAR(mExternalIfname), interfaceProperties);
if (strcmp(interfaceProperties.dns1, "")) {
aOptions.mDns1 = NS_ConvertUTF8toUTF16(interfaceProperties.dns1);
int type = getIpType(interfaceProperties.dns1);
if (type != AF_INET6) {
aOptions.mDns1 = NS_ConvertUTF8toUTF16(interfaceProperties.dns1);
}
}
if (strcmp(interfaceProperties.dns2, "")) {
aOptions.mDns2 = NS_ConvertUTF8toUTF16(interfaceProperties.dns2);
int type = getIpType(interfaceProperties.dns2);
if (type != AF_INET6) {
aOptions.mDns2 = NS_ConvertUTF8toUTF16(interfaceProperties.dns2);
}
}
dumpParams(aOptions, "WIFI");

@@ -1582,10 +1588,16 @@ bool NetworkUtils::setUSBTethering(NetworkParams& aOptions)
getIFProperties(GET_CHAR(mExternalIfname), interfaceProperties);
if (strcmp(interfaceProperties.dns1, "")) {
aOptions.mDns1 = NS_ConvertUTF8toUTF16(interfaceProperties.dns1);
int type = getIpType(interfaceProperties.dns1);
if (type != AF_INET6) {
aOptions.mDns1 = NS_ConvertUTF8toUTF16(interfaceProperties.dns1);
}
}
if (strcmp(interfaceProperties.dns2, "")) {
aOptions.mDns2 = NS_ConvertUTF8toUTF16(interfaceProperties.dns2);
int type = getIpType(interfaceProperties.dns2);
if (type != AF_INET6) {
aOptions.mDns2 = NS_ConvertUTF8toUTF16(interfaceProperties.dns2);
}
}
dumpParams(aOptions, "USB");
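Both tethering hunks above now skip DNS entries whose address family is AF_INET6. The getIpType() helper itself is not part of the hunk, so the function below is only a guess at its shape, using inet_pton to classify the literal address; the name GuessIpType is hypothetical.

// Hypothetical helper in the spirit of getIpType(); the real implementation is not shown above.
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int GuessIpType(const char* aAddress) {
  unsigned char buf[sizeof(struct in6_addr)];
  if (inet_pton(AF_INET, aAddress, buf) == 1)
    return AF_INET;
  if (inet_pton(AF_INET6, aAddress, buf) == 1)
    return AF_INET6;
  return AF_UNSPEC;   // not a literal IPv4/IPv6 address
}

// Usage mirroring the patch: only copy IPv4 DNS servers into the options.
// if (GuessIpType(interfaceProperties.dns1) != AF_INET6) { ... use dns1 ... }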
@@ -2778,6 +2778,11 @@ WifiWorker.prototype = {
var timer = null;
var self = this;
if (!WifiManager.enabled) {
callback.onfailure();
return;
}
self.waitForScan(waitForScanCallback);
doScan();
function doScan() {
@@ -49,6 +49,11 @@ inline Color ToColor(const gfxRGBA &aRGBA)
Float(aRGBA.b), Float(aRGBA.a));
}
inline gfxRGBA ThebesColor(Color &aColor)
{
return gfxRGBA(aColor.r, aColor.g, aColor.b, aColor.a);
}
inline Matrix ToMatrix(const gfxMatrix &aMatrix)
{
return Matrix(Float(aMatrix.xx), Float(aMatrix.yx), Float(aMatrix.xy),

@@ -4871,6 +4871,11 @@ gfxFontGroup::ResolveGenericFontNames(FontFamilyType aGenericType,
static const char kGeneric_cursive[] = "cursive";
static const char kGeneric_fantasy[] = "fantasy";
// treat -moz-fixed as monospace
if (aGenericType == eFamily_moz_fixed) {
aGenericType = eFamily_monospace;
}
// type should be standard generic type at this point
NS_ASSERTION(aGenericType >= eFamily_serif &&
aGenericType <= eFamily_fantasy,
@@ -539,30 +539,6 @@ gfxPlatform::PreferMemoryOverShmem() const {
return mLayersPreferMemoryOverShmem;
}
already_AddRefed<gfxASurface>
gfxPlatform::OptimizeImage(gfxImageSurface *aSurface,
gfxImageFormat format)
{
IntSize surfaceSize = aSurface->GetSize().ToIntSize();
#ifdef XP_WIN
if (gfxWindowsPlatform::GetPlatform()->GetRenderMode() ==
gfxWindowsPlatform::RENDER_DIRECT2D) {
return nullptr;
}
#endif
nsRefPtr<gfxASurface> optSurface = CreateOffscreenSurface(surfaceSize, gfxASurface::ContentFromFormat(format));
if (!optSurface || optSurface->CairoStatus() != 0)
return nullptr;
gfxContext tmpCtx(optSurface);
tmpCtx.SetOperator(gfxContext::OPERATOR_SOURCE);
tmpCtx.SetSource(aSurface);
tmpCtx.Paint();
return optSurface.forget();
}
cairo_user_data_key_t kDrawTarget;
RefPtr<DrawTarget>

@@ -178,9 +178,6 @@ public:
CreateOffscreenSurface(const IntSize& size,
gfxContentType contentType) = 0;
virtual already_AddRefed<gfxASurface> OptimizeImage(gfxImageSurface *aSurface,
gfxImageFormat format);
/**
* Beware that these methods may return DrawTargets which are not fully supported
* on the current platform and might fail silently in subtle ways. This is a massive
@@ -383,16 +383,29 @@ bool imgFrame::Draw(gfxContext *aContext, GraphicsFilter aFilter,
bool doPadding = aPadding != nsIntMargin(0,0,0,0);
bool doPartialDecode = !ImageComplete();
RefPtr<DrawTarget> dt = aContext->GetDrawTarget();
if (mSinglePixel && !doPadding && !doPartialDecode) {
if (mSinglePixelColor.a == 0.0) {
return true;
}
Rect target(aFill.x, aFill.y, aFill.width, aFill.height);
dt->FillRect(target, ColorPattern(mSinglePixelColor),
DrawOptions(1.0f, CompositionOpForOp(aContext->CurrentOperator())));
if (aContext->IsCairo()) {
gfxContext::GraphicsOperator op = aContext->CurrentOperator();
if (op == gfxContext::OPERATOR_OVER && mSinglePixelColor.a == 1.0) {
aContext->SetOperator(gfxContext::OPERATOR_SOURCE);
}
aContext->SetDeviceColor(ThebesColor(mSinglePixelColor));
aContext->NewPath();
aContext->Rectangle(aFill);
aContext->Fill();
aContext->SetOperator(op);
aContext->SetDeviceColor(gfxRGBA(0,0,0,0));
return true;
}
RefPtr<DrawTarget> dt = aContext->GetDrawTarget();
dt->FillRect(ToRect(aFill),
ColorPattern(mSinglePixelColor),
DrawOptions(1.0f,
CompositionOpForOp(aContext->CurrentOperator())));
return true;
}
@@ -576,17 +576,19 @@ GeckoChildProcessHost::PerformAsyncLaunchInternal(std::vector<std::string>& aExt
}
# if (MOZ_WIDGET_GTK == 3)
const char *ld_preload = PR_GetEnv("LD_PRELOAD");
nsCString new_ld_preload;
if (mProcessType == GeckoProcessType_Plugin) {
const char *ld_preload = PR_GetEnv("LD_PRELOAD");
nsCString new_ld_preload;
new_ld_preload.Assign(path.get());
new_ld_preload.AppendLiteral("/" DLL_PREFIX "mozgtk2" DLL_SUFFIX);
new_ld_preload.Assign(path.get());
new_ld_preload.AppendLiteral("/" DLL_PREFIX "mozgtk2" DLL_SUFFIX);
if (ld_preload && *ld_preload) {
new_ld_preload.AppendLiteral(":");
new_ld_preload.Append(ld_preload);
if (ld_preload && *ld_preload) {
new_ld_preload.AppendLiteral(":");
new_ld_preload.Append(ld_preload);
}
newEnvVars["LD_PRELOAD"] = new_ld_preload.get();
}
newEnvVars["LD_PRELOAD"] = new_ld_preload.get();
# endif // MOZ_WIDGET_GTK
@@ -55,11 +55,23 @@ static const uint32_t BLACK = 0;
static const uint32_t GRAY = 1;
/*
* Constants used to indicate whether a chunk is part of the tenured heap or the
* nursery.
* The "location" field in the Chunk trailer is a bit vector indicating various
* roles of the chunk.
*
* The value 0 for the "location" field is invalid, at least one bit must be
* set.
*
* Some bits preclude others, for example, any "nursery" bit precludes any
* "tenured" or "middle generation" bit.
*/
const uint32_t ChunkLocationNursery = 0;
const uint32_t ChunkLocationTenuredHeap = 1;
const uintptr_t ChunkLocationBitNursery = 1; // Standard GGC nursery
const uintptr_t ChunkLocationBitTenuredHeap = 2; // Standard GGC tenured generation
const uintptr_t ChunkLocationBitPJSNewspace = 4; // The PJS generational GC's allocation space
const uintptr_t ChunkLocationBitPJSFromspace = 8; // The PJS generational GC's fromspace (during GC)
const uintptr_t ChunkLocationAnyNursery = ChunkLocationBitNursery |
ChunkLocationBitPJSNewspace |
ChunkLocationBitPJSFromspace;
#ifdef JS_DEBUG
/* When downcasting, ensure we are actually the right type. */

@@ -225,9 +237,8 @@ IsInsideNursery(const js::gc::Cell *cell)
addr &= ~js::gc::ChunkMask;
addr |= js::gc::ChunkLocationOffset;
uint32_t location = *reinterpret_cast<uint32_t *>(addr);
JS_ASSERT(location == gc::ChunkLocationNursery ||
location == gc::ChunkLocationTenuredHeap);
return location == gc::ChunkLocationNursery;
JS_ASSERT(location != 0);
return location & ChunkLocationAnyNursery;
#else
return false;
#endif
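The HeapAPI hunks above turn the chunk "location" word from an enum (0 = nursery, 1 = tenured) into a bit vector, so IsInsideNursery becomes a mask test against ChunkLocationAnyNursery. A small self-contained illustration of that membership test, with the constants copied from the hunk:

// Self-contained illustration of the bit-vector membership test used above.
#include <cassert>
#include <cstdint>

const uintptr_t ChunkLocationBitNursery      = 1;  // Standard GGC nursery
const uintptr_t ChunkLocationBitTenuredHeap  = 2;  // Standard GGC tenured generation
const uintptr_t ChunkLocationBitPJSNewspace  = 4;  // PJS generational GC allocation space
const uintptr_t ChunkLocationBitPJSFromspace = 8;  // PJS generational GC fromspace (during GC)

const uintptr_t ChunkLocationAnyNursery = ChunkLocationBitNursery |
                                          ChunkLocationBitPJSNewspace |
                                          ChunkLocationBitPJSFromspace;

// A chunk may carry several role bits; "is in some nursery" is now a mask test,
// and a location word of 0 is invalid by construction.
static bool IsNurseryLocation(uintptr_t location) {
  assert(location != 0);
  return (location & ChunkLocationAnyNursery) != 0;
}

int main() {
  assert(IsNurseryLocation(ChunkLocationBitNursery));
  assert(IsNurseryLocation(ChunkLocationBitPJSFromspace));
  assert(!IsNurseryLocation(ChunkLocationBitTenuredHeap));
  return 0;
}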
@@ -47,6 +47,7 @@ namespace js {}
#define JS_ALLOCATED_TENURED_PATTERN 0x4D
#define JS_SWEPT_CODE_PATTERN 0x3b
#define JS_SWEPT_FRAME_PATTERN 0x5b
#define JS_POISONED_FORKJOIN_CHUNK 0xBD
#define JS_ASSERT(expr) MOZ_ASSERT(expr)
#define JS_ASSERT_IF(cond, expr) MOZ_ASSERT_IF(cond, expr)
@@ -768,7 +768,7 @@ function ArrayMapPar(func, mode) {
break parallel;
var slicesInfo = ComputeSlicesInfo(length);
ForkJoin(mapThread, 0, slicesInfo.count, ForkJoinMode(mode));
ForkJoin(mapThread, 0, slicesInfo.count, ForkJoinMode(mode), buffer);
return buffer;
}

@@ -817,7 +817,7 @@ function ArrayReducePar(func, mode) {
var numSlices = slicesInfo.count;
var subreductions = NewDenseArray(numSlices);
ForkJoin(reduceThread, 0, numSlices, ForkJoinMode(mode));
ForkJoin(reduceThread, 0, numSlices, ForkJoinMode(mode), null);
var accumulator = subreductions[0];
for (var i = 1; i < numSlices; i++)

@@ -876,7 +876,7 @@ function ArrayScanPar(func, mode) {
var numSlices = slicesInfo.count;
// Scan slices individually (see comment on phase1()).
ForkJoin(phase1, 0, numSlices, ForkJoinMode(mode));
ForkJoin(phase1, 0, numSlices, ForkJoinMode(mode), buffer);
// Compute intermediates array (see comment on phase2()).
var intermediates = [];

@@ -892,7 +892,7 @@ function ArrayScanPar(func, mode) {
// We start from slice 1 instead of 0 since there is no work to be done
// for slice 0.
if (numSlices > 1)
ForkJoin(phase2, 1, numSlices, ForkJoinMode(mode));
ForkJoin(phase2, 1, numSlices, ForkJoinMode(mode), buffer);
return buffer;
}

@@ -1106,7 +1106,7 @@ function ArrayFilterPar(func, mode) {
UnsafePutElements(counts, i, 0);
var survivors = new Uint8Array(length);
ForkJoin(findSurvivorsThread, 0, numSlices, ForkJoinMode(mode));
ForkJoin(findSurvivorsThread, 0, numSlices, ForkJoinMode(mode), survivors);
// Step 2. Compress the slices into one contiguous set.
var count = 0;

@@ -1114,7 +1114,7 @@ function ArrayFilterPar(func, mode) {
count += counts[i];
var buffer = NewDenseArray(count);
if (count > 0)
ForkJoin(copySurvivorsThread, 0, numSlices, ForkJoinMode(mode));
ForkJoin(copySurvivorsThread, 0, numSlices, ForkJoinMode(mode), buffer);
return buffer;
}

@@ -1224,7 +1224,7 @@ function ArrayStaticBuildPar(length, func, mode) {
break parallel;
var slicesInfo = ComputeSlicesInfo(length);
ForkJoin(constructThread, 0, slicesInfo.count, ForkJoinMode(mode));
ForkJoin(constructThread, 0, slicesInfo.count, ForkJoinMode(mode), buffer);
return buffer;
}
@@ -1182,7 +1182,7 @@ function MapTypedParImplDepth1(inArray, inArrayType, outArrayType, func) {
// relative to its owner (which is often but not always 0).
const inBaseOffset = TYPEDOBJ_BYTEOFFSET(inArray);
ForkJoin(mapThread, 0, slicesInfo.count, ForkJoinMode(mode));
ForkJoin(mapThread, 0, slicesInfo.count, ForkJoinMode(mode), outArray);
return outArray;
function mapThread(workerId, sliceStart, sliceEnd) {

@@ -1238,11 +1238,17 @@ function MapTypedParImplDepth1(inArray, inArrayType, outArrayType, func) {
inOffset += inGrainTypeSize;
outOffset += outGrainTypeSize;
#ifndef JSGC_FJGENERATIONAL
// A transparent result type cannot contain references, and
// hence there is no way for a pointer to a thread-local object
// to escape.
//
// This has been disabled for the PJS generational collector
// as it probably has little effect in that setting and adds
// per-iteration cost.
if (outGrainTypeIsTransparent)
ClearThreadLocalArenas();
#endif
}
}
@@ -3203,6 +3203,8 @@ MOZ_ARG_DISABLE_BOOL(gcgenerational,
if test -n "$JSGC_GENERATIONAL"; then
AC_DEFINE(JSGC_GENERATIONAL)
fi
JSGC_GENERATIONAL_CONFIGURED=$JSGC_GENERATIONAL
AC_SUBST(JSGC_GENERATIONAL_CONFIGURED)
dnl ========================================================
dnl = Use exact stack rooting for GC
@@ -0,0 +1,85 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef gc_ForkJoinNursery_inl_h
#define gc_ForkJoinNursery_inl_h
#ifdef JSGC_FJGENERATIONAL
#include "gc/ForkJoinNursery.h"
namespace js {
namespace gc {
// For the following two predicates we can't check the attributes on
// the chunk trailer because it's not known whether addr points into a
// chunk.
//
// A couple of optimizations are possible if performance is an issue:
//
// - The loop can be unrolled, and we can arrange for all array entries
// to be valid for this purpose so that the bound is constant.
// - The per-chunk test can be reduced to testing whether the high bits
// of the object pointer and the high bits of the chunk pointer are
// the same (and the latter value is essentially space[i]).
// Note, experiments with that do not show an improvement yet.
// - Taken together, those optimizations yield code that is one LOAD,
// one XOR, and one AND for each chunk, with the result being true
// iff the resulting value is zero.
// - We can have multiple versions of the predicates, and those that
// take known-good GCThing types can go directly to the attributes;
// it may be possible to ensure that more calls use GCThing types.
// Note, this requires the worker ID to be part of the chunk
// attribute bit vector.
//
// Performance may not be an issue as there may be few survivors of a
// collection in the ForkJoinNursery and few objects will be tested.
// If so then the bulk of the calls may come from the code that scans
// the roots. Behavior will be workload-dependent however.
MOZ_ALWAYS_INLINE bool
ForkJoinNursery::isInsideNewspace(const void *addr)
{
uintptr_t p = reinterpret_cast<uintptr_t>(addr);
for (unsigned i = 0 ; i <= currentChunk_ ; i++) {
if (p >= newspace[i]->start() && p < newspace[i]->end())
return true;
}
return false;
}
MOZ_ALWAYS_INLINE bool
ForkJoinNursery::isInsideFromspace(const void *addr)
{
uintptr_t p = reinterpret_cast<uintptr_t>(addr);
for (unsigned i = 0 ; i < numFromspaceChunks_ ; i++) {
if (p >= fromspace[i]->start() && p < fromspace[i]->end())
return true;
}
return false;
}
template <typename T>
MOZ_ALWAYS_INLINE bool
ForkJoinNursery::getForwardedPointer(T **ref)
{
JS_ASSERT(ref);
JS_ASSERT(isInsideFromspace(*ref));
const RelocationOverlay *overlay = reinterpret_cast<const RelocationOverlay *>(*ref);
if (!overlay->isForwarded())
return false;
// This static_cast from Cell* restricts T to valid (GC thing) types.
*ref = static_cast<T *>(overlay->forwardingAddress());
return true;
}
} // namespace gc
} // namespace js
#endif // JSGC_FJGENERATIONAL
#endif // gc_ForkJoinNursery_inl_h
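The comment in ForkJoinNursery-inl.h above suggests the per-chunk containment test could be reduced to comparing the high bits of the object pointer with the chunk base (one load, one XOR, one AND per chunk). A hedged sketch of that variant follows; the 1 MiB chunk size and the array layout are assumptions taken from the surrounding comments, not the shipped code.

// Sketch of the high-bits containment test hinted at in the comment above.
// Assumes chunks are 2^20 bytes and naturally aligned; illustrative only.
#include <cstdint>

static const uintptr_t ChunkShift = 20;                 // 1 MiB chunks (assumption)
static const uintptr_t ChunkMask  = (uintptr_t(1) << ChunkShift) - 1;

// chunkBases[i] holds the (aligned) base address of nursery chunk i.
static bool IsInsideNewspaceFast(const void* addr,
                                 const uintptr_t* chunkBases,
                                 unsigned numChunks) {
  uintptr_t p = reinterpret_cast<uintptr_t>(addr);
  for (unsigned i = 0; i < numChunks; i++) {
    // One XOR and one AND per chunk: zero iff the high bits match the chunk base.
    if (((p ^ chunkBases[i]) & ~ChunkMask) == 0)
      return true;
  }
  return false;
}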
@ -0,0 +1,908 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
|
||||
* vim: set ts=8 sts=4 et sw=4 tw=99:
|
||||
*
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
* You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
|
||||
#include "gc/ForkJoinNursery-inl.h"
|
||||
|
||||
#include "mozilla/IntegerPrintfMacros.h"
|
||||
|
||||
#include "prmjtime.h"
|
||||
|
||||
#include "gc/Heap.h"
|
||||
#include "jit/IonFrames.h"
|
||||
#include "vm/ArrayObject.h"
|
||||
#include "vm/ForkJoin.h"
|
||||
#include "vm/TypedArrayObject.h"
|
||||
|
||||
#include "jsgcinlines.h"
|
||||
#include "gc/Nursery-inl.h"
|
||||
#include "vm/ObjectImpl-inl.h"
|
||||
|
||||
// The ForkJoinNursery provides an object nursery for movable object
|
||||
// types for one ForkJoin worker thread. There is a one-to-one
|
||||
// correspondence between ForkJoinNursery and ForkJoinContext.
|
||||
//
|
||||
// For a general overview of how the ForkJoinNursery fits into the
|
||||
// overall PJS system, see the comment block in vm/ForkJoin.h.
|
||||
//
|
||||
//
|
||||
// Invariants on the ForkJoinNursery:
|
||||
//
|
||||
// Let "the tenured area" from the point of view of one
|
||||
// ForkJoinNursery comprise the global tenured area and the nursery's
|
||||
// owning worker's private tenured area. Then:
|
||||
//
|
||||
// - There can be pointers from the tenured area into a ForkJoinNursery,
|
||||
// and from the ForkJoinNursery into the tenured area
|
||||
//
|
||||
// - There *cannot* be a pointer from one ForkJoinNursery into
|
||||
// another, or from one private tenured area into another, or from a
|
||||
// ForkJoinNursery into another worker's private tenured area or vice
|
||||
// versa, or from any ForkJoinNursery or private tenured area into
|
||||
// the normal Nursery.
|
||||
//
|
||||
// For those invariants to hold the normal Nursery must be empty before
|
||||
// a ForkJoin section.
|
||||
//
|
||||
//
|
||||
// General description:
|
||||
//
|
||||
// The nursery maintains a space into which small, movable objects
|
||||
// are allocated. Other objects are allocated directly in the private
|
||||
// tenured area for the worker.
|
||||
//
|
||||
// If an allocation request can't be satisfied because the nursery is
|
||||
// full then a /minor collection/ is triggered without bailouts. This
|
||||
// collection copies nursery-allocated objects reachable from the
|
||||
// worker's roots into a fresh space. Then the old space is
|
||||
// discarded.
|
||||
//
|
||||
// Nurseries are maintained in 1MB chunks. If the live data in a
|
||||
// nursery after a collection exceeds some set fraction (currently
|
||||
// 1/3) then the nursery is grown, independently of other nurseries.
|
||||
//
|
||||
// There is an upper limit on the number of chunks in a nursery. If
|
||||
// the live data in a nursery after a collection exceeds the set
|
||||
// fraction and the nursery can't grow, then the next collection will
|
||||
// be an /evacuating collection/.
|
||||
//
|
||||
// An evacuating collection copies nursery-allocated objects reachable
|
||||
// from the worker's roots into the worker's private tenured area.
|
||||
//
|
||||
// If an allocation request in the tenured area - whether the request
|
||||
// comes from the mutator or from the garbage collector during
|
||||
// evacuation - can't be satisfied because the tenured area is full,
|
||||
// then the worker bails out and triggers a full collection in the
|
||||
// ForkJoin worker's zone. This is expected to happen very rarely in
|
||||
// practice.
|
||||
//
|
||||
// The roots for a collection in the ForkJoinNursery are: the frames
|
||||
// of the execution stack, any registered roots on the execution
|
||||
// stack, any objects in the private tenured area, and the ForkJoin
|
||||
// result object in the common tenured area.
|
||||
//
|
||||
// The entire private tenured area is considered to be rooted in order
|
||||
// not to have to run write barriers during the ForkJoin section.
|
||||
// During a minor or evacuating collection in a worker the GC will
|
||||
// step through the worker's tenured area, examining each object for
|
||||
// pointers into the nursery.
|
||||
//
|
||||
// The ForkJoinNursery contains its own object tracing machinery for
|
||||
// most of the types that can be allocated in the nursery. But it
|
||||
// does not handle all types, and there are two places where the code
|
||||
// in ForkJoinNursery loses control of the tracing:
|
||||
//
|
||||
// - When calling clasp->trace() in traceObject()
|
||||
// - When calling MarkForkJoinStack() in forwardFromStack()
|
||||
//
|
||||
// In both cases:
|
||||
//
|
||||
// - We pass a ForkJoinNurseryCollectionTracer object with a callback
|
||||
// to ForkJoinNursery::MinorGCCallback
|
||||
//
|
||||
// - We should only ever end up in MarkInternal() in Marking.cpp, in
//   the branch of that code that calls back to trc->callback.  We
//   should /never/ end up in functions that trigger use of the mark
//   stack internal to the general GC's marker.
|
||||
//
|
||||
// - Any function along the path to MarkInternal() that asks about
|
||||
// whether something is in the nursery or is tenured /must/ be aware
|
||||
// that there can be multiple nursery and tenured areas; assertions
|
||||
// get this wrong a lot of the time and must be fixed when they do.
|
||||
// In practice, such code either must have a case for each nursery
|
||||
// kind or must use the IsInsideNursery(Cell*) method, which looks
|
||||
// only at the chunk tag.
|
||||
//
|
||||
//
|
||||
// Terminological note:
|
||||
//
|
||||
// - While the mutator is running it is allocating in what's known as
|
||||
// the nursery's "newspace". The mutator may also allocate directly
|
||||
// in the tenured space, but the tenured space is not part of the
|
||||
// newspace.
|
||||
//
|
||||
// - While the gc is running, the previous "newspace" has been renamed
|
||||
// as the gc's "fromspace", and the space that objects are copied
|
||||
// into is known as the "tospace". The tospace may be a nursery
|
||||
// space (during a minor collection), or it may be a tenured space
|
||||
// (during an evacuation collection), but it's always one or the
|
||||
// other, never a combination. After gc the fromspace is always
|
||||
// discarded.
|
||||
//
|
||||
// - If the gc copies objects into a nursery tospace then this tospace
|
||||
// becomes known as the "newspace" following gc. Otherwise, a new
|
||||
// newspace won't be needed (if the parallel section is finished) or
|
||||
// can be created empty (if the gc just needed to evacuate).
|
||||
//
|
||||
//
|
||||
// Style note:
|
||||
//
|
||||
// - Use js_memcpy, malloc_, realloc_, and js_free uniformly, do not
|
||||
// use PodCopy or pod_malloc: the type information for the latter is
|
||||
// not always correct and surrounding code usually operates in terms
|
||||
// of bytes, anyhow.
|
||||
//
|
||||
// With power comes responsibility, etc: code that used pod_malloc
|
||||
// gets safe size computation built-in; here we must handle that
|
||||
// manually.
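//
// Illustrative note (not from the original patch): the manual size check
// referred to above is the pattern already used further down in this file,
// e.g. in allocateSlots():
//
//     if (nslots & mozilla::tl::MulOverflowMask<sizeof(HeapSlot)>::value)
//         return nullptr;          // nslots * sizeof(HeapSlot) would overflow
//     size_t size = nslots * sizeof(HeapSlot);
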
namespace js {
|
||||
namespace gc {
|
||||
|
||||
ForkJoinNursery::ForkJoinNursery(ForkJoinContext *cx, ForkJoinGCShared *shared, Allocator *tenured)
|
||||
: cx_(cx)
|
||||
, tenured_(tenured)
|
||||
, shared_(shared)
|
||||
, evacuationZone_(nullptr)
|
||||
, currentStart_(0)
|
||||
, currentEnd_(0)
|
||||
, position_(0)
|
||||
, currentChunk_(0)
|
||||
, numActiveChunks_(0)
|
||||
, numFromspaceChunks_(0)
|
||||
, mustEvacuate_(false)
|
||||
, isEvacuating_(false)
|
||||
, movedSize_(0)
|
||||
, head_(nullptr)
|
||||
, tail_(&head_)
|
||||
, hugeSlotsNew(0)
|
||||
, hugeSlotsFrom(1)
|
||||
{
|
||||
for ( size_t i=0 ; i < MaxNurseryChunks ; i++ ) {
|
||||
newspace[i] = nullptr;
|
||||
fromspace[i] = nullptr;
|
||||
}
|
||||
if (!hugeSlots[hugeSlotsNew].init() || !hugeSlots[hugeSlotsFrom].init())
|
||||
CrashAtUnhandlableOOM("Cannot initialize PJS nursery");
|
||||
    initNewspace();          // May crash on OOM rather than return
|
||||
}
|
||||
|
||||
ForkJoinNursery::~ForkJoinNursery()
|
||||
{
|
||||
for ( size_t i=0 ; i < numActiveChunks_ ; i++ ) {
|
||||
if (newspace[i])
|
||||
shared_->freeNurseryChunk(newspace[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
ForkJoinNursery::minorGC()
|
||||
{
|
||||
if (mustEvacuate_) {
|
||||
mustEvacuate_ = false;
|
||||
pjsCollection(Evacuate|Recreate);
|
||||
} else {
|
||||
pjsCollection(Collect|Recreate);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
ForkJoinNursery::evacuatingGC()
|
||||
{
|
||||
pjsCollection(Evacuate);
|
||||
}
|
||||
|
||||
#define TIME_START(name) int64_t timstampStart_##name = PRMJ_Now()
|
||||
#define TIME_END(name) int64_t timstampEnd_##name = PRMJ_Now()
|
||||
#define TIME_TOTAL(name) (timstampEnd_##name - timstampStart_##name)
|
||||
|
||||
void
|
||||
ForkJoinNursery::pjsCollection(int op)
|
||||
{
|
||||
JS_ASSERT((op & Collect) != (op & Evacuate));
|
||||
|
||||
bool evacuate = op & Evacuate;
|
||||
bool recreate = op & Recreate;
|
||||
|
||||
JS_ASSERT(!isEvacuating_);
|
||||
JS_ASSERT(!evacuationZone_);
|
||||
JS_ASSERT(!head_);
|
||||
JS_ASSERT(tail_ == &head_);
|
||||
|
||||
JSRuntime *const rt = shared_->runtime();
|
||||
const unsigned currentNumActiveChunks_ = numActiveChunks_;
|
||||
const char *msg = "";
|
||||
|
||||
JS_ASSERT(!rt->needsBarrier());
|
||||
|
||||
TIME_START(pjsCollection);
|
||||
|
||||
rt->incFJMinorCollecting();
|
||||
if (evacuate) {
|
||||
isEvacuating_ = true;
|
||||
evacuationZone_ = shared_->zone();
|
||||
}
|
||||
|
||||
flip();
|
||||
if (recreate) {
|
||||
initNewspace();
|
||||
        // newspace must be at least as large as fromspace
|
||||
numActiveChunks_ = currentNumActiveChunks_;
|
||||
}
|
||||
ForkJoinNurseryCollectionTracer trc(rt, this);
|
||||
forwardFromRoots(&trc);
|
||||
collectToFixedPoint(&trc);
|
||||
#ifdef JS_ION
|
||||
jit::UpdateJitActivationsForMinorGC(TlsPerThreadData.get(), &trc);
|
||||
#endif
|
||||
freeFromspace();
|
||||
|
||||
size_t live = movedSize_;
|
||||
computeNurserySizeAfterGC(live, &msg);
|
||||
|
||||
sweepHugeSlots();
|
||||
JS_ASSERT(hugeSlots[hugeSlotsFrom].empty());
|
||||
JS_ASSERT_IF(isEvacuating_, hugeSlots[hugeSlotsNew].empty());
|
||||
|
||||
isEvacuating_ = false;
|
||||
evacuationZone_ = nullptr;
|
||||
head_ = nullptr;
|
||||
tail_ = &head_;
|
||||
movedSize_ = 0;
|
||||
|
||||
rt->decFJMinorCollecting();
|
||||
|
||||
TIME_END(pjsCollection);
|
||||
|
||||
// Note, the spew is awk-friendly, non-underlined words serve as markers:
|
||||
// FJGC _tag_ us _value_ copied _value_ size _value_ _message-word_ ...
|
||||
shared_->spewGC("FJGC %s us %5" PRId64 " copied %7" PRIu64 " size %" PRIu64 " %s",
|
||||
(evacuate ? "evacuate " : "collect "),
|
||||
TIME_TOTAL(pjsCollection),
|
||||
(uint64_t)live,
|
||||
(uint64_t)numActiveChunks_*1024*1024,
|
||||
msg);
|
||||
}
|
||||
|
||||
#undef TIME_START
|
||||
#undef TIME_END
|
||||
#undef TIME_TOTAL
|
||||
|
||||
void
|
||||
ForkJoinNursery::computeNurserySizeAfterGC(size_t live, const char **msg)
|
||||
{
|
||||
    // Grow the nursery if it is too full.  Do not bother to shrink it: lazy
    // chunk allocation means that a too-large nursery will not really be a
    // problem, and the entire nursery will be deallocated soon anyway.
|
||||
if (live * NurseryLoadFactor > numActiveChunks_ * ForkJoinNurseryChunk::UsableSize) {
|
||||
if (numActiveChunks_ < MaxNurseryChunks) {
|
||||
while (numActiveChunks_ < MaxNurseryChunks &&
|
||||
live * NurseryLoadFactor > numActiveChunks_ * ForkJoinNurseryChunk::UsableSize)
|
||||
{
|
||||
++numActiveChunks_;
|
||||
}
|
||||
} else {
|
||||
// Evacuation will tend to drive us toward the cliff of a bailout GC, which
|
||||
// is not good, probably worse than working within the thread at a higher load
|
||||
// than desirable.
|
||||
//
|
||||
// Thus it's possible to be more sophisticated than this:
|
||||
//
|
||||
// - evacuate only after several minor GCs in a row exceeded the set load
|
||||
// - evacuate only if significantly less space than required is available, eg,
|
||||
// if only 1/2 the required free space is available
|
||||
*msg = " Overfull, will evacuate next";
|
||||
mustEvacuate_ = true;
|
||||
}
|
||||
}
|
||||
}
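
// Illustrative note (not from the original patch): with the default
// NurseryLoadFactor of 3 and roughly 1 MB usable per chunk, the test above
// grows the newspace (or, at the chunk limit, schedules an evacuating
// collection) once more than about a third of the active chunk space -
// roughly 350 KB per chunk - is live after a minor collection.
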
void
|
||||
ForkJoinNursery::flip()
|
||||
{
|
||||
size_t i;
|
||||
for (i=0; i < numActiveChunks_; i++) {
|
||||
if (!newspace[i])
|
||||
break;
|
||||
fromspace[i] = newspace[i];
|
||||
newspace[i] = nullptr;
|
||||
fromspace[i]->trailer.location = gc::ChunkLocationBitPJSFromspace;
|
||||
}
|
||||
numFromspaceChunks_ = i;
|
||||
numActiveChunks_ = 0;
|
||||
|
||||
int tmp = hugeSlotsNew;
|
||||
hugeSlotsNew = hugeSlotsFrom;
|
||||
hugeSlotsFrom = tmp;
|
||||
|
||||
JS_ASSERT(hugeSlots[hugeSlotsNew].empty());
|
||||
}
|
||||
|
||||
void
|
||||
ForkJoinNursery::freeFromspace()
|
||||
{
|
||||
for (size_t i=0; i < numFromspaceChunks_; i++) {
|
||||
shared_->freeNurseryChunk(fromspace[i]);
|
||||
fromspace[i] = nullptr;
|
||||
}
|
||||
numFromspaceChunks_ = 0;
|
||||
}
|
||||
|
||||
void
|
||||
ForkJoinNursery::initNewspace()
|
||||
{
|
||||
JS_ASSERT(newspace[0] == nullptr);
|
||||
JS_ASSERT(numActiveChunks_ == 0);
|
||||
|
||||
numActiveChunks_ = 1;
|
||||
setCurrentChunk(0);
|
||||
}
|
||||
|
||||
MOZ_ALWAYS_INLINE bool
|
||||
ForkJoinNursery::shouldMoveObject(void **thingp)
|
||||
{
|
||||
// Note that thingp must really be a T** where T is some GCThing,
|
||||
// ie, something that lives in a chunk (or nullptr). This should
|
||||
// be the case because the MinorGCCallback is only called on exact
|
||||
    // roots on the stack or slots within tenured objects and not
|
||||
// on slot/element arrays that can be malloc'd; they are forwarded
|
||||
// using the forwardBufferPointer() mechanism.
|
||||
//
|
||||
// The main reason for that restriction is so that we can call a
|
||||
// method here that can check the chunk trailer for the cell (a
|
||||
// future optimization).
|
||||
Cell *cell = static_cast<Cell *>(*thingp);
|
||||
return isInsideFromspace(cell) && !getForwardedPointer(thingp);
|
||||
}
|
||||
|
||||
/* static */ void
|
||||
ForkJoinNursery::MinorGCCallback(JSTracer *trcArg, void **thingp, JSGCTraceKind traceKind)
|
||||
{
|
||||
// traceKind can be all sorts of things, when we're marking from stack roots
|
||||
ForkJoinNursery *nursery = static_cast<ForkJoinNurseryCollectionTracer *>(trcArg)->nursery_;
|
||||
if (nursery->shouldMoveObject(thingp)) {
|
||||
// When other types of objects become nursery-allocable then the static_cast
|
||||
// to JSObject * will no longer be valid.
|
||||
JS_ASSERT(traceKind == JSTRACE_OBJECT);
|
||||
*thingp = nursery->moveObjectToTospace(static_cast<JSObject *>(*thingp));
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
ForkJoinNursery::forwardFromRoots(ForkJoinNurseryCollectionTracer *trc)
|
||||
{
|
||||
// There should be no other roots as a result of effect-freedom.
|
||||
forwardFromUpdatable(trc);
|
||||
forwardFromStack(trc);
|
||||
forwardFromTenured(trc);
|
||||
}
|
||||
|
||||
void
|
||||
ForkJoinNursery::forwardFromUpdatable(ForkJoinNurseryCollectionTracer *trc)
|
||||
{
|
||||
JSObject *obj = shared_->updatable();
|
||||
if (obj)
|
||||
traceObject(trc, obj);
|
||||
}
|
||||
|
||||
void
|
||||
ForkJoinNursery::forwardFromStack(ForkJoinNurseryCollectionTracer *trc)
|
||||
{
|
||||
MarkForkJoinStack(trc);
|
||||
}
|
||||
|
||||
void
|
||||
ForkJoinNursery::forwardFromTenured(ForkJoinNurseryCollectionTracer *trc)
|
||||
{
|
||||
JSObject *objs[ArenaCellCount];
|
||||
for (size_t k=0; k < FINALIZE_LIMIT; k++) {
|
||||
AllocKind kind = (AllocKind)k;
|
||||
if (!IsFJNurseryAllocable(kind))
|
||||
continue;
|
||||
|
||||
// When non-JSObject types become nursery-allocable the assumptions in the
|
||||
// loops below will no longer hold; other types than JSObject must be
|
||||
// handled.
|
||||
JS_ASSERT(kind <= FINALIZE_OBJECT_LAST);
|
||||
|
||||
ArenaIter ai;
|
||||
ai.init(const_cast<Allocator *>(tenured_), kind);
|
||||
for (; !ai.done(); ai.next()) {
|
||||
// Do the walk in two steps to avoid problems resulting from allocating
|
||||
// into the arena that's being walked: ArenaCellIter is not safe for that.
|
||||
// It can happen during evacuation.
|
||||
//
|
||||
// ArenaCellIterUnderFinalize requires any free list to be flushed into
|
||||
// its arena, and since we may allocate within traceObject() we must
|
||||
            // purge before each arena scan.  This is probably not very
            // expensive: it is constant work, and inlined.
|
||||
//
|
||||
// Use ArenaCellIterUnderFinalize, not ...UnderGC, because that side-steps
|
||||
// some assertions in the latter that are wrong for PJS collection.
|
||||
size_t numObjs = 0;
|
||||
tenured_->arenas.purge(kind);
|
||||
for (ArenaCellIterUnderFinalize i(ai.get()); !i.done(); i.next())
|
||||
objs[numObjs++] = i.get<JSObject>();
|
||||
for (size_t i=0; i < numObjs; i++)
|
||||
traceObject(trc, objs[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*static*/ void
|
||||
ForkJoinNursery::forwardBufferPointer(JSTracer *trc, HeapSlot **pSlotsElems)
|
||||
{
|
||||
ForkJoinNursery *nursery = static_cast<ForkJoinNurseryCollectionTracer *>(trc)->nursery_;
|
||||
HeapSlot *old = *pSlotsElems;
|
||||
|
||||
if (!nursery->isInsideFromspace(old))
|
||||
return;
|
||||
|
||||
// If the elements buffer is zero length, the "first" item could be inside
|
||||
// of the next object or past the end of the allocable area. However,
|
||||
// since we always store the runtime as the last word in a nursery chunk,
|
||||
// isInsideFromspace will still be true, even if this zero-size allocation
|
||||
// abuts the end of the allocable area. Thus, it is always safe to read the
|
||||
// first word of |old| here.
|
||||
*pSlotsElems = *reinterpret_cast<HeapSlot **>(old);
|
||||
JS_ASSERT(!nursery->isInsideFromspace(*pSlotsElems));
|
||||
}
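
// Illustrative note (not from the original patch): the word read above is the
// forwarding pointer written by setSlotsForwardingPointer() and
// setElementsForwardingPointer() below, which store the new buffer address in
// the first word of the old buffer.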
void
|
||||
ForkJoinNursery::collectToFixedPoint(ForkJoinNurseryCollectionTracer *trc)
|
||||
{
|
||||
for (RelocationOverlay *p = head_; p; p = p->next())
|
||||
traceObject(trc, static_cast<JSObject *>(p->forwardingAddress()));
|
||||
}
|
||||
|
||||
inline void
|
||||
ForkJoinNursery::setCurrentChunk(int index)
|
||||
{
|
||||
JS_ASSERT((size_t)index < numActiveChunks_);
|
||||
JS_ASSERT(!newspace[index]);
|
||||
|
||||
currentChunk_ = index;
|
||||
ForkJoinNurseryChunk *c = shared_->allocateNurseryChunk();
|
||||
if (!c)
|
||||
CrashAtUnhandlableOOM("Cannot expand PJS nursery");
|
||||
c->trailer.runtime = shared_->runtime();
|
||||
c->trailer.location = gc::ChunkLocationBitPJSNewspace;
|
||||
c->trailer.storeBuffer = nullptr;
|
||||
currentStart_ = c->start();
|
||||
currentEnd_ = c->end();
|
||||
position_ = currentStart_;
|
||||
newspace[index] = c;
|
||||
}
|
||||
|
||||
void *
|
||||
ForkJoinNursery::allocate(size_t size)
|
||||
{
|
||||
JS_ASSERT(position_ >= currentStart_);
|
||||
|
||||
if (currentEnd_ - position_ < size) {
|
||||
if (currentChunk_ + 1 == numActiveChunks_)
|
||||
return nullptr;
|
||||
setCurrentChunk(currentChunk_ + 1);
|
||||
}
|
||||
|
||||
void *thing = reinterpret_cast<void *>(position_);
|
||||
position_ += size;
|
||||
|
||||
JS_POISON(thing, JS_ALLOCATED_NURSERY_PATTERN, size);
|
||||
return thing;
|
||||
}
|
||||
|
||||
JSObject *
|
||||
ForkJoinNursery::allocateObject(size_t baseSize, size_t numDynamic, bool& tooLarge)
|
||||
{
|
||||
// Ensure there's enough space to replace the contents with a RelocationOverlay.
|
||||
JS_ASSERT(baseSize >= sizeof(js::gc::RelocationOverlay));
|
||||
|
||||
    // Too-large slot arrays cannot be accommodated.
|
||||
if (numDynamic > MaxNurserySlots) {
|
||||
tooLarge = true;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Allocate slots contiguously after the object.
|
||||
size_t totalSize = baseSize + sizeof(HeapSlot) * numDynamic;
|
||||
JSObject *obj = static_cast<JSObject *>(allocate(totalSize));
|
||||
if (!obj) {
|
||||
tooLarge = false;
|
||||
return nullptr;
|
||||
}
|
||||
obj->setInitialSlots(numDynamic
|
||||
? reinterpret_cast<HeapSlot *>(size_t(obj) + baseSize)
|
||||
: nullptr);
|
||||
return obj;
|
||||
}
|
||||
|
||||
HeapSlot *
|
||||
ForkJoinNursery::allocateSlots(JSObject *obj, uint32_t nslots)
|
||||
{
|
||||
JS_ASSERT(obj);
|
||||
JS_ASSERT(nslots > 0);
|
||||
|
||||
if (nslots & mozilla::tl::MulOverflowMask<sizeof(HeapSlot)>::value)
|
||||
return nullptr;
|
||||
size_t size = nslots * sizeof(HeapSlot);
|
||||
|
||||
if (!isInsideNewspace(obj))
|
||||
return reinterpret_cast<HeapSlot *>(cx_->malloc_(size));
|
||||
|
||||
if (nslots > MaxNurserySlots)
|
||||
return allocateHugeSlots(nslots);
|
||||
|
||||
HeapSlot *slots = static_cast<HeapSlot *>(allocate(size));
|
||||
if (slots)
|
||||
return slots;
|
||||
|
||||
return allocateHugeSlots(nslots);
|
||||
}
|
||||
|
||||
HeapSlot *
|
||||
ForkJoinNursery::reallocateSlots(JSObject *obj, HeapSlot *oldSlots,
|
||||
uint32_t oldCount, uint32_t newCount)
|
||||
{
|
||||
if (newCount & mozilla::tl::MulOverflowMask<sizeof(HeapSlot)>::value)
|
||||
return nullptr;
|
||||
|
||||
size_t oldSize = oldCount * sizeof(HeapSlot);
|
||||
size_t newSize = newCount * sizeof(HeapSlot);
|
||||
|
||||
if (!isInsideNewspace(obj)) {
|
||||
JS_ASSERT_IF(oldSlots, !isInsideNewspace(oldSlots));
|
||||
return static_cast<HeapSlot *>(cx_->realloc_(oldSlots, oldSize, newSize));
|
||||
}
|
||||
|
||||
if (!isInsideNewspace(oldSlots))
|
||||
return reallocateHugeSlots(oldSlots, oldSize, newSize);
|
||||
|
||||
// No-op if we're shrinking, we can't make use of the freed portion.
|
||||
if (newCount < oldCount)
|
||||
return oldSlots;
|
||||
|
||||
HeapSlot *newSlots = allocateSlots(obj, newCount);
|
||||
if (!newSlots)
|
||||
return nullptr;
|
||||
|
||||
js_memcpy(newSlots, oldSlots, oldSize);
|
||||
return newSlots;
|
||||
}
|
||||
|
||||
ObjectElements *
|
||||
ForkJoinNursery::allocateElements(JSObject *obj, uint32_t nelems)
|
||||
{
|
||||
JS_ASSERT(nelems >= ObjectElements::VALUES_PER_HEADER);
|
||||
return reinterpret_cast<ObjectElements *>(allocateSlots(obj, nelems));
|
||||
}
|
||||
|
||||
ObjectElements *
|
||||
ForkJoinNursery::reallocateElements(JSObject *obj, ObjectElements *oldHeader,
|
||||
uint32_t oldCount, uint32_t newCount)
|
||||
{
|
||||
HeapSlot *slots = reallocateSlots(obj, reinterpret_cast<HeapSlot *>(oldHeader),
|
||||
oldCount, newCount);
|
||||
return reinterpret_cast<ObjectElements *>(slots);
|
||||
}
|
||||
|
||||
void
|
||||
ForkJoinNursery::freeSlots(HeapSlot *slots)
|
||||
{
|
||||
if (!isInsideNewspace(slots)) {
|
||||
hugeSlots[hugeSlotsNew].remove(slots);
|
||||
js_free(slots);
|
||||
}
|
||||
}
|
||||
|
||||
HeapSlot *
|
||||
ForkJoinNursery::allocateHugeSlots(size_t nslots)
|
||||
{
|
||||
if (nslots & mozilla::tl::MulOverflowMask<sizeof(HeapSlot)>::value)
|
||||
return nullptr;
|
||||
|
||||
size_t size = nslots * sizeof(HeapSlot);
|
||||
HeapSlot *slots = reinterpret_cast<HeapSlot *>(cx_->malloc_(size));
|
||||
if (!slots)
|
||||
return slots;
|
||||
|
||||
// If this put fails, we will only leak the slots.
|
||||
(void)hugeSlots[hugeSlotsNew].put(slots);
|
||||
return slots;
|
||||
}
|
||||
|
||||
HeapSlot *
|
||||
ForkJoinNursery::reallocateHugeSlots(HeapSlot *oldSlots, uint32_t oldSize, uint32_t newSize)
|
||||
{
|
||||
HeapSlot *newSlots = static_cast<HeapSlot *>(cx_->realloc_(oldSlots, oldSize, newSize));
|
||||
if (!newSlots)
|
||||
return newSlots;
|
||||
|
||||
if (oldSlots != newSlots) {
|
||||
hugeSlots[hugeSlotsNew].remove(oldSlots);
|
||||
// If this put fails, we will only leak the slots.
|
||||
(void)hugeSlots[hugeSlotsNew].put(newSlots);
|
||||
}
|
||||
return newSlots;
|
||||
}
|
||||
|
||||
void
|
||||
ForkJoinNursery::sweepHugeSlots()
|
||||
{
|
||||
for (HugeSlotsSet::Range r = hugeSlots[hugeSlotsFrom].all(); !r.empty(); r.popFront())
|
||||
js_free(r.front());
|
||||
hugeSlots[hugeSlotsFrom].clear();
|
||||
}
|
||||
|
||||
MOZ_ALWAYS_INLINE void
|
||||
ForkJoinNursery::traceObject(ForkJoinNurseryCollectionTracer *trc, JSObject *obj)
|
||||
{
|
||||
const Class *clasp = obj->getClass();
|
||||
if (clasp->trace)
|
||||
clasp->trace(trc, obj);
|
||||
|
||||
if (!obj->isNative())
|
||||
return;
|
||||
|
||||
if (!obj->hasEmptyElements())
|
||||
markSlots(obj->getDenseElements(), obj->getDenseInitializedLength());
|
||||
|
||||
HeapSlot *fixedStart, *fixedEnd, *dynStart, *dynEnd;
|
||||
obj->getSlotRange(0, obj->slotSpan(), &fixedStart, &fixedEnd, &dynStart, &dynEnd);
|
||||
markSlots(fixedStart, fixedEnd);
|
||||
markSlots(dynStart, dynEnd);
|
||||
}
|
||||
|
||||
MOZ_ALWAYS_INLINE void
|
||||
ForkJoinNursery::markSlots(HeapSlot *vp, uint32_t nslots)
|
||||
{
|
||||
markSlots(vp, vp + nslots);
|
||||
}
|
||||
|
||||
MOZ_ALWAYS_INLINE void
|
||||
ForkJoinNursery::markSlots(HeapSlot *vp, HeapSlot *end)
|
||||
{
|
||||
for (; vp != end; ++vp)
|
||||
markSlot(vp);
|
||||
}
|
||||
|
||||
MOZ_ALWAYS_INLINE void
|
||||
ForkJoinNursery::markSlot(HeapSlot *slotp)
|
||||
{
|
||||
if (!slotp->isObject())
|
||||
return;
|
||||
|
||||
JSObject *obj = &slotp->toObject();
|
||||
if (!isInsideFromspace(obj))
|
||||
return;
|
||||
|
||||
if (getForwardedPointer(&obj)) {
|
||||
slotp->unsafeGet()->setObject(*obj);
|
||||
return;
|
||||
}
|
||||
|
||||
JSObject *moved = static_cast<JSObject *>(moveObjectToTospace(obj));
|
||||
slotp->unsafeGet()->setObject(*moved);
|
||||
}
|
||||
|
||||
AllocKind
|
||||
ForkJoinNursery::getObjectAllocKind(JSObject *obj)
|
||||
{
|
||||
if (obj->is<ArrayObject>()) {
|
||||
JS_ASSERT(obj->numFixedSlots() == 0);
|
||||
|
||||
// Use minimal size object if we are just going to copy the pointer.
|
||||
if (!isInsideFromspace((void *)obj->getElementsHeader()))
|
||||
return FINALIZE_OBJECT0_BACKGROUND;
|
||||
|
||||
size_t nelements = obj->getDenseCapacity();
|
||||
return GetBackgroundAllocKind(GetGCArrayKind(nelements));
|
||||
}
|
||||
|
||||
if (obj->is<JSFunction>())
|
||||
return obj->as<JSFunction>().getAllocKind();
|
||||
|
||||
AllocKind kind = GetGCObjectFixedSlotsKind(obj->numFixedSlots());
|
||||
JS_ASSERT(!IsBackgroundFinalized(kind));
|
||||
JS_ASSERT(CanBeFinalizedInBackground(kind, obj->getClass()));
|
||||
return GetBackgroundAllocKind(kind);
|
||||
}
|
||||
|
||||
void *
|
||||
ForkJoinNursery::allocateInTospace(gc::AllocKind thingKind)
|
||||
{
|
||||
size_t thingSize = Arena::thingSize(thingKind);
|
||||
if (isEvacuating_) {
|
||||
void *t = tenured_->arenas.allocateFromFreeList(thingKind, thingSize);
|
||||
if (t)
|
||||
return t;
|
||||
tenured_->arenas.checkEmptyFreeList(thingKind);
|
||||
// This call may return NULL but should do so only if memory
|
||||
// is truly exhausted. However, allocateFromArena() can fail
|
||||
// either because memory is exhausted or if the allocation
|
||||
// budget is used up. There is a guard in
|
||||
// Chunk::allocateArena() against the latter case.
|
||||
return tenured_->arenas.allocateFromArena(evacuationZone_, thingKind);
|
||||
} else {
|
||||
// Nursery allocation will never fail during GC - apart from
|
||||
// true OOM - since newspace is at least as large as
|
||||
// fromspace; true OOM is caught and signaled within
|
||||
// ForkJoinNursery::setCurrentChunk().
|
||||
return allocate(thingSize);
|
||||
}
|
||||
}
|
||||
|
||||
void *
|
||||
ForkJoinNursery::allocateInTospace(size_t nelem, size_t elemSize)
|
||||
{
|
||||
if (isEvacuating_)
|
||||
return evacuationZone_->malloc_(nelem * elemSize);
|
||||
return allocate(nelem * elemSize);
|
||||
}
|
||||
|
||||
MOZ_ALWAYS_INLINE void
|
||||
ForkJoinNursery::insertIntoFixupList(RelocationOverlay *entry)
|
||||
{
|
||||
*tail_ = entry;
|
||||
tail_ = &entry->next_;
|
||||
*tail_ = nullptr;
|
||||
}
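
// Illustrative note (not from the original patch): head_ and tail_ form a
// singly linked worklist threaded through the RelocationOverlays of moved
// objects; collectToFixedPoint() walks it from head_ in insertion order,
// giving a Cheney-style scan of everything copied so far.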
void *
|
||||
ForkJoinNursery::moveObjectToTospace(JSObject *src)
|
||||
{
|
||||
AllocKind dstKind = getObjectAllocKind(src);
|
||||
JSObject *dst = static_cast<JSObject *>(allocateInTospace(dstKind));
|
||||
if (!dst)
|
||||
CrashAtUnhandlableOOM("Failed to allocate object while moving object.");
|
||||
|
||||
movedSize_ += copyObjectToTospace(dst, src, dstKind);
|
||||
|
||||
RelocationOverlay *overlay = reinterpret_cast<RelocationOverlay *>(src);
|
||||
overlay->forwardTo(dst);
|
||||
insertIntoFixupList(overlay);
|
||||
|
||||
return static_cast<void *>(dst);
|
||||
}
|
||||
|
||||
size_t
|
||||
ForkJoinNursery::copyObjectToTospace(JSObject *dst, JSObject *src, AllocKind dstKind)
|
||||
{
|
||||
size_t srcSize = Arena::thingSize(dstKind);
|
||||
size_t movedSize = srcSize;
|
||||
|
||||
// Arrays do not necessarily have the same AllocKind between src and dst.
|
||||
// We deal with this by copying elements manually, possibly re-inlining
|
||||
// them if there is adequate room inline in dst.
|
||||
if (src->is<ArrayObject>())
|
||||
srcSize = movedSize = sizeof(ObjectImpl);
|
||||
|
||||
js_memcpy(dst, src, srcSize);
|
||||
movedSize += copySlotsToTospace(dst, src, dstKind);
|
||||
movedSize += copyElementsToTospace(dst, src, dstKind);
|
||||
|
||||
if (src->is<TypedArrayObject>())
|
||||
dst->setPrivate(dst->fixedData(TypedArrayObject::FIXED_DATA_START));
|
||||
|
||||
// The shape's list head may point into the old object.
|
||||
if (&src->shape_ == dst->shape_->listp) {
|
||||
JS_ASSERT(cx_->isThreadLocal(dst->shape_.get()));
|
||||
dst->shape_->listp = &dst->shape_;
|
||||
}
|
||||
|
||||
return movedSize;
|
||||
}
|
||||
|
||||
size_t
|
||||
ForkJoinNursery::copySlotsToTospace(JSObject *dst, JSObject *src, AllocKind dstKind)
|
||||
{
|
||||
// Fixed slots have already been copied over.
|
||||
if (!src->hasDynamicSlots())
|
||||
return 0;
|
||||
|
||||
if (!isInsideFromspace(src->slots)) {
|
||||
hugeSlots[hugeSlotsFrom].remove(src->slots);
|
||||
if (!isEvacuating_)
|
||||
hugeSlots[hugeSlotsNew].put(src->slots);
|
||||
return 0;
|
||||
}
|
||||
|
||||
size_t count = src->numDynamicSlots();
|
||||
dst->slots = reinterpret_cast<HeapSlot *>(allocateInTospace(count, sizeof(HeapSlot)));
|
||||
if (!dst->slots)
|
||||
CrashAtUnhandlableOOM("Failed to allocate slots while moving object.");
|
||||
js_memcpy(dst->slots, src->slots, count * sizeof(HeapSlot));
|
||||
setSlotsForwardingPointer(src->slots, dst->slots, count);
|
||||
return count * sizeof(HeapSlot);
|
||||
}
|
||||
|
||||
size_t
|
||||
ForkJoinNursery::copyElementsToTospace(JSObject *dst, JSObject *src, AllocKind dstKind)
|
||||
{
|
||||
if (src->hasEmptyElements())
|
||||
return 0;
|
||||
|
||||
ObjectElements *srcHeader = src->getElementsHeader();
|
||||
ObjectElements *dstHeader;
|
||||
|
||||
// TODO Bug 874151: Prefer to put element data inline if we have space.
|
||||
// (Note, not a correctness issue.)
|
||||
if (!isInsideFromspace(srcHeader)) {
|
||||
JS_ASSERT(src->elements == dst->elements);
|
||||
hugeSlots[hugeSlotsFrom].remove(reinterpret_cast<HeapSlot*>(srcHeader));
|
||||
if (!isEvacuating_)
|
||||
hugeSlots[hugeSlotsNew].put(reinterpret_cast<HeapSlot*>(srcHeader));
|
||||
return 0;
|
||||
}
|
||||
|
||||
size_t nslots = ObjectElements::VALUES_PER_HEADER + srcHeader->capacity;
|
||||
|
||||
// Unlike other objects, Arrays can have fixed elements.
|
||||
if (src->is<ArrayObject>() && nslots <= GetGCKindSlots(dstKind)) {
|
||||
dst->setFixedElements();
|
||||
dstHeader = dst->getElementsHeader();
|
||||
js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
|
||||
setElementsForwardingPointer(srcHeader, dstHeader, nslots);
|
||||
return nslots * sizeof(HeapSlot);
|
||||
}
|
||||
|
||||
JS_ASSERT(nslots >= 2);
|
||||
dstHeader = reinterpret_cast<ObjectElements *>(allocateInTospace(nslots, sizeof(HeapSlot)));
|
||||
if (!dstHeader)
|
||||
CrashAtUnhandlableOOM("Failed to allocate elements while moving object.");
|
||||
js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
|
||||
setElementsForwardingPointer(srcHeader, dstHeader, nslots);
|
||||
dst->elements = dstHeader->elements();
|
||||
return nslots * sizeof(HeapSlot);
|
||||
}
|
||||
|
||||
void
|
||||
ForkJoinNursery::setSlotsForwardingPointer(HeapSlot *oldSlots, HeapSlot *newSlots, uint32_t nslots)
|
||||
{
|
||||
JS_ASSERT(nslots > 0);
|
||||
JS_ASSERT(isInsideFromspace(oldSlots));
|
||||
JS_ASSERT(!isInsideFromspace(newSlots));
|
||||
*reinterpret_cast<HeapSlot **>(oldSlots) = newSlots;
|
||||
}
|
||||
|
||||
void
|
||||
ForkJoinNursery::setElementsForwardingPointer(ObjectElements *oldHeader, ObjectElements *newHeader,
|
||||
uint32_t nelems)
|
||||
{
|
||||
// If the JIT has hoisted a zero length pointer, then we do not need to
|
||||
// relocate it because reads and writes to/from this pointer are invalid.
|
||||
if (nelems - ObjectElements::VALUES_PER_HEADER < 1)
|
||||
return;
|
||||
JS_ASSERT(isInsideFromspace(oldHeader));
|
||||
JS_ASSERT(!isInsideFromspace(newHeader));
|
||||
*reinterpret_cast<HeapSlot **>(oldHeader->elements()) = newHeader->elements();
|
||||
}
|
||||
|
||||
ForkJoinNurseryCollectionTracer::ForkJoinNurseryCollectionTracer(JSRuntime *rt,
|
||||
ForkJoinNursery *nursery)
|
||||
: JSTracer(rt, ForkJoinNursery::MinorGCCallback, TraceWeakMapKeysValues)
|
||||
, nursery_(nursery)
|
||||
{
|
||||
JS_ASSERT(rt);
|
||||
JS_ASSERT(nursery);
|
||||
}
|
||||
|
||||
} // namespace gc
|
||||
} // namespace js
|
||||
|
||||
#endif /* JSGC_FJGENERATIONAL */
|
|
@ -0,0 +1,297 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
|
||||
* vim: set ts=8 sts=4 et sw=4 tw=99:
|
||||
*
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
* You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef gc_ForkJoinNursery_h
|
||||
#define gc_ForkJoinNursery_h
|
||||
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
|
||||
#ifndef JSGC_GENERATIONAL
|
||||
#error "JSGC_GENERATIONAL is required for the ForkJoinNursery"
|
||||
#endif
|
||||
#ifndef JS_THREADSAFE
|
||||
#error "JS_THREADSAFE is required for the ForkJoinNursery"
|
||||
#endif
|
||||
#ifndef JS_ION
|
||||
#error "JS_ION is required for the ForkJoinNursery"
|
||||
#endif
|
||||
|
||||
#include "jsalloc.h"
|
||||
#include "jspubtd.h"
|
||||
|
||||
#include "gc/Heap.h"
|
||||
#include "gc/Memory.h"
|
||||
#include "gc/Nursery.h"
|
||||
|
||||
#include "js/HashTable.h"
|
||||
#include "js/TracingAPI.h"
|
||||
|
||||
namespace js {
|
||||
class ObjectElements;
|
||||
class HeapSlot;
|
||||
class ForkJoinShared;
|
||||
}
|
||||
|
||||
namespace js {
|
||||
namespace gc {
|
||||
|
||||
class ForkJoinGCShared;
|
||||
class ForkJoinNursery;
|
||||
class ForkJoinNurseryCollectionTracer;
|
||||
|
||||
// This tracer comes into play when a class has a tracer function, but
|
||||
// is otherwise unused and has no other functionality.
|
||||
//
|
||||
// It may look like this could be merged into ForkJoinNursery by
|
||||
// making the latter derive from JSTracer; I've decided to keep them
|
||||
// separate for now, since it allows for multiple instantiations of
|
||||
// this class with different parameters, for different purposes. That
|
||||
// may change.
|
||||
|
||||
class ForkJoinNurseryCollectionTracer : public JSTracer
|
||||
{
|
||||
friend class ForkJoinNursery;
|
||||
|
||||
public:
|
||||
ForkJoinNurseryCollectionTracer(JSRuntime *rt, ForkJoinNursery *nursery);
|
||||
|
||||
private:
|
||||
ForkJoinNursery *const nursery_;
|
||||
};
|
||||
|
||||
// The layout for a chunk used by the ForkJoinNursery.
|
||||
|
||||
struct ForkJoinNurseryChunk
|
||||
{
|
||||
// The amount of space in the mapped nursery available to allocations
|
||||
static const size_t UsableSize = ChunkSize - sizeof(ChunkTrailer);
|
||||
|
||||
char data[UsableSize];
|
||||
ChunkTrailer trailer;
|
||||
uintptr_t start() { return uintptr_t(&data); }
|
||||
uintptr_t end() { return uintptr_t(&trailer); }
|
||||
};
|
||||
|
||||
// A GC adapter to ForkJoinShared, which is a complex class hidden
|
||||
// inside ForkJoin.cpp.
|
||||
|
||||
class ForkJoinGCShared
|
||||
{
|
||||
public:
|
||||
ForkJoinGCShared(ForkJoinShared *shared) : shared_(shared) {}
|
||||
|
||||
JSRuntime *runtime();
|
||||
JS::Zone *zone();
|
||||
|
||||
// The updatable object (the ForkJoin result array), or nullptr.
|
||||
JSObject *updatable();
|
||||
|
||||
    // allocateNurseryChunk() returns nullptr on OOM.
|
||||
ForkJoinNurseryChunk *allocateNurseryChunk();
|
||||
|
||||
// p must have been obtained through allocateNurseryChunk.
|
||||
void freeNurseryChunk(ForkJoinNurseryChunk *p);
|
||||
|
||||
// GC statistics output.
|
||||
void spewGC(const char *fmt, ...);
|
||||
|
||||
private:
|
||||
ForkJoinShared *const shared_;
|
||||
};
|
||||
|
||||
// There is one ForkJoinNursery per ForkJoin worker.
|
||||
//
|
||||
// See the comment in ForkJoinNursery.cpp about how it works.
|
||||
|
||||
class ForkJoinNursery
|
||||
{
|
||||
friend class ForkJoinNurseryCollectionTracer;
|
||||
friend class RelocationOverlay;
|
||||
|
||||
static_assert(sizeof(ForkJoinNurseryChunk) == ChunkSize,
|
||||
"ForkJoinNursery chunk size must match Chunk size.");
|
||||
public:
|
||||
ForkJoinNursery(ForkJoinContext *cx, ForkJoinGCShared *shared, Allocator *tenured);
|
||||
~ForkJoinNursery();
|
||||
|
||||
// Perform a collection within the nursery, and if that for some reason
|
||||
// cannot be done then perform an evacuating collection.
|
||||
void minorGC();
|
||||
|
||||
// Evacuate the live data from the nursery into the tenured area;
|
||||
// do not recreate the nursery.
|
||||
void evacuatingGC();
|
||||
|
||||
// Allocate an object with a number of dynamic slots. Returns an
|
||||
// object, or nullptr in one of two circumstances:
|
||||
//
|
||||
// - The nursery was full, the collector must be run, and the
|
||||
// allocation must be retried. tooLarge is set to 'false'.
|
||||
// - The number of dynamic slots requested is too large and
|
||||
// the object should be allocated in the tenured area.
|
||||
// tooLarge is set to 'true'.
|
||||
//
|
||||
// This method will never run the garbage collector.
|
||||
JSObject *allocateObject(size_t size, size_t numDynamic, bool& tooLarge);
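
    // Illustrative sketch (hypothetical caller, not from the original patch)
    // of how the tooLarge contract above is intended to be used:
    //
    //     bool tooLarge = false;
    //     JSObject *obj = nursery->allocateObject(size, numDynamic, tooLarge);
    //     if (!obj) {
    //         if (tooLarge)
    //             obj = AllocateInTenuredArea(...);   // hypothetical fallback
    //         else
    //             nursery->minorGC();                 // then retry the allocation
    //     }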
// Allocate and reallocate slot and element arrays for existing
|
||||
// objects. These will create or maintain the arrays within the
|
||||
// nursery if possible and appropriate, and otherwise will fall
|
||||
// back to allocating in the tenured area. They will return
|
||||
// nullptr only if memory is exhausted. If the reallocate methods
|
||||
// return nullptr then the old array is still live.
|
||||
//
|
||||
// These methods will never run the garbage collector.
|
||||
HeapSlot *allocateSlots(JSObject *obj, uint32_t nslots);
|
||||
HeapSlot *reallocateSlots(JSObject *obj, HeapSlot *oldSlots,
|
||||
uint32_t oldCount, uint32_t newCount);
|
||||
ObjectElements *allocateElements(JSObject *obj, uint32_t nelems);
|
||||
ObjectElements *reallocateElements(JSObject *obj, ObjectElements *oldHeader,
|
||||
uint32_t oldCount, uint32_t newCount);
|
||||
|
||||
// Free a slots array.
|
||||
void freeSlots(HeapSlot *slots);
|
||||
|
||||
// The method embedded in a ForkJoinNurseryCollectionTracer
|
||||
static void MinorGCCallback(JSTracer *trcArg, void **thingp, JSGCTraceKind kind);
|
||||
|
||||
// A method called from the JIT frame updater
|
||||
static void forwardBufferPointer(JSTracer *trc, HeapSlot **pSlotsElems);
|
||||
|
||||
// Return true iff obj is inside the current newspace.
|
||||
MOZ_ALWAYS_INLINE bool isInsideNewspace(const void *obj);
|
||||
|
||||
// Return true iff collection is ongoing and obj is inside the current fromspace.
|
||||
MOZ_ALWAYS_INLINE bool isInsideFromspace(const void *obj);
|
||||
|
||||
template <typename T>
|
||||
MOZ_ALWAYS_INLINE bool getForwardedPointer(T **ref);
|
||||
|
||||
static size_t offsetOfPosition() {
|
||||
return offsetof(ForkJoinNursery, position_);
|
||||
}
|
||||
|
||||
static size_t offsetOfCurrentEnd() {
|
||||
return offsetof(ForkJoinNursery, currentEnd_);
|
||||
}
|
||||
|
||||
private:
|
||||
// The largest slot arrays that will be allocated in the nursery.
|
||||
// On the one hand we want this limit to be large, to avoid
|
||||
// managing many hugeSlots. On the other hand, slot arrays have
|
||||
// to be copied during GC and will induce some external
|
||||
// fragmentation in the nursery at chunk boundaries.
|
||||
static const size_t MaxNurserySlots = 2048;
|
||||
|
||||
// The fixed limit on the per-worker nursery, in chunks.
|
||||
//
|
||||
    // For production runs, 16 may be good: programs that need it really
    // need it, and as allocation is lazy, programs that don't need it
    // won't suck up a lot of resources.
|
||||
//
|
||||
// For debugging runs, 1 or 2 may sometimes be good, because it
|
||||
// will more easily provoke bugs in the evacuation paths.
|
||||
static const size_t MaxNurseryChunks = 16;
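
    // Illustrative note (not from the original patch): together with the
    // roughly 1 MB chunk size this caps a worker's newspace at about 16 MB;
    // since chunk allocation is lazy, that ceiling is only reached by
    // workloads that actually need it.
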
// The inverse load factor in the per-worker nursery. Grow the nursery
|
||||
// or schedule an evacuation if more than 1/NurseryLoadFactor of the
|
||||
// current nursery size is live after minor GC.
|
||||
static const int NurseryLoadFactor = 3;
|
||||
|
||||
// Allocate an object in the nursery's newspace. Return nullptr
|
||||
    // when allocation fails (i.e., the object can't fit in the current
    // chunk and the number of chunks is at its maximum).
|
||||
void *allocate(size_t size);
|
||||
|
||||
// Allocate an external slot array and register it with this nursery.
|
||||
HeapSlot *allocateHugeSlots(size_t nslots);
|
||||
|
||||
// Reallocate an external slot array, unregister the old array and
|
||||
// register the new array. If the allocation fails then leave
|
||||
// everything unchanged.
|
||||
HeapSlot *reallocateHugeSlots(HeapSlot *oldSlots, uint32_t oldSize, uint32_t newSize);
|
||||
|
||||
// Walk the list of registered slot arrays and free them all.
|
||||
void sweepHugeSlots();
|
||||
|
||||
// Set the position/end pointers to correspond to the numbered
|
||||
// chunk.
|
||||
void setCurrentChunk(int index);
|
||||
|
||||
enum PJSCollectionOp {
|
||||
Evacuate = 1,
|
||||
Collect = 2,
|
||||
Recreate = 4
|
||||
};
|
||||
|
||||
// Misc GC internals.
|
||||
void pjsCollection(int op /* A combination of PJSCollectionOp bits */);
|
||||
void initNewspace();
|
||||
void flip();
|
||||
void forwardFromRoots(ForkJoinNurseryCollectionTracer *trc);
|
||||
void forwardFromUpdatable(ForkJoinNurseryCollectionTracer *trc);
|
||||
void forwardFromStack(ForkJoinNurseryCollectionTracer *trc);
|
||||
void forwardFromTenured(ForkJoinNurseryCollectionTracer *trc);
|
||||
void collectToFixedPoint(ForkJoinNurseryCollectionTracer *trc);
|
||||
void freeFromspace();
|
||||
void computeNurserySizeAfterGC(size_t live, const char **msg);
|
||||
|
||||
AllocKind getObjectAllocKind(JSObject *src);
|
||||
void *allocateInTospace(AllocKind thingKind);
|
||||
void *allocateInTospace(size_t nelem, size_t elemSize);
|
||||
MOZ_ALWAYS_INLINE bool shouldMoveObject(void **thingp);
|
||||
void *moveObjectToTospace(JSObject *src);
|
||||
size_t copyObjectToTospace(JSObject *dst, JSObject *src, gc::AllocKind dstKind);
|
||||
size_t copyElementsToTospace(JSObject *dst, JSObject *src, gc::AllocKind dstKind);
|
||||
size_t copySlotsToTospace(JSObject *dst, JSObject *src, gc::AllocKind dstKind);
|
||||
MOZ_ALWAYS_INLINE void insertIntoFixupList(RelocationOverlay *entry);
|
||||
|
||||
void setSlotsForwardingPointer(HeapSlot *oldSlots, HeapSlot *newSlots, uint32_t nslots);
|
||||
void setElementsForwardingPointer(ObjectElements *oldHeader, ObjectElements *newHeader,
|
||||
uint32_t nelems);
|
||||
|
||||
MOZ_ALWAYS_INLINE void traceObject(ForkJoinNurseryCollectionTracer *trc, JSObject *obj);
|
||||
MOZ_ALWAYS_INLINE void markSlots(HeapSlot *vp, uint32_t nslots);
|
||||
MOZ_ALWAYS_INLINE void markSlots(HeapSlot *vp, HeapSlot *end);
|
||||
MOZ_ALWAYS_INLINE void markSlot(HeapSlot *slotp);
|
||||
|
||||
ForkJoinContext *const cx_; // The context that owns this nursery
|
||||
Allocator *const tenured_; // Private tenured area
|
||||
ForkJoinGCShared *const shared_; // Common to all nurseries belonging to a ForkJoin instance
|
||||
JS::Zone *evacuationZone_; // During evacuating GC this is non-NULL: the Zone we
|
||||
// allocate into
|
||||
|
||||
uintptr_t currentStart_; // Start of current area in newspace
|
||||
uintptr_t currentEnd_; // End of current area in newspace (last byte + 1)
|
||||
uintptr_t position_; // Next free byte in current newspace chunk
|
||||
unsigned currentChunk_; // Index of current / highest numbered chunk in newspace
|
||||
unsigned numActiveChunks_; // Number of active chunks in newspace, not all may be allocated
|
||||
unsigned numFromspaceChunks_; // Number of active chunks in fromspace, all are allocated
|
||||
bool mustEvacuate_; // Set to true after GC when the /next/ minor GC must evacuate
|
||||
|
||||
bool isEvacuating_; // Set to true when the current minor GC is evacuating
|
||||
size_t movedSize_; // Bytes copied during the current minor GC
|
||||
RelocationOverlay *head_; // First node of relocation list
|
||||
RelocationOverlay **tail_; // Pointer to 'next_' field of last node of relocation list
|
||||
|
||||
typedef HashSet<HeapSlot *, PointerHasher<HeapSlot *, 3>, SystemAllocPolicy> HugeSlotsSet;
|
||||
|
||||
HugeSlotsSet hugeSlots[2]; // Hash sets for huge slots
|
||||
|
||||
int hugeSlotsNew; // Huge slot arrays in the newspace (index in hugeSlots)
|
||||
int hugeSlotsFrom; // Huge slot arrays in the fromspace (index in hugeSlots)
|
||||
|
||||
ForkJoinNurseryChunk *newspace[MaxNurseryChunks]; // All allocation happens here
|
||||
ForkJoinNurseryChunk *fromspace[MaxNurseryChunks]; // Meaningful during GC: the previous newspace
|
||||
};
|
||||
|
||||
} // namespace gc
|
||||
} // namespace js
|
||||
|
||||
#endif // JSGC_FJGENERATIONAL
|
||||
|
||||
#endif // gc_ForkJoinNursery_h
|
|
@ -19,6 +19,13 @@ namespace gc {
|
|||
void
|
||||
MarkPersistentRootedChains(JSTracer *trc);
|
||||
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
class ForkJoinNurseryCollectionTracer;
|
||||
|
||||
void
|
||||
MarkForkJoinStack(ForkJoinNurseryCollectionTracer *trc);
|
||||
#endif
|
||||
|
||||
class AutoCopyFreeListToArenas
|
||||
{
|
||||
JSRuntime *runtime;
|
||||
|
|
|
@ -484,6 +484,16 @@ class GCRuntime
|
|||
js::gc::StoreBuffer storeBuffer;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* ForkJoin workers enter and leave GC independently; this counter
|
||||
* tracks the number that are currently in GC.
|
||||
*
|
||||
* Technically this should be #ifdef JSGC_FJGENERATIONAL but that
|
||||
* affects the observed size of JSRuntime in problematic ways, see
|
||||
* note in vm/ThreadPool.h.
|
||||
*/
|
||||
mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> fjCollectionCounter;
|
||||
|
||||
private:
|
||||
/*
|
||||
* These options control the zealousness of the GC. The fundamental values
|
||||
|
|
|
@ -171,6 +171,16 @@ CheckMarkedThing(JSTracer *trc, T **thingp)
|
|||
JS_ASSERT(*thingp);
|
||||
|
||||
#ifdef DEBUG
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
/*
|
||||
* The code below (runtimeFromMainThread(), etc) makes assumptions
|
||||
* not valid for the ForkJoin worker threads during ForkJoin GGC,
|
||||
* so just bail.
|
||||
*/
|
||||
if (ForkJoinContext::current())
|
||||
return;
|
||||
#endif
|
||||
|
||||
/* This function uses data that's not available in the nursery. */
|
||||
if (IsInsideNursery(thing))
|
||||
return;
|
||||
|
@ -229,6 +239,16 @@ MarkInternal(JSTracer *trc, T **thingp)
|
|||
T *thing = *thingp;
|
||||
|
||||
if (!trc->callback) {
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
/*
|
||||
* This case should never be reached from PJS collections as
|
||||
* those should all be using a ForkJoinNurseryCollectionTracer
|
||||
* that carries a callback.
|
||||
*/
|
||||
JS_ASSERT(!ForkJoinContext::current());
|
||||
JS_ASSERT(!trc->runtime()->isFJMinorCollecting());
|
||||
#endif
|
||||
|
||||
/*
|
||||
* We may mark a Nursery thing outside the context of the
|
||||
* MinorCollectionTracer because of a pre-barrier. The pre-barrier is
|
||||
|
@ -357,11 +377,25 @@ IsMarked(T **thingp)
|
|||
JS_ASSERT(thingp);
|
||||
JS_ASSERT(*thingp);
|
||||
#ifdef JSGC_GENERATIONAL
|
||||
if (IsInsideNursery(*thingp)) {
|
||||
Nursery &nursery = (*thingp)->runtimeFromMainThread()->gc.nursery;
|
||||
return nursery.getForwardedPointer(thingp);
|
||||
JSRuntime* rt = (*thingp)->runtimeFromAnyThread();
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
// Must precede the case for JSGC_GENERATIONAL because IsInsideNursery()
|
||||
// will also be true for the ForkJoinNursery.
|
||||
if (rt->isFJMinorCollecting()) {
|
||||
ForkJoinContext *ctx = ForkJoinContext::current();
|
||||
ForkJoinNursery &fjNursery = ctx->fjNursery();
|
||||
if (fjNursery.isInsideFromspace(*thingp))
|
||||
return fjNursery.getForwardedPointer(thingp);
|
||||
}
|
||||
else
|
||||
#endif
|
||||
{
|
||||
if (IsInsideNursery(*thingp)) {
|
||||
Nursery &nursery = rt->gc.nursery;
|
||||
return nursery.getForwardedPointer(thingp);
|
||||
}
|
||||
}
|
||||
#endif // JSGC_GENERATIONAL
|
||||
Zone *zone = (*thingp)->tenuredZone();
|
||||
if (!zone->isCollecting() || zone->isGCFinished())
|
||||
return true;
|
||||
|
@ -383,14 +417,25 @@ IsAboutToBeFinalized(T **thingp)
|
|||
return false;
|
||||
|
||||
#ifdef JSGC_GENERATIONAL
|
||||
Nursery &nursery = rt->gc.nursery;
|
||||
JS_ASSERT_IF(!rt->isHeapMinorCollecting(), !IsInsideNursery(thing));
|
||||
if (rt->isHeapMinorCollecting()) {
|
||||
if (IsInsideNursery(thing))
|
||||
return !nursery.getForwardedPointer(thingp);
|
||||
return false;
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
if (rt->isFJMinorCollecting()) {
|
||||
ForkJoinContext *ctx = ForkJoinContext::current();
|
||||
ForkJoinNursery &fjNursery = ctx->fjNursery();
|
||||
if (fjNursery.isInsideFromspace(thing))
|
||||
return !fjNursery.getForwardedPointer(thingp);
|
||||
}
|
||||
else
|
||||
#endif
|
||||
{
|
||||
Nursery &nursery = rt->gc.nursery;
|
||||
JS_ASSERT_IF(!rt->isHeapMinorCollecting(), !IsInsideNursery(thing));
|
||||
if (rt->isHeapMinorCollecting()) {
|
||||
if (IsInsideNursery(thing))
|
||||
return !nursery.getForwardedPointer(thingp);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
#endif // JSGC_GENERATIONAL
|
||||
|
||||
if (!thing->tenuredZone()->isGCSweeping())
|
||||
return false;
|
||||
|
@ -413,9 +458,20 @@ UpdateIfRelocated(JSRuntime *rt, T **thingp)
|
|||
{
|
||||
JS_ASSERT(thingp);
|
||||
#ifdef JSGC_GENERATIONAL
|
||||
if (*thingp && rt->isHeapMinorCollecting() && IsInsideNursery(*thingp))
|
||||
rt->gc.nursery.getForwardedPointer(thingp);
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
if (*thingp && rt->isFJMinorCollecting()) {
|
||||
ForkJoinContext *ctx = ForkJoinContext::current();
|
||||
ForkJoinNursery &fjNursery = ctx->fjNursery();
|
||||
if (fjNursery.isInsideFromspace(*thingp))
|
||||
fjNursery.getForwardedPointer(thingp);
|
||||
}
|
||||
else
|
||||
#endif
|
||||
{
|
||||
if (*thingp && rt->isHeapMinorCollecting() && IsInsideNursery(*thingp))
|
||||
rt->gc.nursery.getForwardedPointer(thingp);
|
||||
}
|
||||
#endif // JSGC_GENERATIONAL
|
||||
return *thingp;
|
||||
}
|
||||
|
||||
|
|
|
@ -17,52 +17,41 @@
|
|||
namespace js {
|
||||
namespace gc {
|
||||
|
||||
/*
|
||||
* This structure overlays a Cell in the Nursery and re-purposes its memory
|
||||
* for managing the Nursery collection process.
|
||||
*/
|
||||
class RelocationOverlay
|
||||
/* static */
|
||||
inline RelocationOverlay *
|
||||
RelocationOverlay::fromCell(Cell *cell)
|
||||
{
|
||||
friend class MinorCollectionTracer;
|
||||
JS_ASSERT(!cell->isTenured());
|
||||
return reinterpret_cast<RelocationOverlay *>(cell);
|
||||
}
|
||||
|
||||
/* The low bit is set so this should never equal a normal pointer. */
|
||||
static const uintptr_t Relocated = uintptr_t(0xbad0bad1);
|
||||
inline bool
|
||||
RelocationOverlay::isForwarded() const
|
||||
{
|
||||
return magic_ == Relocated;
|
||||
}
|
||||
|
||||
/* Set to Relocated when moved. */
|
||||
uintptr_t magic_;
|
||||
inline Cell *
|
||||
RelocationOverlay::forwardingAddress() const
|
||||
{
|
||||
JS_ASSERT(isForwarded());
|
||||
return newLocation_;
|
||||
}
|
||||
|
||||
/* The location |this| was moved to. */
|
||||
Cell *newLocation_;
|
||||
inline void
|
||||
RelocationOverlay::forwardTo(Cell *cell)
|
||||
{
|
||||
JS_ASSERT(!isForwarded());
|
||||
magic_ = Relocated;
|
||||
newLocation_ = cell;
|
||||
next_ = nullptr;
|
||||
}
|
||||
|
||||
/* A list entry to track all relocated things. */
|
||||
RelocationOverlay *next_;
|
||||
|
||||
public:
|
||||
static RelocationOverlay *fromCell(Cell *cell) {
|
||||
JS_ASSERT(!cell->isTenured());
|
||||
return reinterpret_cast<RelocationOverlay *>(cell);
|
||||
}
|
||||
|
||||
bool isForwarded() const {
|
||||
return magic_ == Relocated;
|
||||
}
|
||||
|
||||
Cell *forwardingAddress() const {
|
||||
JS_ASSERT(isForwarded());
|
||||
return newLocation_;
|
||||
}
|
||||
|
||||
void forwardTo(Cell *cell) {
|
||||
JS_ASSERT(!isForwarded());
|
||||
magic_ = Relocated;
|
||||
newLocation_ = cell;
|
||||
next_ = nullptr;
|
||||
}
|
||||
|
||||
RelocationOverlay *next() const {
|
||||
return next_;
|
||||
}
|
||||
};
|
||||
inline RelocationOverlay *
|
||||
RelocationOverlay::next() const
|
||||
{
|
||||
return next_;
|
||||
}
|
||||
|
||||
} /* namespace gc */
|
||||
} /* namespace js */
|
||||
|
|
|
@ -914,6 +914,10 @@ js::Nursery::collect(JSRuntime *rt, JS::gcreason::Reason reason, TypeObjectList
|
|||
#endif
|
||||
}
|
||||
|
||||
#undef TIME_START
|
||||
#undef TIME_END
|
||||
#undef TIME_TOTAL
|
||||
|
||||
void
|
||||
js::Nursery::freeHugeSlots()
|
||||
{
|
||||
|
|
|
@ -36,6 +36,7 @@ namespace gc {
|
|||
class Cell;
|
||||
class Collector;
|
||||
class MinorCollectionTracer;
|
||||
class ForkJoinNursery;
|
||||
} /* namespace gc */
|
||||
|
||||
namespace types {
|
||||
|
@ -49,6 +50,39 @@ class ICStubCompiler;
|
|||
class BaselineCompiler;
|
||||
}
|
||||
|
||||
namespace gc {
|
||||
|
||||
/*
|
||||
* This structure overlays a Cell in the Nursery and re-purposes its memory
|
||||
* for managing the Nursery collection process.
|
||||
*/
|
||||
class RelocationOverlay
|
||||
{
|
||||
friend class MinorCollectionTracer;
|
||||
friend class ForkJoinNursery;
|
||||
|
||||
/* The low bit is set so this should never equal a normal pointer. */
|
||||
static const uintptr_t Relocated = uintptr_t(0xbad0bad1);
|
||||
|
||||
/* Set to Relocated when moved. */
|
||||
uintptr_t magic_;
|
||||
|
||||
/* The location |this| was moved to. */
|
||||
Cell *newLocation_;
|
||||
|
||||
/* A list entry to track all relocated things. */
|
||||
RelocationOverlay *next_;
|
||||
|
||||
public:
|
||||
static inline RelocationOverlay *fromCell(Cell *cell);
|
||||
inline bool isForwarded() const;
|
||||
inline Cell *forwardingAddress() const;
|
||||
inline void forwardTo(Cell *cell);
|
||||
inline RelocationOverlay *next() const;
|
||||
};
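
/*
 * Illustrative sketch (not from the original patch): the nurseries use this
 * overlay roughly as follows when moving a cell.
 *
 *     RelocationOverlay *overlay = RelocationOverlay::fromCell(src);
 *     overlay->forwardTo(dst);        // src now records its new location
 *     ...
 *     if (overlay->isForwarded())
 *         ptr = overlay->forwardingAddress();
 */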
} /* namespace gc */
|
||||
|
||||
class Nursery
|
||||
{
|
||||
public:
|
||||
|
@ -215,7 +249,7 @@ class Nursery
|
|||
MOZ_ALWAYS_INLINE void initChunk(int chunkno) {
|
||||
NurseryChunkLayout &c = chunk(chunkno);
|
||||
c.trailer.storeBuffer = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr();
|
||||
c.trailer.location = gc::ChunkLocationNursery;
|
||||
c.trailer.location = gc::ChunkLocationBitNursery;
|
||||
c.trailer.runtime = runtime();
|
||||
}
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
#include "builtin/MapObject.h"
|
||||
#include "frontend/BytecodeCompiler.h"
|
||||
#include "gc/ForkJoinNursery.h"
|
||||
#include "gc/GCInternals.h"
|
||||
#include "gc/Marking.h"
|
||||
#ifdef JS_ION
|
||||
|
@ -102,30 +103,38 @@ MarkExactStackRootList(JSTracer *trc, Source *s, const char *name)
|
|||
|
||||
template <class T, void (MarkFunc)(JSTracer *trc, T *ref, const char *name)>
|
||||
static inline void
|
||||
MarkExactStackRootsForType(JSTracer *trc, const char *name = nullptr)
|
||||
MarkExactStackRootsForType(JSRuntime* rt, JSTracer *trc, const char *name = nullptr)
|
||||
{
|
||||
for (ContextIter cx(trc->runtime()); !cx.done(); cx.next())
|
||||
for (ContextIter cx(rt); !cx.done(); cx.next())
|
||||
MarkExactStackRootList<T, MarkFunc>(trc, cx.get(), name);
|
||||
MarkExactStackRootList<T, MarkFunc>(trc, &trc->runtime()->mainThread, name);
|
||||
MarkExactStackRootList<T, MarkFunc>(trc, &rt->mainThread, name);
|
||||
}
|
||||
|
||||
static void
|
||||
MarkExactStackRoots(JSTracer *trc)
|
||||
template <class T, void (MarkFunc)(JSTracer *trc, T *ref, const char *name)>
|
||||
static inline void
|
||||
MarkExactStackRootsForType(ThreadSafeContext* cx, JSTracer *trc, const char *name = nullptr)
|
||||
{
|
||||
MarkExactStackRootsForType<JSObject *, MarkObjectRoot>(trc, "exact-object");
|
||||
MarkExactStackRootsForType<Shape *, MarkShapeRoot>(trc, "exact-shape");
|
||||
MarkExactStackRootsForType<BaseShape *, MarkBaseShapeRoot>(trc, "exact-baseshape");
|
||||
MarkExactStackRootsForType<types::TypeObject *, MarkTypeObjectRoot>(trc, "exact-typeobject");
|
||||
MarkExactStackRootsForType<JSString *, MarkStringRoot>(trc, "exact-string");
|
||||
MarkExactStackRootsForType<jit::JitCode *, MarkJitCodeRoot>(trc, "exact-jitcode");
|
||||
MarkExactStackRootsForType<JSScript *, MarkScriptRoot>(trc, "exact-script");
|
||||
MarkExactStackRootsForType<LazyScript *, MarkLazyScriptRoot>(trc, "exact-lazy-script");
|
||||
MarkExactStackRootsForType<jsid, MarkIdRoot>(trc, "exact-id");
|
||||
MarkExactStackRootsForType<Value, MarkValueRoot>(trc, "exact-value");
|
||||
MarkExactStackRootsForType<types::Type, MarkTypeRoot>(trc, "exact-type");
|
||||
MarkExactStackRootsForType<Bindings, MarkBindingsRoot>(trc);
|
||||
MarkExactStackRootsForType<JSPropertyDescriptor, MarkPropertyDescriptorRoot>(trc);
|
||||
MarkExactStackRootsForType<PropDesc, MarkPropDescRoot>(trc);
|
||||
MarkExactStackRootList<T, MarkFunc>(trc, cx->perThreadData, name);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
static void
|
||||
MarkExactStackRoots(T context, JSTracer *trc)
|
||||
{
|
||||
MarkExactStackRootsForType<JSObject *, MarkObjectRoot>(context, trc, "exact-object");
|
||||
MarkExactStackRootsForType<Shape *, MarkShapeRoot>(context, trc, "exact-shape");
|
||||
MarkExactStackRootsForType<BaseShape *, MarkBaseShapeRoot>(context, trc, "exact-baseshape");
|
||||
MarkExactStackRootsForType<types::TypeObject *, MarkTypeObjectRoot>(context, trc, "exact-typeobject");
|
||||
MarkExactStackRootsForType<JSString *, MarkStringRoot>(context, trc, "exact-string");
|
||||
MarkExactStackRootsForType<jit::JitCode *, MarkJitCodeRoot>(context, trc, "exact-jitcode");
|
||||
MarkExactStackRootsForType<JSScript *, MarkScriptRoot>(context, trc, "exact-script");
|
||||
MarkExactStackRootsForType<LazyScript *, MarkLazyScriptRoot>(context, trc, "exact-lazy-script");
|
||||
MarkExactStackRootsForType<jsid, MarkIdRoot>(context, trc, "exact-id");
|
||||
MarkExactStackRootsForType<Value, MarkValueRoot>(context, trc, "exact-value");
|
||||
MarkExactStackRootsForType<types::Type, MarkTypeRoot>(context, trc, "exact-type");
|
||||
MarkExactStackRootsForType<Bindings, MarkBindingsRoot>(context, trc);
|
||||
MarkExactStackRootsForType<JSPropertyDescriptor, MarkPropertyDescriptorRoot>(context, trc);
|
||||
MarkExactStackRootsForType<PropDesc, MarkPropDescRoot>(context, trc);
|
||||
}
|
||||
#endif /* JSGC_USE_EXACT_ROOTING */
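The MarkExactStackRootsForType/MarkExactStackRoots pair above is a per-type dispatch: one template instantiation per (rooted type, mark function) pair, so covering a new rooted type is a single extra line in MarkExactStackRoots. A reduced, self-contained sketch of that shape follows; the names here are illustrative, not the real SpiderMonkey signatures.

#include <vector>

struct Tracer {};                              // stand-in for JSTracer

template <class T>
struct RootList { std::vector<T *> roots; };   // stand-in for a per-context root list

// One instantiation per (type, mark function) pair.
template <class T, void (*MarkFunc)(Tracer *, T *, const char *)>
static void MarkRootListForType(Tracer *trc, RootList<T> &list, const char *name) {
    for (T *root : list.roots)
        MarkFunc(trc, root, name);
}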
|
||||
|
||||
|
@@ -583,10 +592,8 @@ AutoGCRooter::trace(JSTracer *trc)
/* static */ void
AutoGCRooter::traceAll(JSTracer *trc)
{
    for (ContextIter cx(trc->runtime()); !cx.done(); cx.next()) {
        for (AutoGCRooter *gcr = cx->autoGCRooters; gcr; gcr = gcr->down)
            gcr->trace(trc);
    }
    for (ContextIter cx(trc->runtime()); !cx.done(); cx.next())
        traceAllInContext(&*cx, trc);
}

/* static */ void
@@ -685,6 +692,27 @@ js::gc::MarkPersistentRootedChains(JSTracer *trc)
                           "PersistentRooted<Value>");
}

#ifdef JSGC_FJGENERATIONAL
void
js::gc::MarkForkJoinStack(ForkJoinNurseryCollectionTracer *trc)
{
    ForkJoinContext *cx = ForkJoinContext::current();
    PerThreadData *ptd = cx->perThreadData;

    AutoGCRooter::traceAllInContext(cx, trc);
    MarkExactStackRoots<ThreadSafeContext*>(cx, trc);
    jit::MarkJitActivations(ptd, trc);

#ifdef DEBUG
    // There should be only JIT activations on the stack
    for (ActivationIterator iter(ptd); !iter.done(); ++iter) {
        Activation *act = iter.activation();
        JS_ASSERT(act->isJit());
    }
#endif
}
#endif // JSGC_FJGENERATIONAL

void
js::gc::GCRuntime::markRuntime(JSTracer *trc, bool useSavedRoots)
{
@ -704,7 +732,7 @@ js::gc::GCRuntime::markRuntime(JSTracer *trc, bool useSavedRoots)
|
|||
|
||||
if (!rt->isBeingDestroyed()) {
|
||||
#ifdef JSGC_USE_EXACT_ROOTING
|
||||
MarkExactStackRoots(trc);
|
||||
MarkExactStackRoots<JSRuntime*>(rt, trc);
|
||||
#else
|
||||
markConservativeStackRoots(trc, useSavedRoots);
|
||||
#endif
|
||||
|
@ -786,10 +814,10 @@ js::gc::GCRuntime::markRuntime(JSTracer *trc, bool useSavedRoots)
|
|||
c->debugScopes->mark(trc);
|
||||
}
|
||||
|
||||
MarkInterpreterActivations(rt, trc);
|
||||
MarkInterpreterActivations(&rt->mainThread, trc);
|
||||
|
||||
#ifdef JS_ION
|
||||
jit::MarkJitActivations(rt, trc);
|
||||
jit::MarkJitActivations(&rt->mainThread, trc);
|
||||
#endif
|
||||
|
||||
if (!isHeapMinorCollecting()) {
|
||||
|
|
|
@ -1149,7 +1149,8 @@ CodeGenerator::visitLambdaPar(LLambdaPar *lir)
|
|||
|
||||
JS_ASSERT(scopeChainReg != resultReg);
|
||||
|
||||
emitAllocateGCThingPar(lir, resultReg, cxReg, tempReg1, tempReg2, info.fun);
|
||||
if (!emitAllocateGCThingPar(lir, resultReg, cxReg, tempReg1, tempReg2, info.fun))
|
||||
return false;
|
||||
emitLambdaInit(resultReg, scopeChainReg, info);
|
||||
return true;
|
||||
}
|
||||
|
@ -3898,10 +3899,13 @@ CodeGenerator::visitNewCallObjectPar(LNewCallObjectPar *lir)
|
|||
Register tempReg2 = ToRegister(lir->getTemp1());
|
||||
JSObject *templateObj = lir->mir()->templateObj();
|
||||
|
||||
emitAllocateGCThingPar(lir, resultReg, cxReg, tempReg1, tempReg2, templateObj);
|
||||
return true;
|
||||
return emitAllocateGCThingPar(lir, resultReg, cxReg, tempReg1, tempReg2, templateObj);
|
||||
}
|
||||
|
||||
typedef JSObject *(*ExtendArrayParFn)(ForkJoinContext*, JSObject*, uint32_t);
|
||||
static const VMFunction ExtendArrayParInfo =
|
||||
FunctionInfo<ExtendArrayParFn>(ExtendArrayPar);
|
||||
|
||||
bool
|
||||
CodeGenerator::visitNewDenseArrayPar(LNewDenseArrayPar *lir)
|
||||
{
|
||||
|
@ -3912,26 +3916,23 @@ CodeGenerator::visitNewDenseArrayPar(LNewDenseArrayPar *lir)
|
|||
Register tempReg2 = ToRegister(lir->getTemp2());
|
||||
JSObject *templateObj = lir->mir()->templateObject();
|
||||
|
||||
// Allocate the array into tempReg2. Don't use resultReg because it
|
||||
// may alias cxReg etc.
|
||||
emitAllocateGCThingPar(lir, tempReg2, cxReg, tempReg0, tempReg1, templateObj);
|
||||
masm.push(lengthReg);
|
||||
if (!emitAllocateGCThingPar(lir, tempReg2, cxReg, tempReg0, tempReg1, templateObj))
|
||||
return false;
|
||||
masm.pop(lengthReg);
|
||||
|
||||
// Invoke a C helper to allocate the elements. For convenience,
|
||||
// this helper also returns the array back to us, or nullptr, which
|
||||
// obviates the need to preserve the register across the call. In
|
||||
// reality, we should probably just have the C helper also
|
||||
// *allocate* the array, but that would require that it initialize
|
||||
// the various fields of the object, and I didn't want to
|
||||
// duplicate the code in initGCThing() that already does such an
|
||||
// admirable job.
|
||||
masm.setupUnalignedABICall(3, tempReg0);
|
||||
masm.passABIArg(cxReg);
|
||||
masm.passABIArg(tempReg2);
|
||||
masm.passABIArg(lengthReg);
|
||||
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ExtendArrayPar));
|
||||
// Invoke a C helper to allocate the elements. The helper returns
|
||||
// nullptr on allocation error or the array object.
|
||||
|
||||
saveLive(lir);
|
||||
pushArg(lengthReg);
|
||||
pushArg(tempReg2);
|
||||
if (!callVM(ExtendArrayParInfo, lir))
|
||||
return false;
|
||||
storeResultTo(ToRegister(lir->output()));
|
||||
restoreLive(lir);
|
||||
|
||||
Register resultReg = ToRegister(lir->output());
|
||||
JS_ASSERT(resultReg == ReturnReg);
|
||||
OutOfLineAbortPar *bail = oolAbortPar(ParallelBailoutOutOfMemory, lir);
|
||||
if (!bail)
|
||||
return false;
|
||||
|
@ -3976,10 +3977,10 @@ CodeGenerator::visitNewPar(LNewPar *lir)
|
|||
Register tempReg1 = ToRegister(lir->getTemp0());
|
||||
Register tempReg2 = ToRegister(lir->getTemp1());
|
||||
JSObject *templateObject = lir->mir()->templateObject();
|
||||
emitAllocateGCThingPar(lir, objReg, cxReg, tempReg1, tempReg2, templateObject);
|
||||
return true;
|
||||
return emitAllocateGCThingPar(lir, objReg, cxReg, tempReg1, tempReg2, templateObject);
|
||||
}
|
||||
|
||||
#ifndef JSGC_FJGENERATIONAL
|
||||
class OutOfLineNewGCThingPar : public OutOfLineCodeBase<CodeGenerator>
|
||||
{
|
||||
public:
|
||||
|
@ -3997,15 +3998,27 @@ public:
|
|||
return codegen->visitOutOfLineNewGCThingPar(this);
|
||||
}
|
||||
};
|
||||
#endif // JSGC_FJGENERATIONAL
|
||||
|
||||
typedef JSObject *(*NewGCThingParFn)(ForkJoinContext *, js::gc::AllocKind allocKind);
|
||||
static const VMFunction NewGCThingParInfo =
|
||||
FunctionInfo<NewGCThingParFn>(NewGCThingPar);
|
||||
|
||||
bool
|
||||
CodeGenerator::emitAllocateGCThingPar(LInstruction *lir, Register objReg, Register cxReg,
|
||||
Register tempReg1, Register tempReg2, JSObject *templateObj)
|
||||
{
|
||||
gc::AllocKind allocKind = templateObj->tenuredGetAllocKind();
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
OutOfLineCode *ool = oolCallVM(NewGCThingParInfo, lir,
|
||||
(ArgList(), Imm32(allocKind)), StoreRegisterTo(objReg));
|
||||
if (!ool)
|
||||
return false;
|
||||
#else
|
||||
OutOfLineNewGCThingPar *ool = new(alloc()) OutOfLineNewGCThingPar(lir, allocKind, objReg, cxReg);
|
||||
if (!ool || !addOutOfLineCode(ool))
|
||||
return false;
|
||||
#endif
|
||||
|
||||
masm.newGCThingPar(objReg, cxReg, tempReg1, tempReg2, templateObj, ool->entry());
|
||||
masm.bind(ool->rejoin());
|
||||
|
@ -4013,6 +4026,7 @@ CodeGenerator::emitAllocateGCThingPar(LInstruction *lir, Register objReg, Regist
|
|||
return true;
|
||||
}
|
||||
|
||||
#ifndef JSGC_FJGENERATIONAL
|
||||
bool
|
||||
CodeGenerator::visitOutOfLineNewGCThingPar(OutOfLineNewGCThingPar *ool)
|
||||
{
|
||||
|
@ -4038,6 +4052,7 @@ CodeGenerator::visitOutOfLineNewGCThingPar(OutOfLineNewGCThingPar *ool)
|
|||
masm.jump(ool->rejoin());
|
||||
return true;
|
||||
}
|
||||
#endif // JSGC_FJGENERATIONAL
|
||||
|
||||
bool
|
||||
CodeGenerator::visitAbortPar(LAbortPar *lir)
|
||||
|
@ -6508,7 +6523,7 @@ static const VMFunctionsModal InitRestParameterInfo = VMFunctionsModal(
|
|||
bool
|
||||
CodeGenerator::emitRest(LInstruction *lir, Register array, Register numActuals,
|
||||
Register temp0, Register temp1, unsigned numFormals,
|
||||
JSObject *templateObject)
|
||||
JSObject *templateObject, bool saveAndRestore, Register resultreg)
|
||||
{
|
||||
// Compute actuals() + numFormals.
|
||||
size_t actualsOffset = frameSize() + IonJSFrameLayout::offsetOfActualArgs();
|
||||
|
@ -6527,12 +6542,22 @@ CodeGenerator::emitRest(LInstruction *lir, Register array, Register numActuals,
|
|||
}
|
||||
masm.bind(&joinLength);
|
||||
|
||||
if (saveAndRestore)
|
||||
saveLive(lir);
|
||||
|
||||
pushArg(array);
|
||||
pushArg(ImmGCPtr(templateObject));
|
||||
pushArg(temp1);
|
||||
pushArg(temp0);
|
||||
|
||||
return callVM(InitRestParameterInfo, lir);
|
||||
bool result = callVM(InitRestParameterInfo, lir);
|
||||
|
||||
if (saveAndRestore) {
|
||||
storeResultTo(resultreg);
|
||||
restoreLive(lir);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
bool
|
||||
|
@ -6554,9 +6579,12 @@ CodeGenerator::visitRest(LRest *lir)
|
|||
}
|
||||
masm.bind(&joinAlloc);
|
||||
|
||||
return emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject);
|
||||
return emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject, false, ToRegister(lir->output()));
|
||||
}
|
||||
|
||||
// LRestPar cannot derive from LCallInstructionHelper because emitAllocateGCThingPar may
|
||||
// itself contain a VM call. Thus there's some manual work here and in emitRest().
|
||||
|
||||
bool
|
||||
CodeGenerator::visitRestPar(LRestPar *lir)
|
||||
{
|
||||
|
@ -6568,10 +6596,12 @@ CodeGenerator::visitRestPar(LRestPar *lir)
|
|||
unsigned numFormals = lir->mir()->numFormals();
|
||||
JSObject *templateObject = lir->mir()->templateObject();
|
||||
|
||||
masm.push(numActuals);
|
||||
if (!emitAllocateGCThingPar(lir, temp2, cx, temp0, temp1, templateObject))
|
||||
return false;
|
||||
masm.pop(numActuals);
|
||||
|
||||
return emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject);
|
||||
return emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject, true, ToRegister(lir->output()));
|
||||
}
|
||||
|
||||
bool
|
||||
|
|
|
@ -269,7 +269,7 @@ class CodeGenerator : public CodeGeneratorSpecific
|
|||
bool visitRunOncePrologue(LRunOncePrologue *lir);
|
||||
bool emitRest(LInstruction *lir, Register array, Register numActuals,
|
||||
Register temp0, Register temp1, unsigned numFormals,
|
||||
JSObject *templateObject);
|
||||
JSObject *templateObject, bool saveAndRestore, Register resultreg);
|
||||
bool visitRest(LRest *lir);
|
||||
bool visitRestPar(LRestPar *lir);
|
||||
bool visitCallSetProperty(LCallSetProperty *ins);
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include "jsobj.h"
|
||||
#include "jsscript.h"
|
||||
|
||||
#include "gc/ForkJoinNursery.h"
|
||||
#include "gc/Marking.h"
|
||||
#include "jit/BaselineDebugModeOSR.h"
|
||||
#include "jit/BaselineFrame.h"
|
||||
|
@ -867,10 +868,8 @@ MarkIonJSFrame(JSTracer *trc, const JitFrameIterator &frame)
|
|||
// longer reachable through the callee token (JSFunction/JSScript->ion
|
||||
// is now nullptr or recompiled). Manually trace it here.
|
||||
IonScript::Trace(trc, ionScript);
|
||||
} else if (CalleeTokenIsFunction(layout->calleeToken())) {
|
||||
ionScript = CalleeTokenToFunction(layout->calleeToken())->nonLazyScript()->ionScript();
|
||||
} else {
|
||||
ionScript = CalleeTokenToScript(layout->calleeToken())->ionScript();
|
||||
ionScript = frame.ionScriptFromCalleeToken();
|
||||
}
|
||||
|
||||
if (CalleeTokenIsFunction(layout->calleeToken()))
|
||||
|
@ -937,10 +936,8 @@ UpdateIonJSFrameForMinorGC(JSTracer *trc, const JitFrameIterator &frame)
|
|||
// This frame has been invalidated, meaning that its IonScript is no
|
||||
// longer reachable through the callee token (JSFunction/JSScript->ion
|
||||
// is now nullptr or recompiled).
|
||||
} else if (CalleeTokenIsFunction(layout->calleeToken())) {
|
||||
ionScript = CalleeTokenToFunction(layout->calleeToken())->nonLazyScript()->ionScript();
|
||||
} else {
|
||||
ionScript = CalleeTokenToScript(layout->calleeToken())->ionScript();
|
||||
ionScript = frame.ionScriptFromCalleeToken();
|
||||
}
|
||||
|
||||
const SafepointIndex *si = ionScript->getSafepointIndex(frame.returnAddressToFp());
|
||||
|
@ -950,8 +947,16 @@ UpdateIonJSFrameForMinorGC(JSTracer *trc, const JitFrameIterator &frame)
|
|||
uintptr_t *spill = frame.spillBase();
|
||||
for (GeneralRegisterBackwardIterator iter(safepoint.allGprSpills()); iter.more(); iter++) {
|
||||
--spill;
|
||||
if (slotsRegs.has(*iter))
|
||||
if (slotsRegs.has(*iter)) {
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
if (trc->callback == gc::ForkJoinNursery::MinorGCCallback) {
|
||||
gc::ForkJoinNursery::forwardBufferPointer(trc,
|
||||
reinterpret_cast<HeapSlot **>(spill));
|
||||
continue;
|
||||
}
|
||||
#endif
|
||||
trc->runtime()->gc.nursery.forwardBufferPointer(reinterpret_cast<HeapSlot **>(spill));
|
||||
}
|
||||
}
|
||||
|
||||
// Skip to the right place in the safepoint
|
||||
|
@ -965,6 +970,12 @@ UpdateIonJSFrameForMinorGC(JSTracer *trc, const JitFrameIterator &frame)
|
|||
|
||||
while (safepoint.getSlotsOrElementsSlot(&slot)) {
|
||||
HeapSlot **slots = reinterpret_cast<HeapSlot **>(layout->slotRef(slot));
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
if (trc->callback == gc::ForkJoinNursery::MinorGCCallback) {
|
||||
gc::ForkJoinNursery::forwardBufferPointer(trc, slots);
|
||||
continue;
|
||||
}
|
||||
#endif
|
||||
trc->runtime()->gc.nursery.forwardBufferPointer(slots);
|
||||
}
|
||||
}
|
||||
|
@ -1226,9 +1237,9 @@ MarkJitActivation(JSTracer *trc, const JitActivationIterator &activations)
|
|||
}
|
||||
|
||||
void
|
||||
MarkJitActivations(JSRuntime *rt, JSTracer *trc)
|
||||
MarkJitActivations(PerThreadData *ptd, JSTracer *trc)
|
||||
{
|
||||
for (JitActivationIterator activations(rt); !activations.done(); ++activations)
|
||||
for (JitActivationIterator activations(ptd); !activations.done(); ++activations)
|
||||
MarkJitActivation(trc, activations);
|
||||
}
|
||||
|
||||
|
@ -1256,6 +1267,22 @@ UpdateJitActivationsForMinorGC(JSRuntime *rt, JSTracer *trc)
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
UpdateJitActivationsForMinorGC(PerThreadData *ptd, JSTracer *trc)
|
||||
{
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
JS_ASSERT(trc->runtime()->isHeapMinorCollecting() || trc->runtime()->isFJMinorCollecting());
|
||||
#else
|
||||
JS_ASSERT(trc->runtime()->isHeapMinorCollecting());
|
||||
#endif
|
||||
for (JitActivationIterator activations(ptd); !activations.done(); ++activations) {
|
||||
for (JitFrameIterator frames(activations); !frames.done(); ++frames) {
|
||||
if (frames.type() == JitFrame_IonJS)
|
||||
UpdateIonJSFrameForMinorGC(trc, frames);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
void
|
||||
|
@ -1650,6 +1677,15 @@ JitFrameIterator::ionScript() const
|
|||
IonScript *ionScript = nullptr;
|
||||
if (checkInvalidation(&ionScript))
|
||||
return ionScript;
|
||||
return ionScriptFromCalleeToken();
|
||||
}
|
||||
|
||||
IonScript *
|
||||
JitFrameIterator::ionScriptFromCalleeToken() const
|
||||
{
|
||||
JS_ASSERT(type() == JitFrame_IonJS);
|
||||
JS_ASSERT(!checkInvalidation());
|
||||
|
||||
switch (GetCalleeTokenTag(calleeToken())) {
|
||||
case CalleeToken_Function:
|
||||
case CalleeToken_Script:
|
||||
|
|
|
@ -264,7 +264,7 @@ void HandleParallelFailure(ResumeFromException *rfe);
|
|||
|
||||
void EnsureExitFrame(IonCommonFrameLayout *frame);
|
||||
|
||||
void MarkJitActivations(JSRuntime *rt, JSTracer *trc);
|
||||
void MarkJitActivations(PerThreadData *ptd, JSTracer *trc);
|
||||
void MarkIonCompilerRoots(JSTracer *trc);
|
||||
|
||||
JSCompartment *
|
||||
|
@ -272,6 +272,7 @@ TopmostIonActivationCompartment(JSRuntime *rt);
|
|||
|
||||
#ifdef JSGC_GENERATIONAL
|
||||
void UpdateJitActivationsForMinorGC(JSRuntime *rt, JSTracer *trc);
|
||||
void UpdateJitActivationsForMinorGC(PerThreadData *ptd, JSTracer *trc);
|
||||
#endif
|
||||
|
||||
static inline uint32_t
|
||||
|
|
|
@@ -634,11 +634,54 @@ MacroAssembler::newGCFatInlineString(Register result, Register temp, Label *fail
void
MacroAssembler::newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                              gc::AllocKind allocKind, Label *fail)
{
#ifdef JSGC_FJGENERATIONAL
    if (IsNurseryAllocable(allocKind))
        return newGCNurseryThingPar(result, cx, tempReg1, tempReg2, allocKind, fail);
#endif
    return newGCTenuredThingPar(result, cx, tempReg1, tempReg2, allocKind, fail);
}

#ifdef JSGC_FJGENERATIONAL
void
MacroAssembler::newGCNurseryThingPar(Register result, Register cx,
                                     Register tempReg1, Register tempReg2,
                                     gc::AllocKind allocKind, Label *fail)
{
    JS_ASSERT(IsNurseryAllocable(allocKind));

    uint32_t thingSize = uint32_t(gc::Arena::thingSize(allocKind));

    // Correctness depends on thingSize being smaller than a chunk
    // (not a problem) and the last chunk of the nursery not being
    // located at the very top of the address space. The regular
    // Nursery makes the same assumption, see nurseryAllocate() above.

    // The ForkJoinNursery is a member variable of the ForkJoinContext.
    size_t offsetOfPosition =
        ForkJoinContext::offsetOfFJNursery() + gc::ForkJoinNursery::offsetOfPosition();
    size_t offsetOfEnd =
        ForkJoinContext::offsetOfFJNursery() + gc::ForkJoinNursery::offsetOfCurrentEnd();
    loadPtr(Address(cx, offsetOfPosition), result);
    loadPtr(Address(cx, offsetOfEnd), tempReg2);
    computeEffectiveAddress(Address(result, thingSize), tempReg1);
    branchPtr(Assembler::Below, tempReg2, tempReg1, fail);
    storePtr(tempReg1, Address(cx, offsetOfPosition));
}
#endif
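For reference, the branch sequence emitted above is an inline bump-pointer allocation against the ForkJoinNursery's position/end pair. Below is a minimal C++ sketch of the same check, with hypothetical field and method names; it is not the code the macro assembler produces, only the logic it encodes.

#include <cstddef>
#include <cstdint>

// Illustrative stand-in for the nursery's allocation cursor.
struct BumpCursor {
    uintptr_t position;    // next free byte in the current nursery chunk
    uintptr_t currentEnd;  // first byte past the usable space

    // Returns the new thing's address, or nullptr when the fast path fails
    // and the caller must take the out-of-line (VM call) slow path.
    void *tryBumpAllocate(size_t thingSize) {
        uintptr_t thing = position;
        uintptr_t newPosition = thing + thingSize;  // computeEffectiveAddress
        if (currentEnd < newPosition)               // branchPtr(Below, end, new, fail)
            return nullptr;
        position = newPosition;                     // storePtr back to the context
        return reinterpret_cast<void *>(thing);
    }
};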
|
||||
void
|
||||
MacroAssembler::newGCTenuredThingPar(Register result, Register cx,
|
||||
Register tempReg1, Register tempReg2,
|
||||
gc::AllocKind allocKind, Label *fail)
|
||||
{
|
||||
// Similar to ::newGCThing(), except that it allocates from a custom
|
||||
// Allocator in the ForkJoinContext*, rather than being hardcoded to the
|
||||
// compartment allocator. This requires two temporary registers.
|
||||
//
|
||||
// When the ForkJoin generational collector is enabled this is only used
|
||||
// for those object types that cannot be allocated in the ForkJoinNursery.
|
||||
//
|
||||
// Subtle: I wanted to reuse `result` for one of the temporaries, but the
|
||||
// register allocator was assigning it to the same register as `cx`.
|
||||
// Then we overwrite that register which messed up the OOL code.
|
||||
|
@ -693,14 +736,14 @@ void
|
|||
MacroAssembler::newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
|
||||
Label *fail)
|
||||
{
|
||||
newGCThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_STRING, fail);
|
||||
newGCTenuredThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_STRING, fail);
|
||||
}
|
||||
|
||||
void
|
||||
MacroAssembler::newGCFatInlineStringPar(Register result, Register cx, Register tempReg1,
|
||||
Register tempReg2, Label *fail)
|
||||
{
|
||||
newGCThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_FAT_INLINE_STRING, fail);
|
||||
newGCTenuredThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_FAT_INLINE_STRING, fail);
|
||||
}
|
||||
|
||||
void
|
||||
|
|
|
@ -816,6 +816,12 @@ class MacroAssembler : public MacroAssemblerSpecific
|
|||
|
||||
void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
|
||||
gc::AllocKind allocKind, Label *fail);
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
void newGCNurseryThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
|
||||
gc::AllocKind allocKind, Label *fail);
|
||||
#endif
|
||||
void newGCTenuredThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
|
||||
gc::AllocKind allocKind, Label *fail);
|
||||
void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
|
||||
JSObject *templateObject, Label *fail);
|
||||
void newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
|
||||
|
|
|
@ -201,6 +201,10 @@ class JitFrameIterator
|
|||
// Returns the IonScript associated with this JS frame.
|
||||
IonScript *ionScript() const;
|
||||
|
||||
// Returns the IonScript associated with this JS frame; the frame must
|
||||
// not be invalidated.
|
||||
IonScript *ionScriptFromCalleeToken() const;
|
||||
|
||||
// Returns the Safepoint associated with this JS frame. Incurs a lookup
|
||||
// overhead.
|
||||
const SafepointIndex *safepoint() const;
|
||||
|
|
|
@ -374,7 +374,7 @@ class LNewPar : public LInstructionHelper<1, 1, 2>
|
|||
}
|
||||
};
|
||||
|
||||
class LNewDenseArrayPar : public LCallInstructionHelper<1, 2, 3>
|
||||
class LNewDenseArrayPar : public LInstructionHelper<1, 2, 3>
|
||||
{
|
||||
public:
|
||||
LIR_HEADER(NewDenseArrayPar);
|
||||
|
@ -5380,7 +5380,7 @@ class LRest : public LCallInstructionHelper<1, 1, 3>
|
|||
}
|
||||
};
|
||||
|
||||
class LRestPar : public LCallInstructionHelper<1, 2, 3>
|
||||
class LRestPar : public LInstructionHelper<1, 2, 3>
|
||||
{
|
||||
public:
|
||||
LIR_HEADER(RestPar);
|
||||
|
|
|
@ -132,11 +132,7 @@ LIRGenerator::visitCheckOverRecursedPar(MCheckOverRecursedPar *ins)
|
|||
{
|
||||
LCheckOverRecursedPar *lir =
|
||||
new(alloc()) LCheckOverRecursedPar(useRegister(ins->forkJoinContext()), temp());
|
||||
if (!add(lir, ins))
|
||||
return false;
|
||||
if (!assignSafepoint(lir, ins))
|
||||
return false;
|
||||
return true;
|
||||
return add(lir, ins) && assignSafepoint(lir, ins);
|
||||
}
|
||||
|
||||
bool
|
||||
|
@ -229,7 +225,7 @@ LIRGenerator::visitNewCallObjectPar(MNewCallObjectPar *ins)
|
|||
{
|
||||
const LAllocation &parThreadContext = useRegister(ins->forkJoinContext());
|
||||
LNewCallObjectPar *lir = LNewCallObjectPar::New(alloc(), parThreadContext, temp(), temp());
|
||||
return define(lir, ins);
|
||||
return define(lir, ins) && assignSafepoint(lir, ins);
|
||||
}
|
||||
|
||||
bool
|
||||
|
@ -2094,7 +2090,7 @@ LIRGenerator::visitLambdaPar(MLambdaPar *ins)
|
|||
LLambdaPar *lir = new(alloc()) LLambdaPar(useRegister(ins->forkJoinContext()),
|
||||
useRegister(ins->scopeChain()),
|
||||
temp(), temp());
|
||||
return define(lir, ins);
|
||||
return define(lir, ins) && assignSafepoint(lir, ins);
|
||||
}
|
||||
|
||||
bool
|
||||
|
@ -2200,30 +2196,30 @@ LIRGenerator::visitInterruptCheckPar(MInterruptCheckPar *ins)
|
|||
{
|
||||
LInterruptCheckPar *lir =
|
||||
new(alloc()) LInterruptCheckPar(useRegister(ins->forkJoinContext()), temp());
|
||||
if (!add(lir, ins))
|
||||
return false;
|
||||
if (!assignSafepoint(lir, ins))
|
||||
return false;
|
||||
return true;
|
||||
return add(lir, ins) && assignSafepoint(lir, ins);
|
||||
}
|
||||
|
||||
bool
|
||||
LIRGenerator::visitNewPar(MNewPar *ins)
|
||||
{
|
||||
LNewPar *lir = new(alloc()) LNewPar(useRegister(ins->forkJoinContext()), temp(), temp());
|
||||
return define(lir, ins);
|
||||
return define(lir, ins) && assignSafepoint(lir, ins);
|
||||
}
|
||||
|
||||
bool
|
||||
LIRGenerator::visitNewDenseArrayPar(MNewDenseArrayPar *ins)
|
||||
{
|
||||
JS_ASSERT(ins->forkJoinContext()->type() == MIRType_ForkJoinContext);
|
||||
JS_ASSERT(ins->length()->type() == MIRType_Int32);
|
||||
JS_ASSERT(ins->type() == MIRType_Object);
|
||||
|
||||
LNewDenseArrayPar *lir =
|
||||
new(alloc()) LNewDenseArrayPar(useFixed(ins->forkJoinContext(), CallTempReg0),
|
||||
useFixed(ins->length(), CallTempReg1),
|
||||
tempFixed(CallTempReg2),
|
||||
tempFixed(CallTempReg3),
|
||||
tempFixed(CallTempReg4));
|
||||
return defineReturn(lir, ins);
|
||||
new(alloc()) LNewDenseArrayPar(useRegister(ins->forkJoinContext()),
|
||||
useRegister(ins->length()),
|
||||
temp(),
|
||||
temp(),
|
||||
temp());
|
||||
return define(lir, ins) && assignSafepoint(lir, ins);
|
||||
}
|
||||
|
||||
bool
|
||||
|
@ -3329,12 +3325,12 @@ LIRGenerator::visitRestPar(MRestPar *ins)
|
|||
{
|
||||
JS_ASSERT(ins->numActuals()->type() == MIRType_Int32);
|
||||
|
||||
LRestPar *lir = new(alloc()) LRestPar(useFixed(ins->forkJoinContext(), CallTempReg0),
|
||||
useFixed(ins->numActuals(), CallTempReg1),
|
||||
tempFixed(CallTempReg2),
|
||||
tempFixed(CallTempReg3),
|
||||
tempFixed(CallTempReg4));
|
||||
return defineReturn(lir, ins) && assignSafepoint(lir, ins);
|
||||
LRestPar *lir = new(alloc()) LRestPar(useRegister(ins->forkJoinContext()),
|
||||
useRegister(ins->numActuals()),
|
||||
temp(),
|
||||
temp(),
|
||||
temp());
|
||||
return define(lir, ins) && assignSafepoint(lir, ins);
|
||||
}
|
||||
|
||||
bool
|
||||
|
|
|
@ -1695,6 +1695,10 @@ class MNewPar : public MUnaryInstruction
|
|||
JSObject *templateObject() const {
|
||||
return templateObject_;
|
||||
}
|
||||
|
||||
AliasSet getAliasSet() const {
|
||||
return AliasSet::None();
|
||||
}
|
||||
};
|
||||
|
||||
class MTypedObjectProto
|
||||
|
@ -9862,6 +9866,10 @@ class MNewDenseArrayPar : public MBinaryInstruction
|
|||
bool possiblyCalls() const {
|
||||
return true;
|
||||
}
|
||||
|
||||
AliasSet getAliasSet() const {
|
||||
return AliasSet::None();
|
||||
}
|
||||
};
|
||||
|
||||
// A resume point contains the information needed to reconstruct the Baseline
|
||||
|
|
|
@ -38,7 +38,11 @@ JSObject *
|
|||
jit::NewGCThingPar(ForkJoinContext *cx, gc::AllocKind allocKind)
|
||||
{
|
||||
JS_ASSERT(ForkJoinContext::current() == cx);
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
return js::NewGCObject<CanGC>(cx, allocKind, 0, gc::DefaultHeap);
|
||||
#else
|
||||
return js::NewGCObject<NoGC>(cx, allocKind, 0, gc::TenuredHeap);
|
||||
#endif
|
||||
}
|
||||
|
||||
bool
|
||||
|
|
|
@ -609,6 +609,11 @@ ParallelSafetyVisitor::replace(MInstruction *oldInstruction,
|
|||
MBasicBlock *block = oldInstruction->block();
|
||||
block->insertBefore(oldInstruction, replacementInstruction);
|
||||
oldInstruction->replaceAllUsesWith(replacementInstruction);
|
||||
MResumePoint *rp = oldInstruction->resumePoint();
|
||||
if (rp && rp->instruction() == oldInstruction) {
|
||||
rp->setInstruction(replacementInstruction);
|
||||
replacementInstruction->setResumePoint(rp);
|
||||
}
|
||||
block->discard(oldInstruction);
|
||||
|
||||
// We may have replaced a specialized Float32 instruction by its
|
||||
|
|
|
@ -19,6 +19,8 @@
|
|||
#include "vm/Interpreter.h"
|
||||
#include "vm/ProxyObject.h"
|
||||
|
||||
#include "gc/ForkJoinNursery-inl.h"
|
||||
|
||||
namespace js {
|
||||
|
||||
#ifdef JS_CRASH_DIAGNOSTICS
|
||||
|
|
|
@ -771,7 +771,7 @@ Chunk::init(JSRuntime *rt)
|
|||
/* Initialize the chunk info. */
|
||||
info.age = 0;
|
||||
info.trailer.storeBuffer = nullptr;
|
||||
info.trailer.location = ChunkLocationTenuredHeap;
|
||||
info.trailer.location = ChunkLocationBitTenuredHeap;
|
||||
info.trailer.runtime = rt;
|
||||
|
||||
/* The rest of info fields are initialized in pickChunk. */
|
||||
|
@ -880,8 +880,17 @@ Chunk::allocateArena(Zone *zone, AllocKind thingKind)
|
|||
JS_ASSERT(hasAvailableArenas());
|
||||
|
||||
JSRuntime *rt = zone->runtimeFromAnyThread();
|
||||
if (!rt->isHeapMinorCollecting() && rt->gc.bytes >= rt->gc.maxBytes)
|
||||
if (!rt->isHeapMinorCollecting() && rt->gc.bytes >= rt->gc.maxBytes) {
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
// This is an approximation to the best test, which would check that
|
||||
// this thread is currently promoting into the tenured area. I doubt
|
||||
// the better test would make much difference.
|
||||
if (!rt->isFJMinorCollecting())
|
||||
return nullptr;
|
||||
#else
|
||||
return nullptr;
|
||||
#endif
|
||||
}
|
||||
|
||||
ArenaHeader *aheader = MOZ_LIKELY(info.numArenasFreeCommitted > 0)
|
||||
? fetchNextFreeArena(rt)
|
||||
|
@ -1701,7 +1710,7 @@ ArenaLists::allocateFromArenaInline(Zone *zone, AllocKind thingKind,
|
|||
|
||||
/*
|
||||
* While we still hold the GC lock get an arena from some chunk, mark it
|
||||
* as full as its single free span is moved to the free lits, and insert
|
||||
* as full as its single free span is moved to the free lists, and insert
|
||||
* it to the list as a fully allocated arena.
|
||||
*
|
||||
* We add the arena before the the head, so that after the GC the most
|
||||
|
@ -2154,7 +2163,7 @@ GCRuntime::triggerGC(JS::gcreason::Reason reason)
|
|||
bool
|
||||
js::TriggerZoneGC(Zone *zone, JS::gcreason::Reason reason)
|
||||
{
|
||||
return zone->runtimeFromAnyThread()->gc.triggerZoneGC(zone,reason);
|
||||
return zone->runtimeFromAnyThread()->gc.triggerZoneGC(zone, reason);
|
||||
}
|
||||
|
||||
bool
|
||||
|
@ -2374,6 +2383,10 @@ DecommitArenas(JSRuntime *rt)
|
|||
static void
|
||||
ExpireChunksAndArenas(JSRuntime *rt, bool shouldShrink)
|
||||
{
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
rt->threadPool.pruneChunkCache();
|
||||
#endif
|
||||
|
||||
if (Chunk *toFree = rt->gc.chunkPool.expire(rt, shouldShrink)) {
|
||||
AutoUnlockGC unlock(rt);
|
||||
FreeChunkList(rt, toFree);
|
||||
|
|
|
@ -41,6 +41,10 @@ class ScopeObject;
|
|||
class Shape;
|
||||
class UnownedBaseShape;
|
||||
|
||||
namespace gc {
|
||||
class ForkJoinNursery;
|
||||
}
|
||||
|
||||
unsigned GetCPUCount();
|
||||
|
||||
enum HeapState {
|
||||
|
@ -196,6 +200,42 @@ IsNurseryAllocable(AllocKind kind)
|
|||
}
|
||||
#endif
|
||||
|
||||
#if defined(JSGC_FJGENERATIONAL)
|
||||
// This is separate from IsNurseryAllocable() so that the latter can evolve
|
||||
// without worrying about what the ForkJoinNursery's needs are, and vice
|
||||
// versa to some extent.
|
||||
static inline bool
|
||||
IsFJNurseryAllocable(AllocKind kind)
|
||||
{
|
||||
JS_ASSERT(kind >= 0 && unsigned(kind) < FINALIZE_LIMIT);
|
||||
static const bool map[] = {
|
||||
false, /* FINALIZE_OBJECT0 */
|
||||
true, /* FINALIZE_OBJECT0_BACKGROUND */
|
||||
false, /* FINALIZE_OBJECT2 */
|
||||
true, /* FINALIZE_OBJECT2_BACKGROUND */
|
||||
false, /* FINALIZE_OBJECT4 */
|
||||
true, /* FINALIZE_OBJECT4_BACKGROUND */
|
||||
false, /* FINALIZE_OBJECT8 */
|
||||
true, /* FINALIZE_OBJECT8_BACKGROUND */
|
||||
false, /* FINALIZE_OBJECT12 */
|
||||
true, /* FINALIZE_OBJECT12_BACKGROUND */
|
||||
false, /* FINALIZE_OBJECT16 */
|
||||
true, /* FINALIZE_OBJECT16_BACKGROUND */
|
||||
false, /* FINALIZE_SCRIPT */
|
||||
false, /* FINALIZE_LAZY_SCRIPT */
|
||||
false, /* FINALIZE_SHAPE */
|
||||
false, /* FINALIZE_BASE_SHAPE */
|
||||
false, /* FINALIZE_TYPE_OBJECT */
|
||||
false, /* FINALIZE_FAT_INLINE_STRING */
|
||||
false, /* FINALIZE_STRING */
|
||||
false, /* FINALIZE_EXTERNAL_STRING */
|
||||
false, /* FINALIZE_JITCODE */
|
||||
};
|
||||
JS_STATIC_ASSERT(JS_ARRAY_LENGTH(map) == FINALIZE_LIMIT);
|
||||
return map[kind];
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline bool
|
||||
IsBackgroundFinalized(AllocKind kind)
|
||||
{
|
||||
|
@ -782,6 +822,7 @@ class ArenaLists
|
|||
inline void normalizeBackgroundFinalizeState(AllocKind thingKind);
|
||||
|
||||
friend class js::Nursery;
|
||||
friend class js::gc::ForkJoinNursery;
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include "jsgc.h"
|
||||
|
||||
#include "gc/Zone.h"
|
||||
#include "vm/ForkJoin.h"
|
||||
|
||||
namespace js {
|
||||
|
||||
|
@ -56,8 +57,17 @@ ThreadSafeContext::isThreadLocal(T thing) const
|
|||
if (!isForkJoinContext())
|
||||
return true;
|
||||
|
||||
if (!IsInsideNursery(thing) &&
|
||||
allocator_->arenas.containsArena(runtime_, thing->arenaHeader()))
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
ForkJoinContext *cx = static_cast<ForkJoinContext*>(const_cast<ThreadSafeContext*>(this));
|
||||
if (cx->fjNursery().isInsideNewspace(thing))
|
||||
return true;
|
||||
#endif
|
||||
|
||||
// Global invariant
|
||||
JS_ASSERT(!IsInsideNursery(thing));
|
||||
|
||||
// The thing is not in the nursery, but is it in the private tenured area?
|
||||
if (allocator_->arenas.containsArena(runtime_, thing->arenaHeader()))
|
||||
{
|
||||
// GC should be suppressed in preparation for mutating thread local
|
||||
// objects, as we don't want to trip any barriers.
|
||||
|
@ -91,6 +101,14 @@ ShouldNurseryAllocate(const Nursery &nursery, AllocKind kind, InitialHeap heap)
|
|||
}
|
||||
#endif
|
||||
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
inline bool
|
||||
ShouldFJNurseryAllocate(const ForkJoinNursery &nursery, AllocKind kind, InitialHeap heap)
|
||||
{
|
||||
return IsFJNurseryAllocable(kind) && heap != TenuredHeap;
|
||||
}
|
||||
#endif
|
||||
|
||||
inline JSGCTraceKind
|
||||
GetGCThingTraceKind(const void *thing)
|
||||
{
|
||||
|
@ -130,15 +148,19 @@ class ArenaIter
|
|||
init(zone, kind);
|
||||
}
|
||||
|
||||
void init(JS::Zone *zone, AllocKind kind) {
|
||||
aheader = zone->allocator.arenas.getFirstArena(kind);
|
||||
remainingHeader = zone->allocator.arenas.getFirstArenaToSweep(kind);
|
||||
void init(Allocator *allocator, AllocKind kind) {
|
||||
aheader = allocator->arenas.getFirstArena(kind);
|
||||
remainingHeader = allocator->arenas.getFirstArenaToSweep(kind);
|
||||
if (!aheader) {
|
||||
aheader = remainingHeader;
|
||||
remainingHeader = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
void init(JS::Zone *zone, AllocKind kind) {
|
||||
init(&zone->allocator, kind);
|
||||
}
|
||||
|
||||
bool done() const {
|
||||
return !aheader;
|
||||
}
|
||||
|
@ -187,7 +209,11 @@ class ArenaCellIterImpl
|
|||
}
|
||||
|
||||
public:
|
||||
ArenaCellIterImpl() {}
|
||||
ArenaCellIterImpl()
|
||||
: firstThingOffset(0) // Squelch
|
||||
, thingSize(0) // warnings
|
||||
{
|
||||
}
|
||||
|
||||
void initUnsynchronized(ArenaHeader *aheader) {
|
||||
AllocKind kind = aheader->getAllocKind();
|
||||
|
@@ -479,6 +505,28 @@ TryNewNurseryObject(ThreadSafeContext *cxArg, size_t thingSize, size_t nDynamicS
}
#endif /* JSGC_GENERATIONAL */

#ifdef JSGC_FJGENERATIONAL
template <AllowGC allowGC>
inline JSObject *
TryNewFJNurseryObject(ForkJoinContext *cx, size_t thingSize, size_t nDynamicSlots)
{
    ForkJoinNursery &nursery = cx->fjNursery();
    bool tooLarge = false;
    JSObject *obj = nursery.allocateObject(thingSize, nDynamicSlots, tooLarge);
    if (obj)
        return obj;

    if (!tooLarge && allowGC) {
        nursery.minorGC();
        obj = nursery.allocateObject(thingSize, nDynamicSlots, tooLarge);
        if (obj)
            return obj;
    }

    return nullptr;
}
#endif /* JSGC_FJGENERATIONAL */
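TryNewFJNurseryObject above follows the usual allocate / minor-GC / retry-once shape. Here is a neutral sketch of that pattern under hypothetical interfaces, not the real ForkJoinNursery API.

#include <cstddef>

// Sketch only: Nursery is any type providing tryAllocate(bytes, &tooLarge)
// and minorGC(); neither name is taken from the actual engine.
template <typename Nursery>
void *allocateWithOneRetry(Nursery &nursery, size_t bytes, bool allowGC) {
    bool tooLarge = false;
    if (void *p = nursery.tryAllocate(bytes, &tooLarge))
        return p;
    // Retry only when the failure means "nursery full" rather than
    // "object can never fit", and only if a collection is allowed here.
    if (!tooLarge && allowGC) {
        nursery.minorGC();
        if (void *p = nursery.tryAllocate(bytes, &tooLarge))
            return p;
    }
    return nullptr;  // caller falls back to a tenured allocation
}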
static inline bool
|
||||
PossiblyFail()
|
||||
{
|
||||
|
@ -568,6 +616,16 @@ AllocateObject(ThreadSafeContext *cx, AllocKind kind, size_t nDynamicSlots, Init
|
|||
return obj;
|
||||
}
|
||||
#endif
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
if (cx->isForkJoinContext() &&
|
||||
ShouldFJNurseryAllocate(cx->asForkJoinContext()->fjNursery(), kind, heap))
|
||||
{
|
||||
JSObject *obj =
|
||||
TryNewFJNurseryObject<allowGC>(cx->asForkJoinContext(), thingSize, nDynamicSlots);
|
||||
if (obj)
|
||||
return obj;
|
||||
}
|
||||
#endif
|
||||
|
||||
HeapSlot *slots = nullptr;
|
||||
if (nDynamicSlots) {
|
||||
|
@ -615,6 +673,8 @@ AllocateNonObject(ThreadSafeContext *cx)
|
|||
* other hand, since these allocations are extremely common, we don't want to
|
||||
* delay GC from these allocation sites. Instead we allow the GC, but still
|
||||
* fail the allocation, forcing the non-cached path.
|
||||
*
|
||||
* Observe this won't be used for ForkJoin allocation, as it takes a JSContext*
|
||||
*/
|
||||
template <AllowGC allowGC>
|
||||
inline JSObject *
|
||||
|
|
|
@ -2824,16 +2824,26 @@ JSObject::setSlotSpan(ThreadSafeContext *cx, HandleObject obj, uint32_t span)
|
|||
return true;
|
||||
}
|
||||
|
||||
// This will not run the garbage collector. If a nursery cannot accommodate the slot array
|
||||
// an attempt will be made to place the array in the tenured area.
|
||||
static HeapSlot *
|
||||
AllocateSlots(ThreadSafeContext *cx, JSObject *obj, uint32_t nslots)
|
||||
{
|
||||
#ifdef JSGC_GENERATIONAL
|
||||
if (cx->isJSContext())
|
||||
return cx->asJSContext()->runtime()->gc.nursery.allocateSlots(cx->asJSContext(), obj, nslots);
|
||||
#endif
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
if (cx->isForkJoinContext())
|
||||
return cx->asForkJoinContext()->fjNursery().allocateSlots(obj, nslots);
|
||||
#endif
|
||||
return cx->pod_malloc<HeapSlot>(nslots);
|
||||
}
|
||||
|
||||
// This will not run the garbage collector. If a nursery cannot accommodate the slot array
|
||||
// an attempt will be made to place the array in the tenured area.
|
||||
//
|
||||
// If this returns null then the old slots will be left alone.
|
||||
static HeapSlot *
|
||||
ReallocateSlots(ThreadSafeContext *cx, JSObject *obj, HeapSlot *oldSlots,
|
||||
uint32_t oldCount, uint32_t newCount)
|
||||
|
@ -2841,8 +2851,14 @@ ReallocateSlots(ThreadSafeContext *cx, JSObject *obj, HeapSlot *oldSlots,
|
|||
#ifdef JSGC_GENERATIONAL
|
||||
if (cx->isJSContext()) {
|
||||
return cx->asJSContext()->runtime()->gc.nursery.reallocateSlots(cx->asJSContext(),
|
||||
obj, oldSlots,
|
||||
oldCount, newCount);
|
||||
obj, oldSlots,
|
||||
oldCount, newCount);
|
||||
}
|
||||
#endif
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
if (cx->isForkJoinContext()) {
|
||||
return cx->asForkJoinContext()->fjNursery().reallocateSlots(obj, oldSlots,
|
||||
oldCount, newCount);
|
||||
}
|
||||
#endif
|
||||
return (HeapSlot *)cx->realloc_(oldSlots, oldCount * sizeof(HeapSlot),
|
||||
|
@ -2914,10 +2930,14 @@ JSObject::growSlots(ThreadSafeContext *cx, HandleObject obj, uint32_t oldCount,
|
|||
static void
|
||||
FreeSlots(ThreadSafeContext *cx, HeapSlot *slots)
|
||||
{
|
||||
// Note: threads without a JSContext do not have access to nursery allocated things.
|
||||
#ifdef JSGC_GENERATIONAL
|
||||
// Note: threads without a JSContext do not have access to GGC nursery allocated things.
|
||||
if (cx->isJSContext())
|
||||
return cx->asJSContext()->runtime()->gc.nursery.freeSlots(cx->asJSContext(), slots);
|
||||
#endif
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
if (cx->isForkJoinContext())
|
||||
return cx->asForkJoinContext()->fjNursery().freeSlots(slots);
|
||||
#endif
|
||||
js_free(slots);
|
||||
}
|
||||
|
@ -3134,6 +3154,8 @@ JSObject::maybeDensifySparseElements(js::ExclusiveContext *cx, HandleObject obj)
|
|||
return ED_OK;
|
||||
}
|
||||
|
||||
// This will not run the garbage collector. If a nursery cannot accommodate the element array
|
||||
// an attempt will be made to place the array in the tenured area.
|
||||
static ObjectElements *
|
||||
AllocateElements(ThreadSafeContext *cx, JSObject *obj, uint32_t nelems)
|
||||
{
|
||||
|
@ -3141,10 +3163,16 @@ AllocateElements(ThreadSafeContext *cx, JSObject *obj, uint32_t nelems)
|
|||
if (cx->isJSContext())
|
||||
return cx->asJSContext()->runtime()->gc.nursery.allocateElements(cx->asJSContext(), obj, nelems);
|
||||
#endif
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
if (cx->isForkJoinContext())
|
||||
return cx->asForkJoinContext()->fjNursery().allocateElements(obj, nelems);
|
||||
#endif
|
||||
|
||||
return static_cast<js::ObjectElements *>(cx->malloc_(nelems * sizeof(HeapValue)));
|
||||
}
|
||||
|
||||
// This will not run the garbage collector. If a nursery cannot accomodate the element array
|
||||
// an attempt will be made to place the array in the tenured area.
|
||||
static ObjectElements *
|
||||
ReallocateElements(ThreadSafeContext *cx, JSObject *obj, ObjectElements *oldHeader,
|
||||
uint32_t oldCount, uint32_t newCount)
|
||||
|
@ -3152,8 +3180,14 @@ ReallocateElements(ThreadSafeContext *cx, JSObject *obj, ObjectElements *oldHead
|
|||
#ifdef JSGC_GENERATIONAL
|
||||
if (cx->isJSContext()) {
|
||||
return cx->asJSContext()->runtime()->gc.nursery.reallocateElements(cx->asJSContext(), obj,
|
||||
oldHeader, oldCount,
|
||||
newCount);
|
||||
oldHeader, oldCount,
|
||||
newCount);
|
||||
}
|
||||
#endif
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
if (cx->isForkJoinContext()) {
|
||||
return cx->asForkJoinContext()->fjNursery().reallocateElements(obj, oldHeader,
|
||||
oldCount, newCount);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -187,6 +187,10 @@ DenseRangeWriteBarrierPost(JSRuntime *rt, JSObject *obj, uint32_t start, uint32_
|
|||
#endif
|
||||
}
|
||||
|
||||
namespace gc {
|
||||
class ForkJoinNursery;
|
||||
}
|
||||
|
||||
} /* namespace js */
|
||||
|
||||
/*
|
||||
|
@ -206,6 +210,7 @@ class JSObject : public js::ObjectImpl
|
|||
friend struct js::GCMarker;
|
||||
friend class js::NewObjectCache;
|
||||
friend class js::Nursery;
|
||||
friend class js::gc::ForkJoinNursery;
|
||||
|
||||
/* Make the type object to use for LAZY_TYPE objects. */
|
||||
static js::types::TypeObject *makeLazyType(JSContext *cx, js::HandleObject obj);
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include "jsgcinlines.h"
|
||||
#include "jsinferinlines.h"
|
||||
|
||||
#include "gc/ForkJoinNursery-inl.h"
|
||||
#include "vm/ObjectImpl-inl.h"
|
||||
|
||||
/* static */ inline bool
|
||||
|
|
|
@@ -266,6 +266,13 @@ class JS_PUBLIC_API(AutoGCRooter)
    static void traceAll(JSTracer *trc);
    static void traceAllWrappers(JSTracer *trc);

    /* T must be a context type */
    template<typename T>
    static void traceAllInContext(T* cx, JSTracer *trc) {
        for (AutoGCRooter *gcr = cx->autoGCRooters; gcr; gcr = gcr->down)
            gcr->trace(trc);
    }

  protected:
    AutoGCRooter * const down;
@ -112,6 +112,7 @@ UNIFIED_SOURCES += [
|
|||
'frontend/ParseNode.cpp',
|
||||
'frontend/TokenStream.cpp',
|
||||
'gc/Barrier.cpp',
|
||||
'gc/ForkJoinNursery.cpp',
|
||||
'gc/Iteration.cpp',
|
||||
'gc/Marking.cpp',
|
||||
'gc/Memory.cpp',
|
||||
|
@ -457,6 +458,8 @@ if CONFIG['NIGHTLY_BUILD']:
|
|||
DEFINES['ENABLE_PARALLEL_JS'] = True
|
||||
DEFINES['ENABLE_BINARYDATA'] = True
|
||||
DEFINES['ENABLE_SHARED_ARRAY_BUFFER'] = True
|
||||
if CONFIG['JSGC_GENERATIONAL_CONFIGURED']:
|
||||
DEFINES['JSGC_FJGENERATIONAL'] = True
|
||||
|
||||
DEFINES['EXPORT_JS_API'] = True
|
||||
|
||||
|
|
|
@ -799,8 +799,14 @@ ArrayBufferObject::finalize(FreeOp *fop, JSObject *obj)
|
|||
/* static */ void
|
||||
ArrayBufferObject::obj_trace(JSTracer *trc, JSObject *obj)
|
||||
{
|
||||
if (!IS_GC_MARKING_TRACER(trc) && !trc->runtime()->isHeapMinorCollecting())
|
||||
if (!IS_GC_MARKING_TRACER(trc) && !trc->runtime()->isHeapMinorCollecting()
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
&& !trc->runtime()->isFJMinorCollecting()
|
||||
#endif
|
||||
)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
// ArrayBufferObjects need to maintain a list of possibly-weak pointers to
|
||||
// their views. The straightforward way to update the weak pointers would
|
||||
|
|
|
@ -27,7 +27,7 @@
|
|||
|
||||
#if defined(JS_THREADSAFE) && defined(JS_ION)
|
||||
# include "jit/JitCommon.h"
|
||||
# ifdef DEBUG
|
||||
# ifdef FORKJOIN_SPEW
|
||||
# include "jit/Ion.h"
|
||||
# include "jit/JitCompartment.h"
|
||||
# include "jit/MIR.h"
|
||||
|
@ -35,6 +35,7 @@
|
|||
# endif
|
||||
#endif // THREADSAFE && ION
|
||||
|
||||
#include "gc/ForkJoinNursery-inl.h"
|
||||
#include "vm/Interpreter-inl.h"
|
||||
|
||||
using namespace js;
|
||||
|
@ -279,7 +280,7 @@ class ForkJoinOperation
|
|||
jsbytecode *bailoutBytecode;
|
||||
|
||||
ForkJoinOperation(JSContext *cx, HandleFunction fun, uint16_t sliceStart,
|
||||
uint16_t sliceEnd, ForkJoinMode mode);
|
||||
uint16_t sliceEnd, ForkJoinMode mode, HandleObject updatable);
|
||||
ExecutionStatus apply();
|
||||
|
||||
private:
|
||||
|
@ -318,6 +319,7 @@ class ForkJoinOperation
|
|||
|
||||
JSContext *cx_;
|
||||
HandleFunction fun_;
|
||||
HandleObject updatable_;
|
||||
uint16_t sliceStart_;
|
||||
uint16_t sliceEnd_;
|
||||
Vector<ParallelBailoutRecord, 16> bailoutRecords_;
|
||||
|
@ -345,12 +347,17 @@ class ForkJoinOperation
|
|||
|
||||
class ForkJoinShared : public ParallelJob, public Monitor
|
||||
{
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
friend class gc::ForkJoinGCShared;
|
||||
#endif
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////
|
||||
// Constant fields
|
||||
|
||||
JSContext *const cx_; // Current context
|
||||
ThreadPool *const threadPool_; // The thread pool
|
||||
HandleFunction fun_; // The JavaScript function to execute
|
||||
HandleObject updatable_; // Pre-existing object that might be updated
|
||||
uint16_t sliceStart_; // The starting slice id.
|
||||
uint16_t sliceEnd_; // The ending slice id + 1.
|
||||
PRLock *cxLock_; // Locks cx_ for parallel VM calls
|
||||
|
@ -387,6 +394,7 @@ class ForkJoinShared : public ParallelJob, public Monitor
|
|||
ForkJoinShared(JSContext *cx,
|
||||
ThreadPool *threadPool,
|
||||
HandleFunction fun,
|
||||
HandleObject updatable,
|
||||
uint16_t sliceStart,
|
||||
uint16_t sliceEnd,
|
||||
ParallelBailoutRecord *records);
|
||||
|
@ -428,6 +436,8 @@ class ForkJoinShared : public ParallelJob, public Monitor
|
|||
|
||||
JSContext *acquireJSContext() { PR_Lock(cxLock_); return cx_; }
|
||||
void releaseJSContext() { PR_Unlock(cxLock_); }
|
||||
|
||||
HandleObject updatable() { return updatable_; }
|
||||
};
|
||||
|
||||
class AutoEnterWarmup
|
||||
|
@ -502,24 +512,26 @@ static const char *ForkJoinModeString(ForkJoinMode mode);
|
|||
bool
|
||||
js::ForkJoin(JSContext *cx, CallArgs &args)
|
||||
{
|
||||
JS_ASSERT(args.length() == 4); // else the self-hosted code is wrong
|
||||
JS_ASSERT(args.length() == 5); // else the self-hosted code is wrong
|
||||
JS_ASSERT(args[0].isObject());
|
||||
JS_ASSERT(args[0].toObject().is<JSFunction>());
|
||||
JS_ASSERT(args[1].isInt32());
|
||||
JS_ASSERT(args[2].isInt32());
|
||||
JS_ASSERT(args[3].isInt32());
|
||||
JS_ASSERT(args[3].toInt32() < NumForkJoinModes);
|
||||
JS_ASSERT(args[4].isObjectOrNull());
|
||||
|
||||
RootedFunction fun(cx, &args[0].toObject().as<JSFunction>());
|
||||
uint16_t sliceStart = (uint16_t)(args[1].toInt32());
|
||||
uint16_t sliceEnd = (uint16_t)(args[2].toInt32());
|
||||
ForkJoinMode mode = (ForkJoinMode)(args[3].toInt32());
|
||||
RootedObject updatable(cx, args[4].toObjectOrNull());
|
||||
|
||||
MOZ_ASSERT(sliceStart == args[1].toInt32());
|
||||
MOZ_ASSERT(sliceEnd == args[2].toInt32());
|
||||
MOZ_ASSERT(sliceStart <= sliceEnd);
|
||||
|
||||
ForkJoinOperation op(cx, fun, sliceStart, sliceEnd, mode);
|
||||
ForkJoinOperation op(cx, fun, sliceStart, sliceEnd, mode, updatable);
|
||||
ExecutionStatus status = op.apply();
|
||||
if (status == ExecutionFatal)
|
||||
return false;
|
||||
|
@ -578,13 +590,14 @@ ForkJoinModeString(ForkJoinMode mode) {
|
|||
}
|
||||
|
||||
ForkJoinOperation::ForkJoinOperation(JSContext *cx, HandleFunction fun, uint16_t sliceStart,
|
||||
uint16_t sliceEnd, ForkJoinMode mode)
|
||||
uint16_t sliceEnd, ForkJoinMode mode, HandleObject updatable)
|
||||
: bailouts(0),
|
||||
bailoutCause(ParallelBailoutNone),
|
||||
bailoutScript(cx),
|
||||
bailoutBytecode(nullptr),
|
||||
cx_(cx),
|
||||
fun_(fun),
|
||||
updatable_(updatable),
|
||||
sliceStart_(sliceStart),
|
||||
sliceEnd_(sliceEnd),
|
||||
bailoutRecords_(cx),
|
||||
|
@ -1237,7 +1250,8 @@ ForkJoinOperation::parallelExecution(ExecutionStatus *status)
|
|||
|
||||
ForkJoinActivation activation(cx_);
|
||||
ThreadPool *threadPool = &cx_->runtime()->threadPool;
|
||||
ForkJoinShared shared(cx_, threadPool, fun_, sliceStart_, sliceEnd_, &bailoutRecords_[0]);
|
||||
ForkJoinShared shared(cx_, threadPool, fun_, updatable_, sliceStart_, sliceEnd_,
|
||||
&bailoutRecords_[0]);
|
||||
if (!shared.init()) {
|
||||
*status = ExecutionFatal;
|
||||
return RedLight;
|
||||
|
@ -1333,7 +1347,8 @@ class ParallelIonInvoke
|
|||
|
||||
bool invoke(ForkJoinContext *cx) {
|
||||
JitActivation activation(cx);
|
||||
Value result;
|
||||
// In-out parameter: on input it denotes the number of values to preserve after the call.
|
||||
Value result = Int32Value(0);
|
||||
CALL_GENERATED_CODE(enter_, jitcode_, argc_ + 1, argv_ + 1, nullptr, calleeToken_,
|
||||
nullptr, 0, &result);
|
||||
return !result.isMagic();
|
||||
|
@ -1347,12 +1362,14 @@ class ParallelIonInvoke
|
|||
ForkJoinShared::ForkJoinShared(JSContext *cx,
|
||||
ThreadPool *threadPool,
|
||||
HandleFunction fun,
|
||||
HandleObject updatable,
|
||||
uint16_t sliceStart,
|
||||
uint16_t sliceEnd,
|
||||
ParallelBailoutRecord *records)
|
||||
: cx_(cx),
|
||||
threadPool_(threadPool),
|
||||
fun_(fun),
|
||||
updatable_(updatable),
|
||||
sliceStart_(sliceStart),
|
||||
sliceEnd_(sliceEnd),
|
||||
cxLock_(nullptr),
|
||||
|
@ -1402,7 +1419,8 @@ ForkJoinShared::init()
|
|||
|
||||
ForkJoinShared::~ForkJoinShared()
|
||||
{
|
||||
PR_DestroyLock(cxLock_);
|
||||
if (cxLock_)
|
||||
PR_DestroyLock(cxLock_);
|
||||
|
||||
while (allocators_.length() > 0)
|
||||
js_delete(allocators_.popCopy());
|
||||
|
@ -1425,12 +1443,15 @@ ForkJoinShared::execute()
|
|||
|
||||
// Push parallel tasks and wait until they're all done.
|
||||
jobResult = threadPool_->executeJob(cx_, this, sliceStart_, sliceEnd_);
|
||||
if (jobResult == TP_FATAL)
|
||||
return TP_FATAL;
|
||||
}
|
||||
|
||||
// Arenas must be transferred unconditionally until we have the means
|
||||
// to clear the ForkJoin result array, see bug 993347.
|
||||
transferArenasToCompartmentAndProcessGCRequests();
|
||||
|
||||
if (jobResult == TP_FATAL)
|
||||
return TP_FATAL;
|
||||
|
||||
// Check if any of the workers failed.
|
||||
if (abort_) {
|
||||
if (fatal_)
|
||||
|
@ -1438,11 +1459,15 @@ ForkJoinShared::execute()
|
|||
return TP_RETRY_SEQUENTIALLY;
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
#ifdef FORKJOIN_SPEW
|
||||
Spew(SpewOps, "Completed parallel job [slices: %d, threads: %d, stolen: %d (work stealing:%s)]",
|
||||
sliceEnd_ - sliceStart_,
|
||||
threadPool_->numWorkers(),
|
||||
#ifdef DEBUG
|
||||
threadPool_->stolenSlices(),
|
||||
#else
|
||||
0,
|
||||
#endif
|
||||
threadPool_->workStealing() ? "ON" : "OFF");
|
||||
#endif
|
||||
|
||||
|
@ -1458,6 +1483,7 @@ ForkJoinShared::transferArenasToCompartmentAndProcessGCRequests()
|
|||
comp->adoptWorkerAllocator(allocators_[i]);
|
||||
|
||||
if (gcRequested_) {
|
||||
Spew(SpewGC, "Triggering garbage collection in SpiderMonkey heap");
|
||||
if (!gcZone_)
|
||||
TriggerGC(cx_->runtime(), gcReason_);
|
||||
else
|
||||
|
@ -1493,7 +1519,22 @@ ForkJoinShared::executeFromWorker(ThreadPoolWorker *worker, uintptr_t stackLimit
|
|||
bool
|
||||
ForkJoinShared::executeFromMainThread(ThreadPoolWorker *worker)
|
||||
{
|
||||
executePortion(&cx_->mainThread(), worker);
|
||||
// Note that we need new PerThreadData on the main thread as well,
|
||||
// so that PJS GC does not walk up the old mainThread stack.
|
||||
PerThreadData *oldData = TlsPerThreadData.get();
|
||||
PerThreadData thisThread(cx_->runtime());
|
||||
if (!thisThread.init()) {
|
||||
setAbortFlagAndRequestInterrupt(true);
|
||||
return false;
|
||||
}
|
||||
TlsPerThreadData.set(&thisThread);
|
||||
|
||||
// Don't use setIonStackLimit() because that acquires the ionStackLimitLock, and the
|
||||
// lock has not been initialized in these cases.
|
||||
thisThread.jitStackLimit = oldData->jitStackLimit;
|
||||
executePortion(&thisThread, worker);
|
||||
TlsPerThreadData.set(oldData);
|
||||
|
||||
return !abort_;
|
||||
}
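The PerThreadData handling above saves the current TLS value, installs a fresh one for the duration of executePortion, and restores the old value afterwards. The same save/replace/restore shape can be written as a small RAII sketch; the types here are hypothetical, and the real code stays manual so it can abort cleanly if init() fails.

// Illustrative RAII wrapper for "swap a thread-local slot, restore on exit".
template <typename TLS, typename Data>
class ScopedSwapTLS {
    TLS &tls_;
    Data *saved_;
  public:
    ScopedSwapTLS(TLS &tls, Data *replacement)
      : tls_(tls), saved_(tls.get()) { tls_.set(replacement); }
    ~ScopedSwapTLS() { tls_.set(saved_); }
    ScopedSwapTLS(const ScopedSwapTLS &) = delete;
    ScopedSwapTLS &operator=(const ScopedSwapTLS &) = delete;
};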
|
||||
|
||||
|
@ -1512,7 +1553,7 @@ ForkJoinShared::executePortion(PerThreadData *perThread, ThreadPoolWorker *worke
|
|||
// assertion here for maximum clarity.
|
||||
JS::AutoSuppressGCAnalysis nogc;
|
||||
|
||||
#ifdef DEBUG
|
||||
#ifdef FORKJOIN_SPEW
|
||||
// Set the maximum worker and slice number for prettier spewing.
|
||||
cx.maxWorkerId = threadPool_->numWorkers();
|
||||
#endif
|
||||
|
@ -1544,8 +1585,36 @@ ForkJoinShared::executePortion(PerThreadData *perThread, ThreadPoolWorker *worke
|
|||
|
||||
bool ok = fii.invoke(&cx);
|
||||
JS_ASSERT(ok == !cx.bailoutRecord->topScript);
|
||||
if (!ok)
|
||||
if (!ok) {
|
||||
setAbortFlagAndRequestInterrupt(false);
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
// TODO: See bugs 1010169, 993347.
|
||||
//
|
||||
// It is not desirable to promote here, but if we don't do
|
||||
// this then we can't unconditionally transfer arenas to
|
||||
// the compartment, since the arenas can contain objects
|
||||
// that point into the nurseries. If those objects are
|
||||
// touched at all by the GC, eg as part of a prebarrier,
|
||||
// then chaos ensues.
|
||||
//
|
||||
// The proper fix might appear to be to note the abort and
|
||||
// not transfer, but instead clear, the arenas. However,
|
||||
// the result array will remain live and unless it is
|
||||
// cleared immediately and without running barriers then
|
||||
// it will have pointers into the now-cleared areas, which
|
||||
// is also wrong.
|
||||
//
|
||||
// For the moment, until we figure out how to clear the
|
||||
// result array properly and implement that, it may be
|
||||
// that the best thing we can do here is to evacuate and
|
||||
// then let the GC run its course.
|
||||
cx.evacuateLiveData();
|
||||
#endif
|
||||
} else {
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
cx.evacuateLiveData();
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
Spew(SpewOps, "Down");
|
||||
|
@ -1608,6 +1677,49 @@ ForkJoinShared::requestZoneGC(JS::Zone *zone, JS::gcreason::Reason reason)
|
|||
}
|
||||
}
|
||||
|
||||
#ifdef JSGC_FJGENERATIONAL
|
||||
|
||||
JSRuntime*
|
||||
js::gc::ForkJoinGCShared::runtime()
|
||||
{
|
||||
return shared_->runtime();
|
||||
}
|
||||
|
||||
JS::Zone*
|
||||
js::gc::ForkJoinGCShared::zone()
|
||||
{
|
||||
return shared_->zone();
|
||||
}
|
||||
|
||||
JSObject*
|
||||
js::gc::ForkJoinGCShared::updatable()
|
||||
{
|
||||
return shared_->updatable();
|
||||
}
|
||||
|
||||
js::gc::ForkJoinNurseryChunk *
|
||||
js::gc::ForkJoinGCShared::allocateNurseryChunk()
|
||||
{
|
||||
return shared_->threadPool_->getChunk();
|
||||
}
|
||||
|
||||
void
|
||||
js::gc::ForkJoinGCShared::freeNurseryChunk(js::gc::ForkJoinNurseryChunk *p)
|
||||
{
|
||||
shared_->threadPool_->putFreeChunk(p);
|
||||
}
|
||||
|
||||
void
|
||||
js::gc::ForkJoinGCShared::spewGC(const char *fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
va_start(ap, fmt);
|
||||
SpewVA(SpewGC, fmt, ap);
|
||||
va_end(ap);
|
||||
}
|
||||
|
||||
#endif // JSGC_FJGENERATIONAL
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////////
|
||||
// ForkJoinContext
|
||||
//
|
||||
|
@@ -1620,6 +1732,10 @@ ForkJoinContext::ForkJoinContext(PerThreadData *perThreadData, ThreadPoolWorker
    targetRegionStart(nullptr),
    targetRegionEnd(nullptr),
    shared_(shared),
#ifdef JSGC_FJGENERATIONAL
    gcShared_(shared),
    fjNursery_(const_cast<ForkJoinContext*>(this), &this->gcShared_, allocator),
#endif
    worker_(worker),
    acquiredJSContext_(false),
    nogc_()

@@ -1779,7 +1895,7 @@ js::ParallelBailoutRecord::addTrace(JSScript *script,
// Debug spew
//

#ifdef DEBUG
#ifdef FORKJOIN_SPEW

static const char *
ExecutionStatusToString(ExecutionStatus status)

@@ -1873,6 +1989,8 @@ class ParallelSpewer
            active[SpewCompile] = true;
        if (strstr(env, "bailouts"))
            active[SpewBailouts] = true;
        if (strstr(env, "gc"))
            active[SpewGC] = true;
        if (strstr(env, "full")) {
            for (uint32_t i = 0; i < NumSpewChannels; i++)
                active[i] = true;

@@ -2077,6 +2195,12 @@ parallel::Spew(SpewChannel channel, const char *fmt, ...)
    va_end(ap);
}

void
parallel::SpewVA(SpewChannel channel, const char *fmt, va_list ap)
{
    spewer.spewVA(channel, fmt, ap);
}

void
parallel::SpewBeginOp(JSContext *cx, const char *name)
{

@@ -2125,7 +2249,7 @@ parallel::SpewBailoutIR(IonLIRTraceData *data)
    spewer.spewBailoutIR(data);
}

#endif // DEBUG
#endif // FORKJOIN_SPEW

bool
js::InExclusiveParallelSection()
@@ -9,12 +9,19 @@

#include "mozilla/ThreadLocal.h"

#include <stdarg.h>

#include "jscntxt.h"

#include "gc/ForkJoinNursery.h"
#include "gc/GCInternals.h"

#include "jit/Ion.h"

#ifdef DEBUG
#define FORKJOIN_SPEW
#endif

///////////////////////////////////////////////////////////////////////////
// Read Me First
//
@@ -30,7 +37,7 @@
// to enable parallel execution. At the top-level, it consists of a native
// function (exposed as the ForkJoin intrinsic) that is used like so:
//
//     ForkJoin(func, sliceStart, sliceEnd, mode)
//     ForkJoin(func, sliceStart, sliceEnd, mode, updatable)
//
// The intention of this statement is to start some number (usually the
// number of hardware threads) of copies of |func()| running in parallel. Each

@@ -47,6 +54,13 @@
// The fourth argument, |mode|, is an internal mode integer giving finer
// control over the behavior of ForkJoin. See the |ForkJoinMode| enum.
//
// The fifth argument, |updatable|, if not null, is an object that may
// be updated in a race-free manner by |func()| or its callees.
// Typically this is some sort of pre-sized array. Only this object
// may be updated by |func()|, and updates must not race. (A more
// general approach is perhaps desirable, eg passing an Array of
// objects that may be updated, but that is not presently needed.)
//
// func() should expect the following arguments:
//
//     func(workerId, sliceStart, sliceEnd)
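The signatures above describe a contract rather than an implementation: |func| receives a half-open slice and may only write into the single pre-sized |updatable| object, never racing with other workers. The sketch below restates that contract in plain C++ with invented names (`WorkerFunc`, `runSlicesSequentially`); it runs the slices in a loop only to keep the slice arithmetic visible, whereas the real ForkJoin dispatches them to worker threads.

```cpp
#include <cstdint>
#include <vector>

// Hypothetical stand-in for |func(workerId, sliceStart, sliceEnd)|: each
// worker fills only its own half-open range of the pre-sized output, so
// writes never race (mirroring the |updatable| rule described above).
using WorkerFunc = void (*)(uint32_t workerId, uint32_t sliceStart, uint32_t sliceEnd,
                            std::vector<double> &out);

// Assumes numWorkers > 0 and out.size() == length.
static void runSlicesSequentially(WorkerFunc func, uint32_t numWorkers,
                                  uint32_t length, std::vector<double> &out)
{
    // The real ForkJoin runs these calls in parallel; looping here is just
    // to show how the slice bounds partition [0, length).
    uint32_t perWorker = (length + numWorkers - 1) / numWorkers;
    for (uint32_t id = 0; id < numWorkers; id++) {
        uint32_t start = id * perWorker;
        uint32_t end = start + perWorker < length ? start + perWorker : length;
        if (start < end)
            func(id, start, end, out);
    }
}
```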
@@ -164,7 +178,7 @@
// the error location might not be in the same JSScript as the one
// which was executing due to inlining.
//
// Garbage collection and allocation:
// Garbage collection, allocation, and write barriers:
//
// Code which executes on these parallel threads must be very careful
// with respect to garbage collection and allocation. The typical
@@ -173,24 +187,49 @@
// any synchronization. They can also trigger GC in an ad-hoc way.
//
// To deal with this, the forkjoin code creates a distinct |Allocator|
// object for each slice. You can access the appropriate object via
// the |ForkJoinContext| object that is provided to the callbacks. Once
// the execution is complete, all the objects found in these distinct
// |Allocator| is merged back into the main compartment lists and
// things proceed normally.
// object for each worker, which is used as follows.
//
// In a non-generational setting you can access the appropriate
// allocator via the |ForkJoinContext| object that is provided to the
// callbacks. Once the parallel execution is complete, all the
// objects found in these distinct |Allocator| are merged back into
// the main compartment lists and things proceed normally. (If it is
// known that the result array contains no references then no merging
// is necessary.)
//
// In a generational setting there is a per-thread |ForkJoinNursery|
// in addition to the per-thread Allocator. All "simple" objects
// (meaning they are reasonably small, can be copied, and have no
// complicated finalization semantics) are allocated in the nurseries;
// other objects are allocated directly in the threads' Allocators,
// which serve as the tenured areas for the threads.
//
// When a thread's nursery fills up it can be collected independently
// of the other threads' nurseries, and does not require any of the
// threads to bail out of the parallel section. The nursery is
// copy-collected, and the expectation is that the survival rate will
// be very low and the collection will be very cheap.
//
// When the parallel execution is complete, and only if merging of the
// Allocators into the main compartment is necessary, then the live
// objects of the nurseries are copied into the respective Allocators,
// in parallel, before the merging takes place.
//
// In Ion-generated code, we will do allocation through the
// |Allocator| found in |ForkJoinContext| (which is obtained via TLS).
// Also, no write barriers are emitted. Conceptually, we should never
// need a write barrier because we only permit writes to objects that
// are newly allocated, and such objects are always black (to use
// incremental GC terminology). However, to be safe, we also block
// upon entering a parallel section to ensure that any concurrent
// marking or incremental GC has completed.
// |ForkJoinNursery| or |Allocator| found in |ForkJoinContext| (which
// is obtained via TLS).
//
// No write barriers are emitted. We permit writes to thread-local
// objects, and such writes can create cross-generational pointers or
// pointers that may interact with incremental GC. However, the
// per-thread generational collector scans its entire tenured area on
// each minor collection, and we block upon entering a parallel
// section to ensure that any concurrent marking or incremental GC has
// completed.
//
// In the future, it should be possible to lift the restriction that
// we must block until inc. GC has completed and also to permit GC
// during parallel execution. But we're not there yet.
// we must block until incremental GC has completed. But we're not
// there yet.
//
// Load balancing (work stealing):
//
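To make the copy-collected per-thread nursery described above concrete, here is a deliberately tiny, hypothetical model; the `ToyNursery` and `TenuredHeap` names are invented, and real ForkJoinNursery details such as chunk lists, forwarding pointers, and object headers are omitted. Allocation is a bump of a cursor, and a minor collection evacuates the live objects into the thread's tenured area and resets the cursor.

```cpp
#include <cstddef>
#include <cstring>
#include <memory>
#include <utility>
#include <vector>

// Hypothetical tenured area standing in for the per-thread Allocator: each
// promoted object gets its own stable heap block.
struct TenuredHeap {
    std::vector<std::unique_ptr<char[]>> blocks;
    void *copyIn(const void *obj, size_t size) {
        blocks.emplace_back(new char[size]);
        std::memcpy(blocks.back().get(), obj, size);
        return blocks.back().get();
    }
};

// Toy bump-allocating nursery; a "minor collection" evacuates the live
// objects into the tenured area and resets the bump cursor.
class ToyNursery {
    static const size_t Size = 64 * 1024;
    char buf_[Size];
    size_t cursor_;

  public:
    ToyNursery() : cursor_(0) {}

    void *allocate(size_t size) {
        if (cursor_ + size > Size)
            return nullptr;          // real code would trigger a minor GC here
        void *p = buf_ + cursor_;
        cursor_ += size;
        return p;
    }

    // |roots| lists the live nursery objects as (pointer, size) pairs;
    // everything else in the nursery is garbage and is simply reused.
    void minorCollect(std::vector<std::pair<void *, size_t>> &roots, TenuredHeap &tenured) {
        for (auto &root : roots)
            root.first = tenured.copyIn(root.first, root.second);  // evacuate survivor
        cursor_ = 0;                                               // nursery is empty again
    }
};
```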
@@ -316,7 +355,7 @@ class ForkJoinContext : public ThreadSafeContext
    // Bailout record used to record the reason this thread stopped executing
    ParallelBailoutRecord *const bailoutRecord;

#ifdef DEBUG
#ifdef FORKJOIN_SPEW
    // Records the last instr. to execute on this thread.
    IonLIRTraceData traceData;
@@ -412,6 +451,21 @@ class ForkJoinContext : public ThreadSafeContext
        return offsetof(ForkJoinContext, worker_);
    }

#ifdef JSGC_FJGENERATIONAL
    // There is already a nursery() method in ThreadSafeContext.
    gc::ForkJoinNursery &fjNursery() { return fjNursery_; }

    // Evacuate live data from the per-thread nursery into the per-thread
    // tenured area.
    void evacuateLiveData() { fjNursery_.evacuatingGC(); }

    // Used in inlining nursery allocation. Note the nursery is a
    // member of the ForkJoinContext (a substructure), not a pointer.
    static size_t offsetOfFJNursery() {
        return offsetof(ForkJoinContext, fjNursery_);
    }
#endif

  private:
    friend class AutoSetForkJoinContext;
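offsetOfFJNursery() exists so that jitted code can reach the embedded nursery without a call; the fragment below is a hypothetical, plain-C++ restatement of the address computation a JIT would bake into generated code. It assumes the declarations from this patch are in scope and JSGC_FJGENERATIONAL is defined, and the helper name is invented.

```cpp
// Illustration only: given a ForkJoinContext*, reach the embedded nursery by
// adding the compile-time offset, the same address computation inlined
// nursery allocation would emit instead of calling fjNursery().
static js::gc::ForkJoinNursery *
nurseryFromContext(js::ForkJoinContext *cx)
{
    char *base = reinterpret_cast<char *>(cx);
    return reinterpret_cast<js::gc::ForkJoinNursery *>(
        base + js::ForkJoinContext::offsetOfFJNursery());
}
```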
@@ -420,6 +474,11 @@ class ForkJoinContext : public ThreadSafeContext

    ForkJoinShared *const shared_;

#ifdef JSGC_FJGENERATIONAL
    gc::ForkJoinGCShared gcShared_;
    gc::ForkJoinNursery fjNursery_;
#endif

    ThreadPoolWorker *worker_;

    bool acquiredJSContext_;

@@ -504,13 +563,15 @@ enum SpewChannel {
    SpewOps,
    SpewCompile,
    SpewBailouts,
    SpewGC,
    NumSpewChannels
};

#if defined(DEBUG) && defined(JS_THREADSAFE) && defined(JS_ION)
#if defined(FORKJOIN_SPEW) && defined(JS_THREADSAFE) && defined(JS_ION)

bool SpewEnabled(SpewChannel channel);
void Spew(SpewChannel channel, const char *fmt, ...);
void SpewVA(SpewChannel channel, const char *fmt, va_list args);
void SpewBeginOp(JSContext *cx, const char *name);
void SpewBailout(uint32_t count, HandleScript script, jsbytecode *pc,
                 ParallelBailoutCause cause);

@@ -524,6 +585,7 @@ void SpewBailoutIR(IonLIRTraceData *data);

static inline bool SpewEnabled(SpewChannel channel) { return false; }
static inline void Spew(SpewChannel channel, const char *fmt, ...) { }
static inline void SpewVA(SpewChannel channel, const char *fmt, va_list args) { }
static inline void SpewBeginOp(JSContext *cx, const char *name) { }
static inline void SpewBailout(uint32_t count, HandleScript script,
                               jsbytecode *pc, ParallelBailoutCause cause) {}

@@ -535,7 +597,7 @@ static inline void SpewMIR(jit::MDefinition *mir, const char *fmt, ...) { }
#endif
static inline void SpewBailoutIR(IonLIRTraceData *data) { }

#endif // DEBUG && JS_THREADSAFE && JS_ION
#endif // FORKJOIN_SPEW && JS_THREADSAFE && JS_ION

} // namespace parallel
} // namespace js
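For completeness, a hypothetical call site for the new SpewGC channel; the function name and message are invented, and when FORKJOIN_SPEW, JS_THREADSAFE, or JS_ION is missing the inline no-op stubs above make the call compile away to nothing.

```cpp
// Invented example: report a nursery event on the new SpewGC channel,
// assuming the declarations from the hunks above are in scope.
static void reportMinorGC(unsigned bytesPromoted)
{
    js::parallel::Spew(js::parallel::SpewGC,
                       "minor collection done, %u bytes promoted",
                       bytesPromoted);
}
```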
@@ -30,6 +30,10 @@ class ObjectImpl;
class Nursery;
class Shape;

namespace gc {
class ForkJoinNursery;
}

/*
 * To really poison a set of values, using 'magic' or 'undefined' isn't good
 * enough since often these will just be ignored by buggy code (see bug 629974)

@@ -177,6 +181,7 @@ class ObjectElements
    friend class ObjectImpl;
    friend class ArrayObject;
    friend class Nursery;
    friend class gc::ForkJoinNursery;

    template <ExecutionMode mode>
    friend bool

@@ -445,6 +450,7 @@ class ObjectImpl : public gc::BarrieredCell<ObjectImpl>

  private:
    friend class Nursery;
    friend class gc::ForkJoinNursery;

    /*
     * Get internal pointers to the range of values starting at start and
@@ -955,6 +955,15 @@ struct JSRuntime : public JS::shadow::Runtime,
    bool isHeapMinorCollecting() { return gc.isHeapMinorCollecting(); }
    bool isHeapCollecting() { return gc.isHeapCollecting(); }

    // Performance note: if isFJMinorCollecting turns out to be slow
    // because reading the counter is slow then we may be able to
    // augment the counter with a volatile flag that is set iff the
    // counter is greater than zero. (It will require some care to
    // make sure the two variables stay in sync.)
    bool isFJMinorCollecting() { return gc.fjCollectionCounter > 0; }
    void incFJMinorCollecting() { gc.fjCollectionCounter++; }
    void decFJMinorCollecting() { gc.fjCollectionCounter--; }

    int gcZeal() { return gc.zeal(); }

    void lockGC() {
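The performance note above sketches a counter-plus-flag scheme in prose; here is one hypothetical shape it could take, with invented names and std::atomic used purely for illustration. As the note warns, keeping the two variables in sync takes care: this version tolerates a briefly stale flag around the zero crossing.

```cpp
#include <atomic>
#include <cstdint>

// Hypothetical pairing of the collection counter with a cheap-to-read flag:
// readers test the flag, writers keep flag and counter roughly in sync under
// the same inc/dec discipline as inc/decFJMinorCollecting().
struct FJCollectionState {
    std::atomic<uint32_t> counter{0};
    std::atomic<bool> collecting{false};   // intended invariant: true iff counter > 0

    void inc() {
        if (counter.fetch_add(1) == 0)
            collecting.store(true);        // first collector sets the flag
    }
    void dec() {
        if (counter.fetch_sub(1) == 1)
            collecting.store(false);       // last collector clears it
    }
    // May lag the counter by an instant around the 0<->1 transition, which is
    // exactly the "some care" the performance note alludes to.
    bool isCollecting() const { return collecting.load(); }
};
```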
@@ -304,7 +304,11 @@ JS_JITINFO_NATIVE_PARALLEL_THREADSAFE(intrinsic_ParallelSpew_jitInfo, intrinsic_
#endif

/*
 * ForkJoin(func, feedback): Invokes |func| many times in parallel.
 * ForkJoin(func, sliceStart, sliceEnd, mode, updatable): Invokes |func| many times in parallel.
 *
 * If "func" will update a pre-existing object then that object /must/ be passed
 * as the object "updatable". It is /not/ correct to pass an object that
 * references the updatable objects indirectly.
 *
 * See ForkJoin.cpp for details and ParallelArray.js for examples.
 */

@@ -786,7 +790,7 @@ static const JSFunctionSpec intrinsic_functions[] = {
    JS_FN("NewStringIterator", intrinsic_NewStringIterator, 0,0),
    JS_FN("IsStringIterator", intrinsic_IsStringIterator, 1,0),

    JS_FN("ForkJoin", intrinsic_ForkJoin, 2,0),
    JS_FN("ForkJoin", intrinsic_ForkJoin, 5,0),
    JS_FN("ForkJoinNumWorkers", intrinsic_ForkJoinNumWorkers, 0,0),
    JS_FN("NewDenseArray", intrinsic_NewDenseArray, 1,0),
    JS_FN("ShouldForceSequential", intrinsic_ShouldForceSequential, 0,0),
@@ -22,6 +22,7 @@
#include "jscntxtinlines.h"
#include "jsobjinlines.h"

#include "gc/ForkJoinNursery-inl.h"
#include "vm/ObjectImpl-inl.h"
#include "vm/Runtime-inl.h"
@@ -111,6 +111,10 @@ class Nursery;
class ObjectImpl;
class StaticBlockObject;

namespace gc {
class ForkJoinNursery;
}

typedef JSPropertyOp PropertyOp;
typedef JSStrictPropertyOp StrictPropertyOp;
typedef JSPropertyDescriptor PropertyDescriptor;

@@ -612,6 +616,7 @@ class Shape : public gc::BarrieredCell<Shape>
    friend class ::JSFunction;
    friend class js::Bindings;
    friend class js::Nursery;
    friend class js::gc::ForkJoinNursery;
    friend class js::ObjectImpl;
    friend class js::PropertyTree;
    friend class js::StaticBlockObject;
@@ -437,14 +437,13 @@ MarkInterpreterActivation(JSTracer *trc, InterpreterActivation *act)
}

void
js::MarkInterpreterActivations(JSRuntime *rt, JSTracer *trc)
js::MarkInterpreterActivations(PerThreadData *ptd, JSTracer *trc)
{
    for (ActivationIterator iter(rt); !iter.done(); ++iter) {
    for (ActivationIterator iter(ptd); !iter.done(); ++iter) {
        Activation *act = iter.activation();
        if (act->isInterpreter())
            MarkInterpreterActivation(trc, act->asInterpreter());
    }
}

/*****************************************************************************/

@@ -1093,7 +1093,7 @@ class InterpreterStack
    }
};

void MarkInterpreterActivations(JSRuntime *rt, JSTracer *trc);
void MarkInterpreterActivations(PerThreadData *ptd, JSTracer *trc);

/*****************************************************************************/
@@ -10,10 +10,15 @@

#include "jslock.h"

#include "js/Utility.h"
#include "vm/ForkJoin.h"
#include "vm/Monitor.h"
#include "vm/Runtime.h"

#ifdef JSGC_FJGENERATIONAL
#include "prmjtime.h"
#endif

using namespace js;

const size_t WORKER_THREAD_STACK_SIZE = 1*1024*1024;

@@ -256,18 +261,25 @@ ThreadPool::ThreadPool(JSRuntime *rt)
  : activeWorkers_(0),
    joinBarrier_(nullptr),
    job_(nullptr),
#ifdef DEBUG
    runtime_(rt),
#ifdef DEBUG
    stolenSlices_(0),
#endif
    pendingSlices_(0),
    isMainThreadActive_(false)
    isMainThreadActive_(false),
    chunkLock_(nullptr),
    timeOfLastAllocation_(0),
    freeChunks_(nullptr)
{ }

ThreadPool::~ThreadPool()
{
    terminateWorkers();
    if (chunkLock_)
        clearChunkCache();
#ifdef JS_THREADSAFE
    if (chunkLock_)
        PR_DestroyLock(chunkLock_);
    if (joinBarrier_)
        PR_DestroyCondVar(joinBarrier_);
#endif

@@ -280,10 +292,13 @@ ThreadPool::init()
    if (!Monitor::init())
        return false;
    joinBarrier_ = PR_NewCondVar(lock_);
    return !!joinBarrier_;
#else
    return true;
    if (!joinBarrier_)
        return false;
    chunkLock_ = PR_NewLock();
    if (!chunkLock_)
        return false;
#endif
    return true;
}

uint32_t
@@ -482,3 +497,92 @@ ThreadPool::abortJob()
    // the thread pool having more work.
    while (hasWork());
}

// We are not using the markPagesUnused() / markPagesInUse() APIs here
// for two reasons. One, the free list is threaded through the
// chunks, so some pages are actually in use. Two, the expectation is
// that a small number of chunks will be used intensively for a short
// while and then be abandoned at the next GC.
//
// It's an open question whether it's best to go directly to the
// pageAllocator, as now, or go via the GC's chunk pool. Either way
// there's a need to manage a predictable chunk cache here as we don't
// want chunks to be deallocated during a parallel section.

gc::ForkJoinNurseryChunk *
ThreadPool::getChunk()
{
#ifdef JSGC_FJGENERATIONAL
    PR_Lock(chunkLock_);
    timeOfLastAllocation_ = PRMJ_Now()/1000000;
    ChunkFreeList *p = freeChunks_;
    if (p)
        freeChunks_ = p->next;
    PR_Unlock(chunkLock_);

    if (p) {
        // Already poisoned.
        return reinterpret_cast<gc::ForkJoinNurseryChunk *>(p);
    }
    gc::ForkJoinNurseryChunk *c =
        reinterpret_cast<gc::ForkJoinNurseryChunk *>(
            runtime_->gc.pageAllocator.mapAlignedPages(gc::ChunkSize, gc::ChunkSize));
    if (!c)
        return c;
    poisonChunk(c);
    return c;
#else
    return nullptr;
#endif
}

void
ThreadPool::putFreeChunk(gc::ForkJoinNurseryChunk *c)
{
#ifdef JSGC_FJGENERATIONAL
    poisonChunk(c);

    PR_Lock(chunkLock_);
    ChunkFreeList *p = reinterpret_cast<ChunkFreeList *>(c);
    p->next = freeChunks_;
    freeChunks_ = p;
    PR_Unlock(chunkLock_);
#endif
}

void
ThreadPool::poisonChunk(gc::ForkJoinNurseryChunk *c)
{
#ifdef JSGC_FJGENERATIONAL
#ifdef DEBUG
    memset(c, JS_POISONED_FORKJOIN_CHUNK, gc::ChunkSize);
#endif
    c->trailer.runtime = nullptr;
#endif
}

void
ThreadPool::pruneChunkCache()
{
#ifdef JSGC_FJGENERATIONAL
    if (PRMJ_Now()/1000000 - timeOfLastAllocation_ >= secondsBeforePrune)
        clearChunkCache();
#endif
}

void
ThreadPool::clearChunkCache()
{
#ifdef JSGC_FJGENERATIONAL
    PR_Lock(chunkLock_);
    ChunkFreeList *p = freeChunks_;
    freeChunks_ = nullptr;
    PR_Unlock(chunkLock_);

    while (p) {
        ChunkFreeList *victim = p;
        p = p->next;
        runtime_->gc.pageAllocator.unmapPages(victim, gc::ChunkSize);
    }
#endif
}
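"The free list is threaded through the chunks" means a freed chunk's own memory doubles as the list node, so the cache needs no side allocations. Below is a hypothetical, minimal version of that intrusive free list; the real ChunkFreeList is declared elsewhere in the tree, and this sketch (with invented names) only mirrors how getChunk()/putFreeChunk() above use it.

```cpp
// Illustration of an intrusive free list threaded through free chunks: the
// first word of a free chunk is reused as the 'next' pointer, so pushing and
// popping never allocate anything of their own.
struct ChunkFreeListSketch {
    ChunkFreeListSketch *next;
};

static ChunkFreeListSketch *freeListHead = nullptr;

static void pushFree(void *chunkMemory)
{
    // Reuse the chunk's own storage as the list node (it is free, after all).
    ChunkFreeListSketch *node = static_cast<ChunkFreeListSketch *>(chunkMemory);
    node->next = freeListHead;
    freeListHead = node;
}

static void *popFree()
{
    ChunkFreeListSketch *node = freeListHead;
    if (node)
        freeListHead = node->next;
    return node;   // still poisoned/uninitialized chunk memory, or nullptr
}
```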