Merge m-c to b2g-inbound. a=merge

This commit is contained in:
Ryan VanderMeulen 2014-10-23 14:10:21 -04:00
Родитель 8ff2b62495 f275a7b9e9
Коммит 08e40c5272
206 изменённых файлов: 6788 добавлений и 997 удалений

Просмотреть файл

@ -1180,7 +1180,7 @@ private:
class AutoTreeMutation
{
public:
AutoTreeMutation(Accessible* aRoot, bool aInvalidationRequired = true) :
explicit AutoTreeMutation(Accessible* aRoot, bool aInvalidationRequired = true) :
mInvalidationRequired(aInvalidationRequired), mRoot(aRoot)
{
MOZ_ASSERT(!(mRoot->mStateFlags & Accessible::eSubtreeMutating));

Просмотреть файл

@ -118,7 +118,7 @@ protected:
class DummyAccessible : public AccessibleWrap
{
public:
DummyAccessible(DocAccessible* aDocument = nullptr) :
explicit DummyAccessible(DocAccessible* aDocument = nullptr) :
AccessibleWrap(nullptr, aDocument) { }
virtual uint64_t NativeState() MOZ_OVERRIDE MOZ_FINAL;

Просмотреть файл

@ -22,7 +22,7 @@ class AccShowEvent;
class DocAccessibleChild : public PDocAccessibleChild
{
public:
DocAccessibleChild(DocAccessible* aDoc) :
explicit DocAccessibleChild(DocAccessible* aDoc) :
mDoc(aDoc)
{ MOZ_COUNT_CTOR(DocAccessibleChild); }
~DocAccessibleChild()

Просмотреть файл

@ -101,7 +101,7 @@ private:
class ProxyEntry : public PLDHashEntryHdr
{
public:
ProxyEntry(const void*) : mProxy(nullptr) {}
explicit ProxyEntry(const void*) : mProxy(nullptr) {}
ProxyEntry(ProxyEntry&& aOther) :
mProxy(aOther.mProxy) { aOther.mProxy = nullptr; }
~ProxyEntry() { delete mProxy; }

Просмотреть файл

@ -21,7 +21,8 @@ class xpcAccessibleApplication : public xpcAccessibleGeneric,
public nsIAccessibleApplication
{
public:
xpcAccessibleApplication(Accessible* aIntl) : xpcAccessibleGeneric(aIntl) { }
explicit xpcAccessibleApplication(Accessible* aIntl) :
xpcAccessibleGeneric(aIntl) { }
NS_DECL_ISUPPORTS_INHERITED

Просмотреть файл

@ -24,7 +24,7 @@ class xpcAccessibleDocument : public xpcAccessibleHyperText,
public nsIAccessibleDocument
{
public:
xpcAccessibleDocument(DocAccessible* aIntl) :
explicit xpcAccessibleDocument(DocAccessible* aIntl) :
xpcAccessibleHyperText(aIntl), mCache(kDefaultCacheLength) { }
NS_DECL_ISUPPORTS_INHERITED

Просмотреть файл

@ -26,7 +26,7 @@ class xpcAccessibleGeneric : public xpcAccessible,
public xpcAccessibleValue
{
public:
xpcAccessibleGeneric(Accessible* aInternal) :
explicit xpcAccessibleGeneric(Accessible* aInternal) :
mIntl(aInternal), mSupportedIfaces(0)
{
if (mIntl->IsSelect())

Просмотреть файл

@ -23,7 +23,8 @@ class xpcAccessibleHyperText : public xpcAccessibleGeneric,
public nsIAccessibleHyperText
{
public:
xpcAccessibleHyperText(Accessible* aIntl) : xpcAccessibleGeneric(aIntl)
explicit xpcAccessibleHyperText(Accessible* aIntl) :
xpcAccessibleGeneric(aIntl)
{
if (mIntl->IsHyperText() && mIntl->AsHyperText()->IsTextRole())
mSupportedIfaces |= eText;

Просмотреть файл

@ -18,7 +18,8 @@ class xpcAccessibleImage : public xpcAccessibleGeneric,
public nsIAccessibleImage
{
public:
xpcAccessibleImage(Accessible* aIntl) : xpcAccessibleGeneric(aIntl) { }
explicit xpcAccessibleImage(Accessible* aIntl) :
xpcAccessibleGeneric(aIntl) { }
NS_DECL_ISUPPORTS_INHERITED

Просмотреть файл

@ -20,7 +20,8 @@ class xpcAccessibleTable : public xpcAccessibleGeneric,
public nsIAccessibleTable
{
public:
xpcAccessibleTable(Accessible* aIntl) : xpcAccessibleGeneric(aIntl) { }
explicit xpcAccessibleTable(Accessible* aIntl) :
xpcAccessibleGeneric(aIntl) { }
NS_DECL_ISUPPORTS_INHERITED

Просмотреть файл

@ -21,7 +21,8 @@ class xpcAccessibleTableCell : public xpcAccessibleHyperText,
public nsIAccessibleTableCell
{
public:
xpcAccessibleTableCell(Accessible* aIntl) : xpcAccessibleHyperText(aIntl) { }
explicit xpcAccessibleTableCell(Accessible* aIntl) :
xpcAccessibleHyperText(aIntl) { }
NS_DECL_ISUPPORTS_INHERITED

Просмотреть файл

@ -25,7 +25,6 @@ lv
nb-NO
nl
nn-NO
oc
pl
pt-BR
pt-PT

Просмотреть файл

@ -963,6 +963,16 @@ Element::CreateShadowRoot(ErrorResult& aError)
return nullptr;
}
nsIDocument* doc = GetCrossShadowCurrentDoc();
nsIContent* destroyedFramesFor = nullptr;
if (doc) {
nsIPresShell* shell = doc->GetShell();
if (shell) {
shell->DestroyFramesFor(this, &destroyedFramesFor);
}
}
MOZ_ASSERT(!GetPrimaryFrame());
// Unlike for XBL, false is the default for inheriting style.
protoBinding->SetInheritsStyle(false);
@ -998,11 +1008,11 @@ Element::CreateShadowRoot(ErrorResult& aError)
// Recreate the frame for the bound content because binding a ShadowRoot
// changes how things are rendered.
nsIDocument* doc = GetCrossShadowCurrentDoc();
if (doc) {
nsIPresShell *shell = doc->GetShell();
MOZ_ASSERT(doc == GetCrossShadowCurrentDoc());
nsIPresShell* shell = doc->GetShell();
if (shell) {
shell->RecreateFramesFor(this);
shell->CreateFramesFor(destroyedFramesFor);
}
}

Просмотреть файл

@ -453,16 +453,57 @@ nsDOMMutationObserver::Observe(nsINode& aTarget,
mozilla::ErrorResult& aRv)
{
if (!(aOptions.mChildList || aOptions.mAttributes || aOptions.mCharacterData)) {
aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
bool childList = aOptions.mChildList;
bool attributes =
aOptions.mAttributes.WasPassed() &&
aOptions.mAttributes.Value();
bool characterData =
aOptions.mCharacterData.WasPassed() &&
aOptions.mCharacterData.Value();
bool subtree = aOptions.mSubtree;
bool attributeOldValue =
aOptions.mAttributeOldValue.WasPassed() &&
aOptions.mAttributeOldValue.Value();
bool characterDataOldValue =
aOptions.mCharacterDataOldValue.WasPassed() &&
aOptions.mCharacterDataOldValue.Value();
if (!aOptions.mAttributes.WasPassed() &&
(aOptions.mAttributeOldValue.WasPassed() ||
aOptions.mAttributeFilter.WasPassed())) {
attributes = true;
}
if (!aOptions.mCharacterData.WasPassed() &&
aOptions.mCharacterDataOldValue.WasPassed()) {
characterData = true;
}
if (!(childList || attributes || characterData)) {
aRv.Throw(NS_ERROR_DOM_TYPE_ERR);
return;
}
if (aOptions.mAttributeOldValue && !aOptions.mAttributes) {
aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
if (aOptions.mAttributeOldValue.WasPassed() &&
aOptions.mAttributeOldValue.Value() &&
aOptions.mAttributes.WasPassed() &&
!aOptions.mAttributes.Value()) {
aRv.Throw(NS_ERROR_DOM_TYPE_ERR);
return;
}
if (aOptions.mCharacterDataOldValue && !aOptions.mCharacterData) {
aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
if (aOptions.mAttributeFilter.WasPassed() &&
aOptions.mAttributes.WasPassed() &&
!aOptions.mAttributes.Value()) {
aRv.Throw(NS_ERROR_DOM_TYPE_ERR);
return;
}
if (aOptions.mCharacterDataOldValue.WasPassed() &&
aOptions.mCharacterDataOldValue.Value() &&
aOptions.mCharacterData.WasPassed() &&
!aOptions.mCharacterData.Value()) {
aRv.Throw(NS_ERROR_DOM_TYPE_ERR);
return;
}
@ -473,11 +514,6 @@ nsDOMMutationObserver::Observe(nsINode& aTarget,
const mozilla::dom::Sequence<nsString>& filtersAsString =
aOptions.mAttributeFilter.Value();
uint32_t len = filtersAsString.Length();
if (len != 0 && !aOptions.mAttributes) {
aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
return;
}
filters.SetCapacity(len);
for (uint32_t i = 0; i < len; ++i) {
@ -487,12 +523,12 @@ nsDOMMutationObserver::Observe(nsINode& aTarget,
}
nsMutationReceiver* r = GetReceiverFor(&aTarget, true);
r->SetChildList(aOptions.mChildList);
r->SetAttributes(aOptions.mAttributes);
r->SetCharacterData(aOptions.mCharacterData);
r->SetSubtree(aOptions.mSubtree);
r->SetAttributeOldValue(aOptions.mAttributeOldValue);
r->SetCharacterDataOldValue(aOptions.mCharacterDataOldValue);
r->SetChildList(childList);
r->SetAttributes(attributes);
r->SetCharacterData(characterData);
r->SetSubtree(subtree);
r->SetAttributeOldValue(attributeOldValue);
r->SetCharacterDataOldValue(characterDataOldValue);
r->SetAttributeFilter(filters);
r->SetAllAttributes(allAttrs);
r->RemoveClones();
@ -541,11 +577,11 @@ nsDOMMutationObserver::GetObservingInfo(nsTArray<Nullable<MutationObservingInfo>
MutationObservingInfo& info = aResult.AppendElement()->SetValue();
nsMutationReceiver* mr = mReceivers[i];
info.mChildList = mr->ChildList();
info.mAttributes = mr->Attributes();
info.mCharacterData = mr->CharacterData();
info.mAttributes.Construct(mr->Attributes());
info.mCharacterData.Construct(mr->CharacterData());
info.mSubtree = mr->Subtree();
info.mAttributeOldValue = mr->AttributeOldValue();
info.mCharacterDataOldValue = mr->CharacterDataOldValue();
info.mAttributeOldValue.Construct(mr->AttributeOldValue());
info.mCharacterDataOldValue.Construct(mr->CharacterDataOldValue());
nsCOMArray<nsIAtom>& filters = mr->AttributeFilter();
if (filters.Count()) {
info.mAttributeFilter.Construct();

Просмотреть файл

@ -101,8 +101,7 @@ function runTest() {
e = ex;
}
ok(e, "Should have thrown an exception");
is(e.name, "SyntaxError", "Should have thrown SyntaxError");
is(e.code, DOMException.SYNTAX_ERR, "Should have thrown DOMException.SYNTAX_ERR");
is(e.name, "TypeError", "Should have thrown TypeError");
e = null;
try {
@ -110,9 +109,7 @@ function runTest() {
} catch (ex) {
e = ex;
}
ok(e, "Should have thrown an exception");
is(e.name, "SyntaxError", "Should have thrown SyntaxError");
is(e.code, DOMException.SYNTAX_ERR, "Should have thrown DOMException.SYNTAX_ERR");
ok(!e, "Shouldn't have thrown an exception");
e = null;
try {
@ -120,9 +117,7 @@ function runTest() {
} catch (ex) {
e = ex;
}
ok(e, "Should have thrown an exception");
is(e.name, "SyntaxError", "Should have thrown SyntaxError");
is(e.code, DOMException.SYNTAX_ERR, "Should have thrown DOMException.SYNTAX_ERR");
ok(!e, "Shouldn't have thrown an exception");
e = null;
try {
@ -130,9 +125,7 @@ function runTest() {
} catch (ex) {
e = ex;
}
ok(e, "Should have thrown an exception");
is(e.name, "SyntaxError", "Should have thrown SyntaxError");
is(e.code, DOMException.SYNTAX_ERR, "Should have thrown DOMException.SYNTAX_ERR");
ok(!e, "Shouldn't have thrown an exception");
e = null;
try {

Просмотреть файл

@ -594,6 +594,12 @@ MP4Reader::Output(TrackType aTrack, MediaData* aSample)
TrackTypeToStr(aTrack), aSample->mTime, aSample->mDuration);
#endif
if (!aSample) {
NS_WARNING("MP4Reader::Output() passed a null sample");
Error(aTrack);
return;
}
DecoderData& data = GetDecoderData(aTrack);
// Don't accept output while we're flushing.
MonitorAutoLock mon(data.mMonitor);

Просмотреть файл

@ -156,15 +156,16 @@ EMEH264Decoder::Decoded(GMPVideoi420Frame* aDecodedFrame)
b.mPlanes[2].mOffset = 0;
b.mPlanes[2].mSkip = 0;
gfx::IntRect pictureRegion(0, 0, width, height);
VideoData *v = VideoData::Create(mVideoInfo,
mImageContainer,
mLastStreamOffset,
aDecodedFrame->Timestamp(),
aDecodedFrame->Duration(),
b,
false,
-1,
ToIntRect(mPictureRegion));
mImageContainer,
mLastStreamOffset,
aDecodedFrame->Timestamp(),
aDecodedFrame->Duration(),
b,
false,
-1,
pictureRegion);
aDecodedFrame->Destroy();
mCallback->Output(v);
}
@ -261,7 +262,6 @@ EMEH264Decoder::GmpInit()
mVideoInfo.mDisplay = nsIntSize(mConfig.display_width, mConfig.display_height);
mVideoInfo.mHasVideo = true;
mPictureRegion = nsIntRect(0, 0, mConfig.display_width, mConfig.display_height);
return NS_OK;
}

Просмотреть файл

@ -99,7 +99,6 @@ private:
GMPVideoHost* mHost;
VideoInfo mVideoInfo;
nsIntRect mPictureRegion;
const mp4_demuxer::VideoDecoderConfig& mConfig;
nsRefPtr<layers::ImageContainer> mImageContainer;
nsRefPtr<MediaTaskQueue> mTaskQueue;

Просмотреть файл

@ -359,6 +359,24 @@ GMPParent::Shutdown()
MOZ_ASSERT(mState == GMPStateNotLoaded);
}
class NotifyGMPShutdownTask : public nsRunnable {
public:
NotifyGMPShutdownTask(const nsAString& aNodeId)
: mNodeId(aNodeId)
{
}
NS_IMETHOD Run() {
MOZ_ASSERT(NS_IsMainThread());
nsCOMPtr<nsIObserverService> obsService = mozilla::services::GetObserverService();
MOZ_ASSERT(obsService);
if (obsService) {
obsService->NotifyObservers(nullptr, "gmp-shutdown", mNodeId.get());
}
return NS_OK;
}
nsString mNodeId;
};
void
GMPParent::DeleteProcess()
{
@ -374,6 +392,11 @@ GMPParent::DeleteProcess()
LOGD(("%s::%s: Shut down process %p", __CLASS__, __FUNCTION__, (void *) mProcess));
mProcess = nullptr;
mState = GMPStateNotLoaded;
NS_DispatchToMainThread(
new NotifyGMPShutdownTask(NS_ConvertUTF8toUTF16(mNodeId)),
NS_DISPATCH_NORMAL);
}
void
@ -984,6 +1007,10 @@ bool
GMPParent::RecvAsyncShutdownRequired()
{
LOGD(("%s::%s: %p", __CLASS__, __FUNCTION__, this));
if (mAsyncShutdownRequired) {
NS_WARNING("Received AsyncShutdownRequired message more than once!");
return true;
}
mAsyncShutdownRequired = true;
mService->AsyncShutdownNeeded(this);
return true;

Просмотреть файл

@ -518,6 +518,7 @@ GeckoMediaPluginService::AsyncShutdownNeeded(GMPParent* aParent)
LOGD(("%s::%s %p", __CLASS__, __FUNCTION__, aParent));
MOZ_ASSERT(NS_GetCurrentThread() == mGMPThread);
MOZ_ASSERT(!mAsyncShutdownPlugins.Contains(aParent));
mAsyncShutdownPlugins.AppendElement(aParent);
}

Просмотреть файл

@ -106,6 +106,66 @@ GMPTestRunner::RunTestGMPCrossOrigin()
if (encoder2) encoder2->Close();
}
static already_AddRefed<nsIThread>
GetGMPThread()
{
nsRefPtr<GeckoMediaPluginService> service =
GeckoMediaPluginService::GetGeckoMediaPluginService();
nsCOMPtr<nsIThread> thread;
EXPECT_TRUE(NS_SUCCEEDED(service->GetThread(getter_AddRefs(thread))));
return thread.forget();
}
class GMPShutdownObserver : public nsIRunnable
, public nsIObserver {
public:
GMPShutdownObserver(nsIRunnable* aShutdownTask,
nsIRunnable* Continuation,
const nsACString& aNodeId)
: mShutdownTask(aShutdownTask)
, mContinuation(Continuation)
, mNodeId(NS_ConvertUTF8toUTF16(aNodeId))
{}
NS_DECL_THREADSAFE_ISUPPORTS
NS_IMETHOD Run() MOZ_OVERRIDE {
MOZ_ASSERT(NS_IsMainThread());
nsCOMPtr<nsIObserverService> observerService =
mozilla::services::GetObserverService();
EXPECT_TRUE(observerService);
observerService->AddObserver(this, "gmp-shutdown", false);
nsCOMPtr<nsIThread> thread(GetGMPThread());
thread->Dispatch(mShutdownTask, NS_DISPATCH_NORMAL);
return NS_OK;
}
NS_IMETHOD Observe(nsISupports* aSubject,
const char* aTopic,
const char16_t* aSomeData) MOZ_OVERRIDE
{
if (!strcmp(aTopic, "gmp-shutdown") &&
mNodeId.Equals(nsDependentString(aSomeData))) {
nsCOMPtr<nsIObserverService> observerService =
mozilla::services::GetObserverService();
EXPECT_TRUE(observerService);
observerService->RemoveObserver(this, "gmp-shutdown");
nsCOMPtr<nsIThread> thread(GetGMPThread());
thread->Dispatch(mContinuation, NS_DISPATCH_NORMAL);
}
return NS_OK;
}
private:
virtual ~GMPShutdownObserver() {}
nsRefPtr<nsIRunnable> mShutdownTask;
nsRefPtr<nsIRunnable> mContinuation;
const nsString mNodeId;
};
NS_IMPL_ISUPPORTS(GMPShutdownObserver, nsIRunnable, nsIObserver)
class NotifyObserversTask : public nsRunnable {
public:
NotifyObserversTask(const char* aTopic)
@ -230,15 +290,6 @@ AssertIsOnGMPThread()
MOZ_ASSERT(currentThread == thread);
}
static already_AddRefed<nsIThread>
GetGMPThread()
{
nsRefPtr<GeckoMediaPluginService> service =
GeckoMediaPluginService::GetGeckoMediaPluginService();
nsCOMPtr<nsIThread> thread;
EXPECT_TRUE(NS_SUCCEEDED(service->GetThread(getter_AddRefs(thread))));
return thread.forget();
}
class GMPStorageTest : public GMPDecryptorProxyCallback
{
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(GMPStorageTest)
@ -323,15 +374,13 @@ class GMPStorageTest : public GMPDecryptorProxyCallback
GeckoMediaPluginService::GetGeckoMediaPluginService();
EXPECT_TRUE(service);
const nsCString nodeId = GetNodeId(aOrigin,
aTopLevelOrigin,
aInPBMode);
EXPECT_TRUE(!nodeId.IsEmpty());
mNodeId = GetNodeId(aOrigin, aTopLevelOrigin, aInPBMode);
EXPECT_TRUE(!mNodeId.IsEmpty());
nsTArray<nsCString> tags;
tags.AppendElement(NS_LITERAL_CSTRING("fake"));
nsresult rv = service->GetGMPDecryptor(&tags, nodeId, &mDecryptor);
nsresult rv = service->GetGMPDecryptor(&tags, mNodeId, &mDecryptor);
EXPECT_TRUE(NS_SUCCEEDED(rv));
EXPECT_TRUE(!!mDecryptor);
@ -396,7 +445,6 @@ class GMPStorageTest : public GMPDecryptorProxyCallback
}
void TestPBStorage() {
// Open decryptor on one, origin, write a record, close decryptor,
// open another, and test that record can be read, close decryptor,
// then send pb-last-context-closed notification, then open decryptor
@ -440,7 +488,69 @@ class GMPStorageTest : public GMPDecryptorProxyCallback
NS_NewRunnableMethod(this,
&GMPStorageTest::SetFinished));
Update(NS_LITERAL_CSTRING("retrieve pbdata"));
}
void CreateAsyncShutdownTimeoutGMP(const nsAString& aOrigin1,
const nsAString& aOrigin2) {
CreateDecryptor(aOrigin1, aOrigin2, false);
Update(NS_LITERAL_CSTRING("shutdown-mode timeout"));
Shutdown();
}
void TestAsyncShutdownTimeout() {
// Create decryptors that timeout in their async shutdown.
// If the gtest hangs on shutdown, test fails!
CreateAsyncShutdownTimeoutGMP(NS_LITERAL_STRING("example7.com"),
NS_LITERAL_STRING("example8.com"));
CreateAsyncShutdownTimeoutGMP(NS_LITERAL_STRING("example9.com"),
NS_LITERAL_STRING("example10.com"));
CreateAsyncShutdownTimeoutGMP(NS_LITERAL_STRING("example11.com"),
NS_LITERAL_STRING("example12.com"));
SetFinished();
};
void TestAsyncShutdownStorage() {
// Test that a GMP can write to storage during shutdown, and retrieve
// that written data in a subsequent session.
CreateDecryptor(NS_LITERAL_STRING("example13.com"),
NS_LITERAL_STRING("example14.com"),
false);
// Instruct the GMP to write a token (the current timestamp, so it's
// unique) during async shutdown, then shutdown the plugin, re-create
// it, and check that the token was successfully stored.
auto t = time(0);
nsCString update("shutdown-mode token ");
nsCString token;
token.AppendInt((int64_t)t);
update.Append(token);
// Wait for a response from the GMP, so we know it's had time to receive
// the token.
nsCString response("shutdown-token received ");
response.Append(token);
Expect(response, NS_NewRunnableMethodWithArg<nsCString>(this,
&GMPStorageTest::TestAsyncShutdownStorage_ReceivedShutdownToken, token));
Update(update);
}
void TestAsyncShutdownStorage_ReceivedShutdownToken(const nsCString& aToken) {
ShutdownThen(NS_NewRunnableMethodWithArg<nsCString>(this,
&GMPStorageTest::TestAsyncShutdownStorage_AsyncShutdownComplete, aToken));
}
void TestAsyncShutdownStorage_AsyncShutdownComplete(const nsCString& aToken) {
// Create a new instance of the plugin, retrieve the token written
// during shutdown and verify it is correct.
CreateDecryptor(NS_LITERAL_STRING("example13.com"),
NS_LITERAL_STRING("example14.com"),
false);
nsCString response("retrieved shutdown-token ");
response.Append(aToken);
Expect(response,
NS_NewRunnableMethod(this, &GMPStorageTest::SetFinished));
Update(NS_LITERAL_CSTRING("retrieve-shutdown-token"));
}
void Expect(const nsCString& aMessage, nsIRunnable* aContinuation) {
@ -454,10 +564,23 @@ class GMPStorageTest : public GMPDecryptorProxyCallback
mFinished = false;
}
void ShutdownThen(nsIRunnable* aContinuation) {
EXPECT_TRUE(!!mDecryptor);
if (!mDecryptor) {
return;
}
EXPECT_FALSE(mNodeId.IsEmpty());
nsRefPtr<GMPShutdownObserver> task(
new GMPShutdownObserver(NS_NewRunnableMethod(this, &GMPStorageTest::Shutdown),
aContinuation, mNodeId));
NS_DispatchToMainThread(task, NS_DISPATCH_NORMAL);
}
void Shutdown() {
if (mDecryptor) {
mDecryptor->Close();
mDecryptor = nullptr;
mNodeId = EmptyCString();
}
}
@ -531,6 +654,7 @@ private:
GMPDecryptorProxy* mDecryptor;
Monitor mMonitor;
Atomic<bool> mFinished;
nsCString mNodeId;
};
void
@ -573,3 +697,13 @@ TEST(GeckoMediaPlugins, GMPStoragePrivateBrowsing) {
nsRefPtr<GMPStorageTest> runner = new GMPStorageTest();
runner->DoTest(&GMPStorageTest::TestPBStorage);
}
TEST(GeckoMediaPlugins, GMPStorageAsyncShutdownTimeout) {
nsRefPtr<GMPStorageTest> runner = new GMPStorageTest();
runner->DoTest(&GMPStorageTest::TestAsyncShutdownTimeout);
}
TEST(GeckoMediaPlugins, GMPStorageAsyncShutdownStorage) {
nsRefPtr<GMPStorageTest> runner = new GMPStorageTest();
runner->DoTest(&GMPStorageTest::TestAsyncShutdownStorage);
}

Просмотреть файл

@ -20,14 +20,6 @@ public:
{
}
virtual bool
preventExtensions(JSContext* aCx, JS::Handle<JSObject*> aProxy) const MOZ_OVERRIDE
{
// Throw a TypeError, per WebIDL.
JS_ReportErrorNumber(aCx, js_GetErrorMessage, nullptr,
JSMSG_CANT_CHANGE_EXTENSIBILITY);
return false;
}
virtual bool
getOwnPropDescriptor(JSContext* aCx, JS::Handle<JSObject*> aProxy,
JS::Handle<jsid> aId,
bool /* unused */,
@ -44,6 +36,13 @@ public:
delete_(JSContext* aCx, JS::Handle<JSObject*> aProxy, JS::Handle<jsid> aId,
bool* aBp) const MOZ_OVERRIDE;
virtual bool
preventExtensions(JSContext* aCx, JS::Handle<JSObject*> aProxy,
bool *succeeded) const MOZ_OVERRIDE
{
*succeeded = false;
return true;
}
virtual bool
isExtensible(JSContext* aCx, JS::Handle<JSObject*> aProxy,
bool* aIsExtensible) const MOZ_OVERRIDE
{

Просмотреть файл

@ -620,10 +620,11 @@ public:
bool *bp) const MOZ_OVERRIDE;
virtual bool enumerate(JSContext *cx, JS::Handle<JSObject*> proxy,
JS::AutoIdVector &props) const MOZ_OVERRIDE;
virtual bool preventExtensions(JSContext *cx,
JS::Handle<JSObject*> proxy,
bool *succeeded) const MOZ_OVERRIDE;
virtual bool isExtensible(JSContext *cx, JS::Handle<JSObject*> proxy, bool *extensible)
const MOZ_OVERRIDE;
virtual bool preventExtensions(JSContext *cx,
JS::Handle<JSObject*> proxy) const MOZ_OVERRIDE;
virtual bool has(JSContext *cx, JS::Handle<JSObject*> proxy,
JS::Handle<jsid> id, bool *bp) const MOZ_OVERRIDE;
virtual bool get(JSContext *cx, JS::Handle<JSObject*> proxy,
@ -706,27 +707,6 @@ const js::Class OuterWindowProxyClass =
nsOuterWindowProxy::ObjectMoved
));
bool
nsOuterWindowProxy::isExtensible(JSContext *cx, JS::Handle<JSObject*> proxy,
bool *extensible) const
{
// If [[Extensible]] could be false, then navigating a window could navigate
// to a window that's [[Extensible]] after being at one that wasn't: an
// invariant violation. So always report true for this.
*extensible = true;
return true;
}
bool
nsOuterWindowProxy::preventExtensions(JSContext *cx,
JS::Handle<JSObject*> proxy) const
{
// See above.
JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr,
JSMSG_CANT_CHANGE_EXTENSIBILITY);
return false;
}
const char *
nsOuterWindowProxy::className(JSContext *cx, JS::Handle<JSObject*> proxy) const
{
@ -864,6 +844,27 @@ nsOuterWindowProxy::enumerate(JSContext *cx, JS::Handle<JSObject*> proxy,
return js::AppendUnique(cx, props, innerProps);
}
bool
nsOuterWindowProxy::preventExtensions(JSContext *cx,
JS::Handle<JSObject*> proxy,
bool *succeeded) const
{
// If [[Extensible]] could be false, then navigating a window could navigate
// to a window that's [[Extensible]] after being at one that wasn't: an
// invariant violation. So never change a window's extensibility.
*succeeded = false;
return true;
}
bool
nsOuterWindowProxy::isExtensible(JSContext *cx, JS::Handle<JSObject*> proxy,
bool *extensible) const
{
// See above.
*extensible = true;
return true;
}
bool
nsOuterWindowProxy::has(JSContext *cx, JS::Handle<JSObject*> proxy,
JS::Handle<jsid> id, bool *bp) const

Просмотреть файл

@ -141,20 +141,19 @@ DOMProxyHandler::EnsureExpandoObject(JSContext* cx, JS::Handle<JSObject*> obj)
}
bool
DOMProxyHandler::isExtensible(JSContext *cx, JS::Handle<JSObject*> proxy, bool *extensible) const
DOMProxyHandler::preventExtensions(JSContext *cx, JS::Handle<JSObject*> proxy,
bool *succeeded) const
{
// always extensible per WebIDL
*extensible = true;
*succeeded = false;
return true;
}
bool
DOMProxyHandler::preventExtensions(JSContext *cx, JS::Handle<JSObject*> proxy) const
DOMProxyHandler::isExtensible(JSContext *cx, JS::Handle<JSObject*> proxy, bool *extensible) const
{
// Throw a TypeError, per WebIDL.
JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr,
JSMSG_CANT_CHANGE_EXTENSIBILITY);
return false;
*extensible = true;
return true;
}
bool

Просмотреть файл

@ -115,9 +115,10 @@ public:
const;
bool delete_(JSContext* cx, JS::Handle<JSObject*> proxy,
JS::Handle<jsid> id, bool* bp) const MOZ_OVERRIDE;
bool preventExtensions(JSContext *cx, JS::Handle<JSObject*> proxy,
bool *succeeded) const MOZ_OVERRIDE;
bool isExtensible(JSContext *cx, JS::Handle<JSObject*> proxy, bool *extensible)
const MOZ_OVERRIDE;
bool preventExtensions(JSContext *cx, JS::Handle<JSObject*> proxy) const MOZ_OVERRIDE;
bool has(JSContext* cx, JS::Handle<JSObject*> proxy, JS::Handle<jsid> id,
bool* bp) const MOZ_OVERRIDE;
bool set(JSContext *cx, JS::Handle<JSObject*> proxy, JS::Handle<JSObject*> receiver,

Просмотреть файл

@ -82,6 +82,7 @@
#include "nsThreadManager.h"
#include "nsAnonymousTemporaryFile.h"
#include "nsISpellChecker.h"
#include "nsClipboardProxy.h"
#include "IHistory.h"
#include "nsNetUtil.h"
@ -720,9 +721,15 @@ ContentChild::InitXPCOM()
NS_WARNING("Couldn't register console listener for child process");
bool isOffline;
SendGetXPCOMProcessAttributes(&isOffline, &mAvailableDictionaries);
ClipboardCapabilities clipboardCaps;
SendGetXPCOMProcessAttributes(&isOffline, &mAvailableDictionaries, &clipboardCaps);
RecvSetOffline(isOffline);
nsCOMPtr<nsIClipboard> clipboard(do_GetService("@mozilla.org/widget/clipboard;1"));
if (nsCOMPtr<nsIClipboardProxy> clipboardProxy = do_QueryInterface(clipboard)) {
clipboardProxy->SetCapabilities(clipboardCaps);
}
DebugOnly<FileUpdateDispatcher*> observer = FileUpdateDispatcher::GetSingleton();
NS_ASSERTION(observer, "FileUpdateDispatcher is null");

Просмотреть файл

@ -2502,8 +2502,13 @@ ContentParent::RecvAddNewProcess(const uint32_t& aPid,
// Update offline settings.
bool isOffline;
InfallibleTArray<nsString> unusedDictionaries;
RecvGetXPCOMProcessAttributes(&isOffline, &unusedDictionaries);
ClipboardCapabilities clipboardCaps;
RecvGetXPCOMProcessAttributes(&isOffline, &unusedDictionaries,
&clipboardCaps);
content->SendSetOffline(isOffline);
MOZ_ASSERT(!clipboardCaps.supportsSelectionClipboard() &&
!clipboardCaps.supportsFindClipboard(),
"Unexpected values");
PreallocatedProcessManager::PublishSpareProcess(content);
return true;
@ -2787,7 +2792,8 @@ ContentParent::RecvGetProcessAttributes(uint64_t* aId,
bool
ContentParent::RecvGetXPCOMProcessAttributes(bool* aIsOffline,
InfallibleTArray<nsString>* dictionaries)
InfallibleTArray<nsString>* dictionaries,
ClipboardCapabilities* clipboardCaps)
{
nsCOMPtr<nsIIOService> io(do_GetIOService());
MOZ_ASSERT(io, "No IO service?");
@ -2799,6 +2805,15 @@ ContentParent::RecvGetXPCOMProcessAttributes(bool* aIsOffline,
spellChecker->GetDictionaryList(dictionaries);
nsCOMPtr<nsIClipboard> clipboard(do_GetService("@mozilla.org/widget/clipboard;1"));
MOZ_ASSERT(clipboard, "No clipboard?");
rv = clipboard->SupportsSelectionClipboard(&clipboardCaps->supportsSelectionClipboard());
MOZ_ASSERT(NS_SUCCEEDED(rv));
rv = clipboard->SupportsFindClipboard(&clipboardCaps->supportsFindClipboard());
MOZ_ASSERT(NS_SUCCEEDED(rv));
return true;
}

Просмотреть файл

@ -413,7 +413,8 @@ private:
bool* aIsForApp,
bool* aIsForBrowser) MOZ_OVERRIDE;
virtual bool RecvGetXPCOMProcessAttributes(bool* aIsOffline,
InfallibleTArray<nsString>* dictionaries)
InfallibleTArray<nsString>* dictionaries,
ClipboardCapabilities* clipboardCaps)
MOZ_OVERRIDE;
virtual bool DeallocPJavaScriptParent(mozilla::jsipc::PJavaScriptParent*) MOZ_OVERRIDE;

Просмотреть файл

@ -315,6 +315,11 @@ struct VolumeInfo {
bool isUnmounting;
};
struct ClipboardCapabilities {
bool supportsSelectionClipboard;
bool supportsFindClipboard;
};
union MaybeFileDesc {
FileDescriptor;
void_t;
@ -517,7 +522,8 @@ parent:
sync GetProcessAttributes()
returns (uint64_t id, bool isForApp, bool isForBrowser);
sync GetXPCOMProcessAttributes()
returns (bool isOffline, nsString[] dictionaries);
returns (bool isOffline, nsString[] dictionaries,
ClipboardCapabilities clipboardCaps);
sync CreateChildProcess(IPCTabContext context,
ProcessPriority priority)

Просмотреть файл

@ -402,7 +402,10 @@ extern "C" {
*aPluginApi = new FakeVideoEncoder (static_cast<GMPVideoHost*> (aHostAPI));
return GMPNoErr;
} else if (!strcmp (aApiName, "eme-decrypt")) {
*aPluginApi = new FakeDecryptor(static_cast<GMPDecryptorHost*> (aHostAPI));
*aPluginApi = new FakeDecryptor();
return GMPNoErr;
} else if (!strcmp (aApiName, "async-shutdown")) {
*aPluginApi = new TestAsyncShutdown(static_cast<GMPAsyncShutdownHost*> (aHostAPI));
return GMPNoErr;
}
return GMPGenericErr;

Просмотреть файл

@ -33,9 +33,8 @@ MaybeFinish()
}
}
FakeDecryptor::FakeDecryptor(GMPDecryptorHost* aHost)
: mHost(aHost)
, mCallback(nullptr)
FakeDecryptor::FakeDecryptor()
: mCallback(nullptr)
{
assert(!sInstance);
sInstance = this;
@ -80,8 +79,8 @@ public:
void Destroy() MOZ_OVERRIDE {
delete this;
}
ReadContinuation* mThen;
string mId;
ReadContinuation* mThen;
};
class TestEmptyContinuation : public ReadContinuation {
@ -178,7 +177,7 @@ public:
return;
}
auto err = GMPOpenRecord(OpenAgainRecordId, new OpenedSecondTimeContinuation(aRecord));
GMPOpenRecord(OpenAgainRecordId, new OpenedSecondTimeContinuation(aRecord));
delete this;
}
@ -268,6 +267,31 @@ public:
string mRecordId;
};
class ReportReadRecordContinuation : public ReadContinuation {
public:
ReportReadRecordContinuation(const string& aRecordId)
: mRecordId(aRecordId)
{}
void ReadComplete(GMPErr aErr, const std::string& aData) MOZ_OVERRIDE {
if (GMP_FAILED(aErr)) {
FakeDecryptor::Message("retrieved " + mRecordId + " failed");
} else {
FakeDecryptor::Message("retrieved " + mRecordId + " " + aData);
}
delete this;
}
string mRecordId;
};
enum ShutdownMode {
ShutdownNormal,
ShutdownTimeout,
ShutdownStoreToken
};
static ShutdownMode sShutdownMode = ShutdownNormal;
static string sShutdownToken = "";
void
FakeDecryptor::UpdateSession(uint32_t aPromiseId,
const char* aSessionId,
@ -290,5 +314,48 @@ FakeDecryptor::UpdateSession(uint32_t aPromiseId,
} else if (task == "retrieve") {
const string& id = tokens[1];
ReadRecord(id, new ReportReadStatusContinuation(id));
} else if (task == "shutdown-mode") {
const string& mode = tokens[1];
if (mode == "timeout") {
sShutdownMode = ShutdownTimeout;
} else if (mode == "token") {
sShutdownMode = ShutdownStoreToken;
sShutdownToken = tokens[2];
Message("shutdown-token received " + sShutdownToken);
}
} else if (task == "retrieve-shutdown-token") {
ReadRecord("shutdown-token", new ReportReadRecordContinuation("shutdown-token"));
}
}
class CompleteShutdownTask : public GMPTask {
public:
CompleteShutdownTask(GMPAsyncShutdownHost* aHost)
: mHost(aHost)
{
}
virtual void Run() {
mHost->ShutdownComplete();
}
virtual void Destroy() { delete this; }
GMPAsyncShutdownHost* mHost;
};
void
TestAsyncShutdown::BeginShutdown() {
switch (sShutdownMode) {
case ShutdownNormal:
mHost->ShutdownComplete();
break;
case ShutdownTimeout:
// Don't do anything; wait for timeout, Gecko should kill
// the plugin and recover.
break;
case ShutdownStoreToken:
// Store message, then shutdown.
WriteRecord("shutdown-token",
sShutdownToken,
new CompleteShutdownTask(mHost));
break;
}
}

Просмотреть файл

@ -14,7 +14,7 @@
class FakeDecryptor : public GMPDecryptor {
public:
FakeDecryptor(GMPDecryptorHost* aHost);
FakeDecryptor();
virtual void Init(GMPDecryptorCallback* aCallback) MOZ_OVERRIDE {
mCallback = aCallback;
@ -70,12 +70,23 @@ public:
private:
virtual ~FakeDecryptor() {}
static FakeDecryptor* sInstance;
void TestStorage();
GMPDecryptorCallback* mCallback;
GMPDecryptorHost* mHost;
};
class TestAsyncShutdown : public GMPAsyncShutdown {
public:
TestAsyncShutdown(GMPAsyncShutdownHost* aHost)
: mHost(aHost)
{
}
virtual void BeginShutdown() MOZ_OVERRIDE;
private:
GMPAsyncShutdownHost* mHost;
};
#endif

Просмотреть файл

@ -12,6 +12,7 @@
class ReadContinuation {
public:
virtual ~ReadContinuation() {}
virtual void ReadComplete(GMPErr aErr, const std::string& aData) = 0;
};
@ -45,6 +46,7 @@ GMPRunOnMainThread(GMPTask* aTask);
class OpenContinuation {
public:
virtual ~OpenContinuation() {}
virtual void OpenComplete(GMPErr aStatus, GMPRecord* aRecord) = 0;
};

Просмотреть файл

@ -85,6 +85,7 @@ var ecmaGlobals =
{name: "SharedFloat32Array", nightly: true},
{name: "SharedFloat64Array", nightly: true},
{name: "SIMD", nightly: true},
{name: "Atomics", nightly: true},
"StopIteration",
"String",
"SyntaxError",

Просмотреть файл

@ -47,11 +47,11 @@ callback MutationCallback = void (sequence<MutationRecord> mutations, MutationOb
dictionary MutationObserverInit {
boolean childList = false;
boolean attributes = false;
boolean characterData = false;
boolean attributes;
boolean characterData;
boolean subtree = false;
boolean attributeOldValue = false;
boolean characterDataOldValue = false;
boolean attributeOldValue;
boolean characterDataOldValue;
sequence<DOMString> attributeFilter;
};

Просмотреть файл

@ -59,6 +59,7 @@ var ecmaGlobals =
{name: "SharedFloat32Array", nightly: true},
{name: "SharedFloat64Array", nightly: true},
{name: "SIMD", nightly: true},
{name: "Atomics", nightly: true},
"StopIteration",
"String",
"SyntaxError",

Просмотреть файл

@ -116,6 +116,14 @@ public:
if (!doc)
return;
// Destroy the frames for mBoundElement.
nsIContent* destroyedFramesFor = nullptr;
nsIPresShell* shell = doc->GetShell();
if (shell) {
shell->DestroyFramesFor(mBoundElement, &destroyedFramesFor);
}
MOZ_ASSERT(!mBoundElement->GetPrimaryFrame());
// Get the binding.
bool ready = false;
nsXBLService::GetInstance()->BindingReady(mBoundElement, mBindingURI, &ready);
@ -131,7 +139,7 @@ public:
// has a primary frame and whether it's in the undisplayed map
// before sending a ContentInserted notification, or bad things
// will happen.
nsIPresShell *shell = doc->GetShell();
MOZ_ASSERT(shell == doc->GetShell());
if (shell) {
nsIFrame* childFrame = mBoundElement->GetPrimaryFrame();
if (!childFrame) {
@ -140,7 +148,7 @@ public:
shell->FrameManager()->GetUndisplayedContent(mBoundElement);
if (!sc) {
shell->RecreateFramesFor(mBoundElement);
shell->CreateFramesFor(destroyedFramesFor);
}
}
}

Просмотреть файл

@ -667,7 +667,7 @@ class FirstLargerOffset
int32_t mSoftTextOffset;
public:
FirstLargerOffset(int32_t aSoftTextOffset) : mSoftTextOffset(aSoftTextOffset) {}
explicit FirstLargerOffset(int32_t aSoftTextOffset) : mSoftTextOffset(aSoftTextOffset) {}
int operator()(const T& t) const {
// We want the first larger offset, so never return 0 (which would
// short-circuit evaluation before finding the last such offset).

Просмотреть файл

@ -66,50 +66,6 @@ LayerHasCheckerboardingAPZC(Layer* aLayer, gfxRGBA* aOutColor)
return false;
}
/**
 * Returns a rectangle of content painted opaquely by aLayer. Very conservative;
 * bails by returning an empty rect in any tricky situations.
 */
static nsIntRect
GetOpaqueRect(Layer* aLayer)
{
  nsIntRect result;
  gfx::Matrix matrix;
  bool is2D = aLayer->AsLayerComposite()->GetShadowTransform().Is2D(&matrix);

  // Just bail if there's anything difficult to handle: a non-2D or
  // non-integer-translation transform, a mask, fixed/sticky positioning, or
  // partial opacity would all invalidate the simple translation math below.
  if (!is2D || aLayer->GetMaskLayer() ||
      aLayer->GetIsFixedPosition() ||
      aLayer->GetIsStickyPosition() ||
      aLayer->GetEffectiveOpacity() != 1.0f ||
      matrix.HasNonIntegerTranslation()) {
    return result;
  }

  if (aLayer->GetContentFlags() & Layer::CONTENT_OPAQUE) {
    // The whole layer is opaque; take the largest rectangle contained in its
    // visible region.
    result = aLayer->GetEffectiveVisibleRegion().GetLargestRectangle();
  } else {
    // Drill down into RefLayers because that's what we particularly care about;
    // layer construction for aLayer will not have known about the opaqueness
    // of any RefLayer subtrees.
    RefLayer* refLayer = aLayer->AsRefLayer();
    if (refLayer && refLayer->GetFirstChild()) {
      result = GetOpaqueRect(refLayer->GetFirstChild());
    }
  }

  // Translate our opaque region to cover the child
  gfx::Point point = matrix.GetTranslation();
  result.MoveBy(static_cast<int>(point.x), static_cast<int>(point.y));

  // Clip to the layer's effective clip rect, if it has one.
  const nsIntRect* clipRect = aLayer->GetEffectiveClipRect();
  if (clipRect) {
    result.IntersectRect(result, *clipRect);
  }

  return result;
}
static void DrawLayerInfo(const RenderTargetIntRect& aClipRect,
LayerManagerComposite* aManager,
Layer* aLayer)
@ -162,12 +118,10 @@ static void PrintUniformityInfo(Layer* aLayer)
/* all of the per-layer prepared data we need to maintain */
struct PreparedLayer
{
PreparedLayer(LayerComposite *aLayer, RenderTargetIntRect aClipRect, bool aRestoreVisibleRegion, nsIntRegion &aVisibleRegion) :
mLayer(aLayer), mClipRect(aClipRect), mRestoreVisibleRegion(aRestoreVisibleRegion), mSavedVisibleRegion(aVisibleRegion) {}
PreparedLayer(LayerComposite *aLayer, RenderTargetIntRect aClipRect) :
mLayer(aLayer), mClipRect(aClipRect) {}
LayerComposite* mLayer;
RenderTargetIntRect mClipRect;
bool mRestoreVisibleRegion;
nsIntRegion mSavedVisibleRegion;
};
/* all of the prepared data that we need in RenderLayer() */
@ -223,38 +177,8 @@ ContainerPrepare(ContainerT* aContainer,
CULLING_LOG("Preparing sublayer %p\n", layerToRender->GetLayer());
nsIntRegion savedVisibleRegion;
bool restoreVisibleRegion = false;
gfx::Matrix matrix;
bool is2D = layerToRender->GetLayer()->GetBaseTransform().Is2D(&matrix);
if (i + 1 < children.Length() &&
is2D && !matrix.HasNonIntegerTranslation()) {
LayerComposite* nextLayer = static_cast<LayerComposite*>(children.ElementAt(i + 1)->ImplData());
CULLING_LOG("Culling against %p\n", nextLayer->GetLayer());
nsIntRect nextLayerOpaqueRect;
if (nextLayer && nextLayer->GetLayer()) {
nextLayerOpaqueRect = GetOpaqueRect(nextLayer->GetLayer());
gfx::Point point = matrix.GetTranslation();
nextLayerOpaqueRect.MoveBy(static_cast<int>(-point.x), static_cast<int>(-point.y));
CULLING_LOG(" point %i, %i\n", static_cast<int>(-point.x), static_cast<int>(-point.y));
CULLING_LOG(" opaque rect %i, %i, %i, %i\n", nextLayerOpaqueRect.x, nextLayerOpaqueRect.y, nextLayerOpaqueRect.width, nextLayerOpaqueRect.height);
}
if (!nextLayerOpaqueRect.IsEmpty()) {
CULLING_LOG(" draw\n");
savedVisibleRegion = layerToRender->GetShadowVisibleRegion();
nsIntRegion visibleRegion;
visibleRegion.Sub(savedVisibleRegion, nextLayerOpaqueRect);
if (visibleRegion.IsEmpty()) {
continue;
}
layerToRender->SetShadowVisibleRegion(visibleRegion);
restoreVisibleRegion = true;
} else {
CULLING_LOG(" skip\n");
}
}
layerToRender->Prepare(clipRect);
aContainer->mPrepared->mLayers.AppendElement(PreparedLayer(layerToRender, clipRect, restoreVisibleRegion, savedVisibleRegion));
aContainer->mPrepared->mLayers.AppendElement(PreparedLayer(layerToRender, clipRect));
}
CULLING_LOG("Preparing container layer %p\n", aContainer->GetLayer());
@ -325,11 +249,6 @@ RenderLayers(ContainerT* aContainer,
layerToRender->RenderLayer(RenderTargetPixel::ToUntyped(clipRect));
}
if (preparedData.mRestoreVisibleRegion) {
// Restore the region in case it's not covered by opaque content next time
layerToRender->SetShadowVisibleRegion(preparedData.mSavedVisibleRegion);
}
if (gfxPrefs::UniformityInfo()) {
PrintUniformityInfo(layer);
}

Просмотреть файл

@ -197,6 +197,55 @@ LayerManagerComposite::BeginTransactionWithDrawTarget(DrawTarget* aTarget, const
mTargetBounds = aRect;
}
void
LayerManagerComposite::ApplyOcclusionCulling(Layer* aLayer, nsIntRegion& aOpaqueRegion)
{
  nsIntRegion localOpaque;
  Matrix transform2d;
  bool isTranslation = false;
  // If aLayer has a simple transform (only an integer translation) then we
  // can easily convert aOpaqueRegion into pre-transform coordinates and include
  // that region.
  if (aLayer->GetLocalTransform().Is2D(&transform2d)) {
    if (transform2d.IsIntegerTranslation()) {
      isTranslation = true;
      localOpaque = aOpaqueRegion;
      localOpaque.MoveBy(-transform2d._31, -transform2d._32);
    }
  }

  // Subtract any areas that we know to be opaque from our
  // visible region.
  LayerComposite *composite = aLayer->AsLayerComposite();
  if (!localOpaque.IsEmpty()) {
    nsIntRegion visible = composite->GetShadowVisibleRegion();
    visible.Sub(visible, localOpaque);
    composite->SetShadowVisibleRegion(visible);
  }

  // Compute occlusions for our descendants (in front-to-back order) and allow them to
  // contribute to localOpaque.
  for (Layer* child = aLayer->GetLastChild(); child; child = child->GetPrevSibling()) {
    ApplyOcclusionCulling(child, localOpaque);
  }

  // If we have a simple transform, then we can add our opaque area into
  // aOpaqueRegion. (A mask or non-unit opacity would make our visible
  // region only partially opaque, so we skip reporting in those cases.)
  if (isTranslation &&
      !aLayer->GetMaskLayer() &&
      aLayer->GetLocalOpacity() == 1.0f) {
    if (aLayer->GetContentFlags() & Layer::CONTENT_OPAQUE) {
      localOpaque.Or(localOpaque, composite->GetShadowVisibleRegion());
    }
    // Move back into post-transform coordinates and clip before reporting
    // the region to our parent.
    localOpaque.MoveBy(transform2d._31, transform2d._32);
    const nsIntRect* clip = aLayer->GetEffectiveClipRect();
    if (clip) {
      localOpaque.And(localOpaque, *clip);
    }
    aOpaqueRegion.Or(aOpaqueRegion, localOpaque);
  }
}
bool
LayerManagerComposite::EndEmptyTransaction(EndTransactionFlags aFlags)
{
@ -257,6 +306,9 @@ LayerManagerComposite::EndTransaction(DrawPaintedLayerCallback aCallback,
// so we don't need to pass any global transform here.
mRoot->ComputeEffectiveTransforms(gfx::Matrix4x4());
nsIntRegion opaque;
ApplyOcclusionCulling(mRoot, opaque);
Render();
mGeometryChanged = false;
} else {

Просмотреть файл

@ -165,6 +165,13 @@ public:
virtual const char* Name() const MOZ_OVERRIDE { return ""; }
/**
* Restricts the shadow visible region of layers that are covered with
* opaque content. aOpaqueRegion is the region already known to be covered
* with opaque content, in the post-transform coordinate space of aLayer.
*/
void ApplyOcclusionCulling(Layer* aLayer, nsIntRegion& aOpaqueRegion);
/**
* RAII helper class to add a mask effect with the compositable from aMaskLayer
* to the EffectChain aEffect and notify the compositable when we are done.

Просмотреть файл

@ -414,9 +414,8 @@ CompositorD3D11::CreateRenderTarget(const gfx::IntRect& aRect,
D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET);
RefPtr<ID3D11Texture2D> texture;
mDevice->CreateTexture2D(&desc, nullptr, byRef(texture));
NS_ASSERTION(texture, "Could not create texture");
if (!texture) {
HRESULT hr = mDevice->CreateTexture2D(&desc, nullptr, byRef(texture));
if (Failed(hr) || !texture) {
return nullptr;
}
@ -447,9 +446,9 @@ CompositorD3D11::CreateRenderTargetFromSource(const gfx::IntRect &aRect,
D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET);
RefPtr<ID3D11Texture2D> texture;
mDevice->CreateTexture2D(&desc, nullptr, byRef(texture));
HRESULT hr = mDevice->CreateTexture2D(&desc, nullptr, byRef(texture));
NS_ASSERTION(texture, "Could not create texture");
if (!texture) {
if (Failed(hr) || !texture) {
return nullptr;
}
@ -601,7 +600,12 @@ CompositorD3D11::DrawQuad(const gfx::Rect& aRect,
}
RefPtr<ID3D11ShaderResourceView> view;
mDevice->CreateShaderResourceView(source->GetD3D11Texture(), nullptr, byRef(view));
HRESULT hr = mDevice->CreateShaderResourceView(source->GetD3D11Texture(), nullptr, byRef(view));
if (Failed(hr)) {
// XXX - There's a chance we won't be able to render anything, should we
// just crash release builds?
return;
}
ID3D11ShaderResourceView* srView = view;
mContext->PSSetShaderResources(3, 1, &srView);
@ -654,7 +658,12 @@ CompositorD3D11::DrawQuad(const gfx::Rect& aRect,
SetPSForEffect(aEffectChain.mPrimaryEffect, maskType, texturedEffect->mTexture->GetFormat());
RefPtr<ID3D11ShaderResourceView> view;
mDevice->CreateShaderResourceView(source->GetD3D11Texture(), nullptr, byRef(view));
HRESULT hr = mDevice->CreateShaderResourceView(source->GetD3D11Texture(), nullptr, byRef(view));
if (Failed(hr)) {
// XXX - There's a chance we won't be able to render anything, should we
// just crash release builds?
return;
}
ID3D11ShaderResourceView* srView = view;
mContext->PSSetShaderResources(0, 1, &srView);
@ -695,13 +704,27 @@ CompositorD3D11::DrawQuad(const gfx::Rect& aRect,
TextureSourceD3D11* sourceCb = source->GetSubSource(Cb)->AsSourceD3D11();
TextureSourceD3D11* sourceCr = source->GetSubSource(Cr)->AsSourceD3D11();
HRESULT hr;
RefPtr<ID3D11ShaderResourceView> views[3];
mDevice->CreateShaderResourceView(sourceY->GetD3D11Texture(),
nullptr, byRef(views[0]));
mDevice->CreateShaderResourceView(sourceCb->GetD3D11Texture(),
nullptr, byRef(views[1]));
mDevice->CreateShaderResourceView(sourceCr->GetD3D11Texture(),
nullptr, byRef(views[2]));
hr = mDevice->CreateShaderResourceView(sourceY->GetD3D11Texture(),
nullptr, byRef(views[0]));
if (Failed(hr)) {
return;
}
hr = mDevice->CreateShaderResourceView(sourceCb->GetD3D11Texture(),
nullptr, byRef(views[1]));
if (Failed(hr)) {
return;
}
hr = mDevice->CreateShaderResourceView(sourceCr->GetD3D11Texture(),
nullptr, byRef(views[2]));
if (Failed(hr)) {
return;
}
ID3D11ShaderResourceView* srViews[3] = { views[0], views[1], views[2] };
mContext->PSSetShaderResources(0, 3, srViews);
@ -728,8 +751,17 @@ CompositorD3D11::DrawQuad(const gfx::Rect& aRect,
mVSConstants.textureCoords = effectComponentAlpha->mTextureCoords;
RefPtr<ID3D11ShaderResourceView> views[2];
mDevice->CreateShaderResourceView(sourceOnBlack->GetD3D11Texture(), nullptr, byRef(views[0]));
mDevice->CreateShaderResourceView(sourceOnWhite->GetD3D11Texture(), nullptr, byRef(views[1]));
HRESULT hr;
hr = mDevice->CreateShaderResourceView(sourceOnBlack->GetD3D11Texture(), nullptr, byRef(views[0]));
if (Failed(hr)) {
return;
}
hr = mDevice->CreateShaderResourceView(sourceOnWhite->GetD3D11Texture(), nullptr, byRef(views[1]));
if (Failed(hr)) {
return;
}
ID3D11ShaderResourceView* srViews[2] = { views[0], views[1] };
mContext->PSSetShaderResources(0, 2, srViews);
@ -905,7 +937,7 @@ CompositorD3D11::UpdateRenderTarget()
nsRefPtr<ID3D11Texture2D> backBuf;
hr = mSwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)backBuf.StartAssignment());
if (FAILED(hr)) {
if (Failed(hr)) {
return;
}
@ -972,29 +1004,6 @@ CompositorD3D11::CreateShaders()
return true;
}
// XXX - it would be nice to use gfxCriticalError, but it needs to
// be made to work off the main thread first.
//
// Decides whether a failed Map() call is survivable: success codes always
// are; a device-removed error is survivable only for removal reasons the
// compositor knows how to recover from.
static bool
ShouldRecoverFromMapFailure(HRESULT hr, ID3D11Device* device)
{
  if (SUCCEEDED(hr)) {
    return true;
  }

  if (hr != DXGI_ERROR_DEVICE_REMOVED) {
    return false;
  }

  const HRESULT reason = device->GetDeviceRemovedReason();
  // DXGI_ERROR_INVALID_CALL (and anything unrecognized) is not recoverable.
  return reason == DXGI_ERROR_DEVICE_HUNG ||
         reason == DXGI_ERROR_DEVICE_REMOVED ||
         reason == DXGI_ERROR_DEVICE_RESET ||
         reason == DXGI_ERROR_DRIVER_INTERNAL_ERROR;
}
bool
CompositorD3D11::UpdateConstantBuffers()
{
@ -1002,21 +1011,15 @@ CompositorD3D11::UpdateConstantBuffers()
D3D11_MAPPED_SUBRESOURCE resource;
hr = mContext->Map(mAttachments->mVSConstantBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &resource);
if (FAILED(hr)) {
if (ShouldRecoverFromMapFailure(hr, GetDevice())) {
return false;
}
MOZ_CRASH();
if (Failed(hr)) {
return false;
}
*(VertexShaderConstants*)resource.pData = mVSConstants;
mContext->Unmap(mAttachments->mVSConstantBuffer, 0);
hr = mContext->Map(mAttachments->mPSConstantBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &resource);
if (FAILED(hr)) {
if (ShouldRecoverFromMapFailure(hr, GetDevice())) {
return false;
}
MOZ_CRASH();
if (Failed(hr)) {
return false;
}
*(PixelShaderConstants*)resource.pData = mPSConstants;
mContext->Unmap(mAttachments->mPSConstantBuffer, 0);
@ -1082,5 +1085,46 @@ CompositorD3D11::PaintToTarget()
mContext->Unmap(readTexture, 0);
}
// Central D3D11 error handler: crashes on Critical severities and on
// invalid API calls, warns for Recoverable ones.
void
CompositorD3D11::HandleError(HRESULT hr, Severity aSeverity)
{
  // XXX - It would be nice to use gfxCriticalError, but it needs to
  // be made to work off the main thread first.
  MOZ_ASSERT(aSeverity != DebugAssert);

  if (aSeverity == Critical) {
    MOZ_CRASH("Unrecoverable D3D11 error");
  }

  // A device-removed error carries more detail in the removal reason;
  // substitute it so the invalid-call check below sees the real cause.
  if (mDevice && hr == DXGI_ERROR_DEVICE_REMOVED) {
    hr = mDevice->GetDeviceRemovedReason();
  }

  // Always crash if we are making invalid calls
  if (hr == DXGI_ERROR_INVALID_CALL) {
    MOZ_CRASH("Invalid D3D11 api call");
  }

  if (aSeverity == Recoverable) {
    NS_WARNING("Encountered a recoverable D3D11 error");
  }
}
// Returns true when hr is a failure code, routing it through HandleError
// first; returns false for success codes.
bool
CompositorD3D11::Failed(HRESULT hr, Severity aSeverity)
{
  if (SUCCEEDED(hr)) {
    return false;
  }
  HandleError(hr, aSeverity);
  return true;
}
// Convenience inverse of Failed(): true when hr is a success code.
bool
CompositorD3D11::Succeeded(HRESULT hr, Severity aSeverity)
{
  if (Failed(hr, aSeverity)) {
    return false;
  }
  return true;
}
}
}

Просмотреть файл

@ -143,6 +143,16 @@ public:
ID3D11DeviceContext* GetDC() { return mContext; }
private:
enum Severity {
Recoverable,
DebugAssert,
Critical,
};
void HandleError(HRESULT hr, Severity aSeverity);
bool Failed(HRESULT hr, Severity aSeverity = DebugAssert);
bool Succeeded(HRESULT hr, Severity aSeverity = DebugAssert);
// ensure mSize is up to date with respect to mWidget
void EnsureSize();
void VerifyBufferSize();

Просмотреть файл

@ -792,6 +792,26 @@ CompositorParent::CompositeCallback(TimeStamp aScheduleTime)
CompositeToTarget(nullptr);
}
// Go down the composite layer tree, setting properties to match their
// content-side counterparts.
static void
SetShadowProperties(Layer* aLayer)
{
  // FIXME: Bug 717688 -- Do these updates in LayerTransactionParent::RecvUpdate.
  LayerComposite* layerComposite = aLayer->AsLayerComposite();
  // Set the layerComposite's base transform to the layer's base transform.
  layerComposite->SetShadowTransform(aLayer->GetBaseTransform());
  layerComposite->SetShadowTransformSetByAnimation(false);
  layerComposite->SetShadowVisibleRegion(aLayer->GetVisibleRegion());
  layerComposite->SetShadowClipRect(aLayer->GetClipRect());
  layerComposite->SetShadowOpacity(aLayer->GetOpacity());

  // Recurse so the whole subtree is mirrored.
  for (Layer* child = aLayer->GetFirstChild();
      child; child = child->GetNextSibling()) {
    SetShadowProperties(child);
  }
}
void
CompositorParent::CompositeToTarget(DrawTarget* aTarget, const nsIntRect* aRect)
{
@ -824,6 +844,8 @@ CompositorParent::CompositeToTarget(DrawTarget* aTarget, const nsIntRect* aRect)
mLayerManager->BeginTransaction();
}
SetShadowProperties(mLayerManager->GetRoot());
if (mForceCompositionTask && !mOverrideComposeReadiness) {
if (mCompositionManager->ReadyForCompose()) {
mForceCompositionTask->Cancel();
@ -904,26 +926,6 @@ CompositorParent::CanComposite()
!mPaused;
}
// Go down the composite layer tree, setting properties to match their
// content-side counterparts.
static void
SetShadowProperties(Layer* aLayer)
{
// FIXME: Bug 717688 -- Do these updates in LayerTransactionParent::RecvUpdate.
LayerComposite* layerComposite = aLayer->AsLayerComposite();
// Set the layerComposite's base transform to the layer's base transform.
layerComposite->SetShadowTransform(aLayer->GetBaseTransform());
layerComposite->SetShadowTransformSetByAnimation(false);
layerComposite->SetShadowVisibleRegion(aLayer->GetVisibleRegion());
layerComposite->SetShadowClipRect(aLayer->GetClipRect());
layerComposite->SetShadowOpacity(aLayer->GetOpacity());
for (Layer* child = aLayer->GetFirstChild();
child; child = child->GetNextSibling()) {
SetShadowProperties(child);
}
}
void
CompositorParent::ScheduleRotationOnCompositorThread(const TargetConfig& aTargetConfig,
bool aIsFirstPaint)

Просмотреть файл

@ -173,6 +173,9 @@ CompositorOGL::CleanupResources()
mQuadVBO = 0;
}
mGLContext->MakeCurrent();
mContextStateTracker.DestroyOGL(mGLContext);
// On the main thread the Widget will be destroyed soon and calling MakeCurrent
// after that could cause a crash (at least with GLX, see bug 1059793), unless
// context is marked as destroyed.
@ -664,6 +667,8 @@ CompositorOGL::SetRenderTarget(CompositingRenderTarget *aSurface)
= static_cast<CompositingRenderTargetOGL*>(aSurface);
if (mCurrentRenderTarget != surface) {
mCurrentRenderTarget = surface;
mContextStateTracker.PopOGLSection(gl(), "Frame");
mContextStateTracker.PushOGLSection(gl(), "Frame");
surface->BindRenderTarget();
}
}
@ -768,6 +773,8 @@ CompositorOGL::BeginFrame(const nsIntRegion& aInvalidRegion,
CompositingRenderTargetOGL::RenderTargetForWindow(this,
IntSize(width, height));
mCurrentRenderTarget->BindRenderTarget();
mContextStateTracker.PushOGLSection(gl(), "Frame");
#ifdef DEBUG
mWindowRenderTarget = mCurrentRenderTarget;
#endif
@ -1345,6 +1352,8 @@ CompositorOGL::EndFrame()
}
#endif
mContextStateTracker.PopOGLSection(gl(), "Frame");
mFrameInProgress = false;
if (mTarget) {

Просмотреть файл

@ -6,6 +6,7 @@
#ifndef MOZILLA_GFX_COMPOSITOROGL_H
#define MOZILLA_GFX_COMPOSITOROGL_H
#include "ContextStateTracker.h"
#include "gfx2DGlue.h"
#include "GLContextTypes.h" // for GLContext, etc
#include "GLDefs.h" // for GLuint, LOCAL_GL_TEXTURE_2D, etc
@ -387,6 +388,8 @@ private:
RefPtr<CompositorTexturePoolOGL> mTexturePool;
ContextStateTrackerOGL mContextStateTracker;
bool mDestroyed;
/**

Просмотреть файл

@ -0,0 +1,137 @@
/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ContextStateTracker.h"
#include "GLContext.h"
#ifdef MOZ_ENABLE_PROFILER_SPS
#include "ProfilerMarkers.h"
#endif
namespace mozilla {
// Opens a new profiling section named aSectionName: starts a GL
// TIME_ELAPSED timer query and records the CPU start time. No-op unless the
// "gpu" profiler feature is active and the context supports query objects.
void
ContextStateTrackerOGL::PushOGLSection(GLContext* aGL, const char* aSectionName)
{
  if (!profiler_feature_active("gpu")) {
    return;
  }

  if (!aGL->IsSupported(gl::GLFeature::query_objects)) {
    return;
  }

  if (mSectionStack.Length() > 0) {
    // We need to end the query since we're starting a new section and restore it
    // when this section is finished.
    aGL->fEndQuery(LOCAL_GL_TIME_ELAPSED);
    Top().mCpuTimeEnd = TimeStamp::Now();
  }

  ContextState newSection(aSectionName);

  // Start a fresh timer query for the new section.
  GLuint queryObject;
  aGL->fGenQueries(1, &queryObject);
  newSection.mStartQueryHandle = queryObject;
  newSection.mCpuTimeStart = TimeStamp::Now();

  aGL->fBeginQuery(LOCAL_GL_TIME_ELAPSED_EXT, queryObject);

  mSectionStack.AppendElement(newSection);
}
// Closes the innermost profiling section (which must match aSectionName) and
// moves it to the completed list. If a parent section exists, a fresh query
// is started under the parent's name so its remaining time is still measured.
void
ContextStateTrackerOGL::PopOGLSection(GLContext* aGL, const char* aSectionName)
{
  // We might have ignored a section start if we started profiling
  // in the middle section. If so we will ignore this unmatched end.
  if (mSectionStack.Length() == 0) {
    return;
  }

  int i = mSectionStack.Length() - 1;
  MOZ_ASSERT(strcmp(mSectionStack[i].mSectionName, aSectionName) == 0);
  aGL->fEndQuery(LOCAL_GL_TIME_ELAPSED);
  mSectionStack[i].mCpuTimeEnd = TimeStamp::Now();
  mCompletedSections.AppendElement(mSectionStack[i]);
  mSectionStack.RemoveElementAt(i);

  if (i - 1 >= 0) {
    const char* sectionToRestore = Top().mSectionName;

    // We need to restore the outer section.
    // We'll do this by completing this section and adding a new
    // one with the same name.
    mCompletedSections.AppendElement(Top());
    mSectionStack.RemoveElementAt(i - 1);

    ContextState newSection(sectionToRestore);

    GLuint queryObject;
    aGL->fGenQueries(1, &queryObject);
    newSection.mStartQueryHandle = queryObject;
    newSection.mCpuTimeStart = TimeStamp::Now();

    aGL->fBeginQuery(LOCAL_GL_TIME_ELAPSED_EXT, queryObject);

    mSectionStack.AppendElement(newSection);
  }

  // Opportunistically report any sections whose query results are ready.
  Flush(aGL);
}
// Harvests finished timer queries from the completed-section list (oldest
// first) and emits profiler markers for them. Sections ended less than
// 200ms ago are left for a later flush.
void
ContextStateTrackerOGL::Flush(GLContext* aGL)
{
  TimeStamp now = TimeStamp::Now();

  while (mCompletedSections.Length() != 0) {
    // On mac we see QUERY_RESULT_AVAILABLE cause a GL flush if we query it
    // too early. For profiling we'd rather have the last 200ms of data
    // missing than cause measurement distortions.
    if (mCompletedSections[0].mCpuTimeEnd + TimeDuration::FromMilliseconds(200) > now) {
      break;
    }

    GLuint handle = mCompletedSections[0].mStartQueryHandle;

    // We've waited 200ms, content rendering at > 20 FPS will be ready. We
    // shouldn't see any flushes now.
    GLuint returned = 0;
    aGL->fGetQueryObjectuiv(handle, LOCAL_GL_QUERY_RESULT_AVAILABLE, &returned);
    if (!returned) {
      break;
    }

    GLuint gpuTime = 0;
    aGL->fGetQueryObjectuiv(handle, LOCAL_GL_QUERY_RESULT, &gpuTime);

    aGL->fDeleteQueries(1, &handle);

#ifdef MOZ_ENABLE_PROFILER_SPS
    PROFILER_MARKER_PAYLOAD("gpu_timer_query", new GPUMarkerPayload(
      mCompletedSections[0].mCpuTimeStart,
      mCompletedSections[0].mCpuTimeEnd,
      0,
      gpuTime
    ));
#endif

    mCompletedSections.RemoveElementAt(0);
  }
}
// Releases any GL query objects still owned by completed sections. Called
// when the context is going away; pending query results are discarded.
void
ContextStateTrackerOGL::DestroyOGL(GLContext* aGL)
{
  while (!mCompletedSections.IsEmpty()) {
    GLuint handle = mCompletedSections[0].mStartQueryHandle;
    aGL->fDeleteQueries(1, &handle);
    mCompletedSections.RemoveElementAt(0);
  }
}
}

Просмотреть файл

@ -0,0 +1,83 @@
/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef GFX_CONTEXTSTATETRACKER_H
#define GFX_CONTEXTSTATETRACKER_H
#include "GLTypes.h"
#include "mozilla/TimeStamp.h"
#include "nsTArray.h"
#include <string.h>
namespace mozilla {
namespace gl {
class GLContext;
}
/**
 * This class tracks the state of the context for debugging and profiling.
 * Each section pushes a new stack entry and must be matched by an end section.
 * All nested sections must be ended before ending a parent section.
 */
class ContextStateTracker {
public:
  ContextStateTracker() {}

private:
  bool IsProfiling() { return true; }

protected:
  typedef GLuint TimerQueryHandle;

  // One profiled section: its name, CPU-side start/end timestamps, and the
  // handle of the GPU timer query started when the section was entered.
  class ContextState {
  public:
    // explicit so a bare const char* never silently converts to a section
    // (consistent with single-argument constructors elsewhere in the tree).
    explicit ContextState(const char* aSectionName)
      : mSectionName(aSectionName)
    {}

    const char* mSectionName;
    mozilla::TimeStamp mCpuTimeStart;
    mozilla::TimeStamp mCpuTimeEnd;
    TimerQueryHandle mStartQueryHandle;
  };

  // Returns the innermost open section; the stack must be non-empty.
  ContextState& Top() {
    MOZ_ASSERT(mSectionStack.Length());
    return mSectionStack[mSectionStack.Length() - 1];
  }

  nsTArray<ContextState> mCompletedSections; // ended, awaiting query results
  nsTArray<ContextState> mSectionStack;      // currently open sections
};
/*
class ID3D11DeviceContext;
class ContextStateTrackerD3D11 MOZ_FINAL : public ContextStateTracker {
public:
// TODO Implement me
void PushD3D11Section(ID3D11DeviceContext* aCtxt, const char* aSectionName) {}
void PopD3D11Section(ID3D11DeviceContext* aCtxt, const char* aSectionName) {}
void DestroyD3D11(ID3D11DeviceContext* aCtxt) {}
private:
void Flush();
};
*/
// OpenGL flavor of ContextStateTracker: implements section push/pop with GL
// timer query objects (see ContextStateTracker.cpp).
class ContextStateTrackerOGL MOZ_FINAL : public ContextStateTracker {
  typedef mozilla::gl::GLContext GLContext;

public:
  void PushOGLSection(GLContext* aGL, const char* aSectionName);
  void PopOGLSection(GLContext* aGL, const char* aSectionName);
  void DestroyOGL(GLContext* aGL);

private:
  // Harvests completed timer queries and reports them to the profiler.
  void Flush(GLContext* aGL);
};
}
#endif

Просмотреть файл

@ -5,6 +5,7 @@
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
EXPORTS += [
'ContextStateTracker.h',
'DrawMode.h',
'gfx2DGlue.h',
'gfx3DMatrix.h',
@ -196,6 +197,7 @@ if CONFIG['INTEL_ARCHITECTURE']:
SOURCES['gfxAlphaRecoverySSE2.cpp'].flags += CONFIG['SSE2_FLAGS']
SOURCES += [
'ContextStateTracker.cpp',
# Includes mac system header conflicting with point/size,
# and includes glxXlibSurface.h which drags in Xrender.h
'gfxASurface.cpp',

Просмотреть файл

@ -383,14 +383,20 @@ bool Channel::ChannelImpl::EnqueueHelloMessage() {
return true;
}
static void
ClearAndShrink(std::string& s, size_t capacity)
void Channel::ChannelImpl::ClearAndShrinkInputOverflowBuf()
{
// This swap trick is the closest thing C++ has to a guaranteed way to
// shrink the capacity of a string.
std::string tmp;
tmp.reserve(capacity);
s.swap(tmp);
// If input_overflow_buf_ has grown, shrink it back to its normal size.
static size_t previousCapacityAfterClearing = 0;
if (input_overflow_buf_.capacity() > previousCapacityAfterClearing) {
// This swap trick is the closest thing C++ has to a guaranteed way
// to shrink the capacity of a string.
std::string tmp;
tmp.reserve(Channel::kReadBufferSize);
input_overflow_buf_.swap(tmp);
previousCapacityAfterClearing = input_overflow_buf_.capacity();
} else {
input_overflow_buf_.clear();
}
}
bool Channel::ChannelImpl::Connect() {
@ -519,7 +525,7 @@ bool Channel::ChannelImpl::ProcessIncomingMessages() {
} else {
if (input_overflow_buf_.size() >
static_cast<size_t>(kMaximumMessageSize - bytes_read)) {
ClearAndShrink(input_overflow_buf_, Channel::kReadBufferSize);
ClearAndShrinkInputOverflowBuf();
CHROMIUM_LOG(ERROR) << "IPC message is too big";
return false;
}
@ -628,7 +634,7 @@ bool Channel::ChannelImpl::ProcessIncomingMessages() {
}
}
if (end == p) {
ClearAndShrink(input_overflow_buf_, Channel::kReadBufferSize);
ClearAndShrinkInputOverflowBuf();
} else if (!overflowp) {
// p is from input_buf_
input_overflow_buf_.assign(p, end - p);

Просмотреть файл

@ -59,6 +59,8 @@ class Channel::ChannelImpl : public MessageLoopForIO::Watcher {
bool ProcessIncomingMessages();
bool ProcessOutgoingMessages();
void ClearAndShrinkInputOverflowBuf();
// MessageLoopForIO::Watcher implementation.
virtual void OnFileCanReadWithoutBlocking(int fd);
virtual void OnFileCanWriteWithoutBlocking(int fd);

Просмотреть файл

@ -35,8 +35,10 @@ class JavaScriptBase : public WrapperOwner, public WrapperAnswer, public Base
/*** IPC handlers ***/
bool RecvPreventExtensions(const uint64_t &objId, ReturnStatus *rs) {
return Answer::RecvPreventExtensions(ObjectId::deserialize(objId), rs);
bool RecvPreventExtensions(const uint64_t &objId, ReturnStatus *rs,
bool *succeeded) {
return Answer::RecvPreventExtensions(ObjectId::deserialize(objId), rs,
succeeded);
}
bool RecvGetPropertyDescriptor(const uint64_t &objId, const JSIDVariant &id,
ReturnStatus *rs,
@ -131,8 +133,9 @@ class JavaScriptBase : public WrapperOwner, public WrapperAnswer, public Base
bool SendDropObject(const ObjectId &objId) {
return Base::SendDropObject(objId.serialize());
}
bool SendPreventExtensions(const ObjectId &objId, ReturnStatus *rs) {
return Base::SendPreventExtensions(objId.serialize(), rs);
bool SendPreventExtensions(const ObjectId &objId, ReturnStatus *rs,
bool *succeeded) {
return Base::SendPreventExtensions(objId.serialize(), rs, succeeded);
}
bool SendGetPropertyDescriptor(const ObjectId &objId, const JSIDVariant &id,
ReturnStatus *rs,

Просмотреть файл

@ -24,7 +24,7 @@ both:
async DropObject(uint64_t objId);
// These roughly map to the ProxyHandler hooks that CPOWs need.
prio(high) sync PreventExtensions(uint64_t objId) returns (ReturnStatus rs);
prio(high) sync PreventExtensions(uint64_t objId) returns (ReturnStatus rs, bool result);
prio(high) sync GetPropertyDescriptor(uint64_t objId, JSIDVariant id) returns (ReturnStatus rs, PPropertyDescriptor result);
prio(high) sync GetOwnPropertyDescriptor(uint64_t objId, JSIDVariant id) returns (ReturnStatus rs, PPropertyDescriptor result);
prio(high) sync DefineProperty(uint64_t objId, JSIDVariant id, PPropertyDescriptor descriptor) returns (ReturnStatus rs);

Просмотреть файл

@ -58,17 +58,20 @@ WrapperAnswer::ok(ReturnStatus *rs)
}
bool
WrapperAnswer::RecvPreventExtensions(const ObjectId &objId, ReturnStatus *rs)
WrapperAnswer::RecvPreventExtensions(const ObjectId &objId, ReturnStatus *rs,
bool *succeeded)
{
AutoSafeJSContext cx;
JSAutoRequest request(cx);
*succeeded = false;
RootedObject obj(cx, findObjectById(cx, objId));
if (!obj)
return fail(cx, rs);
JSAutoCompartment comp(cx, obj);
if (!JS_PreventExtensions(cx, obj))
if (!JS_PreventExtensions(cx, obj, succeeded))
return fail(cx, rs);
LOG("%s.preventExtensions()", ReceiverObj(objId));

Просмотреть файл

@ -18,7 +18,8 @@ class WrapperAnswer : public virtual JavaScriptShared
public:
explicit WrapperAnswer(JSRuntime *rt) : JavaScriptShared(rt) {}
bool RecvPreventExtensions(const ObjectId &objId, ReturnStatus *rs);
bool RecvPreventExtensions(const ObjectId &objId, ReturnStatus *rs,
bool *succeeded);
bool RecvGetPropertyDescriptor(const ObjectId &objId, const JSIDVariant &id,
ReturnStatus *rs,
PPropertyDescriptor *out);

Просмотреть файл

@ -71,8 +71,8 @@ class CPOWProxyHandler : public BaseProxyHandler
AutoIdVector &props) const MOZ_OVERRIDE;
virtual bool delete_(JSContext *cx, HandleObject proxy, HandleId id, bool *bp) const MOZ_OVERRIDE;
virtual bool enumerate(JSContext *cx, HandleObject proxy, AutoIdVector &props) const MOZ_OVERRIDE;
virtual bool preventExtensions(JSContext *cx, HandleObject proxy, bool *succeeded) const MOZ_OVERRIDE;
virtual bool isExtensible(JSContext *cx, HandleObject proxy, bool *extensible) const MOZ_OVERRIDE;
virtual bool preventExtensions(JSContext *cx, HandleObject proxy) const MOZ_OVERRIDE;
virtual bool has(JSContext *cx, HandleObject proxy, HandleId id, bool *bp) const MOZ_OVERRIDE;
virtual bool get(JSContext *cx, HandleObject proxy, HandleObject receiver,
HandleId id, MutableHandleValue vp) const MOZ_OVERRIDE;
@ -112,26 +112,6 @@ const CPOWProxyHandler CPOWProxyHandler::singleton;
} \
return owner->call args;
bool
CPOWProxyHandler::preventExtensions(JSContext *cx, HandleObject proxy) const
{
FORWARD(preventExtensions, (cx, proxy));
}
bool
WrapperOwner::preventExtensions(JSContext *cx, HandleObject proxy)
{
ObjectId objId = idOf(proxy);
ReturnStatus status;
if (!SendPreventExtensions(objId, &status))
return ipcfail(cx);
LOG_STACK();
return ok(cx, status);
}
bool
CPOWProxyHandler::getPropertyDescriptor(JSContext *cx, HandleObject proxy, HandleId id,
MutableHandle<JSPropertyDescriptor> desc) const
@ -475,6 +455,26 @@ WrapperOwner::getOwnEnumerablePropertyKeys(JSContext *cx, HandleObject proxy, Au
return getPropertyKeys(cx, proxy, JSITER_OWNONLY, props);
}
// Proxy trap: delegate preventExtensions to the owning WrapperOwner.
bool
CPOWProxyHandler::preventExtensions(JSContext *cx, HandleObject proxy, bool *succeeded) const
{
  FORWARD(preventExtensions, (cx, proxy, succeeded));
}
// Sends a PreventExtensions request for the remote object backing |proxy|.
// On return, *succeeded reports whether the remote JS_PreventExtensions call
// actually succeeded (filled in by the peer's RecvPreventExtensions).
bool
WrapperOwner::preventExtensions(JSContext *cx, HandleObject proxy, bool *succeeded)
{
  ObjectId objId = idOf(proxy);

  ReturnStatus status;
  if (!SendPreventExtensions(objId, &status, succeeded))
    return ipcfail(cx); // transport-level IPC failure

  LOG_STACK();

  return ok(cx, status);
}
bool
CPOWProxyHandler::isExtensible(JSContext *cx, HandleObject proxy, bool *extensible) const
{

Просмотреть файл

@ -40,8 +40,8 @@ class WrapperOwner : public virtual JavaScriptShared
bool ownPropertyKeys(JSContext *cx, JS::HandleObject proxy, JS::AutoIdVector &props);
bool delete_(JSContext *cx, JS::HandleObject proxy, JS::HandleId id, bool *bp);
bool enumerate(JSContext *cx, JS::HandleObject proxy, JS::AutoIdVector &props);
bool preventExtensions(JSContext *cx, JS::HandleObject proxy, bool *succeeded);
bool isExtensible(JSContext *cx, JS::HandleObject proxy, bool *extensible);
bool preventExtensions(JSContext *cx, JS::HandleObject proxy);
bool has(JSContext *cx, JS::HandleObject proxy, JS::HandleId id, bool *bp);
bool get(JSContext *cx, JS::HandleObject proxy, JS::HandleObject receiver,
JS::HandleId id, JS::MutableHandleValue vp);
@ -106,7 +106,8 @@ class WrapperOwner : public virtual JavaScriptShared
/*** Dummy call handlers ***/
public:
virtual bool SendDropObject(const ObjectId &objId) = 0;
virtual bool SendPreventExtensions(const ObjectId &objId, ReturnStatus *rs) = 0;
virtual bool SendPreventExtensions(const ObjectId &objId, ReturnStatus *rs,
bool *succeeded) = 0;
virtual bool SendGetPropertyDescriptor(const ObjectId &objId, const JSIDVariant &id,
ReturnStatus *rs,
PPropertyDescriptor *out) = 0;

Просмотреть файл

@ -675,35 +675,6 @@ class RetType
bool operator!=(RetType rhs) const { return which_ != rhs.which_; }
};
// Represents the subset of Type that can be used as a return type of a builtin
// Math function.
class MathRetType
{
public:
enum Which {
Double = Type::Double,
Float = Type::Float,
Floatish = Type::Floatish,
Signed = Type::Signed,
Unsigned = Type::Unsigned
};
private:
Which which_;
public:
MathRetType() : which_(Which(-1)) {}
MOZ_IMPLICIT MathRetType(Which w) : which_(w) {}
Type toType() const {
return Type(Type::Which(which_));
}
Which which() const {
return which_;
}
};
namespace {
// Represents the subset of Type that can be used as a variable or
@ -4408,7 +4379,7 @@ CheckAssign(FunctionCompiler &f, ParseNode *assign, MDefinition **def, Type *typ
}
static bool
CheckMathIMul(FunctionCompiler &f, ParseNode *call, MDefinition **def, MathRetType *type)
CheckMathIMul(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
{
if (CallArgListLength(call) != 2)
return f.fail(call, "Math.imul must be passed 2 arguments");
@ -4432,12 +4403,12 @@ CheckMathIMul(FunctionCompiler &f, ParseNode *call, MDefinition **def, MathRetTy
return f.failf(rhs, "%s is not a subtype of intish", rhsType.toChars());
*def = f.mul(lhsDef, rhsDef, MIRType_Int32, MMul::Integer);
*type = MathRetType::Signed;
*type = Type::Signed;
return true;
}
static bool
CheckMathClz32(FunctionCompiler &f, ParseNode *call, MDefinition **def, MathRetType *type)
CheckMathClz32(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
{
if (CallArgListLength(call) != 1)
return f.fail(call, "Math.clz32 must be passed 1 argument");
@ -4453,12 +4424,12 @@ CheckMathClz32(FunctionCompiler &f, ParseNode *call, MDefinition **def, MathRetT
return f.failf(arg, "%s is not a subtype of intish", argType.toChars());
*def = f.unary<MClz>(argDef);
*type = MathRetType::Signed;
*type = Type::Fixnum;
return true;
}
static bool
CheckMathAbs(FunctionCompiler &f, ParseNode *call, MDefinition **def, MathRetType *type)
CheckMathAbs(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
{
if (CallArgListLength(call) != 1)
return f.fail(call, "Math.abs must be passed 1 argument");
@ -4472,19 +4443,19 @@ CheckMathAbs(FunctionCompiler &f, ParseNode *call, MDefinition **def, MathRetTyp
if (argType.isSigned()) {
*def = f.unary<MAbs>(argDef, MIRType_Int32);
*type = MathRetType::Unsigned;
*type = Type::Unsigned;
return true;
}
if (argType.isMaybeDouble()) {
*def = f.unary<MAbs>(argDef, MIRType_Double);
*type = MathRetType::Double;
*type = Type::Double;
return true;
}
if (argType.isMaybeFloat()) {
*def = f.unary<MAbs>(argDef, MIRType_Float32);
*type = MathRetType::Floatish;
*type = Type::Floatish;
return true;
}
@ -4492,7 +4463,7 @@ CheckMathAbs(FunctionCompiler &f, ParseNode *call, MDefinition **def, MathRetTyp
}
static bool
CheckMathSqrt(FunctionCompiler &f, ParseNode *call, MDefinition **def, MathRetType *type)
CheckMathSqrt(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
{
if (CallArgListLength(call) != 1)
return f.fail(call, "Math.sqrt must be passed 1 argument");
@ -4506,13 +4477,13 @@ CheckMathSqrt(FunctionCompiler &f, ParseNode *call, MDefinition **def, MathRetTy
if (argType.isMaybeDouble()) {
*def = f.unary<MSqrt>(argDef, MIRType_Double);
*type = MathRetType::Double;
*type = Type::Double;
return true;
}
if (argType.isMaybeFloat()) {
*def = f.unary<MSqrt>(argDef, MIRType_Float32);
*type = MathRetType::Floatish;
*type = Type::Floatish;
return true;
}
@ -4520,8 +4491,7 @@ CheckMathSqrt(FunctionCompiler &f, ParseNode *call, MDefinition **def, MathRetTy
}
static bool
CheckMathMinMax(FunctionCompiler &f, ParseNode *callNode, MDefinition **def, bool isMax,
MathRetType *type)
CheckMathMinMax(FunctionCompiler &f, ParseNode *callNode, MDefinition **def, bool isMax, Type *type)
{
if (CallArgListLength(callNode) < 2)
return f.fail(callNode, "Math.min/max must be passed at least 2 arguments");
@ -4533,20 +4503,19 @@ CheckMathMinMax(FunctionCompiler &f, ParseNode *callNode, MDefinition **def, boo
return false;
if (firstType.isMaybeDouble()) {
*type = MathRetType::Double;
*type = Type::Double;
firstType = Type::MaybeDouble;
} else if (firstType.isMaybeFloat()) {
*type = MathRetType::Float;
*type = Type::Float;
firstType = Type::MaybeFloat;
} else if (firstType.isInt()) {
*type = MathRetType::Signed;
firstType = Type::Int;
} else if (firstType.isSigned()) {
*type = Type::Signed;
firstType = Type::Signed;
} else {
return f.failf(firstArg, "%s is not a subtype of double?, float? or int",
firstType.toChars());
}
MIRType opType = firstType.toMIRType();
MDefinition *lastDef = firstDef;
ParseNode *nextArg = NextNode(firstArg);
for (unsigned i = 1; i < CallArgListLength(callNode); i++, nextArg = NextNode(nextArg)) {
@ -4558,7 +4527,7 @@ CheckMathMinMax(FunctionCompiler &f, ParseNode *callNode, MDefinition **def, boo
if (!(nextType <= firstType))
return f.failf(nextArg, "%s is not a subtype of %s", nextType.toChars(), firstType.toChars());
lastDef = f.minMax(lastDef, nextDef, opType, isMax);
lastDef = f.minMax(lastDef, nextDef, firstType.toMIRType(), isMax);
}
*def = lastDef;
@ -4834,7 +4803,7 @@ CheckCoercionArg(FunctionCompiler &f, ParseNode *arg, AsmJSCoercion expected, MD
}
static bool
CheckMathFRound(FunctionCompiler &f, ParseNode *callNode, MDefinition **def, MathRetType *type)
CheckMathFRound(FunctionCompiler &f, ParseNode *callNode, MDefinition **def, Type *type)
{
if (CallArgListLength(callNode) != 1)
return f.fail(callNode, "Math.fround must be passed 1 argument");
@ -4847,13 +4816,13 @@ CheckMathFRound(FunctionCompiler &f, ParseNode *callNode, MDefinition **def, Mat
MOZ_ASSERT(argType == Type::Float);
*def = argDef;
*type = MathRetType::Float;
*type = Type::Float;
return true;
}
static bool
CheckMathBuiltinCall(FunctionCompiler &f, ParseNode *callNode, AsmJSMathBuiltinFunction func,
MDefinition **def, MathRetType *type)
MDefinition **def, Type *type)
{
unsigned arity = 0;
AsmJSImmKind doubleCallee, floatCallee;
@ -4926,7 +4895,7 @@ CheckMathBuiltinCall(FunctionCompiler &f, ParseNode *callNode, AsmJSMathBuiltinF
if (!f.builtinCall(callee, call, varType.toMIRType(), def))
return false;
*type = MathRetType(opIsDouble ? MathRetType::Double : MathRetType::Floatish);
*type = opIsDouble ? Type::Double : Type::Floatish;
return true;
}
@ -5348,14 +5317,8 @@ CheckUncoercedCall(FunctionCompiler &f, ParseNode *expr, MDefinition **def, Type
const ModuleCompiler::Global *global;
if (IsCallToGlobal(f.m(), expr, &global)) {
if (global->isMathFunction()) {
MathRetType mathRetType;
if (!CheckMathBuiltinCall(f, expr, global->mathBuiltinFunction(), def, &mathRetType))
return false;
*type = mathRetType.toType();
return true;
}
if (global->isMathFunction())
return CheckMathBuiltinCall(f, expr, global->mathBuiltinFunction(), def, type);
if (global->isSimdCtor())
return CheckSimdCtorCall(f, expr, global, def, type);
if (global->isSimdOperation())
@ -5435,11 +5398,11 @@ static bool
CheckCoercedMathBuiltinCall(FunctionCompiler &f, ParseNode *callNode, AsmJSMathBuiltinFunction func,
RetType retType, MDefinition **def, Type *type)
{
MDefinition *result;
MathRetType resultType;
if (!CheckMathBuiltinCall(f, callNode, func, &result, &resultType))
MDefinition *resultDef;
Type resultType;
if (!CheckMathBuiltinCall(f, callNode, func, &resultDef, &resultType))
return false;
return CoerceResult(f, callNode, retType, result, resultType.toType(), def, type);
return CoerceResult(f, callNode, retType, resultDef, resultType, def, type);
}
static bool

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -0,0 +1,52 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef builtin_AtomicsObject_h
#define builtin_AtomicsObject_h
#include "jsobj.h"
namespace js {
class AtomicsObject : public JSObject
{
public:
static const Class class_;
static JSObject* initClass(JSContext *cx, Handle<GlobalObject *> global);
static bool toString(JSContext *cx, unsigned int argc, jsval *vp);
static const int FutexOK = 0;
// The error values must be negative because APIs such as futexWaitOrRequeue
// return a value that is either the number of tasks woken or an error code.
static const int FutexNotequal = -1;
static const int FutexTimedout = -2;
// Internal signals; negative for the same reason.
static const int FutexInterrupted = -1000;
};
void atomics_fullMemoryBarrier();
bool atomics_compareExchange(JSContext *cx, unsigned argc, Value *vp);
bool atomics_load(JSContext *cx, unsigned argc, Value *vp);
bool atomics_store(JSContext *cx, unsigned argc, Value *vp);
bool atomics_fence(JSContext *cx, unsigned argc, Value *vp);
bool atomics_add(JSContext *cx, unsigned argc, Value *vp);
bool atomics_sub(JSContext *cx, unsigned argc, Value *vp);
bool atomics_and(JSContext *cx, unsigned argc, Value *vp);
bool atomics_or(JSContext *cx, unsigned argc, Value *vp);
bool atomics_xor(JSContext *cx, unsigned argc, Value *vp);
bool atomics_futexWait(JSContext *cx, unsigned argc, Value *vp);
bool atomics_futexWake(JSContext *cx, unsigned argc, Value *vp);
bool atomics_futexWakeOrRequeue(JSContext *cx, unsigned argc, Value *vp);
} /* namespace js */
JSObject *
js_InitAtomicsClass(JSContext *cx, js::HandleObject obj);
#endif /* builtin_AtomicsObject_h */

Просмотреть файл

@ -7,6 +7,7 @@
#include "builtin/Object.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/UniquePtr.h"
#include "jscntxt.h"
@ -22,7 +23,7 @@ using namespace js::types;
using js::frontend::IsIdentifier;
using mozilla::ArrayLength;
using mozilla::UniquePtr;
bool
js::obj_construct(JSContext *cx, unsigned argc, Value *vp)
@ -609,7 +610,12 @@ obj_setPrototypeOf(JSContext *cx, unsigned argc, Value *vp)
/* Step 7. */
if (!success) {
JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_OBJECT_NOT_EXTENSIBLE, "object");
UniquePtr<char[], JS::FreePolicy> bytes(DecompileValueGenerator(cx, JSDVG_SEARCH_STACK,
args[0], NullPtr()));
if (!bytes)
return false;
JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_SETPROTOTYPEOF_FAIL,
bytes.get());
return false;
}
@ -1006,7 +1012,7 @@ obj_isExtensible(JSContext *cx, unsigned argc, Value *vp)
return true;
}
// ES6 draft rev27 (2014/08/24) 19.1.2.15 Object.preventExtensions(O)
// ES6 20141014 draft 19.1.2.15 Object.preventExtensions(O)
static bool
obj_preventExtensions(JSContext *cx, unsigned argc, Value *vp)
{
@ -1017,10 +1023,21 @@ obj_preventExtensions(JSContext *cx, unsigned argc, Value *vp)
if (!args.get(0).isObject())
return true;
// Steps 2-5.
// Steps 2-3.
RootedObject obj(cx, &args.get(0).toObject());
return JSObject::preventExtensions(cx, obj);
bool status;
if (!JSObject::preventExtensions(cx, obj, &status))
return false;
// Step 4.
if (!status) {
JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_CANT_CHANGE_EXTENSIBILITY);
return false;
}
// Step 5.
return true;
}
// ES6 draft rev27 (2014/08/24) 19.1.2.5 Object.freeze(O)

Просмотреть файл

@ -2078,6 +2078,25 @@ ByteSize(JSContext *cx, unsigned argc, Value *vp)
return true;
}
static bool
SetImmutablePrototype(JSContext *cx, unsigned argc, Value *vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
if (!args.get(0).isObject()) {
JS_ReportError(cx, "setImmutablePrototype: object expected");
return false;
}
RootedObject obj(cx, &args[0].toObject());
bool succeeded;
if (!JSObject::setImmutablePrototype(cx, obj, &succeeded))
return false;
args.rval().setBoolean(succeeded);
return true;
}
static const JSFunctionSpecWithHelp TestingFunctions[] = {
JS_FN_HELP("gc", ::GC, 0, 0,
"gc([obj] | 'compartment' [, 'shrinking'])",
@ -2400,6 +2419,14 @@ gc::ZealModeHelpText),
" Return the size in bytes occupied by |value|, or |undefined| if value\n"
" is not allocated in memory.\n"),
JS_FN_HELP("setImmutablePrototype", SetImmutablePrototype, 1, 0,
"setImmutablePrototype(obj)",
" Try to make obj's [[Prototype]] immutable, such that subsequent attempts to\n"
" change it will fail. Return true if obj's [[Prototype]] was successfully made\n"
" immutable (or if it already was immutable), false otherwise. Throws in case\n"
" of internal error, or if the operation doesn't even make sense (for example,\n"
" because the object is a revoked proxy)."),
JS_FS_HELP_END
};

Просмотреть файл

@ -26,6 +26,10 @@ StoreBuffer::SlotsEdge::mark(JSTracer *trc)
{
NativeObject *obj = object();
// Beware JSObject::swap exchanging a native object for a non-native one.
if (!obj->isNative())
return;
if (IsInsideNursery(obj))
return;

Просмотреть файл

@ -64,11 +64,13 @@ for (n of [-Math.pow(2,31)-1, -Math.pow(2,31), -Math.pow(2,31)+1, -1, 0, 1, Math
var f = asmLink(asmCompile('glob', USE_ASM + 'var clz32=glob.Math.clz32; function f(i) { i=i|0; return clz32(i)|0 } return f'), this);
for (n of [0, 1, 2, 15, 16, Math.pow(2,31)-1, Math.pow(2,31), Math.pow(2,31)+1, Math.pow(2,32)-1, Math.pow(2,32), Math.pow(2,32)+1])
assertEq(f(n), Math.clz32(n|0));
assertEq(asmLink(asmCompile('glob', USE_ASM + 'var clz32=glob.Math.clz32; function f(i, j) { i=i|0;j=j|0; return (clz32(i) < (j|0))|0 } return f'), this)(0x1, 30), 0);
assertEq(asmLink(asmCompile('glob', USE_ASM + 'var clz32=glob.Math.clz32; function f(i, j) { i=i|0;j=j|0; return (clz32(i) < (j>>>0))|0 } return f'), this)(0x1, 30), 0);
var doubleNumbers = [NaN, Infinity, -Infinity, -10000, -3.4, -0, 0, 3.4, 10000];
var floatNumbers = [];
for (var x of doubleNumbers) floatNumbers.push(Math.fround(x));
var intNumbers = [-10000, -3, -1, 0, 3, 10000];
var intNumbers = [-Math.pow(2,31), -10000, -3, -1, 0, 3, 10000, Math.pow(2,31), Math.pow(2,31)+1];
function testBinary(f, g, numbers) {
for (n of numbers)
@ -84,22 +86,25 @@ assertAsmLinkFail(asmCompile('glob', USE_ASM + 'var at=glob.Math.atan2; function
assertAsmLinkFail(asmCompile('glob', USE_ASM + 'var at=glob.Math.atan2; function f(d,e) { d=+d;e=+e; return +at(d,e) } return f'), {Math:{atan2:null}});
testBinary(asmLink(asmCompile('glob', USE_ASM + 'var at=glob.Math.atan2; function f(d,e) { d=+d;e=+e; return +at(d,e) } return f'), {Math:{atan2:Math.atan2}}), Math.atan2, doubleNumbers);
function coercedMin(...args) { for (var i = 0; i < args.length; i++) args[i] = args[i]|0; return Math.min(...args) }
function coercedMax(...args) { for (var i = 0; i < args.length; i++) args[i] = args[i]|0; return Math.max(...args) }
assertAsmTypeFail('glob', USE_ASM + 'var min=glob.Math.min; function f(d) { d=+d; return +min(d) } return f');
assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + 'var i32=new glob.Int32Array(heap); var min=glob.Math.min; function f() { return min(i32[0], 5)|0 } return f');
assertAsmTypeFail('glob', USE_ASM + 'var min=glob.Math.min; function f(x) { x=x|0; return min(3 + x, 5)|0 } return f');
assertAsmTypeFail('glob', USE_ASM + 'var min=glob.Math.min; function f(x) { x=x|0; return min(5, 3 + x)|0 } return f');
assertAsmTypeFail('glob', USE_ASM + 'var min=glob.Math.min; function f(x) { x=x|0; return min(x, 1)|0 } return f');
assertAsmLinkFail(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e) { d=+d;e=+e; return +min(d,e) } return f'), {Math:{min:Math.sin}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e) { d=+d;e=+e; return +min(d,e) } return f'), {Math:{min:null}});
testBinary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e) { d=+d;e=+e; return +min(d,e) } return f'), {Math:{min:Math.min}}), Math.min, doubleNumbers);
testBinary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; var f32=glob.Math.fround; function f(d,e) { d=f32(d);e=f32(e); return f32(min(d,e)) } return f'), this), Math.min, floatNumbers);
testBinary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e) { d=d|0;e=e|0; return min(d,e)|0} return f'), {Math:{min:Math.min}}), Math.min, intNumbers);
testBinary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e) { d=d|0;e=e|0; return min(d|0,e|0)|0} return f'), {Math:{min:Math.min}}), coercedMin, intNumbers);
assertAsmLinkFail(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e) { d=+d;e=+e; return +max(d,e) } return f'), {Math:{max:Math.sin}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e) { d=+d;e=+e; return +max(d,e) } return f'), {Math:{max:null}});
testBinary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e) { d=+d;e=+e; return +max(d,e) } return f'), {Math:{max:Math.max}}), Math.max, doubleNumbers);
testBinary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; var f32=glob.Math.fround; function f(d,e) { d=f32(d);e=f32(e); return f32(max(d,e)) } return f'), this), Math.max, floatNumbers);
testBinary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e) { d=d|0;e=e|0; return max(d,e)|0} return f'), {Math:{max:Math.max}}), Math.max, intNumbers);
testBinary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e) { d=d|0;e=e|0; return max(d|0,e|0)|0} return f'), {Math:{max:Math.max}}), coercedMax, intNumbers);
function testTernary(f, g, numbers) {
for (n of numbers)
@ -111,11 +116,11 @@ function testTernary(f, g, numbers) {
assertAsmTypeFail('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e,g) { d=+d;e=+e;g=g|0; return +min(d,e,g) } return f');
assertAsmTypeFail('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e,g) { d=d|0;e=e|0;g=+g; return max(d,e,g)|0 } return f');
assertAsmTypeFail('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e,g) { d=+d;e=+e;g=+g; return min(d,e,g)|0 } return f');
testTernary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e,g) { d=d|0;e=e|0;g=g|0; return +max(d,e,g) } return f'), {Math:{max:Math.max}}), Math.max, intNumbers);
testTernary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e,g) { d=d|0;e=e|0;g=g|0; return max(d,e,g)|0 } return f'), {Math:{max:Math.max}}), Math.max, intNumbers);
testTernary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e,g) { d=d|0;e=e|0;g=g|0; return +max(d|0,e|0,g|0) } return f'), {Math:{max:Math.max}}), coercedMax, intNumbers);
testTernary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e,g) { d=d|0;e=e|0;g=g|0; return max(d|0,e|0,g|0)|0 } return f'), {Math:{max:Math.max}}), coercedMax, intNumbers);
testTernary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e,g) { d=+d;e=+e;g=+g; return +max(d,e,g) } return f'), {Math:{max:Math.max}}), Math.max, doubleNumbers);
testTernary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; var _=glob.Math.fround; function f(d,e,g) { d=_(d);e=_(e);g=_(g); return _(max(d,e,g)) } return f'), this), Math.max, floatNumbers);
testTernary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e,g) { d=d|0;e=e|0;g=g|0; return min(d,e,g)|0 } return f'), {Math:{min:Math.min}}), Math.min, intNumbers);
testTernary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e,g) { d=d|0;e=e|0;g=g|0; return min(d|0,e|0,g|0)|0 } return f'), {Math:{min:Math.min}}), coercedMin, intNumbers);
testTernary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e,g) { d=+d;e=+e;g=+g; return +min(d,e,g) } return f'), {Math:{min:Math.min}}), Math.min, doubleNumbers);
testTernary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; var _=glob.Math.fround; function f(d,e,g) { d=_(d);e=_(e);g=_(g); return _(min(d,e,g)) } return f'), this), Math.min, floatNumbers);
@ -139,7 +144,7 @@ assertAsmTypeFail('glob', USE_ASM + 'var sqrt=glob.Math.sqrt; function f(n) { n=
assertAsmTypeFail('glob', USE_ASM + 'var sqrt=glob.Math.sqrt; function f(n) { n=n|0; var d=3.; n = sqrt(d)|0 } return f');
assertEq(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d) { d=+d; d = min(d, 13.); return +d } return f'), this)(12), 12);
assertEq(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d) { d=d|0; d = min(d, 11); return d|0 } return f'), this)(12), 11);
assertEq(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d) { d=d|0; d = min(d|0, 11); return d|0 } return f'), this)(12), 11);
assertEq(asmLink(asmCompile('glob', USE_ASM + FROUND + 'var min=glob.Math.min; function f(d) { d=fround(d); d = min(d, fround(13.37)); return fround(d) } return f'), this)(14), Math.fround(13.37));
assertEq(asmLink(asmCompile('glob', USE_ASM + 'var sin=glob.Math.sin; function f(d) { d=+d; d = sin(d); return +d } return f'), this)(Math.PI), Math.sin(Math.PI));
@ -158,10 +163,10 @@ assertAsmTypeFail('glob', USE_ASM + 'var abs=glob.Math.abs; function f(d) { d=+d
assertEq(asmLink(asmCompile('glob', USE_ASM + 'var im=glob.Math.imul; function f(i) { i=i|0; var d=0.0; d = +im(i,i); return +d } return f'), this)(42), Math.imul(42, 42));
assertEq(asmLink(asmCompile('glob', USE_ASM + 'var abs=glob.Math.abs; function f(i) { i=i|0; var d=0.0; d = +abs(i|0); return +d } return f'), this)(-42), 42);
assertEq(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(i) { i=i|0; var d=0.0; d = +min(i, 0); return +d } return f'), this)(-42), -42);
assertEq(asmLink(asmCompile('glob', USE_ASM + FROUND + 'var min=glob.Math.min; function f(i) { i=i|0; var d=fround(0); d = fround(min(i, 0)); return +d } return f'), this)(-42), -42);
assertEq(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(i) { i=i|0; var d=0.0; d = +max(i, 0); return +d } return f'), this)(-42), 0);
assertEq(asmLink(asmCompile('glob', USE_ASM + FROUND + 'var max=glob.Math.max; function f(i) { i=i|0; var d=fround(0); d = fround(max(i, 0)); return +d } return f'), this)(-42), 0);
assertEq(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(i) { i=i|0; var d=0.0; d = +min(i|0, 0); return +d } return f'), this)(-42), -42);
assertEq(asmLink(asmCompile('glob', USE_ASM + FROUND + 'var min=glob.Math.min; function f(i) { i=i|0; var d=fround(0); d = fround(min(i|0, 0)); return +d } return f'), this)(-42), -42);
assertEq(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(i) { i=i|0; var d=0.0; d = +max(i|0, 0); return +d } return f'), this)(-42), 0);
assertEq(asmLink(asmCompile('glob', USE_ASM + FROUND + 'var max=glob.Math.max; function f(i) { i=i|0; var d=fround(0); d = fround(max(i|0, 0)); return +d } return f'), this)(-42), 0);
assertEq(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d) { d=+d; var i=0; i = ~~min(d, 0.)|0; return i|0 } return f'), this)(-42), -42);
assertEq(asmLink(asmCompile('glob', USE_ASM + FROUND + 'var min=glob.Math.min; function f(d) { d=fround(d); var i=0; i = ~~min(d, fround(0))|0; return i|0 } return f'), this)(-42), -42);
assertEq(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d) { d=+d; var i=0; i = ~~max(d, 0.)|0; return i|0 } return f'), this)(-42), 0);

Просмотреть файл

@ -0,0 +1,416 @@
// Basic functional tests for the Atomics primitives.
//
// These do not test atomicity, just that calling and coercions and
// indexing and exception behavior all work right.
//
// These do not test the futex operations.
var DEBUG = false; // Set to true for useful printouts
function dprint(...xs) {
if (!DEBUG)
return;
var s = "";
for ( var x in xs )
s += String(xs[x]);
print(s);
}
function testMethod(a, ...indices) {
dprint("Method: " + a.constructor.name);
var poison;
switch (a.BYTES_PER_ELEMENT) {
case 1: poison = 0x5A; break;
case 2: poison = 0x5A5A; break;
case 4: poison = 0x5A5A5A5A; break;
}
for ( var i=0 ; i < indices.length ; i++ ) {
var x = indices[i];
if (x > 0)
a[x-1] = poison;
if (x < a.length-1)
a[x+1] = poison;
// val = 0
assertEq(Atomics.compareExchange(a, x, 0, 37), 0);
// val = 37
assertEq(Atomics.compareExchange(a, x, 37, 5), 37);
// val = 5
assertEq(Atomics.compareExchange(a, x, 7, 8), 5); // ie should fail
// val = 5
assertEq(Atomics.compareExchange(a, x, 5, 9), 5);
// val = 9
assertEq(Atomics.compareExchange(a, x, 5, 0), 9); // should also fail
// val = 9
assertEq(Atomics.load(a, x), 9);
// val = 9
assertEq(Atomics.store(a, x, 14), 14); // What about coercion?
// val = 14
assertEq(Atomics.load(a, x), 14);
// val = 14
Atomics.store(a, x, 0);
// val = 0
Atomics.fence();
// val = 0
assertEq(Atomics.add(a, x, 3), 0);
// val = 3
assertEq(Atomics.sub(a, x, 2), 3);
// val = 1
assertEq(Atomics.or(a, x, 6), 1);
// val = 7
assertEq(Atomics.and(a, x, 14), 7);
// val = 6
assertEq(Atomics.xor(a, x, 5), 6);
// val = 3
assertEq(Atomics.load(a, x), 3);
// val = 3
Atomics.store(a, x, 0);
// val = 0
// Check adjacent elements were not affected
if (x > 0) {
assertEq(a[x-1], poison);
a[x-1] = 0;
}
if (x < a.length-1) {
assertEq(a[x+1], poison);
a[x+1] = 0;
}
}
}
function testFunction(a, ...indices) {
dprint("Function: " + a.constructor.name);
var poison;
switch (a.BYTES_PER_ELEMENT) {
case 1: poison = 0x5A; break;
case 2: poison = 0x5A5A; break;
case 4: poison = 0x5A5A5A5A; break;
}
for ( var i=0 ; i < indices.length ; i++ ) {
var x = indices[i];
if (x > 0)
a[x-1] = poison;
if (x < a.length-1)
a[x+1] = poison;
// val = 0
assertEq(gAtomics_compareExchange(a, x, 0, 37), 0);
// val = 37
assertEq(gAtomics_compareExchange(a, x, 37, 5), 37);
// val = 5
assertEq(gAtomics_compareExchange(a, x, 7, 8), 5); // ie should fail
// val = 5
assertEq(gAtomics_compareExchange(a, x, 5, 9), 5);
// val = 9
assertEq(gAtomics_compareExchange(a, x, 5, 0), 9); // should also fail
// val = 9
assertEq(gAtomics_load(a, x), 9);
// val = 9
assertEq(gAtomics_store(a, x, 14), 14); // What about coercion?
// val = 14
assertEq(gAtomics_load(a, x), 14);
// val = 14
gAtomics_store(a, x, 0);
// val = 0
gAtomics_fence();
// val = 0
assertEq(gAtomics_add(a, x, 3), 0);
// val = 3
assertEq(gAtomics_sub(a, x, 2), 3);
// val = 1
assertEq(gAtomics_or(a, x, 6), 1);
// val = 7
assertEq(gAtomics_and(a, x, 14), 7);
// val = 6
assertEq(gAtomics_xor(a, x, 5), 6);
// val = 3
assertEq(gAtomics_load(a, x), 3);
// val = 3
gAtomics_store(a, x, 0);
// val = 0
// Check adjacent elements were not affected
if (x > 0) {
assertEq(a[x-1], poison);
a[x-1] = 0;
}
if (x < a.length-1) {
assertEq(a[x+1], poison);
a[x+1] = 0;
}
}
}
function testTypeCAS(a) {
dprint("Type: " + a.constructor.name);
var thrown = false;
try {
Atomics.compareExchange([0], 0, 0, 1);
}
catch (e) {
thrown = true;
assertEq(e instanceof TypeError, true);
}
assertEq(thrown, true);
// All these variants should be OK
Atomics.compareExchange(a, 0, 0.7, 1.8);
Atomics.compareExchange(a, 0, "0", 1);
Atomics.compareExchange(a, 0, 0, "1");
Atomics.compareExchange(a, 0, 0);
}
function testTypeBinop(a, op) {
dprint("Type: " + a.constructor.name);
var thrown = false;
try {
op([0], 0, 1);
}
catch (e) {
thrown = true;
assertEq(e instanceof TypeError, true);
}
assertEq(thrown, true);
// These are all OK
op(a, 0, 0.7);
op(a, 0, "0");
op(a, 0);
}
function testRangeCAS(a) {
dprint("Range: " + a.constructor.name);
assertEq(Atomics.compareExchange(a, -1, 0, 1), undefined); // out of range => undefined, no effect
assertEq(a[0], 0);
a[0] = 0;
assertEq(Atomics.compareExchange(a, "hi", 0, 1), undefined); // invalid => undefined, no effect
assertEq(a[0], 0);
a[0] = 0;
assertEq(Atomics.compareExchange(a, a.length + 5, 0, 1), undefined); // out of range => undefined, no effect
assertEq(a[0], 0);
}
// Ad-hoc tests for extreme and out-of-range values
// None of these should throw
function testInt8Extremes(a) {
dprint("Int8 extremes");
a[10] = 0;
a[11] = 0;
Atomics.store(a, 10, 255);
assertEq(a[10], -1);
assertEq(Atomics.load(a, 10), -1);
Atomics.add(a, 10, 255); // should coerce to -1
assertEq(a[10], -2);
assertEq(Atomics.load(a, 10), -2);
Atomics.add(a, 10, -1);
assertEq(a[10], -3);
assertEq(Atomics.load(a, 10), -3);
Atomics.sub(a, 10, 255); // should coerce to -1
assertEq(a[10], -2);
assertEq(Atomics.load(a, 10), -2);
Atomics.sub(a, 10, 256); // should coerce to 0
assertEq(a[10], -2);
assertEq(Atomics.load(a, 10), -2);
Atomics.and(a, 10, -1); // Preserve all
assertEq(a[10], -2);
assertEq(Atomics.load(a, 10), -2);
Atomics.and(a, 10, 256); // Preserve none
assertEq(a[10], 0);
assertEq(Atomics.load(a, 10), 0);
assertEq(a[11], 0);
}
function testUint8Extremes(a) {
dprint("Uint8 extremes");
a[10] = 0;
a[11] = 0;
Atomics.store(a, 10, 255);
assertEq(a[10], 255);
assertEq(Atomics.load(a, 10), 255);
Atomics.add(a, 10, 255);
assertEq(a[10], 254);
assertEq(Atomics.load(a, 10), 254);
Atomics.add(a, 10, -1);
assertEq(a[10], 253);
assertEq(Atomics.load(a, 10), 253);
Atomics.sub(a, 10, 255);
assertEq(a[10], 254);
assertEq(Atomics.load(a, 10), 254);
Atomics.and(a, 10, -1); // Preserve all
assertEq(a[10], 254);
assertEq(Atomics.load(a, 10), 254);
Atomics.and(a, 10, 256); // Preserve none
assertEq(a[10], 0);
assertEq(Atomics.load(a, 10), 0);
assertEq(a[11], 0);
}
function testInt16Extremes(a) {
dprint("Int16 extremes");
a[10] = 0;
a[11] = 0;
Atomics.store(a, 10, 65535);
assertEq(a[10], -1);
assertEq(Atomics.load(a, 10), -1);
Atomics.add(a, 10, 65535); // should coerce to -1
assertEq(a[10], -2);
assertEq(Atomics.load(a, 10), -2);
Atomics.add(a, 10, -1);
assertEq(a[10], -3);
assertEq(Atomics.load(a, 10), -3);
Atomics.sub(a, 10, 65535); // should coerce to -1
assertEq(a[10], -2);
assertEq(Atomics.load(a, 10), -2);
Atomics.sub(a, 10, 65536); // should coerce to 0
assertEq(a[10], -2);
assertEq(Atomics.load(a, 10), -2);
Atomics.and(a, 10, -1); // Preserve all
assertEq(a[10], -2);
assertEq(Atomics.load(a, 10), -2);
Atomics.and(a, 10, 65536); // Preserve none
assertEq(a[10], 0);
assertEq(Atomics.load(a, 10), 0);
assertEq(a[11], 0);
}
function testUint32(a) {
var k = 0;
for ( var i=0 ; i < 20 ; i++ ) {
a[i] = i+5;
k += a[i];
}
var sum = 0;
for ( var i=0 ; i < 20 ; i++ )
sum += Atomics.add(a, i, 1);
assertEq(sum, k);
}
function isLittleEndian() {
var xxx = new ArrayBuffer(2);
var xxa = new Int16Array(xxx);
var xxb = new Int8Array(xxx);
xxa[0] = 37;
var is_little = xxb[0] == 37;
return is_little;
}
function runTests() {
    var is_little = isLittleEndian();

    // Currently the SharedArrayBuffer needs to be a multiple of 4K bytes in size.
    var sab = new SharedArrayBuffer(4096);

    // Two views created on the same storage must alias.
    var byteView = new SharedInt8Array(sab);
    var halfwordView = new SharedUint16Array(sab);
    assertEq(byteView[0], 0);
    assertEq(halfwordView[0], 0);
    byteView[0] = 37;
    assertEq(halfwordView[0], is_little ? 37 : 37 << 16);
    byteView[0] = 0;

    // Every view type paired with the last valid element index for a
    // 4096-byte buffer.
    var viewCases = [
        [SharedInt8Array, 4095],
        [SharedUint8Array, 4095],
        [SharedUint8ClampedArray, 4095],
        [SharedInt16Array, 2047],
        [SharedUint16Array, 2047],
        [SharedInt32Array, 1023],
        [SharedUint32Array, 1023]
    ];

    // Test that invoking as Atomics.whatever() works, on correct arguments.
    for (var i = 0; i < viewCases.length; i++)
        testMethod(new viewCases[i][0](sab), 0, 42, viewCases[i][1]);

    // Test that invoking as v = Atomics.whatever; v() works, on correct arguments.
    gAtomics_compareExchange = Atomics.compareExchange;
    gAtomics_load = Atomics.load;
    gAtomics_store = Atomics.store;
    gAtomics_fence = Atomics.fence;
    gAtomics_add = Atomics.add;
    gAtomics_sub = Atomics.sub;
    gAtomics_and = Atomics.and;
    gAtomics_or = Atomics.or;
    gAtomics_xor = Atomics.xor;
    for (var i = 0; i < viewCases.length; i++)
        testFunction(new viewCases[i][0](sab), 0, 42, viewCases[i][1]);

    // Test various range and type conditions.
    var v8 = new SharedInt8Array(sab);
    var v32 = new SharedInt32Array(sab);
    testTypeCAS(v8);
    testTypeCAS(v32);

    var binops = [Atomics.add, Atomics.sub, Atomics.and, Atomics.or, Atomics.xor];
    for (var i = 0; i < binops.length; i++)
        testTypeBinop(v8, binops[i]);
    for (var i = 0; i < binops.length; i++)
        testTypeBinop(v32, binops[i]);

    // Test out-of-range references.
    testRangeCAS(v8);
    testRangeCAS(v32);

    // Test extreme values.
    testInt8Extremes(new SharedInt8Array(sab));
    testUint8Extremes(new SharedUint8Array(sab));
    testInt16Extremes(new SharedInt16Array(sab));
    testUint32(new SharedUint32Array(sab));
}
// Run only in shells that ship Atomics and the shared typed-array prototypes.
if (this.Atomics && this.SharedArrayBuffer && this.SharedInt32Array)
runTests();

Просмотреть файл

@ -0,0 +1,31 @@
// |jit-test| slow;
//
// This is intended to be run manually with IONFLAGS=logs and
// postprocessing by iongraph to verify manually (by inspecting the
// MIR) that:
//
// - the add operation is inlined as it should be
// - loads and stores are not moved across the add
//
// Be sure to run with --ion-eager --ion-offthread-compile=off.
// Reads bracket the atomic so the generated MIR should show loads
// pinned on either side of the Atomics.add.
function add(ta) {
    var before = ta[0];
    Atomics.add(ta, 86, 6);
    var after = ta[1];
    return before + (after + 1);
}
// Skip quietly in shells without the shared-memory/Atomics prototypes.
if (!this.SharedArrayBuffer || !this.Atomics || !this.SharedInt32Array)
quit(0);
// Fill the array with a known value so the loads in add() are defined.
var sab = new SharedArrayBuffer(4096);
var ia = new SharedInt32Array(sab);
for ( var i=0, limit=ia.length ; i < limit ; i++ )
ia[i] = 37;
var v = 0;
// Iterate enough times for Ion to compile and inline add().
for ( var i=0 ; i < 1000 ; i++ )
v += add(ia);
//print(v);

Просмотреть файл

@ -0,0 +1,31 @@
// |jit-test| slow;
//
// Like inline-add, but with SharedUint32Array, which is a special
// case because the value is representable only as a Number.
// All this tests is that the Uint32 path is being triggered.
//
// This is intended to be run manually with IONFLAGS=logs and
// postprocessing by iongraph to verify manually (by inspecting the
// MIR) that:
//
// - the add operation is inlined as it should be, with
// a return type 'Double'
// - loads and stores are not moved across the add
//
// Be sure to run with --ion-eager --ion-offthread-compile=off.
// A Uint32 element's old value may only be representable as a Number,
// so the inlined operation must produce a Double result.
function add(ta) {
    var previous = Atomics.add(ta, 86, 6);
    return previous;
}
// Skip quietly in shells without the shared-memory/Atomics prototypes.
if (!this.SharedArrayBuffer || !this.Atomics || !this.SharedUint32Array)
quit(0);
var sab = new SharedArrayBuffer(4096);
var ia = new SharedUint32Array(sab);
for ( var i=0, limit=ia.length ; i < limit ; i++ )
ia[i] = 0xdeadbeef; // Important: Not an int32-capable value
var v = 0;
// Iterate enough times for Ion to compile and inline add().
for ( var i=0 ; i < 1000 ; i++ )
v += add(ia);
//print(v);

Просмотреть файл

@ -0,0 +1,31 @@
// |jit-test| slow;
//
// This is intended to be run manually with IONFLAGS=logs and
// postprocessing by iongraph to verify manually (by inspecting the
// MIR) that:
//
// - the cmpxchg operation is inlined as it should be
// - loads and stores are not moved across the cmpxchg
//
// Be sure to run with --ion-eager --ion-offthread-compile=off.
// Reads bracket the atomic so the generated MIR should show loads
// pinned on either side of the compareExchange.
function cmpxchg(ta) {
    var before = ta[0];
    Atomics.compareExchange(ta, 86, 37, 42);
    var after = ta[1];
    return before + (after + 1);
}
// Skip quietly in shells without the shared-memory/Atomics prototypes.
if (!this.SharedArrayBuffer || !this.Atomics || !this.SharedInt32Array)
quit(0);
// Seed with 37 so the compareExchange in cmpxchg() succeeds initially.
var sab = new SharedArrayBuffer(4096);
var ia = new SharedInt32Array(sab);
for ( var i=0, limit=ia.length ; i < limit ; i++ )
ia[i] = 37;
var v = 0;
// Iterate enough times for Ion to compile and inline cmpxchg().
for ( var i=0 ; i < 1000 ; i++ )
v += cmpxchg(ia);
//print(v);

Просмотреть файл

@ -0,0 +1,31 @@
// |jit-test| slow;
//
// This is intended to be run manually with IONFLAGS=logs and
// postprocessing by iongraph to verify manually (by inspecting the
// MIR) that:
//
// - the fence operation is inlined as it should be
// - loads and stores are not moved across the fence
//
// Be sure to run with --ion-eager --ion-offthread-compile=off.
// Reads bracket the fence so the generated MIR should show loads
// pinned on either side of it.
function fence(ta) {
    var before = ta[0];
    Atomics.fence();
    var after = ta[1];
    return before + (after + 1);
}
// Skip quietly in shells without the shared-memory/Atomics prototypes.
if (!this.SharedArrayBuffer || !this.Atomics || !this.SharedInt32Array)
quit(0);
// Fill the array with a known value so the loads in fence() are defined.
var sab = new SharedArrayBuffer(4096);
var ia = new SharedInt32Array(sab);
for ( var i=0, limit=ia.length ; i < limit ; i++ )
ia[i] = 37;
var v = 0;
// Iterate enough times for Ion to compile and inline fence().
for ( var i=0 ; i < 1000 ; i++ )
v += fence(ia);
//print(v);

Просмотреть файл

@ -0,0 +1,7 @@
// Yields y truncated to int32 when both that truncation and x are
// truthy, otherwise 0.  (y | 0 is deliberately evaluated in both the
// condition and the result arm, matching the original expression.)
function f(x, y) {
    if ((y | 0) && x)
        return y | 0;
    return 0;
}
// Regression coverage: m[1] and m[2] are out-of-bounds reads yielding
// undefined, which must drive the guarded ternary in f to its 0 arm.
m = [1]
assertEq(f(m[0], m[0]), 1)
assertEq(f(m[1], m[0]), 0)
assertEq(f(m[2], m[0]), 0)

54
js/src/jit/AtomicOp.h Normal file
Просмотреть файл

@ -0,0 +1,54 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_AtomicOp_h
#define jit_AtomicOp_h

namespace js {
namespace jit {

// Types of atomic operation, shared by MIR and LIR.
// Each is a fetch-and-<op>: the MacroAssembler lowerings return the
// value the cell held before the update.
enum AtomicOp {
    AtomicFetchAddOp,
    AtomicFetchSubOp,
    AtomicFetchAndOp,
    AtomicFetchOrOp,
    AtomicFetchXorOp
};

// Memory barrier types, shared by MIR and LIR.
//
// MembarSynchronizing is here because some platforms can make the
// distinction (DSB vs DMB on ARM, SYNC vs parameterized SYNC on MIPS)
// but there's been no reason to use it yet.
//
// The values are single bits so that several barrier types can be
// or'ed together into a barrier set (see the Membar* constants below).
enum MemoryBarrierBits {
    MembarLoadLoad = 1,
    MembarLoadStore = 2,
    MembarStoreStore = 4,
    MembarStoreLoad = 8,
    MembarSynchronizing = 16,

    // For validity testing
    MembarAllbits = 31,
};

// Standard barrier bits for a full barrier.
static const int MembarFull = MembarLoadLoad|MembarLoadStore|MembarStoreLoad|MembarStoreStore;

// Standard sets of barrier bits for atomic loads and stores.
// See http://gee.cs.oswego.edu/dl/jmm/cookbook.html for more.
static const int MembarBeforeLoad = 0;
static const int MembarAfterLoad = MembarLoadLoad|MembarLoadStore;
static const int MembarBeforeStore = MembarStoreStore;
static const int MembarAfterStore = MembarStoreLoad;

} // namespace jit
} // namespace js

#endif /* jit_AtomicOp_h */

Просмотреть файл

@ -6160,9 +6160,7 @@ JitRuntime::generateLazyLinkStub(JSContext *cx)
GeneralRegisterSet regs = GeneralRegisterSet::Volatile();
Register temp0 = regs.takeAny();
uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS);
masm.Push(Imm32(descriptor));
masm.call(&call);
masm.callWithExitFrame(&call);
masm.jump(ReturnReg);
masm.bind(&call);
@ -8963,6 +8961,68 @@ CodeGenerator::visitStoreTypedArrayElementHole(LStoreTypedArrayElementHole *lir)
return true;
}
// Codegen for an inline Atomics.compareExchange on a typed-array
// element.  Dispatches to the MacroAssembler lowering with either a
// constant-folded address or a scaled base+index address.
bool
CodeGenerator::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement *lir)
{
    Register elements = ToRegister(lir->elements());
    AnyRegister output = ToAnyRegister(lir->output());
    // A bogus temp becomes InvalidReg; the masm lowering only touches
    // the temp in the Uint32 (double-result) case.
    Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());

    // The underlying compare-exchange needs both values in registers.
    MOZ_ASSERT(lir->oldval()->isRegister());
    MOZ_ASSERT(lir->newval()->isRegister());

    Register oldval = ToRegister(lir->oldval());
    Register newval = ToRegister(lir->newval());

    Scalar::Type arrayType = lir->mir()->arrayType();
    int width = Scalar::byteSize(arrayType);

    if (lir->index()->isConstant()) {
        Address dest(elements, ToInt32(lir->index()) * width);
        masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
    } else {
        BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
        masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
    }

    return true;
}
// Helper for visitAtomicTypedArrayElementBinop: forwards to the masm
// lowering with the value either as an immediate (when constant) or as
// a register.
template <typename T>
static inline void
AtomicBinopToTypedArray(MacroAssembler &masm, AtomicOp op,
                        Scalar::Type arrayType, const LAllocation *value, const T &mem,
                        Register temp1, Register temp2, AnyRegister output)
{
    if (value->isConstant())
        masm.atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, temp1, temp2, output);
    else
        masm.atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, temp1, temp2, output);
}
// Codegen for an inline Atomics fetch-and-op (add/sub/and/or/xor) on a
// typed-array element.
bool
CodeGenerator::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop *lir)
{
    AnyRegister output = ToAnyRegister(lir->output());
    Register elements = ToRegister(lir->elements());
    // Bogus temps become InvalidReg; the masm lowering uses only the
    // temps the chosen element type actually requires.
    Register temp1 = lir->temp1()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp1());
    Register temp2 = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
    const LAllocation* value = lir->value();

    Scalar::Type arrayType = lir->mir()->arrayType();
    int width = Scalar::byteSize(arrayType);

    if (lir->index()->isConstant()) {
        Address mem(elements, ToInt32(lir->index()) * width);
        AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
    } else {
        BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
        AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
    }

    return true;
}
bool
CodeGenerator::visitClampIToUint8(LClampIToUint8 *lir)
{

Просмотреть файл

@ -265,6 +265,8 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitLoadTypedArrayElementHole(LLoadTypedArrayElementHole *lir);
bool visitStoreTypedArrayElement(LStoreTypedArrayElement *lir);
bool visitStoreTypedArrayElementHole(LStoreTypedArrayElementHole *lir);
bool visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement *lir);
bool visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop *lir);
bool visitClampIToUint8(LClampIToUint8 *lir);
bool visitClampDToUint8(LClampDToUint8 *lir);
bool visitClampVToUint8(LClampVToUint8 *lir);

Просмотреть файл

@ -1874,9 +1874,6 @@ AttachFinishedCompilations(JSContext *cx)
if (!builder)
break;
// TODO bug 1047346: Enable lazy linking for other architectures again by
// fixing the lazy link stub.
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
// Try to defer linking if the script is on the stack, to postpone
// invalidating them.
if (builder->info().executionMode() == SequentialExecution &&
@ -1906,7 +1903,6 @@ AttachFinishedCompilations(JSContext *cx)
continue;
}
}
#endif
if (CodeGenerator *codegen = builder->backgroundCodegen()) {
RootedScript script(cx, builder->script());

Просмотреть файл

@ -727,6 +727,13 @@ class IonBuilder
InliningStatus inlineRegExpExec(CallInfo &callInfo);
InliningStatus inlineRegExpTest(CallInfo &callInfo);
// Atomics natives.
InliningStatus inlineAtomicsCompareExchange(CallInfo &callInfo);
InliningStatus inlineAtomicsLoad(CallInfo &callInfo);
InliningStatus inlineAtomicsStore(CallInfo &callInfo);
InliningStatus inlineAtomicsFence(CallInfo &callInfo);
InliningStatus inlineAtomicsBinop(CallInfo &callInfo, JSFunction *target);
// Array intrinsics.
InliningStatus inlineUnsafePutElements(CallInfo &callInfo);
bool inlineUnsafeSetDenseArrayElement(CallInfo &callInfo, uint32_t base);
@ -791,6 +798,9 @@ class IonBuilder
MTypeObjectDispatch *dispatch, MGetPropertyCache *cache,
MBasicBlock **fallbackTarget);
bool atomicsMeetsPreconditions(CallInfo &callInfo, Scalar::Type *arrayElementType);
void atomicsCheckBounds(CallInfo &callInfo, MInstruction **elements, MDefinition **index);
bool testNeedsArgumentCheck(JSFunction *target, CallInfo &callInfo);
MDefinition *makeCallsiteClone(JSFunction *target, MDefinition *fun);

Просмотреть файл

@ -11,6 +11,7 @@
#include "builtin/TypedObject.h"
#include "gc/GCTrace.h"
#include "jit/AtomicOp.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineIC.h"
@ -397,6 +398,211 @@ template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const A
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex &src, const ValueOperand &dest,
bool allowDouble, Register temp, Label *fail);
// Emit a compare-exchange on one element of an integer typed array,
// dispatching on element width and signedness; the platform
// compareExchange* primitive leaves its result in |output|, sign- or
// zero-extended to match the element type.  |temp| is used only for
// the Uint32 case.
template<typename T>
void
MacroAssembler::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T &mem,
                                               Register oldval, Register newval,
                                               Register temp, AnyRegister output)
{
    switch (arrayType) {
      case Scalar::Int8:
        compareExchange8SignExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Uint8:
        compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Uint8Clamped:
        compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Int16:
        compareExchange16SignExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Uint16:
        compareExchange16ZeroExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Int32:
        compareExchange32(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Uint32:
        // At the moment, the code in MCallOptimize.cpp requires the output
        // type to be double for uint32 arrays. See bug 1077305.
        MOZ_ASSERT(output.isFloat());
        compareExchange32(mem, oldval, newval, temp);
        convertUInt32ToDouble(temp, output.fpu());
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}

// Explicit instantiations for the two addressing modes the code
// generator uses.
template void
MacroAssembler::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address &mem,
                                               Register oldval, Register newval, Register temp,
                                               AnyRegister output);
template void
MacroAssembler::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex &mem,
                                               Register oldval, Register newval, Register temp,
                                               AnyRegister output);
// Emit a fetch-and-op (add/sub/and/or/xor, selected by |op|) on one
// element of an integer typed array, dispatching on element width and
// signedness.  The result lands in |output|; for Uint32 it is produced
// in |temp1| and converted to double.
template<typename S, typename T>
void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S &value,
                                           const T &mem, Register temp1, Register temp2, AnyRegister output)
{
    // Uint8Clamped is explicitly not supported here
    switch (arrayType) {
      case Scalar::Int8:
        switch (op) {
          case AtomicFetchAddOp:
            atomicFetchAdd8SignExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchSubOp:
            atomicFetchSub8SignExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchAndOp:
            atomicFetchAnd8SignExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchOrOp:
            atomicFetchOr8SignExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchXorOp:
            atomicFetchXor8SignExtend(value, mem, temp1, output.gpr());
            break;
          default:
            MOZ_CRASH("Invalid typed array atomic operation");
        }
        break;
      case Scalar::Uint8:
        switch (op) {
          case AtomicFetchAddOp:
            atomicFetchAdd8ZeroExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchSubOp:
            atomicFetchSub8ZeroExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchAndOp:
            atomicFetchAnd8ZeroExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchOrOp:
            atomicFetchOr8ZeroExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchXorOp:
            atomicFetchXor8ZeroExtend(value, mem, temp1, output.gpr());
            break;
          default:
            MOZ_CRASH("Invalid typed array atomic operation");
        }
        break;
      case Scalar::Int16:
        switch (op) {
          case AtomicFetchAddOp:
            atomicFetchAdd16SignExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchSubOp:
            atomicFetchSub16SignExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchAndOp:
            atomicFetchAnd16SignExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchOrOp:
            atomicFetchOr16SignExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchXorOp:
            atomicFetchXor16SignExtend(value, mem, temp1, output.gpr());
            break;
          default:
            MOZ_CRASH("Invalid typed array atomic operation");
        }
        break;
      case Scalar::Uint16:
        switch (op) {
          case AtomicFetchAddOp:
            atomicFetchAdd16ZeroExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchSubOp:
            atomicFetchSub16ZeroExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchAndOp:
            atomicFetchAnd16ZeroExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchOrOp:
            atomicFetchOr16ZeroExtend(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchXorOp:
            atomicFetchXor16ZeroExtend(value, mem, temp1, output.gpr());
            break;
          default:
            MOZ_CRASH("Invalid typed array atomic operation");
        }
        break;
      case Scalar::Int32:
        switch (op) {
          case AtomicFetchAddOp:
            atomicFetchAdd32(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchSubOp:
            atomicFetchSub32(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchAndOp:
            atomicFetchAnd32(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchOrOp:
            atomicFetchOr32(value, mem, temp1, output.gpr());
            break;
          case AtomicFetchXorOp:
            atomicFetchXor32(value, mem, temp1, output.gpr());
            break;
          default:
            MOZ_CRASH("Invalid typed array atomic operation");
        }
        break;
      case Scalar::Uint32:
        // At the moment, the code in MCallOptimize.cpp requires the output
        // type to be double for uint32 arrays. See bug 1077305.
        MOZ_ASSERT(output.isFloat());
        // NOTE(review): add/sub pass InvalidReg as the scratch while
        // and/or/xor pass temp2 — presumably only the latter need a
        // scratch on some platforms; confirm against the per-platform
        // atomicFetch* implementations.
        switch (op) {
          case AtomicFetchAddOp:
            atomicFetchAdd32(value, mem, InvalidReg, temp1);
            break;
          case AtomicFetchSubOp:
            atomicFetchSub32(value, mem, InvalidReg, temp1);
            break;
          case AtomicFetchAndOp:
            atomicFetchAnd32(value, mem, temp2, temp1);
            break;
          case AtomicFetchOrOp:
            atomicFetchOr32(value, mem, temp2, temp1);
            break;
          case AtomicFetchXorOp:
            atomicFetchXor32(value, mem, temp2, temp1);
            break;
          default:
            MOZ_CRASH("Invalid typed array atomic operation");
        }
        convertUInt32ToDouble(temp1, output.fpu());
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}

// Explicit instantiations: immediate and register values, for both
// addressing modes the code generator uses.
template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                           const Imm32 &value, const Address &mem,
                                           Register temp1, Register temp2, AnyRegister output);
template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                           const Imm32 &value, const BaseIndex &mem,
                                           Register temp1, Register temp2, AnyRegister output);
template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                           const Register &value, const Address &mem,
                                           Register temp1, Register temp2, AnyRegister output);
template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                           const Register &value, const BaseIndex &mem,
                                           Register temp1, Register temp2, AnyRegister output);
// Inlined version of gc::CheckAllocatorState that checks the bare essentials
// and bails for anything that cannot be handled with our jit allocators.
void

Просмотреть файл

@ -22,6 +22,7 @@
#else
# error "Unknown architecture!"
#endif
#include "jit/AtomicOp.h"
#include "jit/IonInstrumentation.h"
#include "jit/JitCompartment.h"
#include "jit/VMFunctions.h"
@ -738,6 +739,14 @@ class MacroAssembler : public MacroAssemblerSpecific
}
}
template<typename T>
void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T &mem, Register oldval, Register newval,
Register temp, AnyRegister output);
template<typename S, typename T>
void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S &value,
const T &mem, Register temp1, Register temp2, AnyRegister output);
void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex &dest);
void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address &dest);
@ -943,6 +952,15 @@ class MacroAssembler : public MacroAssemblerSpecific
return ret;
}
// see above comment for what is returned
// Label-target overload: brackets the platform-specific
// callWithExitFrame with leaveSPSFrame/reenterSPSFrame and returns
// currentOffset() taken immediately after the call.
uint32_t callWithExitFrame(Label *target) {
    leaveSPSFrame();
    MacroAssemblerSpecific::callWithExitFrame(target);
    uint32_t ret = currentOffset();
    reenterSPSFrame();
    return ret;
}
// see above comment for what is returned
uint32_t callWithExitFrame(JitCode *target) {
leaveSPSFrame();

Просмотреть файл

@ -4871,6 +4871,80 @@ class LStoreTypedArrayElementStatic : public LInstructionHelper<0, 2, 0>
}
};
// LIR for an inline Atomics.compareExchange on a typed-array element.
// Operands: elements vector, index, oldval, newval.  The single temp
// may be a bogus temp; codegen then passes InvalidReg to the masm.
class LCompareExchangeTypedArrayElement : public LInstructionHelper<1, 4, 1>
{
  public:
    LIR_HEADER(CompareExchangeTypedArrayElement)

    LCompareExchangeTypedArrayElement(const LAllocation &elements, const LAllocation &index,
                                      const LAllocation &oldval, const LAllocation &newval,
                                      const LDefinition &temp)
    {
        setOperand(0, elements);
        setOperand(1, index);
        setOperand(2, oldval);
        setOperand(3, newval);
        setTemp(0, temp);
    }

    const LAllocation *elements() {
        return getOperand(0);
    }
    const LAllocation *index() {
        return getOperand(1);
    }
    const LAllocation *oldval() {
        return getOperand(2);
    }
    const LAllocation *newval() {
        return getOperand(3);
    }
    const LDefinition *temp() {
        return getTemp(0);
    }

    const MCompareExchangeTypedArrayElement *mir() const {
        return mir_->toCompareExchangeTypedArrayElement();
    }
};
// LIR for an inline Atomics fetch-and-op (add/sub/and/or/xor) on a
// typed-array element.  Operands: elements vector, index, value.  The
// two temps may be bogus temps; codegen then passes InvalidReg.
class LAtomicTypedArrayElementBinop : public LInstructionHelper<1, 3, 2>
{
  public:
    LIR_HEADER(AtomicTypedArrayElementBinop)

    LAtomicTypedArrayElementBinop(const LAllocation &elements, const LAllocation &index,
                                  const LAllocation &value, const LDefinition &temp1,
                                  const LDefinition &temp2)
    {
        setOperand(0, elements);
        setOperand(1, index);
        setOperand(2, value);
        setTemp(0, temp1);
        setTemp(1, temp2);
    }

    const LAllocation *elements() {
        return getOperand(0);
    }
    const LAllocation *index() {
        return getOperand(1);
    }
    const LAllocation *value() {
        return getOperand(2);
    }
    const LDefinition *temp1() {
        return getTemp(0);
    }
    const LDefinition *temp2() {
        return getTemp(1);
    }

    const MAtomicTypedArrayElementBinop *mir() const {
        return mir_->toAtomicTypedArrayElementBinop();
    }
};
class LEffectiveAddress : public LInstructionHelper<1, 2, 0>
{
public:
@ -6628,6 +6702,30 @@ class LThrowUninitializedLexical : public LCallInstructionHelper<0, 0, 0>
}
};
// LIR memory fence: no operands, no definitions; carries only the set
// of barrier bits to emit.
class LMemoryBarrier : public LInstructionHelper<0, 0, 0>
{
  private:
    const int type_;

  public:
    LIR_HEADER(MemoryBarrier)

    // The parameter 'type' is a bitwise 'or' of the barrier types needed,
    // see AtomicOp.h.
    explicit LMemoryBarrier(int type) : type_(type)
    {
        MOZ_ASSERT((type_ & ~MembarAllbits) == 0);
    }

    int type() const {
        return type_;
    }

    const MMemoryBarrier *mir() const {
        return mir_->toMemoryBarrier();
    }
};
} // namespace jit
} // namespace js

Просмотреть файл

@ -658,7 +658,8 @@ class LNode
virtual void setOperand(size_t index, const LAllocation &a) = 0;
// Returns information about temporary registers needed. Each temporary
// register is an LUse with a TEMPORARY policy, or a fixed register.
// register is an LDefinition with a fixed or virtual register and
// either GENERAL, FLOAT32, or DOUBLE type.
virtual size_t numTemps() const = 0;
virtual LDefinition *getTemp(size_t index) = 0;
virtual void setTemp(size_t index, const LDefinition &a) = 0;

Просмотреть файл

@ -234,6 +234,8 @@
_(StoreTypedArrayElement) \
_(StoreTypedArrayElementHole) \
_(StoreTypedArrayElementStatic) \
_(CompareExchangeTypedArrayElement) \
_(AtomicTypedArrayElementBinop) \
_(EffectiveAddress) \
_(ClampIToUint8) \
_(ClampDToUint8) \
@ -327,6 +329,7 @@
_(AsmJSCall) \
_(InterruptCheckPar) \
_(RecompileCheck) \
_(MemoryBarrier) \
_(AssertRangeI) \
_(AssertRangeD) \
_(AssertRangeF) \

Просмотреть файл

@ -211,7 +211,7 @@ LinearScanAllocator::allocateRegisters()
*
* The algorithm is based on the one published in "Linear Scan Register
* Allocation on SSA Form" by C. Wimmer et al., for which the full citation
* appears above.
* appears in LiveRangeAllocator.cpp.
*/
bool
LinearScanAllocator::resolveControlFlow()

Просмотреть файл

@ -2859,10 +2859,22 @@ LIRGenerator::visitLoadTypedArrayElement(MLoadTypedArrayElement *ins)
if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
tempDef = temp();
if (ins->requiresMemoryBarrier()) {
LMemoryBarrier *fence = new(alloc()) LMemoryBarrier(MembarBeforeLoad);
if (!add(fence, ins))
return false;
}
LLoadTypedArrayElement *lir = new(alloc()) LLoadTypedArrayElement(elements, index, tempDef);
if (ins->fallible() && !assignSnapshot(lir, Bailout_Overflow))
return false;
return define(lir, ins);
if (!define(lir, ins))
return false;
if (ins->requiresMemoryBarrier()) {
LMemoryBarrier *fence = new(alloc()) LMemoryBarrier(MembarAfterLoad);
if (!add(fence, ins))
return false;
}
return true;
}
bool
@ -2946,7 +2958,24 @@ LIRGenerator::visitStoreTypedArrayElement(MStoreTypedArrayElement *ins)
value = useByteOpRegisterOrNonDoubleConstant(ins->value());
else
value = useRegisterOrNonDoubleConstant(ins->value());
return add(new(alloc()) LStoreTypedArrayElement(elements, index, value), ins);
// Optimization opportunity for atomics: on some platforms there
// is a store instruction that incorporates the necessary
// barriers, and we could use that instead of separate barrier and
// store instructions. See bug #1077027.
if (ins->requiresMemoryBarrier()) {
LMemoryBarrier *fence = new(alloc()) LMemoryBarrier(MembarBeforeStore);
if (!add(fence, ins))
return false;
}
if (!add(new(alloc()) LStoreTypedArrayElement(elements, index, value), ins))
return false;
if (ins->requiresMemoryBarrier()) {
LMemoryBarrier *fence = new(alloc()) LMemoryBarrier(MembarAfterStore);
if (!add(fence, ins))
return false;
}
return true;
}
bool
@ -3713,6 +3742,13 @@ LIRGenerator::visitRecompileCheck(MRecompileCheck *ins)
return assignSafepoint(lir, ins);
}
// Lower an MIR fence directly to an LMemoryBarrier carrying the same
// barrier-bit set.
bool
LIRGenerator::visitMemoryBarrier(MMemoryBarrier *ins)
{
    LMemoryBarrier *lir = new(alloc()) LMemoryBarrier(ins->type());
    return add(lir, ins);
}
bool
LIRGenerator::visitSimdConstant(MSimdConstant *ins)
{

Просмотреть файл

@ -269,6 +269,7 @@ class LIRGenerator : public LIRGeneratorSpecific
bool visitGetDOMProperty(MGetDOMProperty *ins);
bool visitGetDOMMember(MGetDOMMember *ins);
bool visitRecompileCheck(MRecompileCheck *ins);
bool visitMemoryBarrier(MMemoryBarrier *ins);
bool visitSimdExtractElement(MSimdExtractElement *ins);
bool visitSimdInsertElement(MSimdInsertElement *ins);
bool visitSimdSignMask(MSimdSignMask *ins);

Просмотреть файл

@ -6,6 +6,7 @@
#include "jsmath.h"
#include "builtin/AtomicsObject.h"
#include "builtin/TestingFunctions.h"
#include "builtin/TypedObject.h"
#include "jit/BaselineInspector.h"
@ -34,6 +35,24 @@ IonBuilder::inlineNativeCall(CallInfo &callInfo, JSFunction *target)
if (!optimizationInfo().inlineNative())
return InliningStatus_NotInlined;
// Atomic natives.
if (native == atomics_compareExchange)
return inlineAtomicsCompareExchange(callInfo);
if (native == atomics_load)
return inlineAtomicsLoad(callInfo);
if (native == atomics_store)
return inlineAtomicsStore(callInfo);
if (native == atomics_fence)
return inlineAtomicsFence(callInfo);
if (native == atomics_add ||
native == atomics_sub ||
native == atomics_and ||
native == atomics_or ||
native == atomics_xor)
{
return inlineAtomicsBinop(callInfo, target);
}
// Array natives.
if (native == js_Array)
return inlineArray(callInfo);
@ -2235,6 +2254,225 @@ IonBuilder::inlineBoundFunction(CallInfo &nativeCallInfo, JSFunction *target)
return InliningStatus_Inlined;
}
// Inline Atomics.compareExchange(array, index, oldval, newval) as an
// MCompareExchangeTypedArrayElement when the preconditions hold.
IonBuilder::InliningStatus
IonBuilder::inlineAtomicsCompareExchange(CallInfo &callInfo)
{
    if (callInfo.argc() != 4 || callInfo.constructing())
        return InliningStatus_NotInlined;

    Scalar::Type arrayType;
    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
        return InliningStatus_NotInlined;

    // Value operands must be int32 or double; doubles are truncated to
    // int32 below before being handed to the CAS node.
    MDefinition *oldval = callInfo.getArg(2);
    if (!(oldval->type() == MIRType_Int32 || oldval->type() == MIRType_Double))
        return InliningStatus_NotInlined;

    MDefinition *newval = callInfo.getArg(3);
    if (!(newval->type() == MIRType_Int32 || newval->type() == MIRType_Double))
        return InliningStatus_NotInlined;

    callInfo.setImplicitlyUsedUnchecked();

    MInstruction *elements;
    MDefinition *index;
    atomicsCheckBounds(callInfo, &elements, &index);

    MDefinition *oldvalToWrite = oldval;
    if (oldval->type() == MIRType_Double) {
        oldvalToWrite = MTruncateToInt32::New(alloc(), oldval);
        current->add(oldvalToWrite->toInstruction());
    }

    MDefinition *newvalToWrite = newval;
    if (newval->type() == MIRType_Double) {
        newvalToWrite = MTruncateToInt32::New(alloc(), newval);
        current->add(newvalToWrite->toInstruction());
    }

    MCompareExchangeTypedArrayElement *cas =
        MCompareExchangeTypedArrayElement::New(alloc(), elements, index, arrayType,
                                               oldvalToWrite, newvalToWrite);
    cas->setResultType(getInlineReturnType());
    current->add(cas);
    current->push(cas);

    return InliningStatus_Inlined;
}
// Inline Atomics.load(array, index) as an ordinary typed-array load
// that carries a memory-barrier requirement.
IonBuilder::InliningStatus
IonBuilder::inlineAtomicsLoad(CallInfo &callInfo)
{
    if (callInfo.argc() != 2 || callInfo.constructing())
        return InliningStatus_NotInlined;

    Scalar::Type arrayType;
    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
        return InliningStatus_NotInlined;

    callInfo.setImplicitlyUsedUnchecked();

    MInstruction *elements;
    MDefinition *index;
    atomicsCheckBounds(callInfo, &elements, &index);

    MLoadTypedArrayElement *load =
        MLoadTypedArrayElement::New(alloc(), elements, index, arrayType,
                                    DoesRequireMemoryBarrier);
    load->setResultType(getInlineReturnType());
    current->add(load);
    current->push(load);

    return InliningStatus_Inlined;
}
// Inline Atomics.store(array, index, value) as an ordinary typed-array
// store that carries a memory-barrier requirement.
IonBuilder::InliningStatus
IonBuilder::inlineAtomicsStore(CallInfo &callInfo)
{
    if (callInfo.argc() != 3 || callInfo.constructing())
        return InliningStatus_NotInlined;

    Scalar::Type arrayType;
    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
        return InliningStatus_NotInlined;

    MDefinition *value = callInfo.getArg(2);
    if (!(value->type() == MIRType_Int32 || value->type() == MIRType_Double))
        return InliningStatus_NotInlined;

    callInfo.setImplicitlyUsedUnchecked();

    MInstruction *elements;
    MDefinition *index;
    atomicsCheckBounds(callInfo, &elements, &index);

    // Doubles are truncated for the store itself ...
    MDefinition *toWrite = value;
    if (value->type() == MIRType_Double) {
        toWrite = MTruncateToInt32::New(alloc(), value);
        current->add(toWrite->toInstruction());
    }

    MStoreTypedArrayElement *store =
        MStoreTypedArrayElement::New(alloc(), elements, index, toWrite, arrayType,
                                     DoesRequireMemoryBarrier);
    current->add(store);
    // ... but the expression's value is the original, untruncated value.
    current->push(value);

    return InliningStatus_Inlined;
}
// Inline Atomics.fence() as an MMemoryBarrier; the call's value is
// undefined.
IonBuilder::InliningStatus
IonBuilder::inlineAtomicsFence(CallInfo &callInfo)
{
    if (callInfo.argc() != 0 || callInfo.constructing())
        return InliningStatus_NotInlined;

    callInfo.setImplicitlyUsedUnchecked();

    MMemoryBarrier *fence = MMemoryBarrier::New(alloc());
    current->add(fence);
    pushConstant(UndefinedValue());

    return InliningStatus_Inlined;
}
// Inline Atomics.add/sub/and/or/xor(array, index, value) as an
// MAtomicTypedArrayElementBinop; the native identifies which op.
IonBuilder::InliningStatus
IonBuilder::inlineAtomicsBinop(CallInfo &callInfo, JSFunction *target)
{
    if (callInfo.argc() != 3 || callInfo.constructing())
        return InliningStatus_NotInlined;

    Scalar::Type arrayType;
    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
        return InliningStatus_NotInlined;

    MDefinition *value = callInfo.getArg(2);
    if (!(value->type() == MIRType_Int32 || value->type() == MIRType_Double))
        return InliningStatus_NotInlined;

    callInfo.setImplicitlyUsedUnchecked();

    MInstruction *elements;
    MDefinition *index;
    atomicsCheckBounds(callInfo, &elements, &index);

    // Map the called native to its AtomicOp.
    JSNative native = target->native();
    AtomicOp k = AtomicFetchAddOp;
    if (native == atomics_add)
        k = AtomicFetchAddOp;
    else if (native == atomics_sub)
        k = AtomicFetchSubOp;
    else if (native == atomics_and)
        k = AtomicFetchAndOp;
    else if (native == atomics_or)
        k = AtomicFetchOrOp;
    else if (native == atomics_xor)
        k = AtomicFetchXorOp;
    else
        MOZ_CRASH("Bad atomic operation");

    // Doubles are truncated to int32 before entering the binop node.
    MDefinition *toWrite = value;
    if (value->type() == MIRType_Double) {
        toWrite = MTruncateToInt32::New(alloc(), value);
        current->add(toWrite->toInstruction());
    }
    MAtomicTypedArrayElementBinop *binop =
        MAtomicTypedArrayElementBinop::New(alloc(), k, elements, index, arrayType, toWrite);
    binop->setResultType(getInlineReturnType());
    current->add(binop);
    current->push(binop);

    return InliningStatus_Inlined;
}
// Shared guard for the Atomics inliners: the receiver must be a known
// shared typed array of an element type we can handle, the index must
// be int32, and the call's observed return type must match what the
// lowering will produce for that element type.
bool
IonBuilder::atomicsMeetsPreconditions(CallInfo &callInfo, Scalar::Type *arrayType)
{
    if (callInfo.getArg(0)->type() != MIRType_Object)
        return false;

    if (callInfo.getArg(1)->type() != MIRType_Int32)
        return false;

    // Ensure that the first argument is a valid SharedTypedArray.
    //
    // Then check both that the element type is something we can
    // optimize and that the return type is suitable for that element
    // type.

    types::TemporaryTypeSet *arg0Types = callInfo.getArg(0)->resultTypeSet();
    if (!arg0Types)
        return false;

    *arrayType = arg0Types->getSharedTypedArrayType();
    switch (*arrayType) {
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Int16:
      case Scalar::Uint16:
      case Scalar::Int32:
        return getInlineReturnType() == MIRType_Int32;
      case Scalar::Uint32:
        // Bug 1077305: it would be attractive to allow inlining even
        // if the inline return type is Int32, which it will frequently
        // be.
        return getInlineReturnType() == MIRType_Double;
      default:
        // Excludes floating types and Uint8Clamped
        return false;
    }
}
void
IonBuilder::atomicsCheckBounds(CallInfo &callInfo, MInstruction **elements, MDefinition **index)
{
    // Emit the bounds check for the (view, index) pair of an Atomics call
    // and hand back the elements vector plus the (possibly rewritten) index.
    MDefinition *view = callInfo.getArg(0);
    *index = callInfo.getArg(1);
    *elements = nullptr;

    // The length is computed as a side effect but not needed here.
    MInstruction *unusedLength = nullptr;
    addTypedArrayLengthAndData(view, DoBoundsCheck, index, &unusedLength, elements);
}
IonBuilder::InliningStatus
IonBuilder::inlineIsConstructing(CallInfo &callInfo)
{

Просмотреть файл

@ -1265,14 +1265,14 @@ MPhi::foldsTernary()
MTest *test = pred->lastIns()->toTest();
// True branch may only dominate one edge of MPhi.
if (test->ifTrue()->dominates(block()->getPredecessor(0)) &&
if (test->ifTrue()->dominates(block()->getPredecessor(0)) ==
test->ifTrue()->dominates(block()->getPredecessor(1)))
{
return nullptr;
}
// False branch may only dominate one edge of MPhi.
if (test->ifFalse()->dominates(block()->getPredecessor(0)) &&
if (test->ifFalse()->dominates(block()->getPredecessor(0)) ==
test->ifFalse()->dominates(block()->getPredecessor(1)))
{
return nullptr;

Просмотреть файл

@ -15,6 +15,7 @@
#include "mozilla/Array.h"
#include "mozilla/DebugOnly.h"
#include "jit/AtomicOp.h"
#include "jit/FixedList.h"
#include "jit/InlineList.h"
#include "jit/IonAllocPolicy.h"
@ -8037,17 +8038,33 @@ class MArrayJoin
MDefinition *foldsTo(TempAllocator &alloc);
};
// See comments above MMemoryBarrier, below.
enum MemoryBarrierRequirement
{
DoesNotRequireMemoryBarrier,
DoesRequireMemoryBarrier
};
// Also see comments above MMemoryBarrier, below.
class MLoadTypedArrayElement
: public MBinaryInstruction
{
Scalar::Type arrayType_;
bool requiresBarrier_;
MLoadTypedArrayElement(MDefinition *elements, MDefinition *index,
Scalar::Type arrayType)
: MBinaryInstruction(elements, index), arrayType_(arrayType)
Scalar::Type arrayType, MemoryBarrierRequirement requiresBarrier)
: MBinaryInstruction(elements, index),
arrayType_(arrayType),
requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier)
{
setResultType(MIRType_Value);
setMovable();
if (requiresBarrier_)
setGuard(); // Not removable or movable
else
setMovable();
MOZ_ASSERT(elements->type() == MIRType_Elements);
MOZ_ASSERT(index->type() == MIRType_Int32);
MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::TypeMax);
@ -8057,9 +8074,10 @@ class MLoadTypedArrayElement
INSTRUCTION_HEADER(LoadTypedArrayElement)
static MLoadTypedArrayElement *New(TempAllocator &alloc, MDefinition *elements, MDefinition *index,
Scalar::Type arrayType)
Scalar::Type arrayType,
MemoryBarrierRequirement requiresBarrier=DoesNotRequireMemoryBarrier)
{
return new(alloc) MLoadTypedArrayElement(elements, index, arrayType);
return new(alloc) MLoadTypedArrayElement(elements, index, arrayType, requiresBarrier);
}
Scalar::Type arrayType() const {
@ -8069,6 +8087,9 @@ class MLoadTypedArrayElement
// Bailout if the result does not fit in an int32.
return arrayType_ == Scalar::Uint32 && type() == MIRType_Int32;
}
bool requiresMemoryBarrier() const {
return requiresBarrier_;
}
MDefinition *elements() const {
return getOperand(0);
}
@ -8076,10 +8097,16 @@ class MLoadTypedArrayElement
return getOperand(1);
}
AliasSet getAliasSet() const {
// When a barrier is needed make the instruction effectful by
// giving it a "store" effect.
if (requiresBarrier_)
return AliasSet::Store(AliasSet::TypedArrayElement);
return AliasSet::Load(AliasSet::TypedArrayElement);
}
bool congruentTo(const MDefinition *ins) const {
if (requiresBarrier_)
return false;
if (!ins->isLoadTypedArrayElement())
return false;
const MLoadTypedArrayElement *other = ins->toLoadTypedArrayElement();
@ -8214,15 +8241,22 @@ class MStoreTypedArrayElement
public StoreTypedArrayPolicy::Data
{
Scalar::Type arrayType_;
bool requiresBarrier_;
// See note in MStoreElementCommon.
bool racy_;
MStoreTypedArrayElement(MDefinition *elements, MDefinition *index, MDefinition *value,
Scalar::Type arrayType)
: MTernaryInstruction(elements, index, value), arrayType_(arrayType), racy_(false)
Scalar::Type arrayType, MemoryBarrierRequirement requiresBarrier)
: MTernaryInstruction(elements, index, value),
arrayType_(arrayType),
requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier),
racy_(false)
{
setMovable();
if (requiresBarrier_)
setGuard(); // Not removable or movable
else
setMovable();
MOZ_ASSERT(elements->type() == MIRType_Elements);
MOZ_ASSERT(index->type() == MIRType_Int32);
MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::TypeMax);
@ -8232,9 +8266,11 @@ class MStoreTypedArrayElement
INSTRUCTION_HEADER(StoreTypedArrayElement)
static MStoreTypedArrayElement *New(TempAllocator &alloc, MDefinition *elements, MDefinition *index,
MDefinition *value, Scalar::Type arrayType)
MDefinition *value, Scalar::Type arrayType,
MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier)
{
return new(alloc) MStoreTypedArrayElement(elements, index, value, arrayType);
return new(alloc) MStoreTypedArrayElement(elements, index, value, arrayType,
requiresBarrier);
}
Scalar::Type arrayType() const {
@ -8261,6 +8297,9 @@ class MStoreTypedArrayElement
AliasSet getAliasSet() const {
return AliasSet::Store(AliasSet::TypedArrayElement);
}
bool requiresMemoryBarrier() const {
return requiresBarrier_;
}
bool racy() const {
return racy_;
}
@ -11452,6 +11491,159 @@ class MRecompileCheck : public MNullaryInstruction
}
};
// All barriered operations - MMemoryBarrier, MCompareExchangeTypedArrayElement,
// and MAtomicTypedArrayElementBinop, as well as MLoadTypedArrayElement and
// MStoreTypedArrayElement when they are marked as requiring a memory barrier - have
// the following attributes:
//
// - Not movable
// - Not removable
// - Not congruent with any other instruction
// - Effectful (they alias every TypedArray store)
//
// The intended effect of those constraints is to prevent all loads
// and stores preceding the barriered operation from being moved to
// after the barriered operation, and vice versa, and to prevent the
// barriered operation from being removed or hoisted.
// Explicit memory fence MIR node; shares the attributes described in the
// barriered-operations block comment above.
class MMemoryBarrier
  : public MNullaryInstruction
{
    // The type is a combination of the memory barrier types in AtomicOp.h.
    const int type_;

    explicit MMemoryBarrier(int type)
      : type_(type)
    {
        MOZ_ASSERT((type_ & ~MembarAllbits) == 0);
        setGuard();             // Not removable
    }

  public:
    INSTRUCTION_HEADER(MemoryBarrier);

    static MMemoryBarrier *New(TempAllocator &alloc, int type=MembarFull) {
        return new(alloc) MMemoryBarrier(type);
    }
    int type() const {
        return type_;
    }

    AliasSet getAliasSet() const {
        // A "store" alias set makes the barrier effectful so typed-array
        // loads and stores cannot be reordered across it.
        return AliasSet::Store(AliasSet::TypedArrayElement);
    }
};
// MIR node for a compare-exchange on a (shared) typed array element; see
// the barriered-operations block comment above for its scheduling rules.
class MCompareExchangeTypedArrayElement
  : public MAryInstruction<4>,
    public MixPolicy< MixPolicy<ObjectPolicy<0>, IntPolicy<1> >, MixPolicy<IntPolicy<2>, IntPolicy<3> > >
{
    Scalar::Type arrayType_;

    explicit MCompareExchangeTypedArrayElement(MDefinition *elements, MDefinition *index,
                                               Scalar::Type arrayType, MDefinition *oldval,
                                               MDefinition *newval)
      : arrayType_(arrayType)
    {
        initOperand(0, elements);
        initOperand(1, index);
        initOperand(2, oldval);
        initOperand(3, newval);
        setGuard();             // Not removable
    }

  public:
    INSTRUCTION_HEADER(CompareExchangeTypedArrayElement);

    static MCompareExchangeTypedArrayElement *New(TempAllocator &alloc, MDefinition *elements,
                                                  MDefinition *index, Scalar::Type arrayType,
                                                  MDefinition *oldval, MDefinition *newval)
    {
        return new(alloc) MCompareExchangeTypedArrayElement(elements, index, arrayType, oldval, newval);
    }

    // True for the 8-bit element types.
    bool isByteArray() const {
        return (arrayType_ == Scalar::Int8 ||
                arrayType_ == Scalar::Uint8 ||
                arrayType_ == Scalar::Uint8Clamped);
    }
    MDefinition *elements() {
        return getOperand(0);
    }
    MDefinition *index() {
        return getOperand(1);
    }
    MDefinition *oldval() {
        return getOperand(2);
    }
    // Operand position of oldval, for code that needs the index itself.
    int oldvalOperand() {
        return 2;
    }
    MDefinition *newval() {
        return getOperand(3);
    }
    Scalar::Type arrayType() const {
        return arrayType_;
    }
    AliasSet getAliasSet() const {
        // "Store" alias set: effectful, so nothing is reordered across it.
        return AliasSet::Store(AliasSet::TypedArrayElement);
    }
};
// MIR node for an atomic fetch-op (add/sub/and/or/xor) on a (shared) typed
// array element; see the barriered-operations block comment above.
class MAtomicTypedArrayElementBinop
  : public MAryInstruction<3>,
    public Mix3Policy< ObjectPolicy<0>, IntPolicy<1>, IntPolicy<2> >
{
  private:
    AtomicOp op_;
    Scalar::Type arrayType_;

  protected:
    explicit MAtomicTypedArrayElementBinop(AtomicOp op, MDefinition *elements, MDefinition *index,
                                           Scalar::Type arrayType, MDefinition *value)
      : op_(op),
        arrayType_(arrayType)
    {
        initOperand(0, elements);
        initOperand(1, index);
        initOperand(2, value);
        setGuard();             // Not removable
    }

  public:
    INSTRUCTION_HEADER(AtomicTypedArrayElementBinop);

    static MAtomicTypedArrayElementBinop *New(TempAllocator &alloc, AtomicOp op,
                                              MDefinition *elements, MDefinition *index,
                                              Scalar::Type arrayType, MDefinition *value)
    {
        return new(alloc) MAtomicTypedArrayElementBinop(op, elements, index, arrayType, value);
    }

    // True for the 8-bit element types.
    bool isByteArray() const {
        return (arrayType_ == Scalar::Int8 ||
                arrayType_ == Scalar::Uint8 ||
                arrayType_ == Scalar::Uint8Clamped);
    }
    AtomicOp operation() const {
        return op_;
    }
    Scalar::Type arrayType() const {
        return arrayType_;
    }
    MDefinition *elements() {
        return getOperand(0);
    }
    MDefinition *index() {
        return getOperand(1);
    }
    MDefinition *value() {
        return getOperand(2);
    }
    AliasSet getAliasSet() const {
        // "Store" alias set: effectful, so nothing is reordered across it.
        return AliasSet::Store(AliasSet::TypedArrayElement);
    }
};
class MAsmJSNeg : public MUnaryInstruction
{
MAsmJSNeg(MDefinition *op, MIRType type)

Просмотреть файл

@ -184,6 +184,8 @@ namespace jit {
_(StoreTypedArrayElement) \
_(StoreTypedArrayElementHole) \
_(StoreTypedArrayElementStatic) \
_(CompareExchangeTypedArrayElement) \
_(AtomicTypedArrayElementBinop) \
_(EffectiveAddress) \
_(ClampToUint8) \
_(LoadFixedSlot) \
@ -251,6 +253,7 @@ namespace jit {
_(GuardThreadExclusive) \
_(InterruptCheckPar) \
_(RecompileCheck) \
_(MemoryBarrier) \
_(UnknownValue) \
_(LexicalCheck) \
_(ThrowUninitializedLexical)

Просмотреть файл

@ -348,11 +348,14 @@ class ParallelSafetyVisitor : public MDefinitionVisitor
UNSAFE_OP(AsmJSParameter)
UNSAFE_OP(AsmJSCall)
DROP_OP(RecompileCheck)
UNSAFE_OP(CompareExchangeTypedArrayElement)
UNSAFE_OP(AtomicTypedArrayElementBinop)
UNSAFE_OP(MemoryBarrier)
UNSAFE_OP(UnknownValue)
UNSAFE_OP(LexicalCheck)
UNSAFE_OP(ThrowUninitializedLexical)
// It looks like this could easily be made safe:
// It looks like these could easily be made safe:
UNSAFE_OP(ConvertElementsToDoubles)
UNSAFE_OP(MaybeCopyElementsForWrite)
};

Просмотреть файл

@ -424,6 +424,7 @@ IntPolicy<Op>::staticAdjustInputs(TempAllocator &alloc, MInstruction *def)
template bool IntPolicy<0>::staticAdjustInputs(TempAllocator &alloc, MInstruction *def);
template bool IntPolicy<1>::staticAdjustInputs(TempAllocator &alloc, MInstruction *def);
template bool IntPolicy<2>::staticAdjustInputs(TempAllocator &alloc, MInstruction *def);
template bool IntPolicy<3>::staticAdjustInputs(TempAllocator &alloc, MInstruction *def);
template <unsigned Op>
bool

Просмотреть файл

@ -2247,3 +2247,24 @@ JitRuntime::generateForkJoinGetSliceStub(JSContext *cx)
{
MOZ_CRASH("NYI");
}
void
CodeGeneratorARM::memoryBarrier(int barrier)
{
    // Emit an ARM barrier instruction implementing the requested Membar*
    // bits, using DSB when synchronization is required and DMB otherwise.
    // On ARMv6 the optional argument (BarrierST, etc) is ignored.
    if (barrier == (MembarStoreStore|MembarSynchronizing))
        masm.ma_dsb(masm.BarrierST);
    else if (barrier & MembarSynchronizing)
        masm.ma_dsb();
    else if (barrier == MembarStoreStore)
        masm.ma_dmb(masm.BarrierST);
    else if (barrier)
        masm.ma_dmb();
}
// Code generation for LMemoryBarrier: emit the fence described by the
// node's barrier-type bits.
bool
CodeGeneratorARM::visitMemoryBarrier(LMemoryBarrier *ins)
{
    memoryBarrier(ins->type());
    return true;
}

Просмотреть файл

@ -175,6 +175,8 @@ class CodeGeneratorARM : public CodeGeneratorShared
bool modICommon(MMod *mir, Register lhs, Register rhs, Register output, LSnapshot *snapshot,
Label &done);
void memoryBarrier(int barrier);
public:
CodeGeneratorARM(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);
@ -206,6 +208,8 @@ class CodeGeneratorARM : public CodeGeneratorShared
bool visitForkJoinGetSlice(LForkJoinGetSlice *ins);
bool visitMemoryBarrier(LMemoryBarrier *ins);
bool generateInvalidateEpilogue();
protected:

Просмотреть файл

@ -570,4 +570,69 @@ LIRGeneratorARM::visitSimdValueX4(MSimdValueX4 *ins)
MOZ_CRASH("NYI");
}
//__aeabi_uidiv
// Lowering of MAtomicTypedArrayElementBinop for ARM: pick registers and
// temps for the LDREX/STREX-based fetch-op sequence.
bool
LIRGeneratorARM::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins)
{
    MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
    MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
    MOZ_ASSERT(ins->arrayType() != Scalar::Float64);

    MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
    MOZ_ASSERT(ins->index()->type() == MIRType_Int32);

    const LUse elements = useRegister(ins->elements());
    const LAllocation index = useRegisterOrConstant(ins->index());

    // For most operations we don't need any temps because there are
    // enough scratch registers.  tempDef2 is never needed on ARM.
    //
    // For a Uint32Array with a known double result we need a temp for
    // the intermediate output, this is tempDef1.
    //
    // Optimization opportunity (bug 1077317): We can do better by
    // allowing 'value' to remain as an imm32 if it is small enough to
    // fit in an instruction.
    LDefinition tempDef1 = LDefinition::BogusTemp();
    LDefinition tempDef2 = LDefinition::BogusTemp();

    const LAllocation value = useRegister(ins->value());
    if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
        tempDef1 = temp();

    LAtomicTypedArrayElementBinop *lir =
        new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, tempDef1, tempDef2);

    return define(lir, ins);
}
// Lowering of MCompareExchangeTypedArrayElement for ARM: pick registers
// and an optional temp for the LDREX/STREX-based CAS sequence.
bool
LIRGeneratorARM::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins)
{
    MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
    MOZ_ASSERT(ins->arrayType() != Scalar::Float64);

    MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
    MOZ_ASSERT(ins->index()->type() == MIRType_Int32);

    const LUse elements = useRegister(ins->elements());
    const LAllocation index = useRegisterOrConstant(ins->index());

    // If the target is a floating register then we need a temp at the
    // CodeGenerator level for creating the result.
    //
    // Optimization opportunity (bug 1077317): We could do better by
    // allowing oldval to remain an immediate, if it is small enough
    // to fit in an instruction.
    const LAllocation newval = useRegister(ins->newval());
    const LAllocation oldval = useRegister(ins->oldval());
    LDefinition tempDef = LDefinition::BogusTemp();
    if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
        tempDef = temp();

    LCompareExchangeTypedArrayElement *lir =
        new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, tempDef);

    return define(lir, ins);
}

Просмотреть файл

@ -106,6 +106,8 @@ class LIRGeneratorARM : public LIRGeneratorShared
bool visitSimdTernaryBitwise(MSimdTernaryBitwise *ins);
bool visitSimdSplatX4(MSimdSplatX4 *ins);
bool visitSimdValueX4(MSimdValueX4 *ins);
bool visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins);
bool visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins);
};
typedef LIRGeneratorARM LIRGeneratorSpecific;

Просмотреть файл

@ -1804,6 +1804,15 @@ MacroAssemblerARMCompat::buildOOLFakeExitFrame(void *fakeReturnAddr)
return true;
}
// Push a frame descriptor for the current frame, then call |target| with
// the stack left in the half-pushed state expected by exit frames.
void
MacroAssemblerARMCompat::callWithExitFrame(Label *target)
{
    uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
    Push(Imm32(descriptor)); // descriptor
    ma_callIonHalfPush(target);
}
void
MacroAssemblerARMCompat::callWithExitFrame(JitCode *target)
{
@ -3719,6 +3728,17 @@ MacroAssemblerARM::ma_callIonHalfPush(const Register r)
as_blx(r);
}
void
MacroAssemblerARM::ma_callIonHalfPush(Label *label)
{
    // The stack is unaligned by 4 bytes. We push the pc to the stack to align
    // the stack before the call, when we return the pc is popped and the stack
    // is restored to its unaligned state.
    //
    // AutoForbidPools covers the following two instructions so the push
    // and the branch stay adjacent in the instruction stream.
    AutoForbidPools afp(this, 2);
    ma_push(pc);
    as_bl(label, Always);
}
void
MacroAssemblerARM::ma_call(ImmPtr dest)
{
@ -4690,4 +4710,280 @@ MacroAssemblerARMCompat::branchValueIsNurseryObject(Condition cond, ValueOperand
bind(&done);
}
namespace js {
namespace jit {

// Materialize the effective address of |src| into |r| for use by the
// exclusive-access (LDREX/STREX) sequences below.  The Address overload
// may return the base register directly when no arithmetic is needed.

template<>
Register
MacroAssemblerARMCompat::computePointer<BaseIndex>(const BaseIndex &src, Register r)
{
    Register base = src.base;
    Register index = src.index;
    uint32_t scale = Imm32::ShiftOf(src.scale).value;
    int32_t offset = src.offset;
    // r = base + (index << scale) [+ offset]
    as_add(r, base, lsl(index, scale));
    if (offset != 0)
        ma_add(r, Imm32(offset), r);
    return r;
}

template<>
Register
MacroAssemblerARMCompat::computePointer<Address>(const Address &src, Register r)
{
    // With a zero offset the base register already holds the pointer.
    if (src.offset == 0)
        return src.base;
    ma_add(src.base, Imm32(src.offset), r);
    return r;
}

} // namespace jit
} // namespace js
// Dispatcher for a compare-exchange on a 1-, 2- or 4-byte memory element;
// selects the ARMv6 read-modify-write path when byte/halfword exclusive
// accesses are unavailable.
template<typename T>
void
MacroAssemblerARMCompat::compareExchange(int nbytes, bool signExtend, const T &mem,
                                         Register oldval, Register newval, Register output)
{
    // If LDREXB/H and STREXB/H are not available we use the
    // word-width operations with read-modify-add.  That does not
    // abstract well, so fork.
    //
    // Bug 1077321: We may further optimize for ARMv8 here.
    if (nbytes < 4 && !HasLDSTREXBHD())
        compareExchangeARMv6(nbytes, signExtend, mem, oldval, newval, output);
    else
        compareExchangeARMv7(nbytes, signExtend, mem, oldval, newval, output);
}
// General algorithm:
//
// ... ptr, <addr> ; compute address of item
// dmb
// L0 ldrex* output, [ptr]
// sxt* output, output, 0 ; sign-extend if applicable
// *xt* tmp, oldval, 0 ; sign-extend or zero-extend if applicable
// cmp output, tmp
// bne L1 ; failed - values are different
// strex* tmp, newval, [ptr]
// cmp tmp, 1
// beq L0 ; failed - location is dirty, retry
// L1 dmb
//
// Discussion here: http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html.
// However note that that discussion uses 'isb' as the trailing fence.
// I've not quite figured out why, and I've gone with dmb here which
// is safe. Also see the LLVM source, which uses 'dmb ish' generally.
// (Apple's Swift CPU apparently handles ish in a non-default, faster
// way.)
// LDREX/STREX-based compare-exchange; the algorithm is described in the
// block comment above.  |output| receives the (extended) old value.
template<typename T>
void
MacroAssemblerARMCompat::compareExchangeARMv7(int nbytes, bool signExtend, const T &mem,
                                              Register oldval, Register newval, Register output)
{
    Label Lagain;
    Label Ldone;
    // NOTE(review): the algorithm comment above calls for a leading 'dmb',
    // but only a store-store barrier (BarrierST) is emitted here -- confirm
    // this is strong enough as the entry fence.
    ma_dmb(BarrierST);
    Register ptr = computePointer(mem, secondScratchReg_);
    bind(&Lagain);
    switch (nbytes) {
      case 1:
        // Extend both the loaded value and oldval so the 32-bit compare
        // below is meaningful for sub-word elements.
        as_ldrexb(output, ptr);
        if (signExtend) {
            as_sxtb(output, output, 0);
            as_sxtb(ScratchRegister, oldval, 0);
        } else {
            as_uxtb(ScratchRegister, oldval, 0);
        }
        break;
      case 2:
        as_ldrexh(output, ptr);
        if (signExtend) {
            as_sxth(output, output, 0);
            as_sxth(ScratchRegister, oldval, 0);
        } else {
            as_uxth(ScratchRegister, oldval, 0);
        }
        break;
      case 4:
        MOZ_ASSERT(!signExtend);
        as_ldrex(output, ptr);
        break;
    }
    // Compare against the (extended) expected value; bail out on mismatch.
    if (nbytes < 4)
        as_cmp(output, O2Reg(ScratchRegister));
    else
        as_cmp(output, O2Reg(oldval));
    as_b(&Ldone, NotEqual);
    // Attempt the store; retry the whole sequence if the reservation was
    // lost (STREX wrote 1 into ScratchRegister).
    switch (nbytes) {
      case 1:
        as_strexb(ScratchRegister, newval, ptr);
        break;
      case 2:
        as_strexh(ScratchRegister, newval, ptr);
        break;
      case 4:
        as_strex(ScratchRegister, newval, ptr);
        break;
    }
    as_cmp(ScratchRegister, Imm8(1));
    as_b(&Lagain, Equal);
    bind(&Ldone);
    ma_dmb();
}
// Sub-word compare-exchange fallback for CPUs without LDREXB/H; not yet
// implemented (see bug below).
template<typename T>
void
MacroAssemblerARMCompat::compareExchangeARMv6(int nbytes, bool signExtend, const T &mem,
                                              Register oldval, Register newval, Register output)
{
    // Bug 1077318: Must use read-modify-write with LDREX / STREX.
    MOZ_ASSERT(nbytes == 1 || nbytes == 2);
    MOZ_CRASH("NYI");
}
// Explicit instantiations of compareExchange for the two addressing modes
// used by the code generator.
template void
js::jit::MacroAssemblerARMCompat::compareExchange(int nbytes, bool signExtend,
                                                  const Address &address, Register oldval,
                                                  Register newval, Register output);
template void
js::jit::MacroAssemblerARMCompat::compareExchange(int nbytes, bool signExtend,
                                                  const BaseIndex &address, Register oldval,
                                                  Register newval, Register output);
// Immediate-operand variant of atomicFetchOp; intentionally unimplemented
// (see comment) but required for linking.
template<typename T>
void
MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32 &value,
                                       const T &mem, Register temp, Register output)
{
    // The Imm32 value case is not needed yet because lowering always
    // forces the value into a register at present (bug 1077317).  But
    // the method must be present for the platform-independent code to
    // link.
    MOZ_CRASH("Feature NYI");
}
// General algorithm:
//
// ... ptr, <addr> ; compute address of item
// dmb
// L0 ldrex* output, [ptr]
// sxt* output, output, 0 ; sign-extend if applicable
// OP tmp, output, value ; compute value to store
// strex* tmp, tmp, [ptr]
// cmp tmp, 1
// beq L0 ; failed - location is dirty, retry
// dmb ; ordering barrier required
//
// Also see notes above at compareExchange re the barrier strategy.
//
// Observe that the value being operated into the memory element need
// not be sign-extended because no OP will make use of bits to the
// left of the bits indicated by the width of the element, and neither
// output nor the bits stored are affected by OP.
// Dispatcher for an atomic fetch-op on a 1-, 2- or 4-byte memory element;
// selects the ARMv6 path when byte/halfword exclusive accesses are
// unavailable.  |output| receives the old value.
template<typename T>
void
MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
                                       const Register &value, const T &mem, Register temp,
                                       Register output)
{
    // Fork for non-word operations on ARMv6.
    //
    // Bug 1077321: We may further optimize for ARMv8 here.
    if (nbytes < 4 && !HasLDSTREXBHD())
        atomicFetchOpARMv6(nbytes, signExtend, op, value, mem, temp, output);
    else {
        // The ARMv7 path needs no temp register.
        MOZ_ASSERT(temp == InvalidReg);
        atomicFetchOpARMv7(nbytes, signExtend, op, value, mem, output);
    }
}
// LDREX/STREX-based fetch-op; the algorithm is described in the block
// comment above.  |output| receives the (extended) old value; the new
// value is computed into ScratchRegister and stored back.
template<typename T>
void
MacroAssemblerARMCompat::atomicFetchOpARMv7(int nbytes, bool signExtend, AtomicOp op,
                                            const Register &value, const T &mem, Register output)
{
    Label Lagain;
    Register ptr = computePointer(mem, secondScratchReg_);
    ma_dmb();
    bind(&Lagain);
    // Load-exclusive the current value, extending sub-word elements.
    switch (nbytes) {
      case 1:
        as_ldrexb(output, ptr);
        if (signExtend)
            as_sxtb(output, output, 0);
        break;
      case 2:
        as_ldrexh(output, ptr);
        if (signExtend)
            as_sxth(output, output, 0);
        break;
      case 4:
        MOZ_ASSERT(!signExtend);
        as_ldrex(output, ptr);
        break;
    }
    // Compute the new value into ScratchRegister.
    switch (op) {
      case AtomicFetchAddOp:
        as_add(ScratchRegister, output, O2Reg(value));
        break;
      case AtomicFetchSubOp:
        as_sub(ScratchRegister, output, O2Reg(value));
        break;
      case AtomicFetchAndOp:
        as_and(ScratchRegister, output, O2Reg(value));
        break;
      case AtomicFetchOrOp:
        as_orr(ScratchRegister, output, O2Reg(value));
        break;
      case AtomicFetchXorOp:
        as_eor(ScratchRegister, output, O2Reg(value));
        break;
    }
    // Store-exclusive; retry if the reservation was lost.
    //
    // NOTE(review): STREX with the status register (Rd) equal to the
    // stored register (Rt) -- ScratchRegister used for both here -- is
    // UNPREDICTABLE per the ARM Architecture Reference Manual; confirm a
    // distinct status register is not required.
    switch (nbytes) {
      case 1:
        as_strexb(ScratchRegister, ScratchRegister, ptr);
        break;
      case 2:
        as_strexh(ScratchRegister, ScratchRegister, ptr);
        break;
      case 4:
        as_strex(ScratchRegister, ScratchRegister, ptr);
        break;
    }
    as_cmp(ScratchRegister, Imm8(1));
    as_b(&Lagain, Equal);
    ma_dmb();
}
// Sub-word fetch-op fallback for CPUs without LDREXB/H; not yet
// implemented (see bug below).
template<typename T>
void
MacroAssemblerARMCompat::atomicFetchOpARMv6(int nbytes, bool signExtend, AtomicOp op,
                                            const Register &value, const T &mem, Register temp,
                                            Register output)
{
    // Bug 1077318: Must use read-modify-write with LDREX / STREX.
    MOZ_ASSERT(nbytes == 1 || nbytes == 2);
    MOZ_CRASH("NYI");
}
// Explicit instantiations of atomicFetchOp for the Imm32/Register operand
// forms crossed with the two addressing modes used by the code generator.
template void
js::jit::MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
                                                const Imm32 &value, const Address &mem,
                                                Register temp, Register output);
template void
js::jit::MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
                                                const Imm32 &value, const BaseIndex &mem,
                                                Register temp, Register output);
template void
js::jit::MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
                                                const Register &value, const Address &mem,
                                                Register temp, Register output);
template void
js::jit::MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
                                                const Register &value, const BaseIndex &mem,
                                                Register temp, Register output);
#endif

Просмотреть файл

@ -12,6 +12,7 @@
#include "jsopcode.h"
#include "jit/arm/Assembler-arm.h"
#include "jit/AtomicOp.h"
#include "jit/IonCaches.h"
#include "jit/IonFrames.h"
#include "jit/MoveResolver.h"
@ -406,6 +407,9 @@ class MacroAssemblerARM : public Assembler
// Calls an ion function, assuming that the stack is currently not 8 byte
// aligned.
void ma_callIonHalfPush(const Register reg);
// Calls an ion function, assuming that the stack is currently not 8 byte
// aligned.
void ma_callIonHalfPush(Label *label);
void ma_call(ImmPtr dest);
@ -1275,6 +1279,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
// non-function. Returns offset to be passed to markSafepointAt().
bool buildFakeExitFrame(Register scratch, uint32_t *offset);
void callWithExitFrame(Label *target);
void callWithExitFrame(JitCode *target);
void callWithExitFrame(JitCode *target, Register dynStack);
@ -1420,6 +1425,172 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
ma_vstr(VFPRegister(src).singleOverlay(), addr.base, addr.index, scale);
}
private:
template<typename T>
Register computePointer(const T &src, Register r);
template<typename T>
void compareExchangeARMv6(int nbytes, bool signExtend, const T &mem, Register oldval,
Register newval, Register output);
template<typename T>
void compareExchangeARMv7(int nbytes, bool signExtend, const T &mem, Register oldval,
Register newval, Register output);
template<typename T>
void compareExchange(int nbytes, bool signExtend, const T &address, Register oldval,
Register newval, Register output);
template<typename T>
void atomicFetchOpARMv6(int nbytes, bool signExtend, AtomicOp op, const Register &value,
const T &mem, Register temp, Register output);
template<typename T>
void atomicFetchOpARMv7(int nbytes, bool signExtend, AtomicOp op, const Register &value,
const T &mem, Register output);
template<typename T>
void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32 &value,
const T &address, Register temp, Register output);
template<typename T>
void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register &value,
const T &address, Register temp, Register output);
public:
// T in {Address,BaseIndex}
// S in {Imm32,Register}
template<typename T>
void compareExchange8SignExtend(const T &mem, Register oldval, Register newval, Register output)
{
compareExchange(1, true, mem, oldval, newval, output);
}
template<typename T>
void compareExchange8ZeroExtend(const T &mem, Register oldval, Register newval, Register output)
{
compareExchange(1, false, mem, oldval, newval, output);
}
template<typename T>
void compareExchange16SignExtend(const T &mem, Register oldval, Register newval, Register output)
{
compareExchange(2, true, mem, oldval, newval, output);
}
template<typename T>
void compareExchange16ZeroExtend(const T &mem, Register oldval, Register newval, Register output)
{
compareExchange(2, false, mem, oldval, newval, output);
}
template<typename T>
void compareExchange32(const T &mem, Register oldval, Register newval, Register output) {
compareExchange(4, false, mem, oldval, newval, output);
}
template<typename T, typename S>
void atomicFetchAdd8SignExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchAdd8ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchAdd16SignExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchAdd16ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchAdd32(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchSub8SignExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchSub8ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchSub16SignExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchSub16ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchSub32(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchAnd8SignExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchAnd8ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchAnd16SignExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchAnd16ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchAnd32(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchOr8SignExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchOr8ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchOr16SignExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchOr16ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchOr32(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchXor8SignExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchXor8ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchXor16SignExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchXor16ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, temp, output);
}
template<typename T, typename S>
void atomicFetchXor32(const S &value, const T &mem, Register temp, Register output) {
atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, temp, output);
}
void clampIntToUint8(Register reg) {
// Look at (reg >> 8) if it is 0, then reg shouldn't be clamped if it is
// <0, then we want to clamp to 0, otherwise, we wish to clamp to 255

Просмотреть файл

@ -924,32 +924,43 @@ MacroAssemblerMIPS::ma_b(Label *label, JumpKind jumpKind)
}
void
MacroAssemblerMIPS::ma_bal(Label *label, JumpKind jumpKind)
MacroAssemblerMIPS::ma_bal(Label *label, DelaySlotFill delaySlotFill)
{
branchWithCode(getBranchCode(BranchIsCall), label, jumpKind);
if (label->bound()) {
// Generate the long jump for calls because return address has to be
// the address after the reserved block.
addLongJump(nextOffset());
ma_liPatchable(ScratchRegister, Imm32(label->offset()));
as_jalr(ScratchRegister);
if (delaySlotFill == FillDelaySlot)
as_nop();
return;
}
// Second word holds a pointer to the next branch in label's chain.
uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
// Make the whole branch continous in the buffer.
m_buffer.ensureSpace(4 * sizeof(uint32_t));
BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
writeInst(nextInChain);
label->use(bo.getOffset());
// Leave space for long jump.
as_nop();
if (delaySlotFill == FillDelaySlot)
as_nop();
}
void
MacroAssemblerMIPS::branchWithCode(InstImm code, Label *label, JumpKind jumpKind)
{
InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
if (label->bound()) {
int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
// Generate the long jump for calls because return address has to be
// the address after the reserved block.
if (code.encode() == inst_bgezal.encode()) {
MOZ_ASSERT(jumpKind != ShortJump);
// Handle long call
addLongJump(nextOffset());
ma_liPatchable(ScratchRegister, Imm32(label->offset()));
as_jalr(ScratchRegister);
as_nop();
return;
}
if (BOffImm16::IsInRange(offset))
jumpKind = ShortJump;
@ -997,8 +1008,7 @@ MacroAssemblerMIPS::branchWithCode(InstImm code, Label *label, JumpKind jumpKind
return;
}
bool conditional = (code.encode() != inst_bgezal.encode() &&
code.encode() != inst_beq.encode());
bool conditional = code.encode() != inst_beq.encode();
// Make the whole branch continous in the buffer.
m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t));
@ -1489,6 +1499,15 @@ MacroAssemblerMIPSCompat::buildOOLFakeExitFrame(void *fakeReturnAddr)
return true;
}
// Call 'target' with an Ion exit frame: push a frame descriptor built
// from the current framePushed() and tagged JitFrame_IonJS, then emit the
// call via ma_callIonHalfPush(), which takes care of pushing the return
// address (the "half push").
void
MacroAssemblerMIPSCompat::callWithExitFrame(Label *target)
{
uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
Push(Imm32(descriptor)); // descriptor
ma_callIonHalfPush(target);
}
void
MacroAssemblerMIPSCompat::callWithExitFrame(JitCode *target)
{
@ -3088,6 +3107,17 @@ MacroAssemblerMIPS::ma_callIonHalfPush(const Register r)
as_sw(ra, StackPointer, 0);
}
// This macroinstruction calls the ion code and pushes the return address
// to the stack in the case when the stack is not aligned.
void
MacroAssemblerMIPS::ma_callIonHalfPush(Label *label)
{
// This is a MIPS hack to push return address during jalr delay slot.
// Reserve one word on the stack for the return address.
as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
// NOTE(review): the actual call sequence (branch-and-link plus storing
// 'ra' into the slot reserved above, cf. the Register overload which
// does as_sw(ra, StackPointer, 0)) appears to be missing here -- verify
// these TODOs against the original changeset.
// TODO
// TODO
}
void
MacroAssemblerMIPS::ma_call(ImmPtr dest)
{

Просмотреть файл

@ -37,6 +37,12 @@ enum JumpKind
ShortJump = 1
};
// Controls whether a branch-and-link emitter (e.g. ma_bal) also emits the
// nop that fills the MIPS branch delay slot, or leaves the slot for the
// caller to fill with a useful instruction.
enum DelaySlotFill
{
DontFillDelaySlot = 0,
FillDelaySlot = 1
};
struct ImmTag : public Imm32
{
ImmTag(JSValueTag mask)
@ -233,7 +239,7 @@ class MacroAssemblerMIPS : public Assembler
}
void ma_b(Label *l, JumpKind jumpKind = LongJump);
void ma_bal(Label *l, JumpKind jumpKind = LongJump);
void ma_bal(Label *l, DelaySlotFill delaySlotFill = FillDelaySlot);
// fp instructions
void ma_lis(FloatRegister dest, float value);
@ -977,6 +983,7 @@ public:
// non-function. Returns offset to be passed to markSafepointAt().
bool buildFakeExitFrame(Register scratch, uint32_t *offset);
void callWithExitFrame(Label *target);
void callWithExitFrame(JitCode *target);
void callWithExitFrame(JitCode *target, Register dynStack);

Просмотреть файл

@ -78,6 +78,8 @@ class LIRGeneratorNone : public LIRGeneratorShared
// Lowering stubs: these MIR nodes are not supported by this backend, so
// every visitor (and the table-switch factories below) simply aborts.
bool visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins) { MOZ_CRASH(); }
bool visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins) { MOZ_CRASH(); }
bool visitForkJoinGetSlice(MForkJoinGetSlice *ins) { MOZ_CRASH(); }
bool visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins) { MOZ_CRASH(); }
bool visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins) { MOZ_CRASH(); }
LTableSwitch *newLTableSwitch(LAllocation, LDefinition, MTableSwitch *) { MOZ_CRASH(); }
LTableSwitchV *newLTableSwitchV(MTableSwitch *) { MOZ_CRASH(); }

Просмотреть файл

@ -185,6 +185,7 @@ class MacroAssemblerNone : public Assembler
// ABI-call and exit-frame entry points are unimplemented on this backend;
// any attempt to use them aborts.
void setupUnalignedABICall(uint32_t, Register) { MOZ_CRASH(); }
template <typename T> void passABIArg(T, MoveOp::Type v = MoveOp::GENERAL) { MOZ_CRASH(); }
void callWithExitFrame(Label *) { MOZ_CRASH(); }
void callWithExitFrame(JitCode *) { MOZ_CRASH(); }
void callWithExitFrame(JitCode *, Register) { MOZ_CRASH(); }
@ -296,6 +297,37 @@ class MacroAssemblerNone : public Assembler
template <typename T> void computeEffectiveAddress(T, Register) { MOZ_CRASH(); }
// Atomic memory operations mirror the signatures of the real backends but
// are unimplemented here; every call aborts.  Note that, as on the real
// backends, the And/Or/Xor fetch-ops take an extra scratch 'temp' register
// while Add/Sub do not -- presumably because the latter map to a single
// fetch-add style primitive; confirm against the x86/ARM implementations.
template <typename T> void compareExchange8SignExtend(const T &mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
template <typename T> void compareExchange8ZeroExtend(const T &mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
template <typename T> void compareExchange16SignExtend(const T &mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
template <typename T> void compareExchange16ZeroExtend(const T &mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
template <typename T> void compareExchange32(const T &mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAdd8SignExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAdd8ZeroExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAdd16SignExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAdd16ZeroExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAdd32(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchSub8SignExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchSub8ZeroExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchSub16SignExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchSub16ZeroExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchSub32(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAnd8SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAnd8ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAnd16SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAnd16ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAnd32(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchOr8SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchOr8ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchOr16SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchOr16ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchOr32(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchXor8SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchXor8ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchXor16SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchXor16ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchXor32(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
void clampIntToUint8(Register) { MOZ_CRASH(); }
Register splitTagForTest(ValueOperand) { MOZ_CRASH(); }

Просмотреть файл

@ -629,6 +629,9 @@ class AssemblerX86Shared : public AssemblerShared
MOZ_CRASH("unexpected operand kind");
}
}
// movsbl: move byte to doubleword with sign extension (x86 MOVSX),
// register-to-register form.
void movsbl(Register src, Register dest) {
masm.movsbl_rr(src.code(), dest.code());
}
void movsbl(const Operand &src, Register dest) {
switch (src.kind()) {
case Operand::MEM_REG_DISP:
@ -641,6 +644,21 @@ class AssemblerX86Shared : public AssemblerShared
MOZ_CRASH("unexpected operand kind");
}
}
void movb(const Operand &src, Register dest) {
switch (src.kind()) {
case Operand::MEM_REG_DISP:
masm.movb_mr(src.disp(), src.base(), dest.code());
break;
case Operand::MEM_SCALE:
masm.movb_mr(src.disp(), src.base(), src.index(), src.scale(), dest.code());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
}
// Move an 8-bit immediate into 'dest'; the immediate is truncated to its
// low byte before encoding.
void movb(Imm32 src, Register dest) {
masm.movb_i8r(src.value & 255, dest.code());
}
void movb(Register src, const Operand &dest) {
switch (dest.kind()) {
case Operand::MEM_REG_DISP:
@ -683,6 +701,14 @@ class AssemblerX86Shared : public AssemblerShared
// movzwl: move word to doubleword with zero extension (x86 MOVZX),
// register-to-register form.
void movzwl(Register src, Register dest) {
masm.movzwl_rr(src.code(), dest.code());
}
// 16-bit load: emit an operand-size (0x66) prefix and reuse the 32-bit
// load encoding.  NOTE(review): a 16-bit mov leaves the upper bits of
// 'dest' unmodified -- callers presumably extend afterwards; confirm.
void movw(const Operand &src, Register dest) {
masm.prefix_16_for_32();
movl(src, dest);
}
// 16-bit immediate move via the 0x66 operand-size prefix over movl.
// NOTE(review): with the prefix the instruction consumes a 16-bit
// immediate; verify that movl's immediate emission honors that width
// rather than emitting four bytes.
void movw(Imm32 src, Register dest) {
masm.prefix_16_for_32();
movl(src, dest);
}
void movw(Register src, const Operand &dest) {
switch (dest.kind()) {
case Operand::MEM_REG_DISP:
@ -707,6 +733,9 @@ class AssemblerX86Shared : public AssemblerShared
MOZ_CRASH("unexpected operand kind");
}
}
// movswl: move word to doubleword with sign extension (x86 MOVSX),
// register-to-register form.
void movswl(Register src, Register dest) {
masm.movswl_rr(src.code(), dest.code());
}
void movswl(const Operand &src, Register dest) {
switch (src.kind()) {
case Operand::MEM_REG_DISP:
@ -921,9 +950,7 @@ class AssemblerX86Shared : public AssemblerShared
masm.int3();
}
#ifdef DEBUG
static bool HasSSE2() { return CPUInfo::IsSSE2Present(); }
#endif
static bool HasSSE3() { return CPUInfo::IsSSE3Present(); }
static bool HasSSE41() { return CPUInfo::IsSSE41Present(); }
static bool SupportsFloatingPoint() { return CPUInfo::IsSSE2Present(); }
@ -1060,6 +1087,12 @@ class AssemblerX86Shared : public AssemblerShared
MOZ_CRASH("unexpected operand kind");
}
}
// Note, lock_addl() is used for a memory barrier on non-SSE2 systems.
// Do not optimize it away or replace it with XADDL or similar: the LOCK
// prefix on the plain addl is what provides the fence.
void lock_addl(Imm32 imm, const Operand &op) {
masm.prefix_lock();
addl(imm, op);
}
// Subtract a 32-bit immediate from a register.
void subl(Imm32 imm, Register dest) {
masm.subl_ir(imm.value, dest.code());
}
@ -1311,24 +1344,69 @@ class AssemblerX86Shared : public AssemblerShared
decl(op);
}
void lock_cmpxchg32(Register src, const Operand &op) {
void lock_cmpxchg8(Register src, const Operand &mem) {
masm.prefix_lock();
switch (op.kind()) {
switch (mem.kind()) {
case Operand::MEM_REG_DISP:
masm.cmpxchg32(src.code(), op.disp(), op.base());
masm.cmpxchg8(src.code(), mem.disp(), mem.base());
break;
case Operand::MEM_SCALE:
masm.cmpxchg8(src.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
}
// LOCKed 16-bit compare-and-exchange against 'mem'.  'src' supplies the
// replacement value; per x86 CMPXCHG semantics the expected old value is
// carried in the accumulator register.
void lock_cmpxchg16(Register src, const Operand &mem) {
    masm.prefix_lock();
    const Operand::Kind kind = mem.kind();
    if (kind == Operand::MEM_REG_DISP) {
        masm.cmpxchg16(src.code(), mem.disp(), mem.base());
    } else if (kind == Operand::MEM_SCALE) {
        masm.cmpxchg16(src.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
    } else {
        MOZ_CRASH("unexpected operand kind");
    }
}
// LOCKed 32-bit compare-and-exchange against 'mem'; mirrors the 8/16-bit
// variants but with doubleword operands.  Only the two memory addressing
// forms are supported.
void lock_cmpxchg32(Register src, const Operand &mem) {
masm.prefix_lock();
switch (mem.kind()) {
case Operand::MEM_REG_DISP:
masm.cmpxchg32(src.code(), mem.disp(), mem.base());
break;
case Operand::MEM_SCALE:
masm.cmpxchg32(src.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
}
void xaddl(Register srcdest, const Operand &mem) {
void lock_xaddb(Register srcdest, const Operand &mem) {
switch (mem.kind()) {
case Operand::MEM_REG_DISP:
masm.xaddl_rm(srcdest.code(), mem.disp(), mem.base());
masm.lock_xaddb_rm(srcdest.code(), mem.disp(), mem.base());
break;
case Operand::MEM_SCALE:
masm.xaddl_rm(srcdest.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
masm.lock_xaddb_rm(srcdest.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
}
// LOCKed 16-bit exchange-and-add: the operand-size (0x66) prefix narrows
// the 32-bit lock_xaddl encoding to a word operation.
void lock_xaddw(Register srcdest, const Operand &mem) {
masm.prefix_16_for_32();
lock_xaddl(srcdest, mem);
}
void lock_xaddl(Register srcdest, const Operand &mem) {
switch (mem.kind()) {
case Operand::MEM_REG_DISP:
masm.lock_xaddl_rm(srcdest.code(), mem.disp(), mem.base());
break;
case Operand::MEM_SCALE:
masm.lock_xaddl_rm(srcdest.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
break;
default:
MOZ_CRASH("unexpected operand kind");

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше