Mirror of https://github.com/mozilla/gecko-dev.git
Merge mozilla-central to autoland. CLOSED TREE
Commit 78e651853f
@@ -17,6 +17,89 @@ const uint32_t kSnapshotTimeoutMs = 20000;
}  // namespace

/**
 * Coalescing manipulation queue used by `LSSnapshot`. Used by `LSSnapshot` to
 * buffer and coalesce manipulations before they are sent to the parent process,
 * when a Snapshot Checkpoints. (This can only be done when there are no
 * observers for other content processes.)
 */
class SnapshotWriteOptimizer final
    : public LSWriteOptimizer<nsAString, nsString> {
 public:
  void Enumerate(nsTArray<LSWriteInfo>& aWriteInfos);
};

void SnapshotWriteOptimizer::Enumerate(nsTArray<LSWriteInfo>& aWriteInfos) {
  AssertIsOnOwningThread();

  // The mWriteInfos hash table contains all write infos, but it keeps them in
  // an arbitrary order, which means write infos need to be sorted before being
  // processed.

  nsTArray<WriteInfo*> writeInfos;
  GetSortedWriteInfos(writeInfos);

  for (WriteInfo* writeInfo : writeInfos) {
    switch (writeInfo->GetType()) {
      case WriteInfo::InsertItem: {
        auto insertItemInfo = static_cast<InsertItemInfo*>(writeInfo);

        LSSetItemInfo setItemInfo;
        setItemInfo.key() = insertItemInfo->GetKey();
        setItemInfo.value() = LSValue(insertItemInfo->GetValue());

        aWriteInfos.AppendElement(std::move(setItemInfo));

        break;
      }

      case WriteInfo::UpdateItem: {
        auto updateItemInfo = static_cast<UpdateItemInfo*>(writeInfo);

        if (updateItemInfo->UpdateWithMove()) {
          // See the comment in LSWriteOptimizer::InsertItem for more details
          // about the UpdateWithMove flag.

          LSRemoveItemInfo removeItemInfo;
          removeItemInfo.key() = updateItemInfo->GetKey();

          aWriteInfos.AppendElement(std::move(removeItemInfo));
        }

        LSSetItemInfo setItemInfo;
        setItemInfo.key() = updateItemInfo->GetKey();
        setItemInfo.value() = LSValue(updateItemInfo->GetValue());

        aWriteInfos.AppendElement(std::move(setItemInfo));

        break;
      }

      case WriteInfo::DeleteItem: {
        auto deleteItemInfo = static_cast<DeleteItemInfo*>(writeInfo);

        LSRemoveItemInfo removeItemInfo;
        removeItemInfo.key() = deleteItemInfo->GetKey();

        aWriteInfos.AppendElement(std::move(removeItemInfo));

        break;
      }

      case WriteInfo::Truncate: {
        LSClearInfo clearInfo;

        aWriteInfos.AppendElement(std::move(clearInfo));

        break;
      }

      default:
        MOZ_CRASH("Bad type!");
    }
  }
}

LSSnapshot::LSSnapshot(LSDatabase* aDatabase)
    : mDatabase(aDatabase),
      mActor(nullptr),
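The class comment above captures the core idea of this patch: while no other content process observes the origin, individual setItem/removeItem/clear calls are only buffered, and the parent is sent the coalesced net effect at checkpoint time. A minimal standalone sketch of that per-key coalescing, using std::map and std::string as stand-ins for the Gecko containers (all names below are illustrative, not the real API):

#include <map>
#include <string>

// Illustrative stand-in for the per-key bookkeeping done by LSWriteOptimizer.
struct MiniCoalescer {
  struct Pending {
    enum Kind { Insert, Update, Delete } kind;
    std::string value;  // unused for Delete
  };

  std::map<std::string, Pending> pending;  // at most one record per key
  bool truncated = false;                  // a buffered "clear everything"

  void SetItem(const std::string& key, const std::string& value) {
    auto it = pending.find(key);
    if (it != pending.end() && it->second.kind == Pending::Delete) {
      it->second = {Pending::Update, value};  // delete + re-add collapses
    } else if (it != pending.end()) {
      it->second.value = value;               // the later write wins
    } else {
      pending[key] = {Pending::Insert, value};
    }
  }

  void RemoveItem(const std::string& key) {
    auto it = pending.find(key);
    if (it != pending.end() && it->second.kind == Pending::Insert) {
      pending.erase(it);   // insert followed by delete cancels out entirely
    } else {
      pending[key] = {Pending::Delete, {}};
    }
  }

  void Clear() {
    pending.clear();       // everything buffered before the clear is moot
    truncated = true;
  }
};

With this shape, setItem("a", "1"); setItem("a", "2"); removeItem("a") leaves nothing to send for "a", which is the saving SendCheckpoint gets when no other process is observing the origin.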
@@ -25,6 +108,7 @@ LSSnapshot::LSSnapshot(LSDatabase* aDatabase)
      mExactUsage(0),
      mPeakUsage(0),
      mLoadState(LoadState::Initial),
      mHasOtherProcessObservers(false),
      mExplicit(false),
      mHasPendingStableStateCallback(false),
      mHasPendingTimerCallback(false),

@@ -102,12 +186,20 @@ nsresult LSSnapshot::Init(const nsAString& aKey,
  mLoadState = aInitInfo.loadState();

  mHasOtherProcessObservers = aInitInfo.hasOtherProcessObservers();

  mExplicit = aExplicit;

#ifdef DEBUG
  mInitialized = true;
#endif

  if (mHasOtherProcessObservers) {
    mWriteAndNotifyInfos = new nsTArray<LSWriteAndNotifyInfo>();
  } else {
    mWriteOptimizer = new SnapshotWriteOptimizer();
  }

  if (!mExplicit) {
    mTimer = NS_NewTimer();
    MOZ_ASSERT(mTimer);
@@ -241,12 +333,24 @@ nsresult LSSnapshot::SetItem(const nsAString& aKey, const nsAString& aValue,
      mLength++;
    }

    LSSetItemInfo setItemInfo;
    setItemInfo.key() = aKey;
    setItemInfo.oldValue() = LSValue(oldValue);
    setItemInfo.value() = LSValue(aValue);
    if (mHasOtherProcessObservers) {
      MOZ_ASSERT(mWriteAndNotifyInfos);

    mWriteInfos.AppendElement(std::move(setItemInfo));
      LSSetItemAndNotifyInfo setItemAndNotifyInfo;
      setItemAndNotifyInfo.key() = aKey;
      setItemAndNotifyInfo.oldValue() = LSValue(oldValue);
      setItemAndNotifyInfo.value() = LSValue(aValue);

      mWriteAndNotifyInfos->AppendElement(std::move(setItemAndNotifyInfo));
    } else {
      MOZ_ASSERT(mWriteOptimizer);

      if (oldValue.IsVoid()) {
        mWriteOptimizer->InsertItem(aKey, aValue);
      } else {
        mWriteOptimizer->UpdateItem(aKey, aValue);
      }
    }
  }

  aNotifyInfo.changed() = changed;

@@ -287,11 +391,19 @@ nsresult LSSnapshot::RemoveItem(const nsAString& aKey,
      mLength--;
    }

    LSRemoveItemInfo removeItemInfo;
    removeItemInfo.key() = aKey;
    removeItemInfo.oldValue() = LSValue(oldValue);
    if (mHasOtherProcessObservers) {
      MOZ_ASSERT(mWriteAndNotifyInfos);

    mWriteInfos.AppendElement(std::move(removeItemInfo));
      LSRemoveItemAndNotifyInfo removeItemAndNotifyInfo;
      removeItemAndNotifyInfo.key() = aKey;
      removeItemAndNotifyInfo.oldValue() = LSValue(oldValue);

      mWriteAndNotifyInfos->AppendElement(std::move(removeItemAndNotifyInfo));
    } else {
      MOZ_ASSERT(mWriteOptimizer);

      mWriteOptimizer->DeleteItem(aKey);
    }
  }

  aNotifyInfo.changed() = changed;

@@ -334,9 +446,17 @@ nsresult LSSnapshot::Clear(LSNotifyInfo& aNotifyInfo) {
    mValues.Clear();

    LSClearInfo clearInfo;
    if (mHasOtherProcessObservers) {
      MOZ_ASSERT(mWriteAndNotifyInfos);

    mWriteInfos.AppendElement(std::move(clearInfo));
      LSClearInfo clearInfo;

      mWriteAndNotifyInfos->AppendElement(std::move(clearInfo));
    } else {
      MOZ_ASSERT(mWriteOptimizer);

      mWriteOptimizer->Truncate();
    }
  }

  aNotifyInfo.changed() = changed;
@@ -593,25 +713,66 @@ nsresult LSSnapshot::EnsureAllKeys() {
    newValues.Put(key, VoidString());
  }

  for (uint32_t index = 0; index < mWriteInfos.Length(); index++) {
    const LSWriteInfo& writeInfo = mWriteInfos[index];
  if (mHasOtherProcessObservers) {
    MOZ_ASSERT(mWriteAndNotifyInfos);

    switch (writeInfo.type()) {
      case LSWriteInfo::TLSSetItemInfo: {
        newValues.Put(writeInfo.get_LSSetItemInfo().key(), VoidString());
        break;
      }
      case LSWriteInfo::TLSRemoveItemInfo: {
        newValues.Remove(writeInfo.get_LSRemoveItemInfo().key());
        break;
      }
      case LSWriteInfo::TLSClearInfo: {
        newValues.Clear();
        break;
      }
    if (!mWriteAndNotifyInfos->IsEmpty()) {
      for (uint32_t index = 0; index < mWriteAndNotifyInfos->Length();
           index++) {
        const LSWriteAndNotifyInfo& writeAndNotifyInfo =
            mWriteAndNotifyInfos->ElementAt(index);

      default:
        MOZ_CRASH("Should never get here!");
        switch (writeAndNotifyInfo.type()) {
          case LSWriteAndNotifyInfo::TLSSetItemAndNotifyInfo: {
            newValues.Put(writeAndNotifyInfo.get_LSSetItemAndNotifyInfo().key(),
                          VoidString());
            break;
          }
          case LSWriteAndNotifyInfo::TLSRemoveItemAndNotifyInfo: {
            newValues.Remove(
                writeAndNotifyInfo.get_LSRemoveItemAndNotifyInfo().key());
            break;
          }
          case LSWriteAndNotifyInfo::TLSClearInfo: {
            newValues.Clear();
            break;
          }

          default:
            MOZ_CRASH("Should never get here!");
        }
      }
    }
  } else {
    MOZ_ASSERT(mWriteOptimizer);

    if (mWriteOptimizer->HasWrites()) {
      nsTArray<LSWriteInfo> writeInfos;
      mWriteOptimizer->Enumerate(writeInfos);

      MOZ_ASSERT(!writeInfos.IsEmpty());

      for (uint32_t index = 0; index < writeInfos.Length(); index++) {
        const LSWriteInfo& writeInfo = writeInfos[index];

        switch (writeInfo.type()) {
          case LSWriteInfo::TLSSetItemInfo: {
            newValues.Put(writeInfo.get_LSSetItemInfo().key(), VoidString());
            break;
          }
          case LSWriteInfo::TLSRemoveItemInfo: {
            newValues.Remove(writeInfo.get_LSRemoveItemInfo().key());
            break;
          }
          case LSWriteInfo::TLSClearInfo: {
            newValues.Clear();
            break;
          }

          default:
            MOZ_CRASH("Should never get here!");
        }
      }
    }
  }
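EnsureAllKeys has to reconcile the key list obtained from the parent with writes that are still buffered on this side, which is what the loops above do over the LSWriteInfo/LSWriteAndNotifyInfo unions. A reduced, self-contained sketch of that replay step (hypothetical types, not the Gecko ones):

#include <set>
#include <string>
#include <vector>

struct ReplayOp {
  enum Kind { Set, Remove, Clear } kind;
  std::string key;  // ignored for Clear
};

// Start from the keys the parent reported, then apply the buffered writes in
// order so the final key set matches what this process has already done.
std::set<std::string> ReplayKeys(std::set<std::string> knownKeys,
                                 const std::vector<ReplayOp>& buffered) {
  for (const ReplayOp& op : buffered) {
    switch (op.kind) {
      case ReplayOp::Set:
        knownKeys.insert(op.key);
        break;
      case ReplayOp::Remove:
        knownKeys.erase(op.key);
        break;
      case ReplayOp::Clear:
        knownKeys.clear();
        break;
    }
  }
  return knownKeys;
}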
@@ -679,10 +840,27 @@ nsresult LSSnapshot::Checkpoint() {
  MOZ_ASSERT(mInitialized);
  MOZ_ASSERT(!mSentFinish);

  if (!mWriteInfos.IsEmpty()) {
    MOZ_ALWAYS_TRUE(mActor->SendCheckpoint(mWriteInfos));
  if (mHasOtherProcessObservers) {
    MOZ_ASSERT(mWriteAndNotifyInfos);

    mWriteInfos.Clear();
    if (!mWriteAndNotifyInfos->IsEmpty()) {
      MOZ_ALWAYS_TRUE(mActor->SendCheckpointAndNotify(*mWriteAndNotifyInfos));

      mWriteAndNotifyInfos->Clear();
    }
  } else {
    MOZ_ASSERT(mWriteOptimizer);

    if (mWriteOptimizer->HasWrites()) {
      nsTArray<LSWriteInfo> writeInfos;
      mWriteOptimizer->Enumerate(writeInfos);

      MOZ_ASSERT(!writeInfos.IsEmpty());

      MOZ_ALWAYS_TRUE(mActor->SendCheckpoint(writeInfos));

      mWriteOptimizer->Reset();
    }
  }

  return NS_OK;
@@ -16,7 +16,8 @@ class LSDatabase;
class LSNotifyInfo;
class LSSnapshotChild;
class LSSnapshotInitInfo;
class LSWriteInfo;
class LSWriteAndNotifyInfo;
class SnapshotWriteOptimizer;

class LSSnapshot final : public nsIRunnable {
 public:

@@ -79,7 +80,8 @@ class LSSnapshot final : public nsIRunnable {
  nsTHashtable<nsStringHashKey> mLoadedItems;
  nsTHashtable<nsStringHashKey> mUnknownItems;
  nsDataHashtable<nsStringHashKey, nsString> mValues;
  nsTArray<LSWriteInfo> mWriteInfos;
  nsAutoPtr<SnapshotWriteOptimizer> mWriteOptimizer;
  nsAutoPtr<nsTArray<LSWriteAndNotifyInfo>> mWriteAndNotifyInfos;

  uint32_t mInitLength;
  uint32_t mLength;

@@ -88,6 +90,7 @@ class LSSnapshot final : public nsIRunnable {
  LoadState mLoadState;

  bool mHasOtherProcessObservers;
  bool mExplicit;
  bool mHasPendingStableStateCallback;
  bool mHasPendingTimerCallback;
@@ -0,0 +1,115 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "LSWriteOptimizer.h"

namespace mozilla {
namespace dom {

class LSWriteOptimizerBase::WriteInfoComparator {
 public:
  bool Equals(const WriteInfo* a, const WriteInfo* b) const {
    return a && b ? a->SerialNumber() == b->SerialNumber()
                  : !a && !b ? true : false;
  }

  bool LessThan(const WriteInfo* a, const WriteInfo* b) const {
    return a && b ? a->SerialNumber() < b->SerialNumber() : b ? true : false;
  }
};

void LSWriteOptimizerBase::DeleteItem(const nsAString& aKey, int64_t aDelta) {
  AssertIsOnOwningThread();

  WriteInfo* existingWriteInfo;
  if (mWriteInfos.Get(aKey, &existingWriteInfo) &&
      existingWriteInfo->GetType() == WriteInfo::InsertItem) {
    mWriteInfos.Remove(aKey);
  } else {
    nsAutoPtr<WriteInfo> newWriteInfo(
        new DeleteItemInfo(NextSerialNumber(), aKey));
    mWriteInfos.Put(aKey, newWriteInfo.forget());
  }

  mTotalDelta += aDelta;
}

void LSWriteOptimizerBase::Truncate(int64_t aDelta) {
  AssertIsOnOwningThread();

  mWriteInfos.Clear();

  if (!mTruncateInfo) {
    mTruncateInfo = new TruncateInfo(NextSerialNumber());
  }

  mTotalDelta += aDelta;
}

void LSWriteOptimizerBase::GetSortedWriteInfos(
    nsTArray<WriteInfo*>& aWriteInfos) {
  AssertIsOnOwningThread();

  if (mTruncateInfo) {
    aWriteInfos.InsertElementSorted(mTruncateInfo, WriteInfoComparator());
  }

  for (auto iter = mWriteInfos.ConstIter(); !iter.Done(); iter.Next()) {
    WriteInfo* writeInfo = iter.Data();

    aWriteInfos.InsertElementSorted(writeInfo, WriteInfoComparator());
  }
}
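GetSortedWriteInfos exists because the hash table loses the order in which writes were issued; every record therefore carries a serial number, and a pending truncate record is merged into the same ordering. A hedged standalone equivalent using std::sort instead of nsTArray::InsertElementSorted with WriteInfoComparator (Record is an illustrative type):

#include <algorithm>
#include <cstdint>
#include <vector>

struct Record {
  uint64_t serial;  // assigned from a monotonically increasing counter
  // ... payload omitted ...
};

// Gather the per-key records plus the optional truncate record, then restore
// issue order by sorting on the serial number.
std::vector<Record> SortedWrites(const std::vector<Record>& perKeyRecords,
                                 const Record* truncateRecord) {
  std::vector<Record> out(perKeyRecords);
  if (truncateRecord) {
    out.push_back(*truncateRecord);
  }
  std::sort(out.begin(), out.end(),
            [](const Record& a, const Record& b) { return a.serial < b.serial; });
  return out;
}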
template <typename T, typename U>
void LSWriteOptimizer<T, U>::InsertItem(const nsAString& aKey, const T& aValue,
                                        int64_t aDelta) {
  AssertIsOnOwningThread();

  WriteInfo* existingWriteInfo;
  nsAutoPtr<WriteInfo> newWriteInfo;
  if (mWriteInfos.Get(aKey, &existingWriteInfo) &&
      existingWriteInfo->GetType() == WriteInfo::DeleteItem) {
    // We could just simply replace the deletion with ordinary update, but that
    // would preserve item's original position/index. Imagine a case when we
    // have only one existing key k1. Now let's create a new optimizer and
    // remove k1, add k2 and add k1 back. The final order should be k2, k1
    // (ordinary update would produce k1, k2). So we need to differentiate
    // between normal update and "optimized" update which resulted from a
    // deletion followed by an insertion. We use the UpdateWithMove flag for
    // this.

    newWriteInfo = new UpdateItemInfo(NextSerialNumber(), aKey, aValue,
                                      /* aUpdateWithMove */ true);
  } else {
    newWriteInfo = new InsertItemInfo(NextSerialNumber(), aKey, aValue);
  }
  mWriteInfos.Put(aKey, newWriteInfo.forget());

  mTotalDelta += aDelta;
}
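The k1/k2 example in the comment above is easier to see against the ordered key list localStorage exposes for enumeration. A small sketch of how the receiving side would apply a coalesced write list, with a plain std::vector standing in for the datastore's key order (illustrative types only, not the Gecko structures):

#include <algorithm>
#include <string>
#include <vector>

struct Write {
  std::string key;
  bool updateWithMove;  // true when the update replaced a delete+insert pair
};

// Apply a coalesced write list to an ordered key list.
std::vector<std::string> Apply(std::vector<std::string> keys,
                               const std::vector<Write>& writes) {
  for (const Write& w : writes) {
    auto it = std::find(keys.begin(), keys.end(), w.key);
    if (it == keys.end()) {
      keys.push_back(w.key);  // genuinely new key: append
    } else if (w.updateWithMove) {
      keys.erase(it);         // behave like remove + re-add
      keys.push_back(w.key);
    }                         // plain update: position unchanged
  }
  return keys;
}

// Starting from {"k1"}, the buffered sequence "remove k1, add k2, add k1"
// coalesces to {set k2, set k1 with UpdateWithMove}. Applying it yields
// {"k2", "k1"}; without the flag the result would wrongly stay {"k1", "k2"}.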
template <typename T, typename U>
void LSWriteOptimizer<T, U>::UpdateItem(const nsAString& aKey, const T& aValue,
                                        int64_t aDelta) {
  AssertIsOnOwningThread();

  WriteInfo* existingWriteInfo;
  nsAutoPtr<WriteInfo> newWriteInfo;
  if (mWriteInfos.Get(aKey, &existingWriteInfo) &&
      existingWriteInfo->GetType() == WriteInfo::InsertItem) {
    newWriteInfo = new InsertItemInfo(NextSerialNumber(), aKey, aValue);
  } else {
    newWriteInfo = new UpdateItemInfo(NextSerialNumber(), aKey, aValue,
                                      /* aUpdateWithMove */ false);
  }
  mWriteInfos.Put(aKey, newWriteInfo.forget());

  mTotalDelta += aDelta;
}

}  // namespace dom
}  // namespace mozilla
@@ -0,0 +1,187 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef mozilla_dom_localstorage_LSWriteOptimizer_h
#define mozilla_dom_localstorage_LSWriteOptimizer_h

#include "mozilla/CheckedInt.h"

namespace mozilla {
namespace dom {

/**
 * Base class for coalescing manipulation queue.
 */
class LSWriteOptimizerBase {
  class WriteInfoComparator;

 protected:
  class WriteInfo;
  class DeleteItemInfo;
  class TruncateInfo;

  nsAutoPtr<WriteInfo> mTruncateInfo;
  nsClassHashtable<nsStringHashKey, WriteInfo> mWriteInfos;
  CheckedUint64 mLastSerialNumber;
  int64_t mTotalDelta;

  NS_DECL_OWNINGTHREAD

 public:
  LSWriteOptimizerBase() : mLastSerialNumber(0), mTotalDelta(0) {}

  LSWriteOptimizerBase(LSWriteOptimizerBase&& aWriteOptimizer)
      : mTruncateInfo(std::move(aWriteOptimizer.mTruncateInfo)) {
    AssertIsOnOwningThread();
    MOZ_ASSERT(&aWriteOptimizer != this);

    mWriteInfos.SwapElements(aWriteOptimizer.mWriteInfos);
    mTotalDelta = aWriteOptimizer.mTotalDelta;
    aWriteOptimizer.mTotalDelta = 0;
  }

  void AssertIsOnOwningThread() const {
    NS_ASSERT_OWNINGTHREAD(LSWriteOptimizerBase);
  }

  void DeleteItem(const nsAString& aKey, int64_t aDelta = 0);

  void Truncate(int64_t aDelta = 0);

  bool HasWrites() const {
    AssertIsOnOwningThread();

    return mTruncateInfo || !mWriteInfos.IsEmpty();
  }

  void Reset() {
    AssertIsOnOwningThread();

    mTruncateInfo = nullptr;
    mWriteInfos.Clear();
  }

 protected:
  uint64_t NextSerialNumber() {
    AssertIsOnOwningThread();

    mLastSerialNumber++;

    MOZ_ASSERT(mLastSerialNumber.isValid());

    return mLastSerialNumber.value();
  }

  /**
   * This method can be used by derived classes to get a sorted list of write
   * infos. Write infos are sorted by the serial number.
   */
  void GetSortedWriteInfos(nsTArray<WriteInfo*>& aWriteInfos);
};

/**
 * Base class for specific mutations.
 */
class LSWriteOptimizerBase::WriteInfo {
  uint64_t mSerialNumber;

 public:
  WriteInfo(uint64_t aSerialNumber) : mSerialNumber(aSerialNumber) {}

  virtual ~WriteInfo() = default;

  uint64_t SerialNumber() const { return mSerialNumber; }

  enum Type { InsertItem = 0, UpdateItem, DeleteItem, Truncate };

  virtual Type GetType() = 0;
};

class LSWriteOptimizerBase::DeleteItemInfo final : public WriteInfo {
  nsString mKey;

 public:
  DeleteItemInfo(uint64_t aSerialNumber, const nsAString& aKey)
      : WriteInfo(aSerialNumber), mKey(aKey) {}

  const nsAString& GetKey() const { return mKey; }

 private:
  Type GetType() override { return DeleteItem; }
};

/**
 * Truncate mutation.
 */
class LSWriteOptimizerBase::TruncateInfo final : public WriteInfo {
 public:
  explicit TruncateInfo(uint64_t aSerialNumber) : WriteInfo(aSerialNumber) {}

 private:
  Type GetType() override { return Truncate; }
};

/**
 * Coalescing manipulation queue.
 */
template <typename T, typename U = T>
class LSWriteOptimizer;

template <typename T, typename U>
class LSWriteOptimizer : public LSWriteOptimizerBase {
 protected:
  class InsertItemInfo;
  class UpdateItemInfo;

 public:
  void InsertItem(const nsAString& aKey, const T& aValue, int64_t aDelta = 0);

  void UpdateItem(const nsAString& aKey, const T& aValue, int64_t aDelta = 0);
};

/**
 * Insert mutation (the key did not previously exist).
 */
template <typename T, typename U>
class LSWriteOptimizer<T, U>::InsertItemInfo : public WriteInfo {
  nsString mKey;
  U mValue;

 public:
  InsertItemInfo(uint64_t aSerialNumber, const nsAString& aKey, const T& aValue)
      : WriteInfo(aSerialNumber), mKey(aKey), mValue(aValue) {}

  const nsAString& GetKey() const { return mKey; }

  const T& GetValue() const { return mValue; }

 private:
  WriteInfo::Type GetType() override { return InsertItem; }
};

/**
 * Update mutation (the key already existed).
 */
template <typename T, typename U>
class LSWriteOptimizer<T, U>::UpdateItemInfo final : public InsertItemInfo {
  bool mUpdateWithMove;

 public:
  UpdateItemInfo(uint64_t aSerialNumber, const nsAString& aKey, const T& aValue,
                 bool aUpdateWithMove)
      : InsertItemInfo(aSerialNumber, aKey, aValue),
        mUpdateWithMove(aUpdateWithMove) {}

  bool UpdateWithMove() const { return mUpdateWithMove; }

 private:
  WriteInfo::Type GetType() override { return WriteInfo::UpdateItem; }
};

}  // namespace dom
}  // namespace mozilla

#endif  // mozilla_dom_localstorage_LSWriteOptimizer_h
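One design detail worth noting in this header: the template takes two types, the parameter type T that InsertItem/UpdateItem accept and the storage type U the queue owns (the snapshot code above instantiates it as LSWriteOptimizer<nsAString, nsString>). A hedged generic illustration of that accept-abstract/store-concrete split, with std::string_view and std::string playing the roles of nsAString and nsString (all names are illustrative):

#include <string>
#include <string_view>
#include <unordered_map>

// T is what callers hand in, U is what the container keeps alive; U must be
// constructible from T, just as nsString is constructible from nsAString.
template <typename T, typename U = T>
class ValueBuffer {
  std::unordered_map<std::string, U> mValues;

 public:
  void Insert(const std::string& key, const T& value) {
    mValues.insert_or_assign(key, U(value));  // copy into owned storage
  }

  const U& Get(const std::string& key) const { return mValues.at(key); }
};

// Usage: accept cheap views, own real strings.
// ValueBuffer<std::string_view, std::string> buffer;
// buffer.Insert("key", std::string_view("value"));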
@@ -16,7 +16,7 @@ namespace mozilla {
namespace dom {

/**
 * Initial LSSnapshot state as produced by Datastore::GetSnapshotInitInfo. See
 * Initial LSSnapshot state as produced by Datastore::GetSnapshotLoadInfo. See
 * `LSSnapshot::LoadState` for more details about the possible states and a
 * high level overview.
 */

@@ -59,6 +59,11 @@ struct LSSnapshotInitInfo
  int64_t peakUsage;
  // See `LSSnapshot::LoadState` in `LSSnapshot.h`
  LoadState loadState;
  /**
   * Boolean indicating whether there were cross-process observers registered
   * for this origin at the time the snapshot was created.
   */
  bool hasOtherProcessObservers;
};

/**
@@ -18,14 +18,12 @@ namespace dom {
struct LSSetItemInfo
{
  nsString key;
  LSValue oldValue;
  LSValue value;
};

struct LSRemoveItemInfo
{
  nsString key;
  LSValue oldValue;
};

struct LSClearInfo

@@ -42,6 +40,29 @@ union LSWriteInfo
  LSClearInfo;
};

struct LSSetItemAndNotifyInfo
{
  nsString key;
  LSValue oldValue;
  LSValue value;
};

struct LSRemoveItemAndNotifyInfo
{
  nsString key;
  LSValue oldValue;
};

/**
 * Union of LocalStorage mutation types.
 */
union LSWriteAndNotifyInfo
{
  LSSetItemAndNotifyInfo;
  LSRemoveItemAndNotifyInfo;
  LSClearInfo;
};

sync protocol PBackgroundLSSnapshot
{
  manager PBackgroundLSDatabase;

@@ -51,6 +72,8 @@ parent:
  async Checkpoint(LSWriteInfo[] writeInfos);

  async CheckpointAndNotify(LSWriteAndNotifyInfo[] writeAndNotifyInfos);

  async Finish();

  async Loaded();
@@ -35,6 +35,7 @@ EXPORTS.mozilla.dom += [
    'LSObserver.h',
    'LSSnapshot.h',
    'LSValue.h',
    'LSWriteOptimizer.h',
    'SnappyUtils.h',
]

@@ -48,6 +49,7 @@ UNIFIED_SOURCES += [
    'LSObserver.cpp',
    'LSSnapshot.cpp',
    'LSValue.cpp',
    'LSWriteOptimizer.cpp',
    'ReportInternalError.cpp',
    'SnappyUtils.cpp',
]
@@ -0,0 +1,70 @@
/**
 * Any copyright is dedicated to the Public Domain.
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

async function testSteps() {
  const url = "http://example.com";

  info("Setting pref");

  Services.prefs.setBoolPref("dom.storage.snapshot_reusing", false);

  const items = [
    { key: "key01", value: "value01" },
    { key: "key02", value: "value02" },
    { key: "key03", value: "value03" },
    { key: "key04", value: "value04" },
    { key: "key05", value: "value05" },
  ];

  info("Getting storage");

  let storage = getLocalStorage(getPrincipal(url));

  // 1st snapshot

  info("Adding data");

  for (let item of items) {
    storage.setItem(item.key, item.value);
  }

  info("Returning to event loop");

  await returnToEventLoop();

  // 2nd snapshot

  // Remove first two items, add some new items and add the two items back.

  storage.removeItem("key01");
  storage.removeItem("key02");

  storage.setItem("key06", "value06");
  storage.setItem("key07", "value07");
  storage.setItem("key08", "value08");

  storage.setItem("key01", "value01");
  storage.setItem("key02", "value02");

  info("Saving key order");

  let savedKeys = Object.keys(storage);

  info("Returning to event loop");

  await returnToEventLoop();

  // 3rd snapshot

  info("Verifying key order");

  let keys = Object.keys(storage);

  is(keys.length, savedKeys.length);

  for (let i = 0; i < keys.length; i++) {
    is(keys[i], savedKeys[i], "Correct key");
  }
}
@@ -11,15 +11,15 @@ async function testSteps() {
  Services.prefs.setBoolPref("dom.storage.snapshot_reusing", false);

  const items = [
    { key: "key1", value: "value1" },
    { key: "key2", value: "value2" },
    { key: "key3", value: "value3" },
    { key: "key4", value: "value4" },
    { key: "key5", value: "value5" },
    { key: "key6", value: "value6" },
    { key: "key7", value: "value7" },
    { key: "key8", value: "value8" },
    { key: "key9", value: "value9" },
    { key: "key01", value: "value01" },
    { key: "key02", value: "value02" },
    { key: "key03", value: "value03" },
    { key: "key04", value: "value04" },
    { key: "key05", value: "value05" },
    { key: "key06", value: "value06" },
    { key: "key07", value: "value07" },
    { key: "key08", value: "value08" },
    { key: "key09", value: "value09" },
    { key: "key10", value: "value10" },
  ];

@@ -194,14 +194,14 @@ async function testSteps() {
  // 3rd snapshot

  // Force key2 to load.
  storage.getItem("key2");
  storage.getItem("key02");

  // Fill out write infos a bit.
  storage.removeItem("key5");
  storage.setItem("key5", "value5");
  storage.removeItem("key5");
  storage.removeItem("key05");
  storage.setItem("key05", "value05");
  storage.removeItem("key05");
  storage.setItem("key11", "value11");
  storage.setItem("key5", "value5");
  storage.setItem("key05", "value05");

  items.push({ key: "key11", value: "value11" });
@@ -35,6 +35,7 @@ run-sequentially = this test depends on a file produced by test_databaseShadowin
[test_groupLimit.js]
[test_groupMismatch.js]
[test_migration.js]
[test_orderingAfterRemoveAdd.js]
[test_originInit.js]
[test_schema3upgrade.js]
[test_snapshotting.js]
@@ -714,12 +714,16 @@ static void qcms_transform_data_tetra_clut_template(const qcms_transform *transf
    }
}

static void qcms_transform_data_tetra_clut_rgb(const qcms_transform *transform, const unsigned char *src, unsigned char *dest, size_t length) {
    qcms_transform_data_tetra_clut_template<RGBA_R_INDEX, RGBA_G_INDEX, RGBA_B_INDEX>(transform, src, dest, length);
}

static void qcms_transform_data_tetra_clut_rgba(const qcms_transform *transform, const unsigned char *src, unsigned char *dest, size_t length) {
    qcms_transform_data_tetra_clut_template<RGBA_R_INDEX, RGBA_G_INDEX, RGBA_B_INDEX, RGBA_A_INDEX>(transform, src, dest, length);
}

static void qcms_transform_data_tetra_clut(const qcms_transform *transform, const unsigned char *src, unsigned char *dest, size_t length) {
    qcms_transform_data_tetra_clut_template<RGBA_R_INDEX, RGBA_G_INDEX, RGBA_B_INDEX>(transform, src, dest, length);
static void qcms_transform_data_tetra_clut_bgra(const qcms_transform *transform, const unsigned char *src, unsigned char *dest, size_t length) {
    qcms_transform_data_tetra_clut_template<BGRA_R_INDEX, BGRA_G_INDEX, BGRA_B_INDEX, BGRA_A_INDEX>(transform, src, dest, length);
}

template <size_t kRIndex, size_t kGIndex, size_t kBIndex, size_t kAIndex = NO_A_INDEX>
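The hunk above extends qcms's channel-index-template approach: a single tetrahedral-CLUT routine is instantiated with different R/G/B/A byte offsets so RGB, RGBA and now BGRA inputs share one implementation. A reduced sketch of the pattern, with a trivial per-pixel operation standing in for the real CLUT interpolation (the constant and helper names below are illustrative):

#include <cstddef>
#include <cstdint>

constexpr size_t NO_A = ~size_t(0);

// One body, parameterized on where each channel lives inside a pixel.
template <size_t kRIndex, size_t kGIndex, size_t kBIndex, size_t kAIndex = NO_A>
void InvertChannels(const uint8_t* src, uint8_t* dest, size_t pixels) {
  const size_t components = (kAIndex == NO_A) ? 3 : 4;
  for (size_t i = 0; i < pixels; i++, src += components, dest += components) {
    dest[kRIndex] = 255 - src[kRIndex];
    dest[kGIndex] = 255 - src[kGIndex];
    dest[kBIndex] = 255 - src[kBIndex];
    if (kAIndex != NO_A) {
      dest[kAIndex] = src[kAIndex];  // alpha passes through untouched
    }
  }
}

// Thin wrappers pick the layout, mirroring *_rgb / *_rgba / *_bgra above.
void InvertRGBA(const uint8_t* s, uint8_t* d, size_t n) { InvertChannels<0, 1, 2, 3>(s, d, n); }
void InvertBGRA(const uint8_t* s, uint8_t* d, size_t n) { InvertChannels<2, 1, 0, 3>(s, d, n); }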
@@ -1101,9 +1105,12 @@ qcms_transform* qcms_transform_precacheLUT_float(qcms_transform *transform, qcms
        transform->grid_size = samples;
        if (in_type == QCMS_DATA_RGBA_8) {
            transform->transform_fn = qcms_transform_data_tetra_clut_rgba;
        } else {
            transform->transform_fn = qcms_transform_data_tetra_clut;
        } else if (in_type == QCMS_DATA_BGRA_8) {
            transform->transform_fn = qcms_transform_data_tetra_clut_bgra;
        } else if (in_type == QCMS_DATA_RGB_8) {
            transform->transform_fn = qcms_transform_data_tetra_clut_rgb;
        }
        assert(transform->transform_fn);
    }
}

@@ -1162,7 +1169,7 @@ qcms_transform* qcms_transform_create(
    // This precache assumes RGB_SIGNATURE (fails on GRAY_SIGNATURE, for instance)
    if (qcms_supports_iccv4 &&
        (in_type == QCMS_DATA_RGB_8 || in_type == QCMS_DATA_RGBA_8) &&
        (in_type == QCMS_DATA_RGB_8 || in_type == QCMS_DATA_RGBA_8 || in_type == QCMS_DATA_BGRA_8) &&
        (in->A2B0 || out->B2A0 || in->mAB || out->mAB))
    {
        // Precache the transformation to a CLUT 33x33x33 in size.
@@ -2923,13 +2923,24 @@ void gfxPlatform::InitWebRenderConfig() {
    }
  }
#if defined(MOZ_WIDGET_GTK)
  else if (gfxConfig::IsEnabled(Feature::HW_COMPOSITING)) {
    // Hardware compositing should be disabled by default if we aren't using
    // WebRender. We had to check if it is enabled at all, because it may
    // already have been forced disabled (e.g. safe mode, headless). It may
    // still be forced on by the user, and if so, this should have no effect.
    gfxConfig::Disable(Feature::HW_COMPOSITING, FeatureStatus::Blocked,
                       "Acceleration blocked by platform");
  else {
    if (gfxConfig::IsEnabled(Feature::HW_COMPOSITING)) {
      // Hardware compositing should be disabled by default if we aren't using
      // WebRender. We had to check if it is enabled at all, because it may
      // already have been forced disabled (e.g. safe mode, headless). It may
      // still be forced on by the user, and if so, this should have no effect.
      gfxConfig::Disable(Feature::HW_COMPOSITING, FeatureStatus::Blocked,
                         "Acceleration blocked by platform");
    }

    if (!gfxConfig::IsEnabled(Feature::HW_COMPOSITING) &&
        gfxConfig::IsEnabled(Feature::GPU_PROCESS) &&
        !StaticPrefs::GPUProcessAllowSoftware()) {
      // We have neither WebRender nor OpenGL, we don't allow the GPU process
      // for basic compositor, and it wasn't disabled already.
      gfxConfig::Disable(Feature::GPU_PROCESS, FeatureStatus::Unavailable,
                         "Hardware compositing is unavailable.");
    }
  }
#endif
@@ -434,9 +434,9 @@ namespace JS {
  D(DISABLE_GENERATIONAL_GC, 24) \
  D(FINISH_GC, 25) \
  D(PREPARE_FOR_TRACING, 26) \
  D(INCREMENTAL_ALLOC_TRIGGER, 27) \
  \
  /* These are reserved for future use. */ \
  D(RESERVED3, 27) \
  D(RESERVED4, 28) \
  D(RESERVED5, 29) \
  D(RESERVED6, 30) \
@@ -758,8 +758,7 @@ ModuleObject* ModuleObject::create(JSContext* cx) {
  self->initReservedSlot(ImportBindingsSlot, PrivateValue(bindings));

  FunctionDeclarationVector* funDecls =
      cx->new_<FunctionDeclarationVector>(cx->zone());
  FunctionDeclarationVector* funDecls = cx->new_<FunctionDeclarationVector>();
  if (!funDecls) {
    return nullptr;
  }
@@ -12,7 +12,7 @@
#include "jsapi.h"

#include "builtin/SelfHostingDefines.h"
#include "gc/Zone.h"
#include "gc/ZoneAllocator.h"
#include "js/GCVector.h"
#include "js/Id.h"
#include "js/UniquePtr.h"

@@ -215,8 +215,10 @@ struct FunctionDeclaration {
  HeapPtr<JSFunction*> fun;
};

// A vector of function bindings to be instantiated. This can be created in a
// helper thread zone and so can't use ZoneAllocPolicy.
using FunctionDeclarationVector =
    GCVector<FunctionDeclaration, 0, ZoneAllocPolicy>;
    GCVector<FunctionDeclaration, 0, SystemAllocPolicy>;

// Possible values for ModuleStatus are defined in SelfHostingDefines.h.
using ModuleStatus = int32_t;
@@ -602,7 +602,7 @@ Arena* GCRuntime::allocateArena(Chunk* chunk, Zone* zone, AllocKind thingKind,
  // Trigger an incremental slice if needed.
  if (checkThresholds != ShouldCheckThresholds::DontCheckThresholds) {
    maybeAllocTriggerZoneGC(zone);
    maybeAllocTriggerZoneGC(zone, ArenaSize);
  }

  return arena;

@@ -10,6 +10,7 @@
#include "gc/ArenaList.h"

#include "gc/Heap.h"
#include "gc/Zone.h"

void js::gc::SortedArenaListSegment::append(Arena* arena) {
  MOZ_ASSERT(arena);

@@ -8,7 +8,6 @@
#include "builtin/TypedObject.h"
#include "gc/Policy.h"
#include "gc/Zone.h"
#include "js/HashTable.h"
#include "js/Value.h"
#include "vm/BigIntType.h"  // JS::BigInt

@@ -9,7 +9,7 @@
#include "gc/FreeOp.h"

#include "gc/Zone.h"
#include "gc/ZoneAllocator.h"

namespace js {
@@ -3414,7 +3414,11 @@ bool GCRuntime::triggerGC(JS::GCReason reason) {
  return true;
}

void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone) {
void js::gc::MaybeAllocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc) {
  rt->gc.maybeAllocTriggerZoneGC(Zone::from(zoneAlloc));
}

void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes) {
  if (!CurrentThreadCanAccessRuntime(rt)) {
    // Zones in use by a helper thread can't be collected.
    MOZ_ASSERT(zone->usedByHelperThread() || zone->isAtomsZone());

@@ -3423,7 +3427,7 @@ void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone) {
  MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());

  size_t usedBytes = zone->totalBytes();
  size_t usedBytes = zone->totalBytes();  // This already includes |nbytes|.
  size_t thresholdBytes = zone->threshold.gcTriggerBytes();

  if (usedBytes >= thresholdBytes) {

@@ -3442,11 +3446,12 @@ void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone) {
  size_t igcThresholdBytes = thresholdBytes * zoneGCThresholdFactor;

  if (usedBytes >= igcThresholdBytes) {
    // Reduce the delay to the start of the next incremental slice.
    if (zone->gcDelayBytes < ArenaSize) {
    // During an incremental GC, reduce the delay to the start of the next
    // incremental slice.
    if (zone->gcDelayBytes < nbytes) {
      zone->gcDelayBytes = 0;
    } else {
      zone->gcDelayBytes -= ArenaSize;
      zone->gcDelayBytes -= nbytes;
    }

    if (!zone->gcDelayBytes) {

@@ -3454,7 +3459,7 @@ void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone) {
      // to try to avoid performing non-incremental GCs on zones
      // which allocate a lot of data, even when incremental slices
      // can't be triggered via scheduling in the event loop.
      triggerZoneGC(zone, JS::GCReason::ALLOC_TRIGGER, usedBytes,
      triggerZoneGC(zone, JS::GCReason::INCREMENTAL_ALLOC_TRIGGER, usedBytes,
                    igcThresholdBytes);

      // Delay the next slice until a certain amount of allocation
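The change above threads the allocation size through maybeAllocTriggerZoneGC so the just-allocated bytes, rather than a fixed ArenaSize, are counted against the per-zone delay before the next incremental slice. A hedged sketch of that accounting with plain values instead of Zone/GCRuntime state (the incremental factor and delay constant are illustrative, not the real tunables):

#include <cstddef>

struct SliceDecision {
  bool startZoneGC;
  bool startIncrementalSlice;
};

SliceDecision CheckAllocTrigger(size_t usedBytes, size_t thresholdBytes,
                                double incrementalFactor,  // e.g. 0.9
                                size_t& gcDelayBytes, size_t nbytes) {
  if (usedBytes >= thresholdBytes) {
    return {true, false};  // over the hard limit: trigger a zone GC
  }
  const size_t igcThreshold = size_t(thresholdBytes * incrementalFactor);
  if (usedBytes >= igcThreshold) {
    // Burn the just-allocated bytes off the delay budget; when it reaches
    // zero, request another incremental slice and reset the budget.
    gcDelayBytes = (gcDelayBytes < nbytes) ? 0 : gcDelayBytes - nbytes;
    if (gcDelayBytes == 0) {
      gcDelayBytes = 1024 * 1024;  // illustrative delay until the next slice
      return {false, true};
    }
  }
  return {false, false};
}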
@@ -6883,6 +6888,7 @@ void GCRuntime::finishCollection() {
  for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
    if (zone->isCollecting()) {
      zone->changeGCState(Zone::Finished, Zone::NoGC);
      zone->gcDelayBytes = 0;
      zone->notifyObservingDebuggers();
    }

@@ -6967,6 +6973,7 @@ GCRuntime::IncrementalResult GCRuntime::resetIncrementalGC(
      for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
        zone->setNeedsIncrementalBarrier(false);
        zone->changeGCState(Zone::MarkBlackOnly, Zone::NoGC);
        zone->gcDelayBytes = 0;
        zone->arenas.unmarkPreMarkedFreeCells();
      }

@@ -7494,7 +7501,7 @@ static void ScheduleZones(GCRuntime* gc) {
  }
}

static void UnScheduleZones(GCRuntime* gc) {
static void UnscheduleZones(GCRuntime* gc) {
  for (ZonesIter zone(gc->rt, WithAtoms); !zone.done(); zone.next()) {
    zone->unscheduleGC();
  }

@@ -7838,7 +7845,7 @@ void GCRuntime::collect(bool nonincrementalByAPI, SliceBudget budget,
#endif
  stats().writeLogMessage("GC ending in state %s", StateName(incrementalState));

  UnScheduleZones(this);
  UnscheduleZones(this);
}

js::AutoEnqueuePendingParseTasksAfterGC::
@@ -31,6 +31,7 @@ class AutoLockGC;
class AutoLockGCBgAlloc;
class AutoLockHelperThreadState;
class VerifyPreTracer;
class ZoneAllocator;

namespace gc {

@@ -257,7 +258,9 @@ class GCRuntime {
  uint32_t getParameter(JSGCParamKey key, const AutoLockGC& lock);

  MOZ_MUST_USE bool triggerGC(JS::GCReason reason);
  void maybeAllocTriggerZoneGC(Zone* zone);
  // Check whether to trigger a zone GC. During an incremental GC, optionally
  // count |nbytes| towards the threshold for performing the next slice.
  void maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes = 0);
  // The return value indicates if we were able to do the GC.
  bool triggerZoneGC(Zone* zone, JS::GCReason reason, size_t usedBytes,
                     size_t thresholdBytes);
@@ -310,9 +310,15 @@
#include "mozilla/Atomics.h"
#include "mozilla/DebugOnly.h"

#include "gc/GCEnum.h"
#include "js/HashTable.h"
#include "threading/ProtectedData.h"

namespace js {

class AutoLockGC;
class ZoneAllocPolicy;

namespace gc {

struct Cell;

@@ -645,6 +651,8 @@ class ZoneHeapThreshold {
                             const AutoLockGC& lock);
};

#ifdef DEBUG

// Counts memory associated with GC things in a zone.
//
// In debug builds, this records details of the cell the memory allocations is
@@ -652,51 +660,21 @@
// builds it's just a counter.
class MemoryTracker {
 public:
#ifdef DEBUG
  MemoryTracker();
  ~MemoryTracker();
  void fixupAfterMovingGC();
#endif

  void addMemory(Cell* cell, size_t nbytes, MemoryUse use) {
    MOZ_ASSERT(cell);
    MOZ_ASSERT(nbytes);
    mozilla::DebugOnly<size_t> initialBytes(bytes_);
    MOZ_ASSERT(initialBytes + nbytes > initialBytes);

    bytes_ += nbytes;

#ifdef DEBUG
    trackMemory(cell, nbytes, use);
#endif
  }
  void removeMemory(Cell* cell, size_t nbytes, MemoryUse use) {
    MOZ_ASSERT(cell);
    MOZ_ASSERT(nbytes);
    MOZ_ASSERT(bytes_ >= nbytes);

    bytes_ -= nbytes;

#ifdef DEBUG
    untrackMemory(cell, nbytes, use);
#endif
  }
  void swapMemory(Cell* a, Cell* b, MemoryUse use) {
#ifdef DEBUG
    swapTrackedMemory(a, b, use);
#endif
  }

  size_t bytes() const { return bytes_; }

  void adopt(MemoryTracker& other);

 private:
  mozilla::Atomic<size_t, mozilla::Relaxed,
                  mozilla::recordreplay::Behavior::DontPreserve>
      bytes_;
  void trackMemory(Cell* cell, size_t nbytes, MemoryUse use);
  void untrackMemory(Cell* cell, size_t nbytes, MemoryUse use);
  void swapMemory(Cell* a, Cell* b, MemoryUse use);
  void registerPolicy(ZoneAllocPolicy* policy);
  void unregisterPolicy(ZoneAllocPolicy* policy);
  void incPolicyMemory(ZoneAllocPolicy* policy, size_t nbytes);
  void decPolicyMemory(ZoneAllocPolicy* policy, size_t nbytes);

#ifdef DEBUG
 private:
  struct Key {
    Key(Cell* cell, MemoryUse use);
    Cell* cell() const;

@@ -720,18 +698,24 @@ class MemoryTracker {
    static void rekey(Key& k, const Key& newKey);
  };

  // Map containing the allocated size associated with (cell, use) pairs.
  using Map = HashMap<Key, size_t, Hasher, SystemAllocPolicy>;

  void trackMemory(Cell* cell, size_t nbytes, MemoryUse use);
  void untrackMemory(Cell* cell, size_t nbytes, MemoryUse use);
  void swapTrackedMemory(Cell* a, Cell* b, MemoryUse use);
  // Map containing the allocated size associated with each instance of a
  // container that uses ZoneAllocPolicy.
  using ZoneAllocPolicyMap =
      HashMap<ZoneAllocPolicy*, size_t, DefaultHasher<ZoneAllocPolicy*>,
              SystemAllocPolicy>;

  size_t getAndRemoveEntry(const Key& key, LockGuard<Mutex>& lock);

  Mutex mutex;
  Map map;
#endif
  ZoneAllocPolicyMap policyMap;
};

#endif  // DEBUG

}  // namespace gc
}  // namespace js
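MemoryTracker keeps a running byte total in all builds and, in debug builds, a per-(cell, use) map so every addMemory has a removeMemory with the same size. A reduced model of that bookkeeping (plain pointers and an int stand in for Cell* and MemoryUse; this is an illustration, not the Gecko class):

#include <cassert>
#include <cstddef>
#include <map>
#include <utility>

struct MiniMemoryTracker {
  size_t bytes = 0;
  std::map<std::pair<const void*, int>, size_t> sizes;

  void Add(const void* cell, size_t nbytes, int use) {
    bytes += nbytes;
    auto result = sizes.emplace(std::make_pair(cell, use), nbytes);
    assert(result.second && "association already present");
    (void)result;
  }

  void Remove(const void* cell, size_t nbytes, int use) {
    auto it = sizes.find(std::make_pair(cell, use));
    assert(it != sizes.end() && "association not found");
    assert(it->second == nbytes && "size does not match the Add call");
    sizes.erase(it);
    bytes -= nbytes;
  }
};

An unbalanced Add is exactly what the destructor above reports as a missing JS::RemoveAssociatedMemory call.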
@@ -9,6 +9,7 @@
#include "gc/WeakMap.h"

#include "gc/Zone.h"
#include "js/TraceKind.h"
#include "vm/JSContext.h"

@@ -12,13 +12,9 @@
#include "gc/Barrier.h"
#include "gc/DeletePolicy.h"
#include "gc/Tracer.h"
#include "gc/Zone.h"
#include "gc/ZoneAllocator.h"
#include "js/HashTable.h"

namespace JS {
class Zone;
}  // namespace JS

namespace js {

class GCMarker;
@@ -17,24 +17,6 @@ inline bool JS::Zone::requireGCTracer() const {
}
#endif

inline void JS::Zone::updateAllGCMallocCountersOnGCStart() {
  gcMallocCounter.updateOnGCStart();
  jitCodeCounter.updateOnGCStart();
}

inline void JS::Zone::updateAllGCMallocCountersOnGCEnd(
    const js::AutoLockGC& lock) {
  auto& gc = runtimeFromAnyThread()->gc;
  gcMallocCounter.updateOnGCEnd(gc.tunables, lock);
  jitCodeCounter.updateOnGCEnd(gc.tunables, lock);
}

inline js::gc::TriggerKind JS::Zone::shouldTriggerGCForTooMuchMalloc() {
  auto& gc = runtimeFromAnyThread()->gc;
  return std::max(gcMallocCounter.shouldTriggerGC(gc.tunables),
                  jitCodeCounter.shouldTriggerGC(gc.tunables));
}

/* static */ inline js::HashNumber JS::Zone::UniqueIdToHash(uint64_t uid) {
  return mozilla::HashGeneric(uid);
}
@@ -31,8 +31,46 @@ using namespace js::gc;
Zone* const Zone::NotOnList = reinterpret_cast<Zone*>(1);

ZoneAllocator::ZoneAllocator(JSRuntime* rt)
    : JS::shadow::Zone(rt, &rt->gc.marker), zoneSize(&rt->gc.heapSize) {
  AutoLockGC lock(rt);
  threshold.updateAfterGC(8192, GC_NORMAL, rt->gc.tunables,
                          rt->gc.schedulingState, lock);
  setGCMaxMallocBytes(rt->gc.tunables.maxMallocBytes(), lock);
  jitCodeCounter.setMax(jit::MaxCodeBytesPerProcess * 0.8, lock);
}

ZoneAllocator::~ZoneAllocator() {
  MOZ_ASSERT_IF(runtimeFromAnyThread()->gc.shutdownCollectedEverything(),
                gcMallocBytes == 0);
}

void ZoneAllocator::fixupAfterMovingGC() {
#ifdef DEBUG
  gcMallocTracker.fixupAfterMovingGC();
#endif
}

void js::ZoneAllocator::updateAllGCMallocCountersOnGCStart() {
  gcMallocCounter.updateOnGCStart();
  jitCodeCounter.updateOnGCStart();
}

void js::ZoneAllocator::updateAllGCMallocCountersOnGCEnd(
    const js::AutoLockGC& lock) {
  auto& gc = runtimeFromAnyThread()->gc;
  gcMallocCounter.updateOnGCEnd(gc.tunables, lock);
  jitCodeCounter.updateOnGCEnd(gc.tunables, lock);
}

js::gc::TriggerKind js::ZoneAllocator::shouldTriggerGCForTooMuchMalloc() {
  auto& gc = runtimeFromAnyThread()->gc;
  return std::max(gcMallocCounter.shouldTriggerGC(gc.tunables),
                  jitCodeCounter.shouldTriggerGC(gc.tunables));
}

JS::Zone::Zone(JSRuntime* rt)
    : JS::shadow::Zone(rt, &rt->gc.marker),
    : ZoneAllocator(rt),
      // Note: don't use |this| before initializing helperThreadUse_!
      // ProtectedData checks in CheckZone::check may read this field.
      helperThreadUse_(HelperThreadUse::None),

@@ -57,9 +95,6 @@ JS::Zone::Zone(JSRuntime* rt)
      functionToStringCache_(this),
      keepAtomsCount(this, 0),
      purgeAtomsDeferred(this, 0),
      zoneSize(&rt->gc.heapSize),
      threshold(),
      gcDelayBytes(0),
      tenuredStrings(this, 0),
      allocNurseryStrings(this, true),
      propertyTree_(this, this),

@@ -80,12 +115,6 @@ JS::Zone::Zone(JSRuntime* rt)
  /* Ensure that there are no vtables to mess us up here. */
  MOZ_ASSERT(reinterpret_cast<JS::shadow::Zone*>(this) ==
             static_cast<JS::shadow::Zone*>(this));

  AutoLockGC lock(rt);
  threshold.updateAfterGC(8192, GC_NORMAL, rt->gc.tunables,
                          rt->gc.schedulingState, lock);
  setGCMaxMallocBytes(rt->gc.tunables.maxMallocBytes(), lock);
  jitCodeCounter.setMax(jit::MaxCodeBytesPerProcess * 0.8, lock);
}

Zone::~Zone() {

@@ -467,9 +496,7 @@ void Zone::clearTables() {
}

void Zone::fixupAfterMovingGC() {
#ifdef DEBUG
  gcMallocSize.fixupAfterMovingGC();
#endif
  ZoneAllocator::fixupAfterMovingGC();
  fixupInitialShapeTable();
}

@@ -555,8 +582,9 @@ void Zone::traceAtomCache(JSTracer* trc) {
  }
}

void* Zone::onOutOfMemory(js::AllocFunction allocFunc, arena_id_t arena,
                          size_t nbytes, void* reallocPtr) {
void* ZoneAllocator::onOutOfMemory(js::AllocFunction allocFunc,
                                   arena_id_t arena, size_t nbytes,
                                   void* reallocPtr) {
  if (!js::CurrentThreadCanAccessRuntime(runtime_)) {
    return nullptr;
  }
@@ -564,22 +592,26 @@ void* Zone::onOutOfMemory(js::AllocFunction allocFunc, arena_id_t arena,
                                    reallocPtr);
}

void Zone::reportAllocationOverflow() { js::ReportAllocationOverflow(nullptr); }
void ZoneAllocator::reportAllocationOverflow() const {
  js::ReportAllocationOverflow(nullptr);
}

void JS::Zone::maybeTriggerGCForTooMuchMalloc(js::gc::MemoryCounter& counter,
                                              TriggerKind trigger) {
void ZoneAllocator::maybeTriggerGCForTooMuchMalloc(
    js::gc::MemoryCounter& counter, TriggerKind trigger) {
  JSRuntime* rt = runtimeFromAnyThread();

  if (!js::CurrentThreadCanAccessRuntime(rt)) {
    return;
  }

  bool wouldInterruptGC = rt->gc.isIncrementalGCInProgress() && !isCollecting();
  auto zone = JS::Zone::from(this);
  bool wouldInterruptGC =
      rt->gc.isIncrementalGCInProgress() && !zone->isCollecting();
  if (wouldInterruptGC && !counter.shouldResetIncrementalGC(rt->gc.tunables)) {
    return;
  }

  if (!rt->gc.triggerZoneGC(this, JS::GCReason::TOO_MUCH_MALLOC,
  if (!rt->gc.triggerZoneGC(zone, JS::GCReason::TOO_MUCH_MALLOC,
                            counter.bytes(), counter.maxBytes())) {
    return;
  }

@@ -587,23 +619,30 @@ void JS::Zone::maybeTriggerGCForTooMuchMalloc(js::gc::MemoryCounter& counter,
  counter.recordTrigger(trigger);
}

void MemoryTracker::adopt(MemoryTracker& other) {
  bytes_ += other.bytes_;
  other.bytes_ = 0;

#ifdef DEBUG

void MemoryTracker::adopt(MemoryTracker& other) {
  LockGuard<Mutex> lock(mutex);

  AutoEnterOOMUnsafeRegion oomUnsafe;

  for (auto r = other.map.all(); !r.empty(); r.popFront()) {
    if (!map.put(r.front().key(), r.front().value())) {
      oomUnsafe.crash("MemoryTracker::adopt");
    }
  }
  other.map.clear();
#endif
}

#ifdef DEBUG
  // There may still be ZoneAllocPolicies associated with the old zone since
  // some are not destroyed until the zone itself dies. Instead check there is
  // no memory associated with them and clear their zone pointer in debug builds
  // to catch further memory association.
  for (auto r = other.policyMap.all(); !r.empty(); r.popFront()) {
    MOZ_ASSERT(r.front().value() == 0);
    r.front().key()->zone_ = nullptr;
  }
  other.policyMap.clear();
}

static const char* MemoryUseName(MemoryUse use) {
  switch (use) {
@@ -625,18 +664,26 @@ MemoryTracker::~MemoryTracker() {
    return;
  }

  if (map.empty()) {
    MOZ_ASSERT(bytes() == 0);
    return;
  bool ok = true;

  if (!map.empty()) {
    ok = false;
    fprintf(stderr, "Missing calls to JS::RemoveAssociatedMemory:\n");
    for (auto r = map.all(); !r.empty(); r.popFront()) {
      fprintf(stderr, "  %p 0x%zx %s\n", r.front().key().cell(),
              r.front().value(), MemoryUseName(r.front().key().use()));
    }
  }

  fprintf(stderr, "Missing calls to JS::RemoveAssociatedMemory:\n");
  for (auto r = map.all(); !r.empty(); r.popFront()) {
    fprintf(stderr, "  %p 0x%zx %s\n", r.front().key().cell(),
            r.front().value(), MemoryUseName(r.front().key().use()));
  if (!policyMap.empty()) {
    ok = false;
    fprintf(stderr, "Missing calls to Zone::decPolicyMemory:\n");
    for (auto r = policyMap.all(); !r.empty(); r.popFront()) {
      fprintf(stderr, "  %p 0x%zx\n", r.front().key(), r.front().value());
    }
  }

  MOZ_CRASH();
  MOZ_ASSERT(ok);
}

void MemoryTracker::trackMemory(Cell* cell, size_t nbytes, MemoryUse use) {

@@ -665,8 +712,8 @@ void MemoryTracker::untrackMemory(Cell* cell, size_t nbytes, MemoryUse use) {
  Key key{cell, use};
  auto ptr = map.lookup(key);
  if (!ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("Association not found: %p 0x%x %s", cell,
                            unsigned(nbytes), MemoryUseName(use));
    MOZ_CRASH_UNSAFE_PRINTF("Association not found: %p 0x%zx %s", cell, nbytes,
                            MemoryUseName(use));
  }
  if (ptr->value() != nbytes) {
    MOZ_CRASH_UNSAFE_PRINTF(

@@ -677,7 +724,7 @@ void MemoryTracker::untrackMemory(Cell* cell, size_t nbytes, MemoryUse use) {
  map.remove(ptr);
}

void MemoryTracker::swapTrackedMemory(Cell* a, Cell* b, MemoryUse use) {
void MemoryTracker::swapMemory(Cell* a, Cell* b, MemoryUse use) {
  MOZ_ASSERT(a->isTenured());
  MOZ_ASSERT(b->isTenured());

@@ -691,13 +738,13 @@ void MemoryTracker::swapTrackedMemory(Cell* a, Cell* b, MemoryUse use) {
  AutoEnterOOMUnsafeRegion oomUnsafe;

  if ((sa && !map.put(kb, sa)) ||
      (sb && !map.put(ka, sb))) {
  if ((sa && !map.put(kb, sa)) || (sb && !map.put(ka, sb))) {
    oomUnsafe.crash("MemoryTracker::swapTrackedMemory");
  }
}

size_t MemoryTracker::getAndRemoveEntry(const Key& key, LockGuard<Mutex>& lock) {
size_t MemoryTracker::getAndRemoveEntry(const Key& key,
                                        LockGuard<Mutex>& lock) {
  auto ptr = map.lookup(key);
  if (!ptr) {
    return 0;
@@ -708,6 +755,66 @@ size_t MemoryTracker::getAndRemoveEntry(const Key& key, LockGuard<Mutex>& lock)
  return size;
}

void MemoryTracker::registerPolicy(ZoneAllocPolicy* policy) {
  LockGuard<Mutex> lock(mutex);

  auto ptr = policyMap.lookupForAdd(policy);
  if (ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("ZoneAllocPolicy %p already registered", policy);
  }

  AutoEnterOOMUnsafeRegion oomUnsafe;
  if (!policyMap.add(ptr, policy, 0)) {
    oomUnsafe.crash("MemoryTracker::incTrackedPolicyMemory");
  }
}

void MemoryTracker::unregisterPolicy(ZoneAllocPolicy* policy) {
  LockGuard<Mutex> lock(mutex);

  auto ptr = policyMap.lookup(policy);
  if (!ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("ZoneAllocPolicy %p not found", policy);
  }
  if (ptr->value() != 0) {
    MOZ_CRASH_UNSAFE_PRINTF(
        "ZoneAllocPolicy %p still has 0x%zx bytes associated", policy,
        ptr->value());
  }

  policyMap.remove(ptr);
}

void MemoryTracker::incPolicyMemory(ZoneAllocPolicy* policy, size_t nbytes) {
  LockGuard<Mutex> lock(mutex);

  auto ptr = policyMap.lookup(policy);
  if (!ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("ZoneAllocPolicy %p not found", policy);
  }

  ptr->value() += nbytes;
}

void MemoryTracker::decPolicyMemory(ZoneAllocPolicy* policy, size_t nbytes) {
  LockGuard<Mutex> lock(mutex);

  auto ptr = policyMap.lookup(policy);
  if (!ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("ZoneAllocPolicy %p not found", policy);
  }

  size_t& value = ptr->value();
  if (value < nbytes) {
    MOZ_CRASH_UNSAFE_PRINTF(
        "ZoneAllocPolicy %p is too small: "
        "expected at least 0x%zx but got 0x%zx bytes",
        policy, nbytes, value);
  }

  value -= nbytes;
}

void MemoryTracker::fixupAfterMovingGC() {
  // Update the table after we move GC things. We don't use MovableCellHasher
  // because that would create a difference between debug and release builds.
js/src/gc/Zone.h
@ -12,6 +12,7 @@
#include "mozilla/SegmentedVector.h"

#include "gc/FindSCCs.h"
#include "gc/ZoneAllocator.h"
#include "js/GCHashTable.h"
#include "vm/MallocProvider.h"
#include "vm/Runtime.h"

@ -136,15 +137,17 @@ namespace JS {
//
// We always guarantee that a zone has at least one live compartment by refusing
// to delete the last compartment in a live zone.
class Zone : public JS::shadow::Zone,
             public js::gc::GraphNodeBase<JS::Zone>,
             public js::MallocProvider<JS::Zone> {
class Zone : public js::ZoneAllocator, public js::gc::GraphNodeBase<JS::Zone> {
 public:
  explicit Zone(JSRuntime* rt);
  ~Zone();
  MOZ_MUST_USE bool init(bool isSystem);
  void destroy(js::FreeOp* fop);

  static JS::Zone* from(ZoneAllocator* zoneAlloc) {
    return static_cast<Zone*>(zoneAlloc);
  }

 private:
  enum class HelperThreadUse : uint32_t { None, Pending, Active };
  mozilla::Atomic<HelperThreadUse, mozilla::SequentiallyConsistent,

@ -222,11 +225,6 @@ class Zone : public JS::shadow::Zone,
                                 std::forward<Args>(args)...);
  }

  MOZ_MUST_USE void* onOutOfMemory(js::AllocFunction allocFunc,
                                   arena_id_t arena, size_t nbytes,
                                   void* reallocPtr = nullptr);
  void reportAllocationOverflow();

  void beginSweepTypes();

  bool hasMarkedRealms();

@ -444,35 +442,6 @@ class Zone : public JS::shadow::Zone,
 private:
  js::ZoneData<JS::WeakCache<TypeDescrObjectSet>> typeDescrObjects_;

  // Malloc counter to measure memory pressure for GC scheduling. This counter
  // is used for allocations where the size of the allocation is not known on
  // free. Currently this is used for all internal malloc allocations.
  js::gc::MemoryCounter gcMallocCounter;

  // Malloc counter used for allocations where size information is
  // available. Used for some internal and all tracked external allocations.
  js::gc::MemoryTracker gcMallocSize;

  // Counter of JIT code executable memory for GC scheduling. Also imprecise,
  // since wasm can generate code that outlives a zone.
  js::gc::MemoryCounter jitCodeCounter;

  void updateMemoryCounter(js::gc::MemoryCounter& counter, size_t nbytes) {
    JSRuntime* rt = runtimeFromAnyThread();

    counter.update(nbytes);
    auto trigger = counter.shouldTriggerGC(rt->gc.tunables);
    if (MOZ_LIKELY(trigger == js::gc::NoTrigger) ||
        trigger <= counter.triggered()) {
      return;
    }

    maybeTriggerGCForTooMuchMalloc(counter, trigger);
  }

  void maybeTriggerGCForTooMuchMalloc(js::gc::MemoryCounter& counter,
                                      js::gc::TriggerKind trigger);

  js::MainThreadData<js::UniquePtr<js::RegExpZone>> regExps_;

 public:

@ -484,42 +453,6 @@ class Zone : public JS::shadow::Zone,

  bool addTypeDescrObject(JSContext* cx, HandleObject obj);

  void setGCMaxMallocBytes(size_t value, const js::AutoLockGC& lock) {
    gcMallocCounter.setMax(value, lock);
  }
  void updateMallocCounter(size_t nbytes) {
    updateMemoryCounter(gcMallocCounter, nbytes);
  }
  void adoptMallocBytes(Zone* other) {
    gcMallocCounter.adopt(other->gcMallocCounter);
    gcMallocSize.adopt(other->gcMallocSize);
  }
  size_t GCMaxMallocBytes() const { return gcMallocCounter.maxBytes(); }
  size_t GCMallocBytes() const { return gcMallocCounter.bytes(); }

  void updateJitCodeMallocBytes(size_t nbytes) {
    updateMemoryCounter(jitCodeCounter, nbytes);
  }

  void updateAllGCMallocCountersOnGCStart();
  void updateAllGCMallocCountersOnGCEnd(const js::AutoLockGC& lock);
  js::gc::TriggerKind shouldTriggerGCForTooMuchMalloc();

  // Memory accounting APIs for memory owned by GC cells.
  void addCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use) {
    gcMallocSize.addMemory(cell, nbytes, use);
  }
  void removeCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use) {
    gcMallocSize.removeMemory(cell, nbytes, use);
  }
  void swapCellMemory(js::gc::Cell* a, js::gc::Cell* b, js::MemoryUse use) {
    gcMallocSize.swapMemory(a, b, use);
  }

  size_t totalBytes() const {
    return zoneSize.gcBytes() + gcMallocSize.bytes();
  }

  void keepAtoms() { keepAtomsCount++; }
  void releaseAtoms();
  bool hasKeptAtoms() const { return keepAtomsCount; }

@ -570,16 +503,6 @@ class Zone : public JS::shadow::Zone,
    return functionToStringCache_.ref();
  }

  // Track heap size under this Zone.
  js::gc::HeapSize zoneSize;

  // Thresholds used to trigger GC.
  js::gc::ZoneHeapThreshold threshold;

  // Amount of data to allocate before triggering a new incremental slice for
  // the current GC.
  js::UnprotectedData<size_t> gcDelayBytes;

  js::ZoneData<uint32_t> tenuredStrings;
  js::ZoneData<bool> allocNurseryStrings;

@ -700,91 +623,4 @@ class Zone : public JS::shadow::Zone,

} // namespace JS

namespace js {

/*
 * Allocation policy that uses Zone::pod_malloc and friends, so that memory
 * pressure is accounted for on the zone. This is suitable for memory associated
 * with GC things allocated in the zone.
 *
 * Since it doesn't hold a JSContext (those may not live long enough), it can't
 * report out-of-memory conditions itself; the caller must check for OOM and
 * take the appropriate action.
 *
 * FIXME bug 647103 - replace these *AllocPolicy names.
 */
class ZoneAllocPolicy {
  JS::Zone* const zone;

 public:
  MOZ_IMPLICIT ZoneAllocPolicy(JS::Zone* z) : zone(z) {}

  template <typename T>
  T* maybe_pod_malloc(size_t numElems) {
    return zone->maybe_pod_malloc<T>(numElems);
  }
  template <typename T>
  T* maybe_pod_calloc(size_t numElems) {
    return zone->maybe_pod_calloc<T>(numElems);
  }
  template <typename T>
  T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
    return zone->maybe_pod_realloc<T>(p, oldSize, newSize);
  }
  template <typename T>
  T* pod_malloc(size_t numElems) {
    return zone->pod_malloc<T>(numElems);
  }
  template <typename T>
  T* pod_calloc(size_t numElems) {
    return zone->pod_calloc<T>(numElems);
  }
  template <typename T>
  T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
    return zone->pod_realloc<T>(p, oldSize, newSize);
  }

  template <typename T>
  void free_(T* p, size_t numElems = 0) {
    js_free(p);
  }
  void reportAllocOverflow() const {}

  MOZ_MUST_USE bool checkSimulatedOOM() const {
    return !js::oom::ShouldFailWithOOM();
  }
};

// Convenience functions for memory accounting on the zone.

// Associate malloc memory with a GC thing. This call must be matched by a
// following call to RemoveCellMemory with the same size and use. The total
// amount of malloc memory associated with a zone is used to trigger GC.
inline void AddCellMemory(gc::TenuredCell* cell, size_t nbytes, MemoryUse use) {
  if (nbytes) {
    cell->zone()->addCellMemory(cell, nbytes, use);
  }
}
inline void AddCellMemory(gc::Cell* cell, size_t nbytes, MemoryUse use) {
  if (cell->isTenured()) {
    AddCellMemory(&cell->asTenured(), nbytes, use);
  }
}

// Remove association between malloc memory and a GC thing. This call must
// follow a call to AddCellMemory with the same size and use.
inline void RemoveCellMemory(gc::TenuredCell* cell, size_t nbytes,
                             MemoryUse use) {
  if (nbytes) {
    cell->zoneFromAnyThread()->removeCellMemory(cell, nbytes, use);
  }
}
inline void RemoveCellMemory(gc::Cell* cell, size_t nbytes, MemoryUse use) {
  if (cell->isTenured()) {
    RemoveCellMemory(&cell->asTenured(), nbytes, use);
  }
}

} // namespace js

#endif // gc_Zone_h
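The comment on AddCellMemory/RemoveCellMemory above describes a strict pairing protocol: each call that associates malloc memory with a GC thing must later be balanced with the same size and MemoryUse. A hedged in-tree sketch of that pairing for a tenured cell that owns an out-of-line buffer (the function names and the MemoryUse value are placeholders, not part of this patch):

// Illustrative only; assumes an in-tree caller. |SomeUse| stands in for
// whichever js::MemoryUse enumerator actually describes the buffer.
void* AllocateTrackedBuffer(js::gc::TenuredCell* owner, size_t nbytes) {
  void* buffer = js_malloc(nbytes);
  if (!buffer) {
    return nullptr;  // caller handles OOM
  }
  // Account the buffer against the owner's zone; must be balanced on free.
  js::AddCellMemory(owner, nbytes, js::MemoryUse::SomeUse);
  return buffer;
}

void FreeTrackedBuffer(js::gc::TenuredCell* owner, void* buffer, size_t nbytes) {
  // Same size and use as the matching AddCellMemory call.
  js::RemoveCellMemory(owner, nbytes, js::MemoryUse::SomeUse);
  js_free(buffer);
}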
js/src/gc/ZoneAllocator.h
@ -0,0 +1,304 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * Public header for allocating memory associated with GC things.
 */

#ifndef gc_ZoneAllocator_h
#define gc_ZoneAllocator_h

#include "gc/Scheduling.h"
#include "vm/Runtime.h"  // For JSRuntime::gc.

namespace JS {
class Zone;
}  // namespace JS

namespace js {

namespace gc {
void MaybeAllocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc);
}

// Base class of JS::Zone that provides malloc memory allocation and accounting.
class ZoneAllocator : public JS::shadow::Zone,
                      public js::MallocProvider<JS::Zone> {
 protected:
  explicit ZoneAllocator(JSRuntime* rt);
  ~ZoneAllocator();
  void fixupAfterMovingGC();

 public:
  static ZoneAllocator* from(JS::Zone* zone) {
    // This is a safe downcast, but the compiler hasn't seen the definition yet.
    return reinterpret_cast<ZoneAllocator*>(zone);
  }

  MOZ_MUST_USE void* onOutOfMemory(js::AllocFunction allocFunc,
                                   arena_id_t arena, size_t nbytes,
                                   void* reallocPtr = nullptr);
  void reportAllocationOverflow() const;

  void setGCMaxMallocBytes(size_t value, const js::AutoLockGC& lock) {
    gcMallocCounter.setMax(value, lock);
  }
  void updateMallocCounter(size_t nbytes) {
    updateMemoryCounter(gcMallocCounter, nbytes);
  }
  void adoptMallocBytes(ZoneAllocator* other) {
    gcMallocCounter.adopt(other->gcMallocCounter);
    gcMallocBytes += other->gcMallocBytes;
    other->gcMallocBytes = 0;
#ifdef DEBUG
    gcMallocTracker.adopt(other->gcMallocTracker);
#endif
  }
  size_t GCMaxMallocBytes() const { return gcMallocCounter.maxBytes(); }
  size_t GCMallocBytes() const { return gcMallocCounter.bytes(); }

  void updateJitCodeMallocBytes(size_t nbytes) {
    updateMemoryCounter(jitCodeCounter, nbytes);
  }

  void updateAllGCMallocCountersOnGCStart();
  void updateAllGCMallocCountersOnGCEnd(const js::AutoLockGC& lock);
  js::gc::TriggerKind shouldTriggerGCForTooMuchMalloc();

  // Memory accounting APIs for malloc memory owned by GC cells.

  void addCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use) {
    MOZ_ASSERT(cell);
    MOZ_ASSERT(nbytes);
    mozilla::DebugOnly<size_t> initialBytes(gcMallocBytes);
    MOZ_ASSERT(initialBytes + nbytes > initialBytes);

    gcMallocBytes += nbytes;
    // We don't currently check GC triggers here.

#ifdef DEBUG
    gcMallocTracker.trackMemory(cell, nbytes, use);
#endif
  }

  void removeCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use) {
    MOZ_ASSERT(cell);
    MOZ_ASSERT(nbytes);
    MOZ_ASSERT(gcMallocBytes >= nbytes);

    gcMallocBytes -= nbytes;

#ifdef DEBUG
    gcMallocTracker.untrackMemory(cell, nbytes, use);
#endif
  }

  void swapCellMemory(js::gc::Cell* a, js::gc::Cell* b, js::MemoryUse use) {
#ifdef DEBUG
    gcMallocTracker.swapMemory(a, b, use);
#endif
  }

#ifdef DEBUG
  void registerPolicy(js::ZoneAllocPolicy* policy) {
    return gcMallocTracker.registerPolicy(policy);
  }
  void unregisterPolicy(js::ZoneAllocPolicy* policy) {
    return gcMallocTracker.unregisterPolicy(policy);
  }
#endif

  void incPolicyMemory(js::ZoneAllocPolicy* policy, size_t nbytes) {
    MOZ_ASSERT(nbytes);
    mozilla::DebugOnly<size_t> initialBytes(gcMallocBytes);
    MOZ_ASSERT(initialBytes + nbytes > initialBytes);

    gcMallocBytes += nbytes;

#ifdef DEBUG
    gcMallocTracker.incPolicyMemory(policy, nbytes);
#endif

    maybeAllocTriggerZoneGC();
  }
  void decPolicyMemory(js::ZoneAllocPolicy* policy, size_t nbytes) {
    MOZ_ASSERT(nbytes);
    MOZ_ASSERT(gcMallocBytes >= nbytes);

    gcMallocBytes -= nbytes;

#ifdef DEBUG
    gcMallocTracker.decPolicyMemory(policy, nbytes);
#endif
  }

  size_t totalBytes() const { return zoneSize.gcBytes() + gcMallocBytes; }

  // Check allocation threshold and trigger a zone GC if necessary.
  void maybeAllocTriggerZoneGC() {
    JSRuntime* rt = runtimeFromAnyThread();
    if (totalBytes() >= threshold.gcTriggerBytes() &&
        rt->heapState() == JS::HeapState::Idle) {
      gc::MaybeAllocTriggerZoneGC(rt, this);
    }
  }

 private:
  void updateMemoryCounter(js::gc::MemoryCounter& counter, size_t nbytes) {
    JSRuntime* rt = runtimeFromAnyThread();

    counter.update(nbytes);
    auto trigger = counter.shouldTriggerGC(rt->gc.tunables);
    if (MOZ_LIKELY(trigger == js::gc::NoTrigger) ||
        trigger <= counter.triggered()) {
      return;
    }

    maybeTriggerGCForTooMuchMalloc(counter, trigger);
  }

  void maybeTriggerGCForTooMuchMalloc(js::gc::MemoryCounter& counter,
                                      js::gc::TriggerKind trigger);

 public:
  // Track heap size under this Zone.
  js::gc::HeapSize zoneSize;

  // Thresholds used to trigger GC.
  js::gc::ZoneHeapThreshold threshold;

  // Amount of data to allocate before triggering a new incremental slice for
  // the current GC.
  js::MainThreadData<size_t> gcDelayBytes;

 private:
  // Malloc counter to measure memory pressure for GC scheduling. This counter
  // is used for allocations where the size of the allocation is not known on
  // free. Currently this is used for all internal malloc allocations.
  js::gc::MemoryCounter gcMallocCounter;

  // Malloc counter used for allocations where size information is
  // available. Used for some internal and all tracked external allocations.
  mozilla::Atomic<size_t, mozilla::Relaxed,
                  mozilla::recordreplay::Behavior::DontPreserve>
      gcMallocBytes;

#ifdef DEBUG
  // In debug builds, malloc allocations can be tracked to make debugging easier
  // (possible?) if allocation and free sizes don't balance.
  js::gc::MemoryTracker gcMallocTracker;
#endif

  // Counter of JIT code executable memory for GC scheduling. Also imprecise,
  // since wasm can generate code that outlives a zone.
  js::gc::MemoryCounter jitCodeCounter;

  friend class js::gc::GCRuntime;
};

/*
 * Allocation policy that performs precise memory tracking on the zone. This
 * should be used for all containers associated with a GC thing or a zone.
 *
 * Since it doesn't hold a JSContext (those may not live long enough), it can't
 * report out-of-memory conditions itself; the caller must check for OOM and
 * take the appropriate action.
 *
 * FIXME bug 647103 - replace these *AllocPolicy names.
 */
class ZoneAllocPolicy : public MallocProvider<ZoneAllocPolicy> {
  ZoneAllocator* zone_;

#ifdef DEBUG
  friend class js::gc::MemoryTracker;  // Can clear |zone_| on merge.
#endif

 public:
  MOZ_IMPLICIT ZoneAllocPolicy(ZoneAllocator* z) : zone_(z) {
#ifdef DEBUG
    zone()->registerPolicy(this);
#endif
  }
  ZoneAllocPolicy(ZoneAllocPolicy& other) : ZoneAllocPolicy(other.zone_) {}
  ZoneAllocPolicy(ZoneAllocPolicy&& other) : ZoneAllocPolicy(other.zone_) {}
  ~ZoneAllocPolicy() {
#ifdef DEBUG
    if (zone_) {
      zone_->unregisterPolicy(this);
    }
#endif
  }

  // Public methods required to fulfill the AllocPolicy interface.

  template <typename T>
  void free_(T* p, size_t numElems) {
    if (p) {
      decMemory(numElems * sizeof(T));
      js_free(p);
    }
  }

  MOZ_MUST_USE bool checkSimulatedOOM() const {
    return !js::oom::ShouldFailWithOOM();
  }

  void reportAllocOverflow() const { reportAllocationOverflow(); }

  // Internal methods called by the MallocProvider implementation.

  MOZ_MUST_USE void* onOutOfMemory(js::AllocFunction allocFunc,
                                   arena_id_t arena, size_t nbytes,
                                   void* reallocPtr = nullptr) {
    return zone()->onOutOfMemory(allocFunc, arena, nbytes, reallocPtr);
  }
  void reportAllocationOverflow() const { zone()->reportAllocationOverflow(); }
  void updateMallocCounter(size_t nbytes) {
    zone()->incPolicyMemory(this, nbytes);
  }

 private:
  ZoneAllocator* zone() const {
    MOZ_ASSERT(zone_);
    return zone_;
  }
  void decMemory(size_t nbytes) { zone_->decPolicyMemory(this, nbytes); }
};

// Convenience functions for memory accounting on the zone.

// Associate malloc memory with a GC thing. This call must be matched by a
// following call to RemoveCellMemory with the same size and use. The total
// amount of malloc memory associated with a zone is used to trigger GC.
inline void AddCellMemory(gc::TenuredCell* cell, size_t nbytes, MemoryUse use) {
  if (nbytes) {
    ZoneAllocator::from(cell->zone())->addCellMemory(cell, nbytes, use);
  }
}
inline void AddCellMemory(gc::Cell* cell, size_t nbytes, MemoryUse use) {
  if (cell->isTenured()) {
    AddCellMemory(&cell->asTenured(), nbytes, use);
  }
}

// Remove association between malloc memory and a GC thing. This call must
// follow a call to AddCellMemory with the same size and use.
inline void RemoveCellMemory(gc::TenuredCell* cell, size_t nbytes,
                             MemoryUse use) {
  if (nbytes) {
    auto zoneBase = ZoneAllocator::from(cell->zoneFromAnyThread());
    zoneBase->removeCellMemory(cell, nbytes, use);
  }
}
inline void RemoveCellMemory(gc::Cell* cell, size_t nbytes, MemoryUse use) {
  if (cell->isTenured()) {
    RemoveCellMemory(&cell->asTenured(), nbytes, use);
  }
}

} // namespace js

#endif // gc_ZoneAllocator_h
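ZoneAllocPolicy above is meant to be plugged into containers that belong to a GC thing or a zone: every allocation the container makes goes through MallocProvider and is reported to the owning ZoneAllocator via incPolicyMemory/decPolicyMemory, so it can feed the GC trigger. A hedged sketch of that intended use with js::Vector (the class and field names are invented for illustration, not taken from this patch):

// Illustrative only; assumes an in-tree context where "gc/ZoneAllocator.h"
// and "js/Vector.h" are available. |ZoneSideTable| is a made-up example type.
#include "gc/ZoneAllocator.h"
#include "js/Vector.h"

class ZoneSideTable {
  // The policy is constructed from the owning zone, so every (re)allocation
  // made by the vector is counted against that zone's gcMallocBytes and can
  // trigger maybeAllocTriggerZoneGC().
  js::Vector<uint32_t, 0, js::ZoneAllocPolicy> entries_;

 public:
  explicit ZoneSideTable(JS::Zone* zone)
      : entries_(js::ZoneAllocPolicy(js::ZoneAllocator::from(zone))) {}

  bool append(uint32_t value) { return entries_.append(value); }  // false on OOM
};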
@ -27,6 +27,7 @@

#include "jit/ExecutableAllocator.h"

#include "gc/Zone.h"
#include "jit/JitRealm.h"
#include "js/MemoryMetrics.h"
@ -14,6 +14,7 @@
#include "jit/JitOptions.h"
#include "vm/JSContext.h"
#include "vm/Realm.h"
#include "vm/TypeInference.h"

namespace js {
namespace jit {
@ -12,6 +12,8 @@
#include "mozilla/FloatingPoint.h"
#include "mozilla/MathAlgorithms.h"

#include "gc/Zone.h"

#if defined(JS_CODEGEN_X86)
# include "jit/x86/MacroAssembler-x86-inl.h"
#elif defined(JS_CODEGEN_X64)
@ -1172,7 +1172,7 @@ JS_PUBLIC_API void JS::AddAssociatedMemory(JSObject* obj, size_t nbytes,

  Zone* zone = obj->zone();
  zone->addCellMemory(obj, nbytes, js::MemoryUse(use));
  zone->runtimeFromMainThread()->gc.maybeAllocTriggerZoneGC(zone);
  zone->maybeAllocTriggerZoneGC();
}

JS_PUBLIC_API void JS::RemoveAssociatedMemory(JSObject* obj, size_t nbytes,
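JS::AddAssociatedMemory is the public entry point over addCellMemory shown in this hunk; after the change it asks the zone directly (maybeAllocTriggerZoneGC) instead of going through the runtime's GC. A hedged embedder-side sketch of the balanced add/remove pattern (|kUse| is a placeholder for whichever public MemoryUse value the embedding reports, and rooting of |obj| is omitted):

// Illustrative only, not code from this patch.
void AttachNativePayload(JSObject* obj, size_t payloadBytes) {
  // Tell the GC that |obj| keeps |payloadBytes| of malloc memory alive; the
  // zone's trigger threshold now accounts for it, and a zone GC may be
  // triggered right away if the heap is idle and over budget.
  JS::AddAssociatedMemory(obj, payloadBytes, kUse);
}

void DetachNativePayload(JSObject* obj, size_t payloadBytes) {
  // Must mirror the AddAssociatedMemory call with the same size and use.
  JS::RemoveAssociatedMemory(obj, payloadBytes, kUse);
}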
@ -7,6 +7,7 @@
#include "threading/ProtectedData.h"

#include "gc/Heap.h"
#include "gc/Zone.h"
#include "vm/HelperThreads.h"
#include "vm/JSContext.h"
@ -553,7 +553,7 @@ bool StartOffThreadIonFree(jit::IonBuilder* builder,
struct AllCompilations {};
struct ZonesInState {
  JSRuntime* runtime;
  JS::Zone::GCState state;
  JS::shadow::Zone::GCState state;
};
struct CompilationsUsingNursery {
  JSRuntime* runtime;
@ -582,7 +582,7 @@ inline void CancelOffThreadIonCompile(Zone* zone) {
}

inline void CancelOffThreadIonCompile(JSRuntime* runtime,
                                      JS::Zone::GCState state) {
                                      JS::shadow::Zone::GCState state) {
  CancelOffThreadIonCompile(CompilationSelector(ZonesInState{runtime, state}),
                            true);
}
@ -10,6 +10,7 @@
#include "vm/JSContext.h"

#include "builtin/Object.h"
#include "gc/Zone.h"
#include "jit/JitFrames.h"
#include "proxy/Proxy.h"
#include "vm/BigIntType.h"
@ -9,6 +9,8 @@

#include "vm/ObjectGroup.h"

#include "gc/Zone.h"

namespace js {

inline bool ObjectGroup::needsSweep() {
@ -927,24 +927,6 @@ class ErrorCopier {
  ~ErrorCopier();
};

class MOZ_RAII AutoSuppressAllocationMetadataBuilder {
  JS::Zone* zone;
  bool saved;

 public:
  explicit AutoSuppressAllocationMetadataBuilder(JSContext* cx)
      : AutoSuppressAllocationMetadataBuilder(cx->realm()->zone()) {}

  explicit AutoSuppressAllocationMetadataBuilder(JS::Zone* zone)
      : zone(zone), saved(zone->suppressAllocationMetadataBuilder) {
    zone->suppressAllocationMetadataBuilder = true;
  }

  ~AutoSuppressAllocationMetadataBuilder() {
    zone->suppressAllocationMetadataBuilder = saved;
  }
};

} /* namespace js */

#endif /* vm_Realm_h */
@ -19,7 +19,7 @@
#include "gc/Barrier.h"
#include "gc/Heap.h"
#include "gc/Marking.h"
#include "gc/Zone.h"
#include "gc/ZoneAllocator.h"
#include "js/AllocPolicy.h"
#include "js/RegExpFlags.h"  // JS::RegExpFlag, JS::RegExpFlags
#include "js/UbiNode.h"
@ -367,6 +367,24 @@ inline bool ObjectGroup::hasUnanalyzedPreliminaryObjects() {
         maybePreliminaryObjectsDontCheckGeneration();
}

class MOZ_RAII AutoSuppressAllocationMetadataBuilder {
  JS::Zone* zone;
  bool saved;

 public:
  explicit AutoSuppressAllocationMetadataBuilder(JSContext* cx)
      : AutoSuppressAllocationMetadataBuilder(cx->realm()->zone()) {}

  explicit AutoSuppressAllocationMetadataBuilder(JS::Zone* zone)
      : zone(zone), saved(zone->suppressAllocationMetadataBuilder) {
    zone->suppressAllocationMetadataBuilder = true;
  }

  ~AutoSuppressAllocationMetadataBuilder() {
    zone->suppressAllocationMetadataBuilder = saved;
  }
};

/*
 * Structure for type inference entry point functions. All functions which can
 * change type information must use this, and functions which depend on
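The class added here is the same RAII guard that the earlier hunk removes from vm/Realm.h: it sets the zone's suppressAllocationMetadataBuilder flag for the duration of a scope and restores the previous value in its destructor. A short hedged usage sketch (the surrounding function is invented, not part of this patch):

// Illustrative only; assumes an in-tree caller holding a JSContext.
bool CreateInternalHelper(JSContext* cx, JS::MutableHandleObject out) {
  // Objects allocated in this scope skip the realm's allocation-metadata
  // callback; the previous setting is restored when |suppress| goes away.
  js::AutoSuppressAllocationMetadataBuilder suppress(cx);

  JSObject* obj = JS_NewPlainObject(cx);
  if (!obj) {
    return false;
  }
  out.set(obj);
  return true;
}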
@ -20,7 +20,6 @@
#include "jsnum.h"

#include "builtin/Array.h"
#include "gc/Zone.h"
#include "jit/AtomicOperations.h"
#include "js/Conversions.h"
#include "js/Value.h"
@ -21,6 +21,7 @@

#include "builtin/TypedObject.h"
#include "gc/Barrier.h"
#include "gc/Zone.h"
#include "vm/SharedMem.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmDebug.h"
@ -4968,14 +4968,19 @@ pref("layers.acceleration.disabled", false);
// and output the result to stderr.
pref("layers.bench.enabled", false);

#if defined(XP_WIN)
#if defined(XP_WIN) || defined(MOZ_WIDGET_GTK)
pref("layers.gpu-process.enabled", true);
pref("layers.gpu-process.allow-software", true);
#ifdef NIGHTLY_BUILD
pref("layers.gpu-process.max_restarts", 3);
#endif
#endif

#if defined(XP_WIN)
pref("layers.gpu-process.allow-software", true);
#elif defined(MOZ_WIDGET_GTK)
pref("layers.gpu-process.allow-software", false);
#endif

// Whether to force acceleration on, ignoring blacklists.
#ifdef ANDROID
// bug 838603 -- on Android, accidentally blacklisting OpenGL layers
@ -1866,6 +1866,13 @@ nsresult NS_NewURI(nsIURI** aURI, const nsACString& aSpec,
        .Finalize(aURI);
  }

#if defined(MOZ_THUNDERBIRD) || defined(MOZ_SUITE)
  rv = NS_NewMailnewsURI(aURI, aSpec, aCharset, aBaseURI, aIOService);
  if (rv != NS_ERROR_UNKNOWN_PROTOCOL) {
    return rv;
  }
#endif

  if (aBaseURI) {
    nsAutoCString newSpec;
    rv = aBaseURI->Resolve(aSpec, newSpec);
@ -1883,10 +1890,6 @@ nsresult NS_NewURI(nsIURI** aURI, const nsACString& aSpec,
        .Finalize(aURI);
  }

#if defined(MOZ_THUNDERBIRD) || defined(MOZ_SUITE)
  return NS_NewMailnewsURI(aURI, aSpec, aCharset, aBaseURI, aIOService);
#endif

  // Falls back to external protocol handler.
  return NS_MutateURI(new nsSimpleURI::Mutator()).SetSpec(aSpec).Finalize(aURI);
}
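Taken together, the two NS_NewURI hunks move the Thunderbird/SeaMonkey hook from an unconditional last-resort return to an early attempt that only claims the URI when the scheme is recognised; anything returning NS_ERROR_UNKNOWN_PROTOCOL falls through to base-URI resolution and the simple-URI fallback. A hedged sketch of that dispatch shape with hypothetical helper names (not the real Necko code):

// Illustrative only; TrySpecializedFactory and FallbackToSimpleURI are made-up
// helpers standing in for NS_NewMailnewsURI and the nsSimpleURI path above.
nsresult NewURIWithFallback(nsIURI** aURI, const nsACString& aSpec) {
  nsresult rv = TrySpecializedFactory(aURI, aSpec);
  if (rv != NS_ERROR_UNKNOWN_PROTOCOL) {
    // The factory recognised the scheme: propagate success or its real error.
    return rv;
  }
  // Unknown scheme: let the generic machinery have a go.
  return FallbackToSimpleURI(aURI, aSpec);
}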
@ -16,3 +16,9 @@ XPCSHELL_TESTS_MANIFESTS += [
    'xpcshell/xpcshell-unpack.ini',
    'xpcshell/xpcshell.ini',
]

with Files('xpcshell/rs-blocklist/**'):
    BUG_COMPONENT = ('Toolkit', 'Blocklist Implementation')

with Files('xpcshell/xml-blocklist/**'):
    BUG_COMPONENT = ('Toolkit', 'Blocklist Implementation')