Mirror of https://github.com/mozilla/gecko-dev.git

Backed out 5 changesets (bug 1691894) for causing hazard failures in nsXULPrototypeCache. CLOSED TREE

Backed out changeset 22dc870ee609 (bug 1691894)
Backed out changeset 58c31e9d6ae3 (bug 1691894)
Backed out changeset 7483e84149d8 (bug 1691894)
Backed out changeset f977d6cfa973 (bug 1691894)
Backed out changeset db4503476f34 (bug 1691894)

Parent: 8029c8fc2e
Commit: 1afbbe67e1
@@ -160,15 +160,18 @@ inline DocAccessible::AttrRelProviders* DocAccessible::GetOrCreateRelProviders(
     dom::Element* aElement, const nsAString& aID) {
   dom::DocumentOrShadowRoot* docOrShadowRoot =
       aElement->GetUncomposedDocOrConnectedShadowRoot();
-  DependentIDsHashtable* hash =
-      mDependentIDsHashes
-          .GetOrInsertWith(docOrShadowRoot,
-                           [] { return MakeUnique<DependentIDsHashtable>(); })
-          .get();
+  DependentIDsHashtable* hash = mDependentIDsHashes.Get(docOrShadowRoot);
+  if (!hash) {
+    hash = new DependentIDsHashtable();
+    mDependentIDsHashes.Put(docOrShadowRoot, hash);
+  }
 
-  return hash
-      ->GetOrInsertWith(aID, [] { return MakeUnique<AttrRelProviders>(); })
-      .get();
+  AttrRelProviders* providers = hash->Get(aID);
+  if (!providers) {
+    providers = new AttrRelProviders();
+    hash->Put(aID, providers);
+  }
+  return providers;
 }
 
 inline void DocAccessible::RemoveRelProvidersIfEmpty(dom::Element* aElement,
@@ -60,13 +60,13 @@ void nsChromeRegistryContent::RegisterPackage(const ChromePackage& aPackage) {
     if (NS_FAILED(rv)) return;
   }
 
-  UniquePtr<PackageEntry> entry = MakeUnique<PackageEntry>();
+  PackageEntry* entry = new PackageEntry;
   entry->flags = aPackage.flags;
   entry->contentBaseURI = content;
   entry->localeBaseURI = locale;
   entry->skinBaseURI = skin;
 
-  mPackagesHash.Put(aPackage.package, std::move(entry));
+  mPackagesHash.Put(aPackage.package, entry);
 }
 
 void nsChromeRegistryContent::RegisterSubstitution(
@@ -36,8 +36,8 @@ void ChildProcessChannelListener::OnChannelReady(
     aResolver(rv);
   } else {
     mChannelArgs.Put(aIdentifier,
-                     CallbackArgs{aLoadState, std::move(aStreamFilterEndpoints),
-                                  aTiming, std::move(aResolver)});
+                     {aLoadState, std::move(aStreamFilterEndpoints), aTiming,
+                      std::move(aResolver)});
   }
 }
 
@@ -933,8 +933,8 @@ nsresult ExternalResourceMap::AddExternalResource(nsIURI* aURI,
     }
   }
 
-  ExternalResource* newResource =
-      mMap.Put(aURI, MakeUnique<ExternalResource>()).get();
+  ExternalResource* newResource = new ExternalResource();
+  mMap.Put(aURI, newResource);
 
   newResource->mDocument = doc;
   newResource->mViewer = aViewer;
@@ -201,11 +201,11 @@ EventSourceEventService::AddListener(uint64_t aInnerWindowID,
   }
   ++mCountListeners;
 
-  WindowListener* listener =
-      mWindows
-          .GetOrInsertWith(aInnerWindowID,
-                           [] { return MakeUnique<WindowListener>(); })
-          .get();
+  WindowListener* listener = mWindows.Get(aInnerWindowID);
+  if (!listener) {
+    listener = new WindowListener();
+    mWindows.Put(aInnerWindowID, listener);
+  }
 
   listener->mListeners.AppendElement(aListener);
 
@@ -78,7 +78,7 @@ void PointerEventHandler::UpdateActivePointerState(WidgetMouseEvent* aEvent,
      // In this case we have to know information about available mouse pointers
      sActivePointersIds->Put(
          aEvent->pointerId,
-         MakeUnique<PointerInfo>(false, aEvent->mInputSource, true, nullptr));
+         new PointerInfo(false, aEvent->mInputSource, true, nullptr));
 
      MaybeCacheSpoofedPointerID(aEvent->mInputSource, aEvent->pointerId);
      break;
@@ -90,7 +90,7 @@ void PointerEventHandler::UpdateActivePointerState(WidgetMouseEvent* aEvent,
      // nullptr, not sure if this also happens on real usage.
      sActivePointersIds->Put(
          pointerEvent->pointerId,
-         MakeUnique<PointerInfo>(
+         new PointerInfo(
              true, pointerEvent->mInputSource, pointerEvent->mIsPrimary,
              aTargetContent ? aTargetContent->OwnerDoc() : nullptr));
      MaybeCacheSpoofedPointerID(pointerEvent->mInputSource,
@@ -109,8 +109,8 @@ void PointerEventHandler::UpdateActivePointerState(WidgetMouseEvent* aEvent,
               MouseEvent_Binding::MOZ_SOURCE_TOUCH) {
      sActivePointersIds->Put(
          pointerEvent->pointerId,
-         MakeUnique<PointerInfo>(false, pointerEvent->mInputSource,
-                                 pointerEvent->mIsPrimary, nullptr));
+         new PointerInfo(false, pointerEvent->mInputSource,
+                         pointerEvent->mIsPrimary, nullptr));
    } else {
      sActivePointersIds->Remove(pointerEvent->pointerId);
    }
@@ -149,13 +149,12 @@ void PointerEventHandler::RequestPointerCaptureById(uint32_t aPointerId,
 void PointerEventHandler::SetPointerCaptureById(uint32_t aPointerId,
                                                 Element* aElement) {
   MOZ_ASSERT(aElement);
-  sPointerCaptureList->WithEntryHandle(aPointerId, [&](auto&& entry) {
-    if (entry) {
-      entry.Data()->mPendingElement = aElement;
-    } else {
-      entry.Insert(MakeUnique<PointerCaptureInfo>(aElement));
-    }
-  });
+  PointerCaptureInfo* pointerCaptureInfo = GetPointerCaptureInfo(aPointerId);
+  if (pointerCaptureInfo) {
+    pointerCaptureInfo->mPendingElement = aElement;
+  } else {
+    sPointerCaptureList->Put(aPointerId, new PointerCaptureInfo(aElement));
+  }
 }
 
 /* static */
@@ -100,13 +100,13 @@ void RemoteLazyInputStreamStorage::AddStream(nsIInputStream* aInputStream,
                                              uint64_t aChildID) {
   MOZ_ASSERT(aInputStream);
 
-  UniquePtr<StreamData> data = MakeUnique<StreamData>();
+  StreamData* data = new StreamData();
   data->mInputStream = aInputStream;
   data->mChildID = aChildID;
   data->mSize = aSize;
 
   mozilla::StaticMutexAutoLock lock(gMutex);
-  mStorage.Put(aID, std::move(data));
+  mStorage.Put(aID, data);
 }
 
 nsCOMPtr<nsIInputStream> RemoteLazyInputStreamStorage::ForgetStream(
@@ -530,12 +530,11 @@ static void AddDataEntryInternal(const nsACString& aURI, T aObject,
     gDataTable = new nsClassHashtable<nsCStringHashKey, mozilla::dom::DataInfo>;
   }
 
-  mozilla::UniquePtr<mozilla::dom::DataInfo> info =
-      mozilla::MakeUnique<mozilla::dom::DataInfo>(aObject, aPrincipal,
-                                                  aAgentClusterId);
-  BlobURLsReporter::GetJSStackForBlob(info.get());
+  mozilla::dom::DataInfo* info =
+      new mozilla::dom::DataInfo(aObject, aPrincipal, aAgentClusterId);
+  BlobURLsReporter::GetJSStackForBlob(info);
 
-  gDataTable->Put(aURI, std::move(info));
+  gDataTable->Put(aURI, info);
 }
 
 void BlobURLProtocolHandler::Init(void) {
@@ -672,12 +672,11 @@ void FileHandleThreadPool::Enqueue(FileHandle* aFileHandle,
   const nsAString& fileName = mutableFile->FileName();
   bool modeIsWrite = aFileHandle->Mode() == FileMode::Readwrite;
 
-  DirectoryInfo* directoryInfo =
-      mDirectoryInfos
-          .GetOrInsertWith(
-              directoryId,
-              [&] { return UniquePtr<DirectoryInfo>(new DirectoryInfo(this)); })
-          .get();
+  DirectoryInfo* directoryInfo;
+  if (!mDirectoryInfos.Get(directoryId, &directoryInfo)) {
+    directoryInfo = new DirectoryInfo(this);
+    mDirectoryInfos.Put(directoryId, directoryInfo);
+  }
 
   FileHandleQueue* existingFileHandleQueue =
       directoryInfo->GetFileHandleQueue(aFileHandle);
@@ -56,14 +56,15 @@ void FileSystemSecurity::GrantAccessToContentProcess(
   MOZ_ASSERT(NS_IsMainThread());
   mozilla::ipc::AssertIsInMainProcess();
 
-  mPaths.WithEntryHandle(aId, [&](auto&& entry) {
-    if (entry && entry.Data()->Contains(aDirectoryPath)) {
-      return;
-    }
+  nsTArray<nsString>* paths;
+  if (!mPaths.Get(aId, &paths)) {
+    paths = new nsTArray<nsString>();
+    mPaths.Put(aId, paths);
+  } else if (paths->Contains(aDirectoryPath)) {
+    return;
+  }
 
-    entry.OrInsertWith([] { return MakeUnique<nsTArray<nsString>>(); })
-        ->AppendElement(aDirectoryPath);
-  });
+  paths->AppendElement(aDirectoryPath);
 }
 
 void FileSystemSecurity::Forget(ContentParentId aId) {
@@ -2385,7 +2385,7 @@ void HTMLFormElement::AddToPastNamesMap(const nsAString& aName,
   // previous entry with the same name, if any.
   nsCOMPtr<nsIContent> node = do_QueryInterface(aChild);
   if (node) {
-    mPastNameLookupTable.Put(aName, ToSupports(node));
+    mPastNameLookupTable.Put(aName, node);
     node->SetFlags(MAY_BE_IN_PAST_NAMES_MAP);
   }
 }
@@ -7626,14 +7626,8 @@ nsresult DatabaseConnection::UpdateRefcountFunction::ProcessValue(
   const int64_t id = file.FileInfo().Id();
   MOZ_ASSERT(id > 0);
 
-  const auto entry =
-      WrapNotNull(mFileInfoEntries
-                      .GetOrInsertWith(id,
-                                       [&file] {
-                                         return MakeUnique<FileInfoEntry>(
-                                             file.FileInfoPtr());
-                                       })
-                      .get());
+  const auto entry = WrapNotNull(mFileInfoEntries.LookupOrAddFromFactory(
+      id, [&file] { return MakeUnique<FileInfoEntry>(file.FileInfoPtr()); }));
 
   if (mInSavepoint) {
     mSavepointEntriesIndex.Put(id, entry);
@@ -7885,26 +7879,29 @@ uint64_t ConnectionPool::Start(
 
   const uint64_t transactionId = ++mNextTransactionId;
 
-  // To avoid always acquiring a lock, we don't use WithEntryHandle here, which
-  // would require a lock in any case.
   DatabaseInfo* dbInfo = mDatabases.Get(aDatabaseId);
 
   const bool databaseInfoIsNew = !dbInfo;
 
   if (databaseInfoIsNew) {
+    dbInfo = new DatabaseInfo(this, aDatabaseId);
+
     MutexAutoLock lock(mDatabasesMutex);
 
-    dbInfo =
-        mDatabases.Put(aDatabaseId, MakeUnique<DatabaseInfo>(this, aDatabaseId))
-            .get();
+    mDatabases.Put(aDatabaseId, dbInfo);
   }
 
-  MOZ_ASSERT(!mTransactions.Contains(transactionId));
-  auto& transactionInfo = *mTransactions.Put(
-      transactionId, MakeUnique<TransactionInfo>(
-                         *dbInfo, aBackgroundChildLoggingId, aDatabaseId,
-                         transactionId, aLoggingSerialNumber, aObjectStoreNames,
-                         aIsWriteTransaction, aTransactionOp));
+  auto& transactionInfo = [&]() -> TransactionInfo& {
+    auto* transactionInfo = new TransactionInfo(
+        *dbInfo, aBackgroundChildLoggingId, aDatabaseId, transactionId,
+        aLoggingSerialNumber, aObjectStoreNames, aIsWriteTransaction,
+        aTransactionOp);
+
+    MOZ_ASSERT(!mTransactions.Get(transactionId));
+    mTransactions.Put(transactionId, transactionInfo);
+
+    return *transactionInfo;
+  }();
 
   if (aIsWriteTransaction) {
     MOZ_ASSERT(dbInfo->mWriteTransactionCount < UINT32_MAX);
@@ -16776,13 +16773,10 @@ void OpenDatabaseOp::EnsureDatabaseActor() {
     info->mLiveDatabases.AppendElement(
         WrapNotNullUnchecked(mDatabase.unsafeGetRawPtr()));
   } else {
-    // XXX Maybe use GetOrInsertWith above, to avoid a second lookup here?
-    info = gLiveDatabaseHashtable
-               ->Put(mDatabaseId,
-                     MakeUnique<DatabaseActorInfo>(
-                         mMetadata.clonePtr(),
-                         WrapNotNullUnchecked(mDatabase.unsafeGetRawPtr())))
-               .get();
+    info = new DatabaseActorInfo(
+        mMetadata.clonePtr(),
+        WrapNotNullUnchecked(mDatabase.unsafeGetRawPtr()));
+    gLiveDatabaseHashtable->Put(mDatabaseId, info);
   }
 
   // Balanced in Database::CleanupMetadata().
@@ -1552,10 +1552,8 @@ mozilla::ipc::IPCResult BrowserChild::RecvRealMouseMoveEvent(
     mToBeDispatchedMouseData.Push(dispatchData.release());
 
     // Put new data to replace the old one in the hash table.
-    CoalescedMouseData* newData =
-        mCoalescedMouseData
-            .Put(aEvent.pointerId, MakeUnique<CoalescedMouseData>())
-            .get();
+    CoalescedMouseData* newData = new CoalescedMouseData();
+    mCoalescedMouseData.Put(aEvent.pointerId, newData);
     newData->Coalesce(aEvent, aGuid, aInputBlockId);
 
     // Dispatch all pending mouse events.
@@ -222,11 +222,11 @@ Result<Ok, nsresult> SharedMap::MaybeRebuild() {
     // indicate memory corruption, and are fatal.
     MOZ_RELEASE_ASSERT(!buffer.error());
 
-    // Note: While the order of evaluation of the arguments to Put doesn't
-    // matter for this (the actual move will only happen within Put), to be
-    // clear about this, we call entry->Name() before calling Put.
-    const auto& name = entry->Name();
-    mEntries.Put(name, std::move(entry));
+    // Note: Order of evaluation of function arguments is not guaranteed, so we
+    // can't use entry.release() in place of entry.get() without entry->Name()
+    // sometimes resulting in a null dereference.
+    mEntries.Put(entry->Name(), entry.get());
+    Unused << entry.release();
   }
 
   return Ok();
@@ -80,7 +80,7 @@ bool SharedStringMap::Find(const nsCString& aKey, size_t* aIndex) {
 
 void SharedStringMapBuilder::Add(const nsCString& aKey,
                                  const nsString& aValue) {
-  mEntries.Put(aKey, Entry{mKeyTable.Add(aKey), mValueTable.Add(aValue)});
+  mEntries.Put(aKey, {mKeyTable.Add(aKey), mValueTable.Add(aValue)});
 }
 
 Result<Ok, nsresult> SharedStringMapBuilder::Finalize(
@@ -3212,12 +3212,11 @@ bool RecvPBackgroundLSObserverConstructor(PBackgroundLSObserverParent* aActor,
 
   const auto notNullObserver = WrapNotNull(observer.get());
 
-  nsTArray<NotNull<Observer*>>* const array =
-      gObservers
-          ->GetOrInsertWith(
-              notNullObserver->Origin(),
-              [] { return MakeUnique<nsTArray<NotNull<Observer*>>>(); })
-          .get();
+  nsTArray<NotNull<Observer*>>* array;
+  if (!gObservers->Get(notNullObserver->Origin(), &array)) {
+    array = new nsTArray<NotNull<Observer*>>();
+    gObservers->Put(notNullObserver->Origin(), array);
+  }
   array->AppendElement(notNullObserver);
 
   if (RefPtr<Datastore> datastore = GetDatastore(observer->Origin())) {
@@ -7253,13 +7252,14 @@ void PrepareDatastoreOp::GetResponse(LSRequestResponse& aResponse) {
 
   mDatastoreId = ++gLastDatastoreId;
 
+  auto preparedDatastore = MakeUnique<PreparedDatastore>(
+      mDatastore, mContentParentId, Origin(), mDatastoreId,
+      /* aForPreload */ mForPreload);
+
   if (!gPreparedDatastores) {
     gPreparedDatastores = new PreparedDatastoreHashtable();
   }
-  const auto& preparedDatastore = gPreparedDatastores->Put(
-      mDatastoreId, MakeUnique<PreparedDatastore>(
-                        mDatastore, mContentParentId, Origin(), mDatastoreId,
-                        /* aForPreload */ mForPreload));
+  gPreparedDatastores->Put(mDatastoreId, preparedDatastore.get());
 
   if (mInvalidated) {
     preparedDatastore->Invalidate();
@@ -7267,6 +7267,8 @@ void PrepareDatastoreOp::GetResponse(LSRequestResponse& aResponse) {
 
   mPreparedDatastoreRegistered.Flip();
 
+  Unused << preparedDatastore.release();
+
   if (mForPreload) {
     LSRequestPreloadDatastoreResponse preloadDatastoreResponse;
 
@@ -2763,12 +2763,11 @@ RefPtr<MediaManager::StreamPromise> MediaManager::GetUserMedia(
 
               // Add a WindowID cross-reference so OnNavigation can tear
               // things down
-              nsTArray<nsString>* const array =
-                  self->mCallIds
-                      .GetOrInsertWith(
-                          windowID,
-                          [] { return MakeUnique<nsTArray<nsString>>(); })
-                      .get();
+              nsTArray<nsString>* array;
+              if (!self->mCallIds.Get(windowID, &array)) {
+                array = new nsTArray<nsString>();
+                self->mCallIds.Put(windowID, array);
+              }
               array->AppendElement(callID);
 
               nsCOMPtr<nsIObserverService> obs = services::GetObserverService();
@ -119,7 +119,7 @@ class GMPDiskStorage : public GMPStorage {
|
|||
continue;
|
||||
}
|
||||
|
||||
mRecords.Put(recordName, MakeUnique<Record>(filename, recordName));
|
||||
mRecords.Put(recordName, new Record(filename, recordName));
|
||||
}
|
||||
|
||||
return NS_OK;
|
||||
|
@ -127,24 +127,17 @@ class GMPDiskStorage : public GMPStorage {
|
|||
|
||||
GMPErr Open(const nsCString& aRecordName) override {
|
||||
MOZ_ASSERT(!IsOpen(aRecordName));
|
||||
|
||||
Record* const record =
|
||||
mRecords.WithEntryHandle(aRecordName, [&](auto&& entry) -> Record* {
|
||||
if (!entry) {
|
||||
// New file.
|
||||
nsAutoString filename;
|
||||
nsresult rv = GetUnusedFilename(aRecordName, filename);
|
||||
if (NS_WARN_IF(NS_FAILED(rv))) {
|
||||
return nullptr;
|
||||
}
|
||||
return entry.Insert(MakeUnique<Record>(filename, aRecordName))
|
||||
.get();
|
||||
}
|
||||
|
||||
return entry.Data().get();
|
||||
});
|
||||
if (!record) {
|
||||
return GMPGenericErr;
|
||||
nsresult rv;
|
||||
Record* record = nullptr;
|
||||
if (!mRecords.Get(aRecordName, &record)) {
|
||||
// New file.
|
||||
nsAutoString filename;
|
||||
rv = GetUnusedFilename(aRecordName, filename);
|
||||
if (NS_WARN_IF(NS_FAILED(rv))) {
|
||||
return GMPGenericErr;
|
||||
}
|
||||
record = new Record(filename, aRecordName);
|
||||
mRecords.Put(aRecordName, record);
|
||||
}
|
||||
|
||||
MOZ_ASSERT(record);
|
||||
|
@ -153,8 +146,7 @@ class GMPDiskStorage : public GMPStorage {
|
|||
return GMPRecordInUse;
|
||||
}
|
||||
|
||||
nsresult rv =
|
||||
OpenStorageFile(record->mFilename, ReadWrite, &record->mFileDesc);
|
||||
rv = OpenStorageFile(record->mFilename, ReadWrite, &record->mFileDesc);
|
||||
if (NS_WARN_IF(NS_FAILED(rv))) {
|
||||
return GMPGenericErr;
|
||||
}
|
||||
|
|
|
@ -13,10 +13,11 @@ class GMPMemoryStorage : public GMPStorage {
|
|||
GMPErr Open(const nsCString& aRecordName) override {
|
||||
MOZ_ASSERT(!IsOpen(aRecordName));
|
||||
|
||||
Record* record =
|
||||
mRecords
|
||||
.GetOrInsertWith(aRecordName, [] { return MakeUnique<Record>(); })
|
||||
.get();
|
||||
Record* record = nullptr;
|
||||
if (!mRecords.Get(aRecordName, &record)) {
|
||||
record = new Record();
|
||||
mRecords.Put(aRecordName, record);
|
||||
}
|
||||
record->mIsOpen = true;
|
||||
return GMPNoErr;
|
||||
}
|
||||
|
|
|
@ -430,16 +430,15 @@ void GeckoMediaPluginService::ConnectCrashHelper(uint32_t aPluginId,
|
|||
if (!aHelper) {
|
||||
return;
|
||||
}
|
||||
|
||||
MutexAutoLock lock(mMutex);
|
||||
mPluginCrashHelpers.WithEntryHandle(aPluginId, [&](auto&& entry) {
|
||||
if (!entry) {
|
||||
entry.Insert(MakeUnique<nsTArray<RefPtr<GMPCrashHelper>>>());
|
||||
} else if (entry.Data()->Contains(aHelper)) {
|
||||
return;
|
||||
}
|
||||
entry.Data()->AppendElement(aHelper);
|
||||
});
|
||||
nsTArray<RefPtr<GMPCrashHelper>>* helpers;
|
||||
if (!mPluginCrashHelpers.Get(aPluginId, &helpers)) {
|
||||
helpers = new nsTArray<RefPtr<GMPCrashHelper>>();
|
||||
mPluginCrashHelpers.Put(aPluginId, helpers);
|
||||
} else if (helpers->Contains(aHelper)) {
|
||||
return;
|
||||
}
|
||||
helpers->AppendElement(aHelper);
|
||||
}
|
||||
|
||||
void GeckoMediaPluginService::DisconnectCrashHelper(GMPCrashHelper* aHelper) {
|
||||
|
|
|
@ -1093,24 +1093,20 @@ nsresult GeckoMediaPluginServiceParent::GetNodeId(
|
|||
// name, so that if the same origin pair is opened for the same GMP in this
|
||||
// session, it gets the same node id.
|
||||
const uint32_t pbHash = AddToHash(HashString(aGMPName), hash);
|
||||
return mTempNodeIds.WithEntryHandle(pbHash, [&](auto&& entry) {
|
||||
if (!entry) {
|
||||
// No salt stored, generate and temporarily store some for this id.
|
||||
nsAutoCString newSalt;
|
||||
rv = GenerateRandomPathName(newSalt, NodeIdSaltLength);
|
||||
if (NS_WARN_IF(NS_FAILED(rv))) {
|
||||
return rv;
|
||||
}
|
||||
auto salt = MakeUnique<nsCString>(newSalt);
|
||||
|
||||
mPersistentStorageAllowed.Put(*salt, false);
|
||||
|
||||
entry.Insert(std::move(salt));
|
||||
nsCString* salt = nullptr;
|
||||
if (!(salt = mTempNodeIds.Get(pbHash))) {
|
||||
// No salt stored, generate and temporarily store some for this id.
|
||||
nsAutoCString newSalt;
|
||||
rv = GenerateRandomPathName(newSalt, NodeIdSaltLength);
|
||||
if (NS_WARN_IF(NS_FAILED(rv))) {
|
||||
return rv;
|
||||
}
|
||||
|
||||
aOutId = *entry.Data();
|
||||
return NS_OK;
|
||||
});
|
||||
salt = new nsCString(newSalt);
|
||||
mTempNodeIds.Put(pbHash, salt);
|
||||
mPersistentStorageAllowed.Put(*salt, false);
|
||||
}
|
||||
aOutId = *salt;
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
// Otherwise, try to see if we've previously generated and stored salt
|
||||
|
|
|
@ -150,10 +150,9 @@ bool GMPInfoFileParser::Init(nsIFile* aInfoFile) {
|
|||
ToLowerCase(key);
|
||||
key.Trim(" ");
|
||||
|
||||
auto value = MakeUnique<nsCString>(Substring(line, colon + 1));
|
||||
nsCString* value = new nsCString(Substring(line, colon + 1));
|
||||
value->Trim(" ");
|
||||
mValues.Put(key,
|
||||
std::move(value)); // Hashtable assumes ownership of value.
|
||||
mValues.Put(key, value); // Hashtable assumes ownership of value.
|
||||
}
|
||||
|
||||
return true;
|
||||
|
|
|
@ -31,9 +31,9 @@ extern LazyLogModule gMediaDecoderLog;
|
|||
using media::TimeUnit;
|
||||
|
||||
/** Decoder base class for Ogg-encapsulated streams. */
|
||||
UniquePtr<OggCodecState> OggCodecState::Create(
|
||||
rlbox_sandbox_ogg* aSandbox, tainted_opaque_ogg<ogg_page*> aPage,
|
||||
uint32_t aSerial) {
|
||||
OggCodecState* OggCodecState::Create(rlbox_sandbox_ogg* aSandbox,
|
||||
tainted_opaque_ogg<ogg_page*> aPage,
|
||||
uint32_t aSerial) {
|
||||
NS_ASSERTION(sandbox_invoke(*aSandbox, ogg_page_bos, aPage)
|
||||
.unverified_safe_because(RLBOX_SAFE_DEBUG_ASSERTION),
|
||||
"Only call on BOS page!");
|
||||
|
@ -69,12 +69,8 @@ UniquePtr<OggCodecState> OggCodecState::Create(
|
|||
// Can't use MakeUnique here, OggCodecState is protected.
|
||||
codecState.reset(new OggCodecState(aSandbox, aPage, aSerial, false));
|
||||
}
|
||||
|
||||
if (!codecState->OggCodecState::InternalInit()) {
|
||||
codecState.reset();
|
||||
}
|
||||
|
||||
return codecState;
|
||||
return codecState->OggCodecState::InternalInit() ? codecState.release()
|
||||
: nullptr;
|
||||
}
|
||||
|
||||
OggCodecState::OggCodecState(rlbox_sandbox_ogg* aSandbox,
|
||||
|
@ -1563,7 +1559,7 @@ bool SkeletonState::DecodeIndex(ogg_packet* aPacket) {
|
|||
|
||||
int32_t keyPointsRead = keyPoints->Length();
|
||||
if (keyPointsRead > 0) {
|
||||
mIndex.Put(serialno, std::move(keyPoints));
|
||||
mIndex.Put(serialno, keyPoints.release());
|
||||
}
|
||||
|
||||
LOG(LogLevel::Debug, ("Loaded %d keypoints for Skeleton on stream %u",
|
||||
|
|
|
@ -118,9 +118,9 @@ class OggCodecState {
|
|||
|
||||
// Factory for creating nsCodecStates. Use instead of constructor.
|
||||
// aPage should be a beginning-of-stream page.
|
||||
static UniquePtr<OggCodecState> Create(rlbox_sandbox_ogg* aSandbox,
|
||||
tainted_opaque_ogg<ogg_page*> aPage,
|
||||
uint32_t aSerial);
|
||||
static OggCodecState* Create(rlbox_sandbox_ogg* aSandbox,
|
||||
tainted_opaque_ogg<ogg_page*> aPage,
|
||||
uint32_t aSerial);
|
||||
|
||||
virtual CodecType GetType() { return TYPE_UNKNOWN; }
|
||||
|
||||
|
|
|
@ -12,10 +12,9 @@ namespace mozilla {
|
|||
|
||||
OggCodecStore::OggCodecStore() : mMonitor("CodecStore") {}
|
||||
|
||||
OggCodecState* OggCodecStore::Add(uint32_t serial,
|
||||
UniquePtr<OggCodecState> codecState) {
|
||||
void OggCodecStore::Add(uint32_t serial, OggCodecState* codecState) {
|
||||
MonitorAutoLock mon(mMonitor);
|
||||
return mCodecStates.Put(serial, std::move(codecState)).get();
|
||||
mCodecStates.Put(serial, codecState);
|
||||
}
|
||||
|
||||
bool OggCodecStore::Contains(uint32_t serial) {
|
||||
|
|
|
@ -19,7 +19,7 @@ namespace mozilla {
|
|||
class OggCodecStore {
|
||||
public:
|
||||
OggCodecStore();
|
||||
OggCodecState* Add(uint32_t serial, UniquePtr<OggCodecState> codecState);
|
||||
void Add(uint32_t serial, OggCodecState* codecState);
|
||||
bool Contains(uint32_t serial);
|
||||
OggCodecState* Get(uint32_t serial);
|
||||
bool IsKnownStream(uint32_t aSerial);
|
||||
|
|
|
@ -510,9 +510,9 @@ nsresult OggDemuxer::ReadMetadata() {
|
|||
// We've not encountered a stream with this serial number before. Create
|
||||
// an OggCodecState to demux it, and map that to the OggCodecState
|
||||
// in mCodecStates.
|
||||
OggCodecState* const codecState = mCodecStore.Add(
|
||||
serial,
|
||||
OggCodecState::Create(mSandbox.get(), page.to_opaque(), serial));
|
||||
OggCodecState* codecState =
|
||||
OggCodecState::Create(mSandbox.get(), page.to_opaque(), serial);
|
||||
mCodecStore.Add(serial, codecState);
|
||||
bitstreams.AppendElement(codecState);
|
||||
serials.AppendElement(serial);
|
||||
}
|
||||
|
@ -685,7 +685,7 @@ bool OggDemuxer::ReadOggChain(const media::TimeUnit& aLastEndTime) {
|
|||
|
||||
OggCodecState* state;
|
||||
|
||||
mCodecStore.Add(serial, std::move(codecState));
|
||||
mCodecStore.Add(serial, codecState.release());
|
||||
state = mCodecStore.Get(serial);
|
||||
|
||||
NS_ENSURE_TRUE(state != nullptr, false);
|
||||
|
|
|
@ -145,7 +145,7 @@ class EMEDecryptor : public MediaDataDecoder,
|
|||
return;
|
||||
}
|
||||
|
||||
mDecrypts.Put(aSample, MakeUnique<DecryptPromiseRequestHolder>());
|
||||
mDecrypts.Put(aSample, new DecryptPromiseRequestHolder());
|
||||
mProxy->Decrypt(aSample)
|
||||
->Then(mThread, __func__, this, &EMEDecryptor::Decrypted,
|
||||
&EMEDecryptor::Decrypted)
|
||||
|
|
|
@ -71,7 +71,8 @@ class OriginKeyStore : public nsISupports {
|
|||
if (NS_WARN_IF(NS_FAILED(rv))) {
|
||||
return rv;
|
||||
}
|
||||
key = mKeys.Put(principalString, MakeUnique<OriginKey>(salt)).get();
|
||||
key = new OriginKey(salt);
|
||||
mKeys.Put(principalString, key);
|
||||
}
|
||||
if (aPersist && !key->mSecondsStamp) {
|
||||
key->mSecondsStamp = PR_Now() / PR_USEC_PER_SEC;
|
||||
|
@ -258,7 +259,7 @@ class OriginKeyStore : public nsISupports {
|
|||
if (NS_FAILED(rv)) {
|
||||
continue;
|
||||
}
|
||||
mKeys.Put(origin, MakeUnique<OriginKey>(key, secondsstamp));
|
||||
mKeys.Put(origin, new OriginKey(key, secondsstamp));
|
||||
}
|
||||
mPersistCount = mKeys.Count();
|
||||
return NS_OK;
|
||||
|
|
|
@ -24,18 +24,17 @@ MediaSystemResourceManagerParent::~MediaSystemResourceManagerParent() {
|
|||
mozilla::ipc::IPCResult MediaSystemResourceManagerParent::RecvAcquire(
|
||||
const uint32_t& aId, const MediaSystemResourceType& aResourceType,
|
||||
const bool& aWillWait) {
|
||||
mResourceRequests.WithEntryHandle(aId, [&](auto&& request) {
|
||||
MOZ_ASSERT(!request);
|
||||
if (request) {
|
||||
// Send fail response
|
||||
mozilla::Unused << SendResponse(aId, false /* fail */);
|
||||
return;
|
||||
}
|
||||
|
||||
request.Insert(MakeUnique<MediaSystemResourceRequest>(aId, aResourceType));
|
||||
mMediaSystemResourceService->Acquire(this, aId, aResourceType, aWillWait);
|
||||
});
|
||||
MediaSystemResourceRequest* request = mResourceRequests.Get(aId);
|
||||
MOZ_ASSERT(!request);
|
||||
if (request) {
|
||||
// Send fail response
|
||||
mozilla::Unused << SendResponse(aId, false /* fail */);
|
||||
return IPC_OK();
|
||||
}
|
||||
|
||||
request = new MediaSystemResourceRequest(aId, aResourceType);
|
||||
mResourceRequests.Put(aId, request);
|
||||
mMediaSystemResourceService->Acquire(this, aId, aResourceType, aWillWait);
|
||||
return IPC_OK();
|
||||
}
|
||||
|
||||
|
|
|
@ -104,13 +104,11 @@ bool MessagePortService::RequestEntangling(MessagePortParent* aParent,
|
|||
return false;
|
||||
}
|
||||
|
||||
mPorts.Put(aDestinationUUID,
|
||||
MakeUnique<MessagePortServiceData>(aParent->ID()));
|
||||
data = new MessagePortServiceData(aParent->ID());
|
||||
mPorts.Put(aDestinationUUID, data);
|
||||
|
||||
data = mPorts
|
||||
.Put(aParent->ID(),
|
||||
MakeUnique<MessagePortServiceData>(aDestinationUUID))
|
||||
.get();
|
||||
data = new MessagePortServiceData(aDestinationUUID);
|
||||
mPorts.Put(aParent->ID(), data);
|
||||
}
|
||||
|
||||
// This is a security check.
|
||||
|
|
|
@ -2082,8 +2082,7 @@ void PluginInstanceParent::SubclassPluginWindow(HWND aWnd) {
|
|||
mPluginWndProc = nullptr;
|
||||
// Note sPluginInstanceList wil delete 'this' if we do not remove
|
||||
// it on shutdown.
|
||||
sPluginInstanceList->Put((void*)mPluginHWND,
|
||||
UniquePtr<PluginInstanceParent>(this));
|
||||
sPluginInstanceList->Put((void*)mPluginHWND, this);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -79,10 +79,13 @@ class PresentationServiceBase {
|
|||
return;
|
||||
}
|
||||
|
||||
mRespondingSessionIds
|
||||
.GetOrInsertWith(aWindowId,
|
||||
[] { return MakeUnique<nsTArray<nsString>>(); })
|
||||
->AppendElement(nsString(aSessionId));
|
||||
nsTArray<nsString>* sessionIdArray;
|
||||
if (!mRespondingSessionIds.Get(aWindowId, &sessionIdArray)) {
|
||||
sessionIdArray = new nsTArray<nsString>();
|
||||
mRespondingSessionIds.Put(aWindowId, sessionIdArray);
|
||||
}
|
||||
|
||||
sessionIdArray->AppendElement(nsString(aSessionId));
|
||||
mRespondingWindowIds.Put(aSessionId, aWindowId);
|
||||
}
|
||||
|
||||
|
@ -148,14 +151,12 @@ class PresentationServiceBase {
|
|||
aAddedUrls.Clear();
|
||||
nsTArray<nsString> knownAvailableUrls;
|
||||
for (const auto& url : aAvailabilityUrls) {
|
||||
AvailabilityEntry* const entry =
|
||||
mAvailabilityUrlTable
|
||||
.GetOrInsertWith(url,
|
||||
[&] {
|
||||
aAddedUrls.AppendElement(url);
|
||||
return MakeUnique<AvailabilityEntry>();
|
||||
})
|
||||
.get();
|
||||
AvailabilityEntry* entry;
|
||||
if (!mAvailabilityUrlTable.Get(url, &entry)) {
|
||||
entry = new AvailabilityEntry();
|
||||
mAvailabilityUrlTable.Put(url, entry);
|
||||
aAddedUrls.AppendElement(url);
|
||||
}
|
||||
if (!entry->mListeners.Contains(aListener)) {
|
||||
entry->mListeners.AppendElement(aListener);
|
||||
}
|
||||
|
@ -227,10 +228,12 @@ class PresentationServiceBase {
|
|||
for (uint32_t i = 0; i < entry->mListeners.Length(); ++i) {
|
||||
nsIPresentationAvailabilityListener* listener =
|
||||
entry->mListeners.ObjectAt(i);
|
||||
availabilityListenerTable
|
||||
.GetOrInsertWith(
|
||||
listener, [] { return MakeUnique<nsTArray<nsString>>(); })
|
||||
->AppendElement(it.Key());
|
||||
nsTArray<nsString>* urlArray;
|
||||
if (!availabilityListenerTable.Get(listener, &urlArray)) {
|
||||
urlArray = new nsTArray<nsString>();
|
||||
availabilityListenerTable.Put(listener, urlArray);
|
||||
}
|
||||
urlArray->AppendElement(it.Key());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3278,20 +3278,21 @@ void QuotaManager::RegisterDirectoryLock(DirectoryLockImpl& aLock) {
|
|||
DirectoryLockTable& directoryLockTable =
|
||||
GetDirectoryLockTable(aLock.GetPersistenceType());
|
||||
|
||||
nsTArray<NotNull<DirectoryLockImpl*>>* array;
|
||||
if (!directoryLockTable.Get(aLock.Origin(), &array)) {
|
||||
array = new nsTArray<NotNull<DirectoryLockImpl*>>();
|
||||
directoryLockTable.Put(aLock.Origin(), array);
|
||||
|
||||
if (!IsShuttingDown()) {
|
||||
UpdateOriginAccessTime(aLock.GetPersistenceType(),
|
||||
aLock.OriginMetadata());
|
||||
}
|
||||
}
|
||||
|
||||
// XXX It seems that the contents of the array are never actually used, we
|
||||
// just use that like an inefficient use counter. Can't we just change
|
||||
// DirectoryLockTable to a nsDataHashtable<nsCStringHashKey, uint32_t>?
|
||||
directoryLockTable
|
||||
.GetOrInsertWith(
|
||||
aLock.Origin(),
|
||||
[this, &aLock] {
|
||||
if (!IsShuttingDown()) {
|
||||
UpdateOriginAccessTime(aLock.GetPersistenceType(),
|
||||
aLock.OriginMetadata());
|
||||
}
|
||||
return MakeUnique<nsTArray<NotNull<DirectoryLockImpl*>>>();
|
||||
})
|
||||
->AppendElement(WrapNotNullUnchecked(&aLock));
|
||||
array->AppendElement(WrapNotNullUnchecked(&aLock));
|
||||
}
|
||||
|
||||
aLock.SetRegistered(true);
|
||||
|
@ -6609,10 +6610,12 @@ already_AddRefed<GroupInfo> QuotaManager::LockedGetOrCreateGroupInfo(
|
|||
mQuotaMutex.AssertCurrentThreadOwns();
|
||||
MOZ_ASSERT(aPersistenceType != PERSISTENCE_TYPE_PERSISTENT);
|
||||
|
||||
GroupInfoPair* const pair =
|
||||
mGroupInfoPairs
|
||||
.GetOrInsertWith(aGroup, [] { return MakeUnique<GroupInfoPair>(); })
|
||||
.get();
|
||||
GroupInfoPair* pair;
|
||||
if (!mGroupInfoPairs.Get(aGroup, &pair)) {
|
||||
pair = new GroupInfoPair();
|
||||
mGroupInfoPairs.Put(aGroup, pair);
|
||||
// The hashtable is now responsible to delete the GroupInfoPair.
|
||||
}
|
||||
|
||||
RefPtr<GroupInfo> groupInfo = pair->LockedGetGroupInfo(aPersistenceType);
|
||||
if (!groupInfo) {
|
||||
|
|
|
@ -195,7 +195,7 @@ void ReportingHeader::ReportingFromChannel(nsIHttpChannel* aChannel) {
|
|||
}
|
||||
|
||||
// Here we override the previous data.
|
||||
mOrigins.Put(origin, std::move(client));
|
||||
mOrigins.Put(origin, client.release());
|
||||
|
||||
MaybeCreateCleanupTimer();
|
||||
}
|
||||
|
|
|
@ -1589,9 +1589,8 @@ ServiceWorkerManager::GetOrCreateJobQueue(const nsACString& aKey,
|
|||
// XXX we could use WithEntryHandle here to avoid a hashtable lookup, except
|
||||
// that leads to a false positive assertion, see bug 1370674 comment 7.
|
||||
if (!mRegistrationInfos.Get(aKey, &data)) {
|
||||
data =
|
||||
mRegistrationInfos.Put(aKey, MakeUnique<RegistrationDataPerPrincipal>())
|
||||
.get();
|
||||
data = new RegistrationDataPerPrincipal();
|
||||
mRegistrationInfos.Put(aKey, data);
|
||||
}
|
||||
|
||||
return data->mJobQueues
|
||||
|
|
|
@ -73,8 +73,8 @@ SessionStorageManagerBase::GetOriginRecord(
|
|||
OriginKeyHashTable* table;
|
||||
if (!mOATable.Get(aOriginAttrs, &table)) {
|
||||
if (aMakeIfNeeded) {
|
||||
table =
|
||||
mOATable.Put(aOriginAttrs, MakeUnique<OriginKeyHashTable>()).get();
|
||||
table = new OriginKeyHashTable();
|
||||
mOATable.Put(aOriginAttrs, table);
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
|
@ -83,13 +83,13 @@ SessionStorageManagerBase::GetOriginRecord(
|
|||
OriginRecord* originRecord;
|
||||
if (!table->Get(aOriginKey, &originRecord)) {
|
||||
if (aMakeIfNeeded) {
|
||||
auto newOriginRecord = MakeUnique<OriginRecord>();
|
||||
originRecord = new OriginRecord();
|
||||
if (aCloneFrom) {
|
||||
newOriginRecord->mCache = aCloneFrom->Clone();
|
||||
originRecord->mCache = aCloneFrom->Clone();
|
||||
} else {
|
||||
newOriginRecord->mCache = new SessionStorageCache();
|
||||
originRecord->mCache = new SessionStorageCache();
|
||||
}
|
||||
originRecord = table->Put(aOriginKey, std::move(newOriginRecord)).get();
|
||||
table->Put(aOriginKey, originRecord);
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
|
|
|
@ -304,7 +304,7 @@ void StorageDBThread::SyncPreload(LocalStorageCacheBridge* aCache,
|
|||
// need to be flushed first.
|
||||
// Schedule preload for this cache as the first operation.
|
||||
nsresult rv =
|
||||
InsertDBOp(MakeUnique<DBOperation>(DBOperation::opPreloadUrgent, aCache));
|
||||
InsertDBOp(new DBOperation(DBOperation::opPreloadUrgent, aCache));
|
||||
|
||||
// LoadWait exits after LoadDone of the cache has been called.
|
||||
if (NS_SUCCEEDED(rv)) {
|
||||
|
@ -330,10 +330,12 @@ void StorageDBThread::GetOriginsHavingData(nsTArray<nsCString>* aOrigins) {
|
|||
}
|
||||
}
|
||||
|
||||
nsresult StorageDBThread::InsertDBOp(
|
||||
UniquePtr<StorageDBThread::DBOperation> aOperation) {
|
||||
nsresult StorageDBThread::InsertDBOp(StorageDBThread::DBOperation* aOperation) {
|
||||
MonitorAutoLock monitor(mThreadObserver->GetMonitor());
|
||||
|
||||
// Sentinel to don't forget to delete the operation when we exit early.
|
||||
UniquePtr<StorageDBThread::DBOperation> opScope(aOperation);
|
||||
|
||||
if (NS_FAILED(mStatus)) {
|
||||
MonitorAutoUnlock unlock(mThreadObserver->GetMonitor());
|
||||
aOperation->Finalize(mStatus);
|
||||
|
@ -375,11 +377,14 @@ nsresult StorageDBThread::InsertDBOp(
|
|||
case DBOperation::opGetUsage:
|
||||
if (aOperation->Type() == DBOperation::opPreloadUrgent) {
|
||||
SetHigherPriority(); // Dropped back after urgent preload execution
|
||||
mPreloads.InsertElementAt(0, aOperation.release());
|
||||
mPreloads.InsertElementAt(0, aOperation);
|
||||
} else {
|
||||
mPreloads.AppendElement(aOperation.release());
|
||||
mPreloads.AppendElement(aOperation);
|
||||
}
|
||||
|
||||
// DB operation adopted, don't delete it.
|
||||
Unused << opScope.release();
|
||||
|
||||
// Immediately start executing this.
|
||||
monitor.Notify();
|
||||
break;
|
||||
|
@ -387,7 +392,10 @@ nsresult StorageDBThread::InsertDBOp(
|
|||
default:
|
||||
// Update operations are first collected, coalesced and then flushed
|
||||
// after a short time.
|
||||
mPendingTasks.Add(std::move(aOperation));
|
||||
mPendingTasks.Add(aOperation);
|
||||
|
||||
// DB operation adopted, don't delete it.
|
||||
Unused << opScope.release();
|
||||
|
||||
ScheduleFlush();
|
||||
break;
|
||||
|
@ -1300,13 +1308,14 @@ bool StorageDBThread::PendingOperations::CheckForCoalesceOpportunity(
|
|||
}
|
||||
|
||||
void StorageDBThread::PendingOperations::Add(
|
||||
UniquePtr<StorageDBThread::DBOperation> aOperation) {
|
||||
StorageDBThread::DBOperation* aOperation) {
|
||||
// Optimize: when a key to remove has never been written to disk
|
||||
// just bypass this operation. A key is new when an operation scheduled
|
||||
// to write it to the database is of type opAddItem.
|
||||
if (CheckForCoalesceOpportunity(aOperation.get(), DBOperation::opAddItem,
|
||||
if (CheckForCoalesceOpportunity(aOperation, DBOperation::opAddItem,
|
||||
DBOperation::opRemoveItem)) {
|
||||
mUpdates.Remove(aOperation->Target());
|
||||
delete aOperation;
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1314,7 +1323,7 @@ void StorageDBThread::PendingOperations::Add(
|
|||
// written to disk, keep type of the operation to store it at opAddItem.
|
||||
// This allows optimization to just forget adding a new key when
|
||||
// it is removed from the storage before flush.
|
||||
if (CheckForCoalesceOpportunity(aOperation.get(), DBOperation::opAddItem,
|
||||
if (CheckForCoalesceOpportunity(aOperation, DBOperation::opAddItem,
|
||||
DBOperation::opUpdateItem)) {
|
||||
aOperation->mType = DBOperation::opAddItem;
|
||||
}
|
||||
|
@ -1323,7 +1332,7 @@ void StorageDBThread::PendingOperations::Add(
|
|||
// remove/set/remove on a previously existing key we have to change
|
||||
// opAddItem to opUpdateItem on the new operation when there is opRemoveItem
|
||||
// pending for the key.
|
||||
if (CheckForCoalesceOpportunity(aOperation.get(), DBOperation::opRemoveItem,
|
||||
if (CheckForCoalesceOpportunity(aOperation, DBOperation::opRemoveItem,
|
||||
DBOperation::opAddItem)) {
|
||||
aOperation->mType = DBOperation::opUpdateItem;
|
||||
}
|
||||
|
@ -1335,7 +1344,7 @@ void StorageDBThread::PendingOperations::Add(
|
|||
case DBOperation::opUpdateItem:
|
||||
case DBOperation::opRemoveItem:
|
||||
// Override any existing operation for the target (=scope+key).
|
||||
mUpdates.Put(aOperation->Target(), std::move(aOperation));
|
||||
mUpdates.Put(aOperation->Target(), aOperation);
|
||||
break;
|
||||
|
||||
// Clear operations
|
||||
|
@ -1372,14 +1381,14 @@ void StorageDBThread::PendingOperations::Add(
|
|||
iter.Remove();
|
||||
}
|
||||
|
||||
mClears.Put(aOperation->Target(), std::move(aOperation));
|
||||
mClears.Put(aOperation->Target(), aOperation);
|
||||
break;
|
||||
|
||||
case DBOperation::opClearAll:
|
||||
// Drop simply everything, this is a super-operation.
|
||||
mUpdates.Clear();
|
||||
mClears.Clear();
|
||||
mClears.Put(aOperation->Target(), std::move(aOperation));
|
||||
mClears.Put(aOperation->Target(), aOperation);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
|
|
@ -216,7 +216,7 @@ class StorageDBThread final {
|
|||
|
||||
// Method responsible for coalescing redundant update operations with the
|
||||
// same |Target()| or clear operations with the same or matching |Origin()|
|
||||
void Add(UniquePtr<DBOperation> aOperation);
|
||||
void Add(DBOperation* aOperation);
|
||||
|
||||
// True when there are some scheduled operations to flush on disk
|
||||
bool HasTasks() const;
|
||||
|
@ -330,7 +330,7 @@ class StorageDBThread final {
|
|||
|
||||
virtual void AsyncPreload(LocalStorageCacheBridge* aCache,
|
||||
bool aPriority = false) {
|
||||
InsertDBOp(MakeUnique<DBOperation>(
|
||||
InsertDBOp(new DBOperation(
|
||||
aPriority ? DBOperation::opPreloadUrgent : DBOperation::opPreload,
|
||||
aCache));
|
||||
}
|
||||
|
@ -339,46 +339,45 @@ class StorageDBThread final {
|
|||
bool aForce = false);
|
||||
|
||||
virtual void AsyncGetUsage(StorageUsageBridge* aUsage) {
|
||||
InsertDBOp(MakeUnique<DBOperation>(DBOperation::opGetUsage, aUsage));
|
||||
InsertDBOp(new DBOperation(DBOperation::opGetUsage, aUsage));
|
||||
}
|
||||
|
||||
virtual nsresult AsyncAddItem(LocalStorageCacheBridge* aCache,
|
||||
const nsAString& aKey,
|
||||
const nsAString& aValue) {
|
||||
return InsertDBOp(
|
||||
MakeUnique<DBOperation>(DBOperation::opAddItem, aCache, aKey, aValue));
|
||||
new DBOperation(DBOperation::opAddItem, aCache, aKey, aValue));
|
||||
}
|
||||
|
||||
virtual nsresult AsyncUpdateItem(LocalStorageCacheBridge* aCache,
|
||||
const nsAString& aKey,
|
||||
const nsAString& aValue) {
|
||||
return InsertDBOp(MakeUnique<DBOperation>(DBOperation::opUpdateItem, aCache,
|
||||
aKey, aValue));
|
||||
return InsertDBOp(
|
||||
new DBOperation(DBOperation::opUpdateItem, aCache, aKey, aValue));
|
||||
}
|
||||
|
||||
virtual nsresult AsyncRemoveItem(LocalStorageCacheBridge* aCache,
|
||||
const nsAString& aKey) {
|
||||
return InsertDBOp(
|
||||
MakeUnique<DBOperation>(DBOperation::opRemoveItem, aCache, aKey));
|
||||
return InsertDBOp(new DBOperation(DBOperation::opRemoveItem, aCache, aKey));
|
||||
}
|
||||
|
||||
virtual nsresult AsyncClear(LocalStorageCacheBridge* aCache) {
|
||||
return InsertDBOp(MakeUnique<DBOperation>(DBOperation::opClear, aCache));
|
||||
return InsertDBOp(new DBOperation(DBOperation::opClear, aCache));
|
||||
}
|
||||
|
||||
virtual void AsyncClearAll() {
|
||||
InsertDBOp(MakeUnique<DBOperation>(DBOperation::opClearAll));
|
||||
InsertDBOp(new DBOperation(DBOperation::opClearAll));
|
||||
}
|
||||
|
||||
virtual void AsyncClearMatchingOrigin(const nsACString& aOriginNoSuffix) {
|
||||
InsertDBOp(MakeUnique<DBOperation>(DBOperation::opClearMatchingOrigin,
|
||||
aOriginNoSuffix));
|
||||
InsertDBOp(
|
||||
new DBOperation(DBOperation::opClearMatchingOrigin, aOriginNoSuffix));
|
||||
}
|
||||
|
||||
virtual void AsyncClearMatchingOriginAttributes(
|
||||
const OriginAttributesPattern& aPattern) {
|
||||
InsertDBOp(MakeUnique<DBOperation>(
|
||||
DBOperation::opClearMatchingOriginAttributes, aPattern));
|
||||
InsertDBOp(new DBOperation(DBOperation::opClearMatchingOriginAttributes,
|
||||
aPattern));
|
||||
}
|
||||
|
||||
virtual void AsyncFlush();
|
||||
|
@ -444,7 +443,7 @@ class StorageDBThread final {
|
|||
|
||||
// Helper to direct an operation to one of the arrays above;
|
||||
// also checks IsOriginClearPending for preloads
|
||||
nsresult InsertDBOp(UniquePtr<DBOperation> aOperation);
|
||||
nsresult InsertDBOp(DBOperation* aOperation);
|
||||
|
||||
// Opens the database, first thing we do after start of the thread.
|
||||
nsresult OpenDatabaseConnection();
|
||||
|
|
|
@ -1463,11 +1463,12 @@ mozilla::ipc::IPCResult RecvPBackgroundLocalStorageCacheConstructor(
|
|||
gLocalStorageCacheParents = new LocalStorageCacheParentHashtable();
|
||||
}
|
||||
|
||||
gLocalStorageCacheParents
|
||||
->GetOrInsertWith(
|
||||
aOriginKey,
|
||||
[] { return MakeUnique<nsTArray<LocalStorageCacheParent*>>(); })
|
||||
->AppendElement(actor);
|
||||
nsTArray<LocalStorageCacheParent*>* array;
|
||||
if (!gLocalStorageCacheParents->Get(aOriginKey, &array)) {
|
||||
array = new nsTArray<LocalStorageCacheParent*>();
|
||||
gLocalStorageCacheParents->Put(aOriginKey, array);
|
||||
}
|
||||
array->AppendElement(actor);
|
||||
|
||||
// We are currently trusting the content process not to lie to us. It is
|
||||
// future work to consult the ClientManager to determine whether this is a
|
||||
|
|
|
@ -55,12 +55,10 @@ enum SVGTag {
|
|||
void SVGElementFactory::Init() {
|
||||
sTagAtomTable = new TagAtomTable(64);
|
||||
|
||||
#define SVG_TAG(_tag, _classname) \
|
||||
sTagAtomTable->Put(nsGkAtoms::_tag, SVGContentCreatorFunction( \
|
||||
NS_NewSVG##_classname##Element));
|
||||
#define SVG_FROM_PARSER_TAG(_tag, _classname) \
|
||||
sTagAtomTable->Put(nsGkAtoms::_tag, SVGContentCreatorFunction( \
|
||||
NS_NewSVG##_classname##Element));
|
||||
#define SVG_TAG(_tag, _classname) \
|
||||
sTagAtomTable->Put(nsGkAtoms::_tag, NS_NewSVG##_classname##Element);
|
||||
#define SVG_FROM_PARSER_TAG(_tag, _classname) \
|
||||
sTagAtomTable->Put(nsGkAtoms::_tag, NS_NewSVG##_classname##Element);
|
||||
#include "SVGTagList.h"
|
||||
#undef SVG_TAG
|
||||
#undef SVG_FROM_PARSER_TAG
|
||||
|
|
|
@ -619,12 +619,12 @@ PersistNodeFixup::PersistNodeFixup(WebBrowserPersistLocalDocument* aParent,
|
|||
NS_ENSURE_SUCCESS_VOID(rv);
|
||||
for (uint32_t i = 0; i < mapSize; ++i) {
|
||||
nsAutoCString urlFrom;
|
||||
auto urlTo = MakeUnique<nsCString>();
|
||||
auto* urlTo = new nsCString();
|
||||
|
||||
rv = aMap->GetURIMapping(i, urlFrom, *urlTo);
|
||||
MOZ_ASSERT(NS_SUCCEEDED(rv));
|
||||
if (NS_SUCCEEDED(rv)) {
|
||||
mMap.Put(urlFrom, std::move(urlTo));
|
||||
mMap.Put(urlFrom, urlTo);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -572,7 +572,7 @@ nsresult nsWebBrowserPersist::StartUpload(nsIInputStream* aInputStream,
|
|||
|
||||
// add this to the upload list
|
||||
nsCOMPtr<nsISupports> keyPtr = do_QueryInterface(destChannel);
|
||||
mUploadList.Put(keyPtr, MakeUnique<UploadData>(aDestinationURI));
|
||||
mUploadList.Put(keyPtr, new UploadData(aDestinationURI));
|
||||
|
||||
return NS_OK;
|
||||
}
|
||||
|
@ -1507,7 +1507,7 @@ nsresult nsWebBrowserPersist::SaveChannelInternal(nsIChannel* aChannel,
|
|||
MutexAutoLock lock(mOutputMapMutex);
|
||||
// Add the output transport to the output map with the channel as the key
|
||||
nsCOMPtr<nsISupports> keyPtr = do_QueryInterface(aChannel);
|
||||
mOutputMap.Put(keyPtr, MakeUnique<OutputData>(aFile, mURI, aCalcFileExt));
|
||||
mOutputMap.Put(keyPtr, new OutputData(aFile, mURI, aCalcFileExt));
|
||||
|
||||
return NS_OK;
|
||||
}
|
||||
|
@ -2424,7 +2424,7 @@ nsresult nsWebBrowserPersist::FixRedirectedChannelEntry(
|
|||
// Store data again with new channel unless told to ignore redirects.
|
||||
if (!(mPersistFlags & PERSIST_FLAGS_IGNORE_REDIRECTED_DATA)) {
|
||||
nsCOMPtr<nsISupports> keyPtr = do_QueryInterface(aNewChannel);
|
||||
mOutputMap.Put(keyPtr, std::move(outputData));
|
||||
mOutputMap.Put(keyPtr, outputData.release());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2743,7 +2743,7 @@ nsresult nsWebBrowserPersist::MakeAndStoreLocalFilenameInURIMap(
|
|||
|
||||
if (aNeedsPersisting) mCurrentThingsToPersist++;
|
||||
|
||||
mURIMap.Put(spec, UniquePtr<URIData>(data));
|
||||
mURIMap.Put(spec, data);
|
||||
if (aData) {
|
||||
*aData = data;
|
||||
}
|
||||
|
|
|
@@ -194,7 +194,7 @@ nsresult nsXULPrototypeCache::PutScript(nsIURI* aURI,
   }
 #endif
 
-  mScriptTable.Put(aURI, JS::Heap<JSScript*>{aScriptObject});
+  mScriptTable.Put(aURI, aScriptObject);
 
   return NS_OK;
 }
@ -415,10 +415,9 @@ mozilla::ipc::IPCResult CompositorBridgeChild::RecvSharedCompositorFrameMetrics(
|
|||
const mozilla::ipc::SharedMemoryBasic::Handle& metrics,
|
||||
const CrossProcessMutexHandle& handle, const LayersId& aLayersId,
|
||||
const uint32_t& aAPZCId) {
|
||||
auto data =
|
||||
MakeUnique<SharedFrameMetricsData>(metrics, handle, aLayersId, aAPZCId);
|
||||
const auto& viewID = data->GetViewID();
|
||||
mFrameMetricsTable.Put(viewID, std::move(data));
|
||||
SharedFrameMetricsData* data =
|
||||
new SharedFrameMetricsData(metrics, handle, aLayersId, aAPZCId);
|
||||
mFrameMetricsTable.Put(data->GetViewID(), data);
|
||||
return IPC_OK();
|
||||
}
|
||||
|
||||
|
|
|
@ -101,22 +101,21 @@ void AsyncImagePipelineManager::AddPipeline(const wr::PipelineId& aPipelineId,
|
|||
if (mDestroyed) {
|
||||
return;
|
||||
}
|
||||
uint64_t id = wr::AsUint64(aPipelineId);
|
||||
|
||||
mPipelineTexturesHolders.WithEntryHandle(
|
||||
wr::AsUint64(aPipelineId), [&](auto&& holder) {
|
||||
if (holder) {
|
||||
// This could happen during tab move between different windows.
|
||||
// Previously removed holder could be still alive for waiting
|
||||
// destroyed.
|
||||
MOZ_ASSERT(holder.Data()->mDestroyedEpoch.isSome());
|
||||
holder.Data()->mDestroyedEpoch = Nothing(); // Revive holder
|
||||
holder.Data()->mWrBridge = aWrBridge;
|
||||
return;
|
||||
}
|
||||
|
||||
holder.Insert(MakeUnique<PipelineTexturesHolder>())->mWrBridge =
|
||||
aWrBridge;
|
||||
});
|
||||
PipelineTexturesHolder* holder =
|
||||
mPipelineTexturesHolders.Get(wr::AsUint64(aPipelineId));
|
||||
if (holder) {
|
||||
// This could happen during tab move between different windows.
|
||||
// Previously removed holder could be still alive for waiting destroyed.
|
||||
MOZ_ASSERT(holder->mDestroyedEpoch.isSome());
|
||||
holder->mDestroyedEpoch = Nothing(); // Revive holder
|
||||
holder->mWrBridge = aWrBridge;
|
||||
return;
|
||||
}
|
||||
holder = new PipelineTexturesHolder();
|
||||
holder->mWrBridge = aWrBridge;
|
||||
mPipelineTexturesHolders.Put(id, holder);
|
||||
}
|
||||
|
||||
void AsyncImagePipelineManager::RemovePipeline(
|
||||
|
@ -162,10 +161,10 @@ void AsyncImagePipelineManager::AddAsyncImagePipeline(
|
|||
MOZ_ASSERT(aImageHost);
|
||||
uint64_t id = wr::AsUint64(aPipelineId);
|
||||
|
||||
MOZ_ASSERT(!mAsyncImagePipelines.Contains(id));
|
||||
auto holder = MakeUnique<AsyncImagePipeline>();
|
||||
MOZ_ASSERT(!mAsyncImagePipelines.Get(id));
|
||||
AsyncImagePipeline* holder = new AsyncImagePipeline();
|
||||
holder->mImageHost = aImageHost;
|
||||
mAsyncImagePipelines.Put(id, std::move(holder));
|
||||
mAsyncImagePipelines.Put(id, holder);
|
||||
AddPipeline(aPipelineId, /* aWrBridge */ nullptr);
|
||||
}
|
||||
|
||||
|
|
|
@ -373,17 +373,20 @@ class BlurCache final : public nsExpirationTracker<BlurCacheData, 4> {
|
|||
return blur;
|
||||
}
|
||||
|
||||
void RegisterEntry(UniquePtr<BlurCacheData> aValue) {
|
||||
nsresult rv = AddObject(aValue.get());
|
||||
// Returns true if we successfully register the blur in the cache, false
|
||||
// otherwise.
|
||||
bool RegisterEntry(BlurCacheData* aValue) {
|
||||
nsresult rv = AddObject(aValue);
|
||||
if (NS_FAILED(rv)) {
|
||||
// We are OOM, and we cannot track this object. We don't want stall
|
||||
// entries in the hash table (since the expiration tracker is responsible
|
||||
// for removing the cache entries), so we avoid putting that entry in the
|
||||
// table, which is a good thing considering we are short on memory
|
||||
// table, which is a good things considering we are short on memory
|
||||
// anyway, we probably don't want to retain things.
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
mHashEntries.Put(aValue->mKey, std::move(aValue));
|
||||
mHashEntries.Put(aValue->mKey, aValue);
|
||||
return true;
|
||||
}
|
||||
|
||||
protected:
|
||||
|
@ -443,10 +446,13 @@ static void CacheBlur(DrawTarget* aDT, const IntSize& aMinSize,
|
|||
const RectCornerRadii* aCornerRadii,
|
||||
const sRGBColor& aShadowColor,
|
||||
const IntMargin& aBlurMargin, SourceSurface* aBoxShadow) {
|
||||
gBlurCache->RegisterEntry(MakeUnique<BlurCacheData>(
|
||||
aBoxShadow, aBlurMargin,
|
||||
BlurCacheKey(aMinSize, aBlurRadius, aCornerRadii, aShadowColor,
|
||||
aDT->GetBackendType())));
|
||||
BlurCacheKey key(aMinSize, aBlurRadius, aCornerRadii, aShadowColor,
|
||||
aDT->GetBackendType());
|
||||
BlurCacheData* data =
|
||||
new BlurCacheData(aBoxShadow, aBlurMargin, std::move(key));
|
||||
if (!gBlurCache->RegisterEntry(data)) {
|
||||
delete data;
|
||||
}
|
||||
}
|
||||
|
||||
// Blurs a small surface and creates the colored box shadow.
|
||||
|
@ -995,9 +1001,11 @@ static void CacheInsetBlur(const IntSize& aMinOuterSize,
|
|||
BlurCacheKey key(aMinOuterSize, aMinInnerSize, aBlurRadius, aCornerRadii,
|
||||
aShadowColor, isInsetBlur, aBackendType);
|
||||
IntMargin blurMargin(0, 0, 0, 0);
|
||||
|
||||
gBlurCache->RegisterEntry(
|
||||
MakeUnique<BlurCacheData>(aBoxShadow, blurMargin, std::move(key)));
|
||||
BlurCacheData* data =
|
||||
new BlurCacheData(aBoxShadow, blurMargin, std::move(key));
|
||||
if (!gBlurCache->RegisterEntry(data)) {
|
||||
delete data;
|
||||
}
|
||||
}
|
||||
|
||||
already_AddRefed<SourceSurface> gfxAlphaBoxBlur::GetInsetBlur(
|
||||
|
|
|
@ -1876,12 +1876,10 @@ nsresult gfxDWriteFontList::GetFontSubstitutes() {
|
|||
}
|
||||
if (SharedFontList()->FindFamily(actualFontName,
|
||||
/*aPrimaryNameOnly*/ true)) {
|
||||
mSubstitutions.Put(substituteName,
|
||||
MakeUnique<nsCString>(actualFontName));
|
||||
mSubstitutions.Put(substituteName, new nsCString(actualFontName));
|
||||
} else if (mSubstitutions.Get(actualFontName)) {
|
||||
mSubstitutions.Put(
|
||||
substituteName,
|
||||
MakeUnique<nsCString>(*mSubstitutions.Get(actualFontName)));
|
||||
mSubstitutions.Put(substituteName,
|
||||
new nsCString(*mSubstitutions.Get(actualFontName)));
|
||||
} else {
|
||||
mNonExistingFonts.AppendElement(substituteName);
|
||||
}
|
||||
|
@ -1927,8 +1925,7 @@ void gfxDWriteFontList::GetDirectWriteSubstitutes() {
|
|||
BuildKeyNameFromFontName(actualFontName);
|
||||
if (SharedFontList()->FindFamily(actualFontName,
|
||||
/*aPrimaryNameOnly*/ true)) {
|
||||
mSubstitutions.Put(substituteName,
|
||||
MakeUnique<nsCString>(actualFontName));
|
||||
mSubstitutions.Put(substituteName, new nsCString(actualFontName));
|
||||
} else {
|
||||
mNonExistingFonts.AppendElement(substituteName);
|
||||
}
|
||||
|
|
|
@ -638,9 +638,8 @@ void gfxFT2FontList::CollectInitData(const FontListEntry& aFLE,
|
|||
BuildKeyNameFromFontName(key);
|
||||
auto faceList = mFaceInitData.Get(key);
|
||||
if (!faceList) {
|
||||
faceList =
|
||||
mFaceInitData.Put(key, MakeUnique<nsTArray<fontlist::Face::InitData>>())
|
||||
.get();
|
||||
faceList = new nsTArray<fontlist::Face::InitData>;
|
||||
mFaceInitData.Put(key, faceList);
|
||||
mFamilyInitData.AppendElement(
|
||||
fontlist::Family::InitData{key, aFLE.familyName()});
|
||||
}
|
||||
|
@ -1736,7 +1735,7 @@ gfxFontEntry* gfxFT2FontList::LookupLocalFont(const nsACString& aFontName,
|
|||
|
||||
// if so, iterate over faces in this family to see if there is a match
|
||||
if (family.Equals(fullNameFamily, nsCaseInsensitiveCStringComparator)) {
|
||||
nsTArray<RefPtr<gfxFontEntry>>& fontList = fontFamily->GetFontList();
|
||||
nsTArray<RefPtr<gfxFontEntry> >& fontList = fontFamily->GetFontList();
|
||||
int index, len = fontList.Length();
|
||||
for (index = 0; index < len; index++) {
|
||||
gfxFontEntry* fe = fontList[index];
|
||||
|
|
|
@ -2178,79 +2178,73 @@ gfxPlatformFontList::PrefFontList* gfxFcPlatformFontList::FindGenericFamilies(
genericLang.Append(fcLang);

// try to get the family from the cache
return mGenericMappings.WithEntryHandle(
genericLang, [&](auto&& entry) -> PrefFontList* {
if (!entry) {
// if not found, ask fontconfig to pick the appropriate font
RefPtr<FcPattern> genericPattern = dont_AddRef(FcPatternCreate());
FcPatternAddString(genericPattern, FC_FAMILY,
ToFcChar8Ptr(aGeneric.get()));
PrefFontList* prefFonts = mGenericMappings.Get(genericLang);
if (prefFonts) {
return prefFonts;
}

// -- prefer scalable fonts
FcPatternAddBool(genericPattern, FC_SCALABLE, FcTrue);
// if not found, ask fontconfig to pick the appropriate font
RefPtr<FcPattern> genericPattern = dont_AddRef(FcPatternCreate());
FcPatternAddString(genericPattern, FC_FAMILY, ToFcChar8Ptr(aGeneric.get()));

// -- add the lang to the pattern
if (!fcLang.IsEmpty()) {
FcPatternAddString(genericPattern, FC_LANG,
ToFcChar8Ptr(fcLang.get()));
// -- prefer scalable fonts
FcPatternAddBool(genericPattern, FC_SCALABLE, FcTrue);

// -- add the lang to the pattern
if (!fcLang.IsEmpty()) {
FcPatternAddString(genericPattern, FC_LANG, ToFcChar8Ptr(fcLang.get()));
}

// -- perform substitutions
FcConfigSubstitute(nullptr, genericPattern, FcMatchPattern);
FcDefaultSubstitute(genericPattern);

// -- sort to get the closest matches
FcResult result;
UniquePtr<FcFontSet> faces(
FcFontSort(nullptr, genericPattern, FcFalse, nullptr, &result));

if (!faces) {
return nullptr;
}

// -- select the fonts to be used for the generic
prefFonts = new PrefFontList; // can be empty but in practice won't happen
uint32_t limit = gfxPlatformGtk::GetPlatform()->MaxGenericSubstitions();
bool foundFontWithLang = false;
for (int i = 0; i < faces->nfont; i++) {
FcPattern* font = faces->fonts[i];
FcChar8* mappedGeneric = nullptr;

FcPatternGetString(font, FC_FAMILY, 0, &mappedGeneric);
if (mappedGeneric) {
nsAutoCString mappedGenericName(ToCharPtr(mappedGeneric));
AutoTArray<FamilyAndGeneric, 1> genericFamilies;
if (gfxPlatformFontList::FindAndAddFamilies(
StyleGenericFontFamily::None, mappedGenericName, &genericFamilies,
FindFamiliesFlags(0))) {
MOZ_ASSERT(genericFamilies.Length() == 1, "expected a single family");
if (!prefFonts->Contains(genericFamilies[0].mFamily)) {
prefFonts->AppendElement(genericFamilies[0].mFamily);
bool foundLang = !fcLang.IsEmpty() &&
PatternHasLang(font, ToFcChar8Ptr(fcLang.get()));
foundFontWithLang = foundFontWithLang || foundLang;
// check to see if the list is full
if (prefFonts->Length() >= limit) {
break;
}

// -- perform substitutions
FcConfigSubstitute(nullptr, genericPattern, FcMatchPattern);
FcDefaultSubstitute(genericPattern);

// -- sort to get the closest matches
FcResult result;
UniquePtr<FcFontSet> faces(
FcFontSort(nullptr, genericPattern, FcFalse, nullptr, &result));

if (!faces) {
return nullptr;
}

// -- select the fonts to be used for the generic
auto prefFonts = MakeUnique<PrefFontList>(); // can be empty but in
// practice won't happen
uint32_t limit =
gfxPlatformGtk::GetPlatform()->MaxGenericSubstitions();
bool foundFontWithLang = false;
for (int i = 0; i < faces->nfont; i++) {
FcPattern* font = faces->fonts[i];
FcChar8* mappedGeneric = nullptr;

FcPatternGetString(font, FC_FAMILY, 0, &mappedGeneric);
if (mappedGeneric) {
nsAutoCString mappedGenericName(ToCharPtr(mappedGeneric));
AutoTArray<FamilyAndGeneric, 1> genericFamilies;
if (gfxPlatformFontList::FindAndAddFamilies(
StyleGenericFontFamily::None, mappedGenericName,
&genericFamilies, FindFamiliesFlags(0))) {
MOZ_ASSERT(genericFamilies.Length() == 1,
"expected a single family");
if (!prefFonts->Contains(genericFamilies[0].mFamily)) {
prefFonts->AppendElement(genericFamilies[0].mFamily);
bool foundLang =
!fcLang.IsEmpty() &&
PatternHasLang(font, ToFcChar8Ptr(fcLang.get()));
foundFontWithLang = foundFontWithLang || foundLang;
// check to see if the list is full
if (prefFonts->Length() >= limit) {
break;
}
}
}
}
}

// if no font in the list matches the lang, trim all but the first one
if (!prefFonts->IsEmpty() && !foundFontWithLang) {
prefFonts->TruncateLength(1);
}

entry.Insert(std::move(prefFonts));
}
return entry.Data().get();
});
}
}
}

// if no font in the list matches the lang, trim all but the first one
if (!prefFonts->IsEmpty() && !foundFontWithLang) {
prefFonts->TruncateLength(1);
}

mGenericMappings.Put(genericLang, prefFonts);
return prefFonts;
}

bool gfxFcPlatformFontList::PrefFontListsUseOnlyGenerics() {

@ -132,17 +132,20 @@ class GradientCache final : public nsExpirationTracker<GradientCacheData, 4> {
return gradient;
}

void RegisterEntry(UniquePtr<GradientCacheData> aValue) {
nsresult rv = AddObject(aValue.get());
// Returns true if we successfully register the gradient in the cache, false
// otherwise.
bool RegisterEntry(GradientCacheData* aValue) {
nsresult rv = AddObject(aValue);
if (NS_FAILED(rv)) {
// We are OOM, and we cannot track this object. We don't want stall
// entries in the hash table (since the expiration tracker is responsible
// for removing the cache entries), so we avoid putting that entry in the
// table, which is a good thing considering we are short on memory
// table, which is a good things considering we are short on memory
// anyway, we probably don't want to retain things.
return;
return false;
}
mHashEntries.Put(aValue->mKey, std::move(aValue));
mHashEntries.Put(aValue->mKey, aValue);
return true;
}

protected:

@ -187,8 +190,11 @@ already_AddRefed<GradientStops> gfxGradientCache::GetOrCreateGradientStops(
if (!gs) {
return nullptr;
}
gGradientCache->RegisterEntry(MakeUnique<GradientCacheData>(
gs, GradientCacheKey(aStops, aExtend, aDT->GetBackendType())));
GradientCacheData* cached = new GradientCacheData(
gs, GradientCacheKey(aStops, aExtend, aDT->GetBackendType()));
if (!gGradientCache->RegisterEntry(cached)) {
delete cached;
}
}
return gs.forget();
}

@ -107,28 +107,23 @@ gfxSVGGlyphsDocument* gfxSVGGlyphs::FindOrCreateGlyphsDocument(
return nullptr;
}

return mGlyphDocs.WithEntryHandle(
entry->mDocOffset, [&](auto&& glyphDocsEntry) -> gfxSVGGlyphsDocument* {
if (!glyphDocsEntry) {
unsigned int length;
const uint8_t* data =
(const uint8_t*)hb_blob_get_data(mSVGData, &length);
if (entry->mDocOffset > 0 && uint64_t(mHeader->mDocIndexOffset) +
entry->mDocOffset +
entry->mDocLength <=
length) {
return glyphDocsEntry
.Insert(MakeUnique<gfxSVGGlyphsDocument>(
data + mHeader->mDocIndexOffset + entry->mDocOffset,
entry->mDocLength, this))
.get();
}
gfxSVGGlyphsDocument* result = mGlyphDocs.Get(entry->mDocOffset);

return nullptr;
}
if (!result) {
unsigned int length;
const uint8_t* data = (const uint8_t*)hb_blob_get_data(mSVGData, &length);
if (entry->mDocOffset > 0 && uint64_t(mHeader->mDocIndexOffset) +
entry->mDocOffset +
entry->mDocLength <=
length) {
result = new gfxSVGGlyphsDocument(
data + mHeader->mDocIndexOffset + entry->mDocOffset,
entry->mDocLength, this);
mGlyphDocs.Put(entry->mDocOffset, result);
}
}

return glyphDocsEntry.Data().get();
});
return result;
}

nsresult gfxSVGGlyphsDocument::SetupPresentation() {

@ -177,18 +177,16 @@ void ModifyWakeLock(const nsAString& aTopic, hal::WakeLockControl aLockAdjust,
return;
}

ProcessLockTable* table = sLockTable->Get(aTopic);
LockCount processCount;
LockCount totalCount;
ProcessLockTable* const table =
sLockTable->WithEntryHandle(aTopic, [&](auto&& entry) {
if (!entry) {
entry.Insert(MakeUnique<ProcessLockTable>());
} else {
entry.Data()->Get(aProcessID, &processCount);
CountWakeLocks(entry.Data().get(), &totalCount);
}
return entry.Data().get();
});
if (!table) {
table = new ProcessLockTable();
sLockTable->Put(aTopic, table);
} else {
table->Get(aProcessID, &processCount);
CountWakeLocks(table, &totalCount);
}

MOZ_ASSERT(processCount.numLocks >= processCount.numHidden);
MOZ_ASSERT(aLockAdjust >= 0 || processCount.numLocks > 0);

@ -570,8 +570,8 @@ Result<Ok, nsresult> ScriptPreloader::InitCacheInternal(
script->mReadyToExecute = true;
}

const auto& cachePath = script->mCachePath;
mScripts.Put(cachePath, std::move(script));
mScripts.Put(script->mCachePath, script.get());
Unused << script.release();
}

if (buf.error()) {

@ -1311,7 +1311,7 @@ nsresult mozJSComponentLoader::Import(JSContext* aCx,
return NS_ERROR_UNEXPECTED;
}

mLocations.Put(newEntry->resolvedURL, MakeUnique<nsCString>(info.Key()));
mLocations.Put(newEntry->resolvedURL, new nsCString(info.Key()));

RootedValue exception(aCx);
{

@ -1363,7 +1363,7 @@ nsresult mozJSComponentLoader::Import(JSContext* aCx,

// Cache this module for later
if (newEntry) {
mImports.Put(info.Key(), std::move(newEntry));
mImports.Put(info.Key(), newEntry.release());
}

return NS_OK;

@ -479,8 +479,7 @@ void SharedStyleSheetCache::InsertIntoCompleteCacheIfNeeded(
}

mCompleteSheets.Put(
key, CompleteSheet{aData.mExpirationTime, std::move(counters),
std::move(sheet)});
key, {aData.mExpirationTime, std::move(counters), std::move(sheet)});
}
}

@ -2583,7 +2583,7 @@ nsPrefBranch::GetChildList(const char* aStartingAt,
NS_IMETHODIMP
nsPrefBranch::AddObserverImpl(const nsACString& aDomain, nsIObserver* aObserver,
bool aHoldWeak) {
UniquePtr<PrefCallback> pCallback;
PrefCallback* pCallback;

NS_ENSURE_ARG(aObserver);

@ -2600,25 +2600,26 @@ nsPrefBranch::AddObserverImpl(const nsACString& aDomain, nsIObserver* aObserver,
}

// Construct a PrefCallback with a weak reference to the observer.
pCallback = MakeUnique<PrefCallback>(prefName, weakRefFactory, this);
pCallback = new PrefCallback(prefName, weakRefFactory, this);

} else {
// Construct a PrefCallback with a strong reference to the observer.
pCallback = MakeUnique<PrefCallback>(prefName, aObserver, this);
pCallback = new PrefCallback(prefName, aObserver, this);
}

mObservers.WithEntryHandle(pCallback.get(), [&](auto&& p) {
mObservers.WithEntryHandle(pCallback, [&](auto&& p) {
if (p) {
NS_WARNING("Ignoring duplicate observer.");
delete pCallback;
} else {
p.Insert(UniquePtr<PrefCallback>{pCallback});

// We must pass a fully qualified preference name to the callback
// aDomain == nullptr is the only possible failure, and we trapped it with
// NS_ENSURE_ARG above.
Preferences::RegisterCallback(NotifyObserver, prefName, pCallback.get(),
Preferences::RegisterCallback(NotifyObserver, prefName, pCallback,
Preferences::PrefixMatch,
/* isPriority */ false);

p.Insert(std::move(pCallback));
}
});

@ -205,20 +205,17 @@ nsresult SSLTokensCache::Put(const nsACString& aKey, const uint8_t* aToken,
return rv;
}

TokenCacheRecord* const rec =
gInstance->mTokenCacheRecords.WithEntryHandle(aKey, [&](auto&& entry) {
if (!entry) {
auto rec = MakeUnique<TokenCacheRecord>();
rec->mKey = aKey;
gInstance->mExpirationArray.AppendElement(rec.get());
entry.Insert(std::move(rec));
} else {
gInstance->mCacheSize -= entry.Data()->Size();
entry.Data()->Reset();
}
TokenCacheRecord* rec = nullptr;

return entry.Data().get();
});
if (!gInstance->mTokenCacheRecords.Get(aKey, &rec)) {
rec = new TokenCacheRecord();
rec->mKey = aKey;
gInstance->mTokenCacheRecords.Put(aKey, rec);
gInstance->mExpirationArray.AppendElement(rec);
} else {
gInstance->mCacheSize -= rec->Size();
rec->Reset();
}

rec->mExpirationTime = aExpirationTime;
MOZ_ASSERT(rec->mToken.IsEmpty());

@ -1263,7 +1263,7 @@ nsresult nsOfflineCacheDevice::InitActiveCaches() {
statement->GetUTF8String(1, clientID);

mActiveCaches.PutEntry(clientID);
mActiveCachesByGroup.Put(group, MakeUnique<nsCString>(clientID));
mActiveCachesByGroup.Put(group, new nsCString(clientID));

rv = statement->ExecuteStep(&hasRows);
NS_ENSURE_SUCCESS(rv, rv);

@ -2536,7 +2536,7 @@ nsresult nsOfflineCacheDevice::ActivateCache(const nsACString& group,

if (!clientID.IsEmpty()) {
mActiveCaches.PutEntry(clientID);
mActiveCachesByGroup.Put(group, MakeUnique<nsCString>(clientID));
mActiveCachesByGroup.Put(group, new nsCString(clientID));
}

return NS_OK;

@ -2195,9 +2195,13 @@ void CacheFile::QueueChunkListener(uint32_t aIndex,
}
item->mCallback = aCallback;

mChunkListeners
.GetOrInsertWith(aIndex, [] { return MakeUnique<ChunkListeners>(); })
->mItems.AppendElement(item);
ChunkListeners* listeners;
if (!mChunkListeners.Get(aIndex, &listeners)) {
listeners = new ChunkListeners();
mChunkListeners.Put(aIndex, listeners);
}

listeners->mItems.AppendElement(item);
}

nsresult CacheFile::NotifyChunkListeners(uint32_t aIndex, nsresult aResult,

@ -1192,11 +1192,8 @@ void CacheStorageService::RecordMemoryOnlyEntry(CacheEntry* aEntry,
return;
}

entries =
sGlobalEntryTables
->Put(memoryStorageID,
MakeUnique<CacheEntryTable>(CacheEntryTable::MEMORY_ONLY))
.get();
entries = new CacheEntryTable(CacheEntryTable::MEMORY_ONLY);
sGlobalEntryTables->Put(memoryStorageID, entries);
LOG((" new memory-only storage table for %s", memoryStorageID.get()));
}

@ -1563,17 +1560,13 @@ nsresult CacheStorageService::AddStorageEntry(
NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED);

// Ensure storage table
CacheEntryTable* const entries =
sGlobalEntryTables
->GetOrInsertWith(
aContextKey,
[&aContextKey] {
LOG((" new storage entries table for context '%s'",
aContextKey.BeginReading()));
return MakeUnique<CacheEntryTable>(
CacheEntryTable::ALL_ENTRIES);
})
.get();
CacheEntryTable* entries;
if (!sGlobalEntryTables->Get(aContextKey, &entries)) {
entries = new CacheEntryTable(CacheEntryTable::ALL_ENTRIES);
sGlobalEntryTables->Put(aContextKey, entries);
LOG((" new storage entries table for context '%s'",
aContextKey.BeginReading()));
}

bool entryExists = entries->Get(entryKey, getter_AddRefs(entry));

@ -1418,10 +1418,13 @@ nsDNSService::ReportFailedSVCDomainName(const nsACString& aOwnerName,
const nsACString& aSVCDomainName) {
MutexAutoLock lock(mLock);

mFailedSVCDomainNames
.GetOrInsertWith(aOwnerName,
[] { return MakeUnique<nsTArray<nsCString>>(1); })
->AppendElement(aSVCDomainName);
nsTArray<nsCString>* failedList = mFailedSVCDomainNames.Get(aOwnerName);
if (!failedList) {
failedList = new nsTArray<nsCString>(1);
mFailedSVCDomainNames.Put(aOwnerName, failedList);
}

failedList->AppendElement(aSVCDomainName);
return NS_OK;
}

@ -56,14 +56,11 @@ void PendingTransactionQueue::InsertTransactionNormal(
info->Transaction()->TopLevelOuterContentWindowId()));

uint64_t windowId = TabIdForQueuing(info->Transaction());
nsTArray<RefPtr<PendingTransactionInfo>>* const infoArray =
mPendingTransactionTable
.GetOrInsertWith(
windowId,
[] {
return MakeUnique<nsTArray<RefPtr<PendingTransactionInfo>>>();
})
.get();
nsTArray<RefPtr<PendingTransactionInfo>>* infoArray;
if (!mPendingTransactionTable.Get(windowId, &infoArray)) {
infoArray = new nsTArray<RefPtr<PendingTransactionInfo>>();
mPendingTransactionTable.Put(windowId, infoArray);
}

InsertTransactionSorted(*infoArray, info, aInsertAsFirstForTheSamePriority);
}

@ -272,7 +272,11 @@ nsPreflightCache::CacheEntry* nsPreflightCache::GetEntry(

// This is a new entry, allocate and insert into the table now so that any
// failures don't cause items to be removed from a full cache.
auto newEntry = MakeUnique<CacheEntry>(key);
CacheEntry* newEntry = new CacheEntry(key);
if (!newEntry) {
NS_WARNING("Failed to allocate new cache entry!");
return nullptr;
}

NS_ASSERTION(mTable.Count() <= PREFLIGHT_CACHE_SIZE,
"Something is borked, too many entries in the cache!");

@ -306,10 +310,10 @@ nsPreflightCache::CacheEntry* nsPreflightCache::GetEntry(
}
}

auto* newEntryWeakRef = mTable.Put(key, std::move(newEntry)).get();
mList.insertFront(newEntryWeakRef);
mTable.Put(key, newEntry);
mList.insertFront(newEntry);

return newEntryWeakRef;
return newEntry;
}

void nsPreflightCache::RemoveEntries(

@ -122,15 +122,14 @@ nsresult nsHttpAuthCache::SetAuthEntry(const char* scheme, const char* host,

if (!node) {
// create a new entry node and set the given entry
auto node = UniquePtr<nsHttpAuthNode>(new nsHttpAuthNode);
LOG((" new nsHttpAuthNode %p for key='%s'", node.get(), key.get()));
node = new nsHttpAuthNode();
LOG((" new nsHttpAuthNode %p for key='%s'", node, key.get()));
rv = node->SetAuthEntry(path, realm, creds, challenge, ident, metadata);
if (NS_FAILED(rv)) {
return rv;
}

mDB.Put(key, std::move(node));
return NS_OK;
if (NS_FAILED(rv))
delete node;
else
mDB.Put(key, node);
return rv;
}

return node->SetAuthEntry(path, realm, creds, challenge, ident, metadata);

@ -803,16 +803,15 @@ void nsHttpConnectionMgr::UpdateCoalescingForNewConn(
"UpdateCoalescingForNewConn() registering newConn %p %s under key %s\n",
newConn, newConn->ConnectionInfo()->HashKey().get(),
ent->mCoalescingKeys[i].get()));

mCoalescingHash
.GetOrInsertWith(
ent->mCoalescingKeys[i],
[] {
LOG(("UpdateCoalescingForNewConn() need new list element\n"));
return MakeUnique<nsTArray<nsWeakPtr>>(1);
})
->AppendElement(do_GetWeakReference(
static_cast<nsISupportsWeakReference*>(newConn)));
nsTArray<nsWeakPtr>* listOfWeakConns =
mCoalescingHash.Get(ent->mCoalescingKeys[i]);
if (!listOfWeakConns) {
LOG(("UpdateCoalescingForNewConn() need new list element\n"));
listOfWeakConns = new nsTArray<nsWeakPtr>(1);
mCoalescingHash.Put(ent->mCoalescingKeys[i], listOfWeakConns);
}
listOfWeakConns->AppendElement(
do_GetWeakReference(static_cast<nsISupportsWeakReference*>(newConn)));
}

// this is a new connection that can be coalesced onto. hooray!

@ -3344,11 +3343,13 @@ void nsHttpConnectionMgr::RegisterOriginCoalescingKey(HttpConnectionBase* conn,

nsCString newKey;
BuildOriginFrameHashKey(newKey, ci, host, port);
mCoalescingHash
.GetOrInsertWith(newKey,
[] { return MakeUnique<nsTArray<nsWeakPtr>>(1); })
->AppendElement(
do_GetWeakReference(static_cast<nsISupportsWeakReference*>(conn)));
nsTArray<nsWeakPtr>* listOfWeakConns = mCoalescingHash.Get(newKey);
if (!listOfWeakConns) {
listOfWeakConns = new nsTArray<nsWeakPtr>(1);
mCoalescingHash.Put(newKey, listOfWeakConns);
}
listOfWeakConns->AppendElement(
do_GetWeakReference(static_cast<nsISupportsWeakReference*>(conn)));

LOG(
("nsHttpConnectionMgr::RegisterOriginCoalescingKey "

@ -1865,9 +1865,8 @@ void nsHttpHandler::PrefsChanged(const char* pref) {
nsAutoCString token{tokenSubstring};
int32_t index = token.Find(";");
if (index != kNotFound) {
mAltSvcMappingTemptativeMap.Put(
Substring(token, 0, index),
MakeUnique<nsCString>(Substring(token, index + 1)));
auto* map = new nsCString(Substring(token, index + 1));
mAltSvcMappingTemptativeMap.Put(Substring(token, 0, index), map);
}
}
}

@ -369,25 +369,22 @@ WebSocketEventService::AddListener(uint64_t aInnerWindowID,

++mCountListeners;

mWindows
.GetOrInsertWith(
aInnerWindowID,
[&] {
auto listener = MakeUnique<WindowListener>();
WindowListener* listener = mWindows.Get(aInnerWindowID);
if (!listener) {
listener = new WindowListener();

if (IsChildProcess()) {
PWebSocketEventListenerChild* actor =
gNeckoChild->SendPWebSocketEventListenerConstructor(
aInnerWindowID);
if (IsChildProcess()) {
PWebSocketEventListenerChild* actor =
gNeckoChild->SendPWebSocketEventListenerConstructor(aInnerWindowID);

listener->mActor =
static_cast<WebSocketEventListenerChild*>(actor);
MOZ_ASSERT(listener->mActor);
}
listener->mActor = static_cast<WebSocketEventListenerChild*>(actor);
MOZ_ASSERT(listener->mActor);
}

return listener;
})
->mListeners.AppendElement(aListener);
mWindows.Put(aInnerWindowID, listener);
}

listener->mListeners.AppendElement(aListener);

return NS_OK;
}

@ -118,15 +118,17 @@ nsresult nsStreamConverterService::AddAdjacency(const char* aContractID) {
// Each MIME-type is a vertex in the graph, so first lets make sure
// each MIME-type is represented as a key in our hashtable.

nsTArray<RefPtr<nsAtom>>* const fromEdges =
mAdjacencyList
.GetOrInsertWith(
fromStr,
[] { return mozilla::MakeUnique<nsTArray<RefPtr<nsAtom>>>(); })
.get();
nsTArray<RefPtr<nsAtom>>* fromEdges = mAdjacencyList.Get(fromStr);
if (!fromEdges) {
// There is no fromStr vertex, create one.
fromEdges = new nsTArray<RefPtr<nsAtom>>();
mAdjacencyList.Put(fromStr, fromEdges);
}

mozilla::Unused << mAdjacencyList.GetOrInsertWith(
toStr, [] { return mozilla::MakeUnique<nsTArray<RefPtr<nsAtom>>>(); });
if (!mAdjacencyList.Get(toStr)) {
// There is no toStr vertex, create one.
mAdjacencyList.Put(toStr, new nsTArray<RefPtr<nsAtom>>());
}

// Now we know the FROM and TO types are represented as keys in the hashtable.
// Let's "connect" the verticies, making an edge.

@ -197,7 +199,7 @@ nsresult nsStreamConverterService::FindConverter(
for (auto iter = mAdjacencyList.Iter(); !iter.Done(); iter.Next()) {
const nsACString& key = iter.Key();
MOZ_ASSERT(iter.UserData(), "no data in the table iteration");
lBFSTable.Put(key, mozilla::MakeUnique<BFSTableData>(key));
lBFSTable.Put(key, new BFSTableData(key));
}

NS_ASSERTION(lBFSTable.Count() == vertexCount,

@ -768,8 +768,8 @@ void NetlinkService::OnLinkMessage(struct nlmsghdr* aNlh) {
if (!linkInfo) {
LOG(("Creating new link [index=%u, name=%s, flags=%u, type=%u]",
linkIndex, linkName.get(), link->GetFlags(), link->GetType()));
linkInfo =
mLinks.Put(linkIndex, MakeUnique<LinkInfo>(std::move(link))).get();
linkInfo = new LinkInfo(std::move(link));
mLinks.Put(linkIndex, linkInfo);
} else {
LOG(("Updating link [index=%u, name=%s, flags=%u, type=%u]", linkIndex,
linkName.get(), link->GetFlags(), link->GetType()));

@ -1049,7 +1049,7 @@ void NetlinkService::OnNeighborMessage(struct nlmsghdr* aNlh) {
neigh->GetAsString(neighDbgStr);
LOG(("Adding neighbor: %s", neighDbgStr.get()));
}
linkInfo->mNeighbors.Put(key, std::move(neigh));
linkInfo->mNeighbors.Put(key, neigh.release());
} else {
if (LOG_ENABLED()) {
nsAutoCString neighDbgStr;

@ -210,7 +210,7 @@ AsyncBindingParams::BindByName(const nsACString& aName, nsIVariant* aValue) {
RefPtr<Variant_base> variant = convertVariantToStorageVariant(aValue);
if (!variant) return NS_ERROR_UNEXPECTED;

mNamedParameters.Put(aName, nsCOMPtr<nsIVariant>{variant});
mNamedParameters.Put(aName, variant);
return NS_OK;
}

@ -662,10 +662,9 @@ nsresult NativeFileWatcherIOTask::AddPathRunnableMethod(
nsresult rv = AddDirectoryToWatchList(resourceDesc.get());
if (NS_SUCCEEDED(rv)) {
// Add the resource pointer to both indexes.
mWatchedResourcesByHandle.Put(
resHandle, mWatchedResourcesByPath
.Put(wrappedParameters->mPath, std::move(resourceDesc))
.get());
WatchedResourceDescriptor* resource = resourceDesc.release();
mWatchedResourcesByPath.Put(wrappedParameters->mPath, resource);
mWatchedResourcesByHandle.Put(resHandle, resource);

// Dispatch the success callback.
nsresult rv = ReportSuccess(wrappedParameters->mSuccessCallbackHandle,

@ -1096,13 +1095,15 @@ void NativeFileWatcherIOTask::AppendCallbacksToHashtables(
const nsMainThreadPtrHandle<nsINativeFileWatcherCallback>& aOnChangeHandle,
const nsMainThreadPtrHandle<nsINativeFileWatcherErrorCallback>&
aOnErrorHandle) {
ChangeCallbackArray* const callbacksArray =
mChangeCallbacksTable
.GetOrInsertWith(aPath,
[] { return MakeUnique<ChangeCallbackArray>(); })
.get();
// First check to see if we've got an entry already.
ChangeCallbackArray* callbacksArray = mChangeCallbacksTable.Get(aPath);
if (!callbacksArray) {
// We don't have an entry. Create an array and put it into the hash table.
callbacksArray = new ChangeCallbackArray();
mChangeCallbacksTable.Put(aPath, callbacksArray);
}

// Now we do have an entry for that path. Check to see if the callback is
// We do have an entry for that path. Check to see if the callback is
// already there.
ChangeCallbackArray::index_type changeCallbackIndex =
callbacksArray->IndexOf(aOnChangeHandle);

@ -1113,11 +1114,12 @@ void NativeFileWatcherIOTask::AppendCallbacksToHashtables(
}

// Same thing for the error callback.
ErrorCallbackArray* const errorCallbacksArray =
mErrorCallbacksTable
.GetOrInsertWith(aPath,
[] { return MakeUnique<ErrorCallbackArray>(); })
.get();
ErrorCallbackArray* errorCallbacksArray = mErrorCallbacksTable.Get(aPath);
if (!errorCallbacksArray) {
// We don't have an entry. Create an array and put it into the hash table.
errorCallbacksArray = new ErrorCallbackArray();
mErrorCallbacksTable.Put(aPath, errorCallbacksArray);
}

ErrorCallbackArray::index_type errorCallbackIndex =
errorCallbacksArray->IndexOf(aOnErrorHandle);

@ -30,13 +30,11 @@
#include "TelemetryEventData.h"
#include "TelemetryScalar.h"

using mozilla::MakeUnique;
using mozilla::Maybe;
using mozilla::StaticAutoPtr;
using mozilla::StaticMutex;
using mozilla::StaticMutexAutoLock;
using mozilla::TimeStamp;
using mozilla::UniquePtr;
using mozilla::Telemetry::ChildEventData;
using mozilla::Telemetry::EventExtraEntry;
using mozilla::Telemetry::LABELS_TELEMETRY_EVENT_RECORDING_ERROR;

@ -375,10 +373,12 @@ bool IsExpired(const EventKey& key) { return key.id == kExpiredEventId; }

EventRecordArray* GetEventRecordsForProcess(const StaticMutexAutoLock& lock,
ProcessID processType) {
return gEventRecords
.GetOrInsertWith(uint32_t(processType),
[] { return MakeUnique<EventRecordArray>(); })
.get();
EventRecordArray* eventRecords = nullptr;
if (!gEventRecords.Get(uint32_t(processType), &eventRecords)) {
eventRecords = new EventRecordArray();
gEventRecords.Put(uint32_t(processType), eventRecords);
}
return eventRecords;
}

EventKey* GetEventKey(const StaticMutexAutoLock& lock,

@ -539,8 +539,7 @@ void RegisterEvents(const StaticMutexAutoLock& lock, const nsACString& category,
gDynamicEventInfo->AppendElement(eventInfos[i]);
uint32_t eventId =
eventExpired[i] ? kExpiredEventId : gDynamicEventInfo->Length() - 1;
gEventNameIDMap.Put(eventName,
UniquePtr<EventKey>{new EventKey{eventId, true}});
gEventNameIDMap.Put(eventName, new EventKey{eventId, true});
}

// If it is a builtin, add the category name in order to enable it later.

@ -709,8 +708,7 @@ void TelemetryEvent::InitializeGlobalState(bool aCanRecordBase,
eventId = kExpiredEventId;
}

gEventNameIDMap.Put(UniqueEventName(info),
UniquePtr<EventKey>{new EventKey{eventId, false}});
gEventNameIDMap.Put(UniqueEventName(info), new EventKey{eventId, false});
gCategoryNames.PutEntry(info.common_info.category());
}

@ -1291,7 +1289,7 @@ nsresult TelemetryEvent::CreateSnapshots(uint32_t aDataset, bool aClear,
gEventRecords.Clear();
for (auto& pair : leftovers) {
gEventRecords.Put(pair.first,
MakeUnique<EventRecordArray>(std::move(pair.second)));
new EventRecordArray(std::move(pair.second)));
}
leftovers.Clear();
}

@ -35,10 +35,8 @@ using base::CountHistogram;
using base::FlagHistogram;
using base::LinearHistogram;
using mozilla::MakeTuple;
using mozilla::MakeUnique;
using mozilla::StaticMutex;
using mozilla::StaticMutexAutoLock;
using mozilla::UniquePtr;
using mozilla::Telemetry::HistogramAccumulation;
using mozilla::Telemetry::HistogramCount;
using mozilla::Telemetry::HistogramID;

@ -981,6 +979,7 @@ Histogram::Histogram(HistogramID histogramId, const HistogramInfo& info,
return;
}

base::Histogram* h;
const int bucketsOffset = gHistogramBucketLowerBoundIndex[histogramId];

if (info.is_single_store()) {

@ -989,9 +988,8 @@ Histogram::Histogram(HistogramID histogramId, const HistogramInfo& info,
for (uint32_t i = 0; i < info.store_count; i++) {
auto store = nsDependentCString(
&gHistogramStringTable[gHistogramStoresTable[info.store_index + i]]);
mStorage.Put(store, UniquePtr<base::Histogram>(
internal_CreateBaseHistogramInstance(
info, bucketsOffset)));
h = internal_CreateBaseHistogramInstance(info, bucketsOffset);
mStorage.Put(store, h);
}
}
}

@ -1133,7 +1131,7 @@ KeyedHistogram::KeyedHistogram(HistogramID id, const HistogramInfo& info,
for (uint32_t i = 0; i < info.store_count; i++) {
auto store = nsDependentCString(
&gHistogramStringTable[gHistogramStoresTable[info.store_index + i]]);
mStorage.Put(store, MakeUnique<KeyedHistogramMapType>());
mStorage.Put(store, new KeyedHistogramMapType);
}
}
}

@ -1167,16 +1165,16 @@ nsresult KeyedHistogram::GetHistogram(const nsCString& aStore,
}

int bucketsOffset = gHistogramBucketLowerBoundIndex[mId];
auto h = UniquePtr<base::Histogram>{
internal_CreateBaseHistogramInstance(mHistogramInfo, bucketsOffset)};
base::Histogram* h =
internal_CreateBaseHistogramInstance(mHistogramInfo, bucketsOffset);
if (!h) {
return NS_ERROR_FAILURE;
}

h->ClearFlags(base::Histogram::kUmaTargetedHistogramFlag);
*histogram = h.get();
*histogram = h;

bool inserted = histogramMap->Put(key, std::move(h), mozilla::fallible);
bool inserted = histogramMap->Put(key, h, mozilla::fallible);
if (MOZ_UNLIKELY(!inserted)) {
return NS_ERROR_OUT_OF_MEMORY;
}

@ -31,14 +31,12 @@
#include "nsVariant.h"
#include "TelemetryScalarData.h"

using mozilla::MakeUnique;
using mozilla::Nothing;
using mozilla::Preferences;
using mozilla::Some;
using mozilla::StaticAutoPtr;
using mozilla::StaticMutex;
using mozilla::StaticMutexAutoLock;
using mozilla::UniquePtr;
using mozilla::Telemetry::DynamicScalarDefinition;
using mozilla::Telemetry::KeyedScalarAction;
using mozilla::Telemetry::ProcessID;

@ -1131,7 +1129,7 @@ ScalarResult KeyedScalar::GetScalarForKey(const StaticMutexAutoLock& locker,
return ScalarResult::InvalidType;
}

mScalarKeys.Put(utf8Key, UniquePtr<ScalarBase>(scalar));
mScalarKeys.Put(utf8Key, scalar);

*aRet = scalar;
return ScalarResult::Ok;

@ -1501,6 +1499,7 @@ nsresult internal_GetScalarByEnum(const StaticMutexAutoLock& lock,
}

ScalarBase* scalar = nullptr;
ScalarStorageMapType* scalarStorage = nullptr;
// Initialize the scalar storage to the parent storage. This will get
// set to the child storage if needed.
uint32_t storageId = static_cast<uint32_t>(aProcessStorage);

@ -1513,11 +1512,10 @@ nsresult internal_GetScalarByEnum(const StaticMutexAutoLock& lock,

// Get the process-specific storage or create one if it's not
// available.
ScalarStorageMapType* const scalarStorage =
processStorage
.GetOrInsertWith(storageId,
[] { return MakeUnique<ScalarStorageMapType>(); })
.get();
if (!processStorage.Get(storageId, &scalarStorage)) {
scalarStorage = new ScalarStorageMapType();
processStorage.Put(storageId, scalarStorage);
}

// Check if the scalar is already allocated in the parent or in the child
// storage.

@ -1548,7 +1546,7 @@ nsresult internal_GetScalarByEnum(const StaticMutexAutoLock& lock,
return NS_ERROR_INVALID_ARG;
}

scalarStorage->Put(aId.id, UniquePtr<ScalarBase>(scalar));
scalarStorage->Put(aId.id, scalar);
*aRet = scalar;
return NS_OK;
}

@ -1785,6 +1783,7 @@ nsresult internal_GetKeyedScalarByEnum(const StaticMutexAutoLock& lock,
}

KeyedScalar* scalar = nullptr;
KeyedScalarStorageMapType* scalarStorage = nullptr;
// Initialize the scalar storage to the parent storage. This will get
// set to the child storage if needed.
uint32_t storageId = static_cast<uint32_t>(aProcessStorage);

@ -1797,11 +1796,10 @@ nsresult internal_GetKeyedScalarByEnum(const StaticMutexAutoLock& lock,

// Get the process-specific storage or create one if it's not
// available.
KeyedScalarStorageMapType* const scalarStorage =
processStorage
.GetOrInsertWith(
storageId, [] { return MakeUnique<KeyedScalarStorageMapType>(); })
.get();
if (!processStorage.Get(storageId, &scalarStorage)) {
scalarStorage = new KeyedScalarStorageMapType();
processStorage.Put(storageId, scalarStorage);
}

if (scalarStorage->Get(aId.id, &scalar)) {
*aRet = scalar;

@ -1823,7 +1821,7 @@ nsresult internal_GetKeyedScalarByEnum(const StaticMutexAutoLock& lock,
return NS_ERROR_INVALID_ARG;
}

scalarStorage->Put(aId.id, UniquePtr<KeyedScalar>(scalar));
scalarStorage->Put(aId.id, scalar);
*aRet = scalar;
return NS_OK;
}

@ -186,7 +186,7 @@ void TableUpdateV4::NewPrefixes(int32_t aSize, const nsACString& aPrefixes) {
aPrefixes.Length() / aSize));
}

mPrefixesMap.Put(aSize, MakeUnique<nsCString>(aPrefixes));
mPrefixesMap.Put(aSize, new nsCString(aPrefixes));
}

nsresult TableUpdateV4::NewRemovalIndices(const uint32_t* aIndices,

@ -516,7 +516,7 @@ VLPrefixSet::VLPrefixSet(const PrefixStringMap& aMap) : mCount(0) {
uint32_t size = iter.Key();
MOZ_ASSERT(iter.Data()->Length() % size == 0,
"PrefixString must be a multiple of the prefix size.");
mMap.Put(size, MakeUnique<PrefixString>(*iter.Data(), size));
mMap.Put(size, new PrefixString(*iter.Data(), size));
mCount += iter.Data()->Length() / size;
}
}

@ -116,7 +116,7 @@ nsresult VariableLengthPrefixSet::SetPrefixes(AddPrefixArray& aAddPrefixes,
const char* buf = reinterpret_cast<const char*>(completions[i].buf);
completionStr->Append(buf, COMPLETE_SIZE);
}
mVLPrefixSet.Put(COMPLETE_SIZE, std::move(completionStr));
mVLPrefixSet.Put(COMPLETE_SIZE, completionStr.release());

return NS_OK;
}

@ -176,7 +176,7 @@ nsresult VariableLengthPrefixSet::SetPrefixes(PrefixStringMap& aPrefixMap) {
continue;
}

mVLPrefixSet.Put(iter.Key(), MakeUnique<nsCString>(*iter.Data()));
mVLPrefixSet.Put(iter.Key(), new nsCString(*iter.Data()));
}

return NS_OK;

@ -203,12 +203,12 @@ nsresult VariableLengthPrefixSet::GetPrefixes(PrefixStringMap& aPrefixMap) {
begin[i] = NativeEndian::swapToBigEndian(array[i]);
}

aPrefixMap.Put(PREFIX_SIZE_FIXED, std::move(prefixes));
aPrefixMap.Put(PREFIX_SIZE_FIXED, prefixes.release());
}

// Copy variable-length prefix set
for (auto iter = mVLPrefixSet.ConstIter(); !iter.Done(); iter.Next()) {
aPrefixMap.Put(iter.Key(), MakeUnique<nsCString>(*iter.Data()));
aPrefixMap.Put(iter.Key(), new nsCString(*iter.Data()));
}

return NS_OK;

@ -351,7 +351,7 @@ nsresult VariableLengthPrefixSet::LoadPrefixes(nsCOMPtr<nsIInputStream>& in) {
NS_ENSURE_SUCCESS(rv, rv);
NS_ENSURE_TRUE(read == stringLength, NS_ERROR_FAILURE);

mVLPrefixSet.Put(prefixSize, std::move(vlPrefixes));
mVLPrefixSet.Put(prefixSize, vlPrefixes.release());
totalPrefixes += prefixCount;
LOG(("[%s] Loaded %u %u-byte prefixes", mName.get(), prefixCount,
prefixSize));

@ -875,7 +875,7 @@ nsresult nsUrlClassifierUtils::ReadProvidersFromPrefs(ProviderDictType& aDict) {
nsTArray<nsCString> tables;
Classifier::SplitTables(owningLists, tables);
for (auto tableName : tables) {
aDict.Put(tableName, MakeUnique<nsCString>(provider));
aDict.Put(tableName, new nsCString(provider));
}
}

@ -128,7 +128,7 @@ nsresult PrefixArrayToPrefixStringMap(const _PrefixArray& aPrefixArray,
uint32_t size = iter.Key();
uint32_t count = iter.Data()->Length();

auto str = MakeUnique<_Prefix>();
_Prefix* str = new _Prefix();
str->SetLength(size * count);

char* dst = str->BeginWriting();

@ -139,7 +139,7 @@ nsresult PrefixArrayToPrefixStringMap(const _PrefixArray& aPrefixArray,

dst += size;
}

aOut.Put(size, std::move(str));
aOut.Put(size, str);
}

return NS_OK;

@ -229,7 +229,7 @@ NS_IMETHODIMP ContentHandlerService::GetTypeFromExtension(
mHandlerServiceChild->SendGetTypeFromExtension(nsCString(aFileExtension),
&type);
_retval.Assign(type);
mExtToTypeMap.Put(nsCString(aFileExtension), MakeUnique<nsCString>(type));
mExtToTypeMap.Put(nsCString(aFileExtension), new nsCString(type));

return NS_OK;
}

@ -868,10 +868,11 @@ nsresult EventDispatcher::IterateEvents(JSContext* aCx, JS::HandleValue aEvents,
|
|||
|
||||
nsresult EventDispatcher::RegisterEventLocked(
|
||||
const nsAString& aEvent, nsIAndroidEventListener* aListener) {
|
||||
ListenersList* list =
|
||||
mListenersMap
|
||||
.GetOrInsertWith(aEvent, [] { return MakeUnique<ListenersList>(); })
|
||||
.get();
|
||||
ListenersList* list = mListenersMap.Get(aEvent);
|
||||
if (!list) {
|
||||
list = new ListenersList();
|
||||
mListenersMap.Put(aEvent, list);
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
for (ssize_t i = 0; i < list->listeners.Count(); i++) {
|
||||
|
|
|
@ -26,7 +26,7 @@ nsSystemStatusBarCocoa::AddItem(Element* aElement) {
|
|||
}
|
||||
|
||||
nsCOMPtr<nsISupports> keyPtr = aElement;
|
||||
mItems.Put(keyPtr, mozilla::MakeUnique<StatusItem>(menu));
|
||||
mItems.Put(keyPtr, new StatusItem(menu));
|
||||
|
||||
return NS_OK;
|
||||
|
||||
|
|
|
@ -482,12 +482,11 @@ nsresult WakeLockListener::Callback(const nsAString& topic,
|
|||
!topic.Equals(u"video-playing"_ns))
|
||||
return NS_OK;
|
||||
|
||||
WakeLockTopic* const topicLock =
|
||||
mTopics
|
||||
.GetOrInsertWith(
|
||||
topic,
|
||||
[&] { return MakeUnique<WakeLockTopic>(topic, mConnection); })
|
||||
.get();
|
||||
WakeLockTopic* topicLock = mTopics.Get(topic);
|
||||
if (!topicLock) {
|
||||
topicLock = new WakeLockTopic(topic, mConnection);
|
||||
mTopics.Put(topic, topicLock);
|
||||
}
|
||||
|
||||
// Treat "locked-background" the same as "unlocked" on desktop linux.
|
||||
bool shouldLock = state.EqualsLiteral("locked-foreground");
|
||||
|
|
|
@ -150,46 +150,50 @@ nsresult nsWindowBase::SynthesizeNativeTouchPoint(
uint32_t pressure = (uint32_t)ceil(aPointerPressure * 1024);

// If we already know about this pointer id get it's record
return mActivePointers.WithEntryHandle(aPointerId, [&](auto&& entry) {
POINTER_FLAGS flags;
PointerInfo* info = mActivePointers.Get(aPointerId);

// We know about this pointer, send an update
if (entry) {
flags = POINTER_FLAG_UPDATE;
if (hover) {
flags |= POINTER_FLAG_INRANGE;
} else if (contact) {
flags |= POINTER_FLAG_INCONTACT | POINTER_FLAG_INRANGE;
} else if (remove) {
flags = POINTER_FLAG_UP;
// Remove the pointer from our tracking list. This is UniquePtr wrapped,
// so shouldn't leak.
entry.Remove();
}
// We know about this pointer, send an update
if (info) {
POINTER_FLAGS flags = POINTER_FLAG_UPDATE;
if (hover) {
flags |= POINTER_FLAG_INRANGE;
} else if (contact) {
flags |= POINTER_FLAG_INCONTACT | POINTER_FLAG_INRANGE;
} else if (remove) {
flags = POINTER_FLAG_UP;
// Remove the pointer from our tracking list. This is nsAutPtr wrapped,
// so shouldn't leak.
mActivePointers.Remove(aPointerId);
}

if (cancel) {
flags |= POINTER_FLAG_CANCELED;
}
} else {
// Missing init state, error out
if (remove || cancel) {
return NS_ERROR_INVALID_ARG;
}

// Create a new pointer
flags = POINTER_FLAG_INRANGE;
if (contact) {
flags |= POINTER_FLAG_INCONTACT | POINTER_FLAG_DOWN;
}

entry.Insert(MakeUnique<PointerInfo>(aPointerId, aPoint));
if (cancel) {
flags |= POINTER_FLAG_CANCELED;
}

return !InjectTouchPoint(aPointerId, aPoint, flags, pressure,
aPointerOrientation)
? NS_ERROR_UNEXPECTED
: NS_OK;
});
}

// Missing init state, error out
if (remove || cancel) {
return NS_ERROR_INVALID_ARG;
}

// Create a new pointer
info = new PointerInfo(aPointerId, aPoint);

POINTER_FLAGS flags = POINTER_FLAG_INRANGE;
if (contact) {
flags |= POINTER_FLAG_INCONTACT | POINTER_FLAG_DOWN;
}

mActivePointers.Put(aPointerId, info);
return !InjectTouchPoint(aPointerId, aPoint, flags, pressure,
aPointerOrientation)
? NS_ERROR_UNEXPECTED
: NS_OK;
}

nsresult nsWindowBase::ClearNativeTouchSequence(nsIObserver* aObserver) {

|
@ -533,13 +533,13 @@ class LogModuleManager {

LogModule* CreateOrGetModule(const char* aName) {
OffTheBooksMutexAutoLock guard(mModulesLock);
return mModules
.GetOrInsertWith(aName,
[&] {
return UniquePtr<LogModule>(
new LogModule{aName, LogLevel::Disabled});
})
.get();
LogModule* module = nullptr;
if (!mModules.Get(aName, &module)) {
module = new LogModule(aName, LogLevel::Disabled);
mModules.Put(aName, module);
}

return module;
}

void Print(const char* aName, LogLevel aLevel, const char* aFmt,

|
@ -38,8 +38,7 @@ already_AddRefed<nsIErrorService> nsErrorService::GetOrCreate() {
NS_IMETHODIMP
nsErrorService::RegisterErrorStringBundle(int16_t aErrorModule,
const char* aStringBundleURL) {
mErrorStringBundleURLMap.Put(aErrorModule,
MakeUnique<nsCString>(aStringBundleURL));
mErrorStringBundleURLMap.Put(aErrorModule, new nsCString(aStringBundleURL));
return NS_OK;
}

|
@ -199,29 +199,28 @@ nsresult nsINIParser::SetString(const char* aSection, const char* aKey,
return NS_ERROR_INVALID_ARG;
}

mSections.WithEntryHandle(aSection, [&](auto&& entry) {
if (!entry) {
entry.Insert(MakeUnique<INIValue>(aKey, aValue));
return;
}
INIValue* v;
if (!mSections.Get(aSection, &v)) {
v = new INIValue(aKey, aValue);

INIValue* v = entry.Data().get();
mSections.Put(aSection, v);
return NS_OK;
}

// Check whether this key has already been specified; overwrite
// if so, or append if not.
while (v) {
if (!strcmp(aKey, v->key)) {
v->SetValue(aValue);
break;
}
if (!v->next) {
v->next = MakeUnique<INIValue>(aKey, aValue);
break;
}
v = v->next.get();
// Check whether this key has already been specified; overwrite
// if so, or append if not.
while (v) {
if (!strcmp(aKey, v->key)) {
v->SetValue(aValue);
break;
}
NS_ASSERTION(v, "v should never be null coming out of this loop");
});
if (!v->next) {
v->next = MakeUnique<INIValue>(aKey, aValue);
break;
}
v = v->next.get();
}
NS_ASSERTION(v, "v should never be null coming out of this loop");

return NS_OK;
}

@ -241,7 +240,7 @@ nsresult nsINIParser::DeleteString(const char* aSection, const char* aKey) {
if (!val->next) {
mSections.Remove(aSection);
} else {
mSections.Put(aSection, std::move(val->next));
mSections.Put(aSection, val->next.release());
delete val;
}
return NS_OK;

@ -283,7 +282,7 @@ nsresult nsINIParser::RenameSection(const char* aSection,

mozilla::UniquePtr<INIValue> val;
if (mSections.Remove(aSection, &val)) {
mSections.Put(aNewName, std::move(val));
mSections.Put(aNewName, val.release());
} else {
return NS_ERROR_FAILURE;
}

@ -307,10 +307,8 @@ static BloatEntry* GetBloatEntry(const char* aTypeName,
|
|||
EnsureBloatView();
|
||||
BloatEntry* entry = gBloatView->Get(aTypeName);
|
||||
if (!entry && aInstanceSize > 0) {
|
||||
entry =
|
||||
gBloatView
|
||||
->Put(aTypeName, MakeUnique<BloatEntry>(aTypeName, aInstanceSize))
|
||||
.get();
|
||||
entry = new BloatEntry(aTypeName, aInstanceSize);
|
||||
gBloatView->Put(aTypeName, entry);
|
||||
} else {
|
||||
MOZ_ASSERT(
|
||||
aInstanceSize == 0 || entry->GetClassSize() == aInstanceSize,
|
||||
|
|
|
@ -511,11 +511,9 @@ void nsCategoryManager::AddCategoryEntry(const nsACString& aCategoryName,
|
|||
|
||||
if (!category) {
|
||||
// That category doesn't exist yet; let's make it.
|
||||
category =
|
||||
mTable
|
||||
.Put(MaybeStrdup(aCategoryName, &mArena),
|
||||
UniquePtr<CategoryNode>{CategoryNode::Create(&mArena)})
|
||||
.get();
|
||||
category = CategoryNode::Create(&mArena);
|
||||
|
||||
mTable.Put(MaybeStrdup(aCategoryName, &mArena), category);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -724,10 +724,13 @@ void nsComponentManagerImpl::ManifestComponent(ManifestProcessingContext& aCx,
return;
}

KnownModule* const km =
mKnownModules
.GetOrInsertWith(hash, [&] { return MakeUnique<KnownModule>(fl); })
.get();
KnownModule* km;

km = mKnownModules.Get(hash);
if (!km) {
km = new KnownModule(fl);
mKnownModules.Put(hash, km);
}

void* place = mArena.Allocate(sizeof(nsCID));
nsID* permanentCID = static_cast<nsID*>(place);

@ -189,56 +189,61 @@ class nsBaseHashtable
|
|||
}
|
||||
|
||||
/**
|
||||
* Add aKey to the table if not already present, and return a reference to its
|
||||
* value. If aKey is not already in the table then the a default-constructed
|
||||
* or the provided value aData is used.
|
||||
* Add key to the table if not already present, and return a reference to its
|
||||
* value. If key is not already in the table then the value is default
|
||||
* constructed.
|
||||
*
|
||||
* If the arguments are non-trivial to provide, consider using GetOrInsertWith
|
||||
* instead.
|
||||
* This function can only be used if DataType is default-constructible. Use
|
||||
* WithEntryHandle with non-default-constructible DataType for now.
|
||||
*
|
||||
* TODO: Add a function GetOrInsertWith that will use a function for
|
||||
* DataType construction.
|
||||
*/
|
||||
template <typename... Args>
|
||||
DataType& GetOrInsert(const KeyType& aKey, Args&&... aArgs) {
|
||||
return WithEntryHandle(aKey, [&](auto entryHandle) -> DataType& {
|
||||
return entryHandle.OrInsert(std::forward<Args>(aArgs)...);
|
||||
});
|
||||
DataType& GetOrInsert(const KeyType& aKey) {
|
||||
EntryType* ent = this->PutEntry(aKey);
|
||||
return ent->mData;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add aKey to the table if not already present, and return a reference to its
|
||||
* value. If aKey is not already in the table then the value is
|
||||
* constructed using the given factory.
|
||||
* Put a new value for the associated key
|
||||
* @param aKey the key to put
|
||||
* @param aData the new data
|
||||
*/
|
||||
template <typename F>
|
||||
DataType& GetOrInsertWith(const KeyType& aKey, F&& aFunc) {
|
||||
return WithEntryHandle(aKey, [&aFunc](auto entryHandle) -> DataType& {
|
||||
return entryHandle.OrInsertWith(std::forward<F>(aFunc));
|
||||
void Put(KeyType aKey, const UserDataType& aData) {
|
||||
WithEntryHandle(aKey, [&aData](auto entryHandle) {
|
||||
entryHandle.InsertOrUpdate(Converter::Wrap(aData));
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* If it does not yet, inserts a new entry with the handle's key and the
|
||||
* value passed to this function. Otherwise, it updates the entry by the
|
||||
* value passed to this function.
|
||||
*
|
||||
* \tparam U DataType must be implicitly convertible (and assignable) from U
|
||||
* \post HasEntry()
|
||||
* \param aKey the key to put
|
||||
* \param aData the new data
|
||||
*/
|
||||
template <typename U>
|
||||
DataType& Put(KeyType aKey, U&& aData) {
|
||||
return WithEntryHandle(aKey, [&aData](auto entryHandle) -> DataType& {
|
||||
return entryHandle.InsertOrUpdate(std::forward<U>(aData));
|
||||
});
|
||||
}
|
||||
|
||||
template <typename U>
|
||||
[[nodiscard]] bool Put(KeyType aKey, U&& aData, const fallible_t& aFallible) {
|
||||
[[nodiscard]] bool Put(KeyType aKey, const UserDataType& aData,
|
||||
const fallible_t& aFallible) {
|
||||
return WithEntryHandle(aKey, aFallible, [&aData](auto maybeEntryHandle) {
|
||||
if (!maybeEntryHandle) {
|
||||
return false;
|
||||
}
|
||||
maybeEntryHandle->InsertOrUpdate(std::forward<U>(aData));
|
||||
maybeEntryHandle->InsertOrUpdate(Converter::Wrap(aData));
|
||||
return true;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Put a new value for the associated key
|
||||
* @param aKey the key to put
|
||||
* @param aData the new data
|
||||
*/
|
||||
void Put(KeyType aKey, UserDataType&& aData) {
|
||||
WithEntryHandle(aKey, [&aData](auto entryHandle) {
|
||||
entryHandle.InsertOrUpdate(Converter::Wrap(std::move(aData)));
|
||||
});
|
||||
}
|
||||
|
||||
[[nodiscard]] bool Put(KeyType aKey, UserDataType&& aData,
|
||||
const fallible_t& aFallible) {
|
||||
return WithEntryHandle(aKey, aFallible, [&aData](auto maybeEntryHandle) {
|
||||
if (!maybeEntryHandle) {
|
||||
return false;
|
||||
}
|
||||
maybeEntryHandle->InsertOrUpdate(Converter::Wrap(std::move(aData)));
|
||||
return true;
|
||||
});
|
||||
}
|
||||
|
@ -423,13 +428,13 @@ class nsBaseHashtable
|
|||
* Inserts a new entry with the handle's key and the value passed to this
|
||||
* function.
|
||||
*
|
||||
* \tparam Args DataType must be constructible from Args
|
||||
* \tparam U DataType must be constructible from U
|
||||
* \pre !HasEntry()
|
||||
* \post HasEntry()
|
||||
*/
|
||||
template <typename... Args>
|
||||
DataType& Insert(Args&&... aArgs) {
|
||||
Base::InsertInternal(std::forward<Args>(aArgs)...);
|
||||
template <typename U>
|
||||
DataType& Insert(U&& aData) {
|
||||
Base::InsertInternal(std::forward<U>(aData));
|
||||
return Data();
|
||||
}
|
||||
|
||||
|
@ -438,13 +443,13 @@ class nsBaseHashtable
|
|||
* the value passed to this function. The value is not consumed if no insert
|
||||
* takes place.
|
||||
*
|
||||
* \tparam Args DataType must be constructible from Args
|
||||
* \tparam U DataType must be constructible from U
|
||||
* \post HasEntry()
|
||||
*/
|
||||
template <typename... Args>
|
||||
DataType& OrInsert(Args&&... aArgs) {
|
||||
template <typename U>
|
||||
DataType& OrInsert(U&& aData) {
|
||||
if (!HasEntry()) {
|
||||
return Insert(std::forward<Args>(aArgs)...);
|
||||
return Insert(std::forward<U>(aData));
|
||||
}
|
||||
return Data();
|
||||
}
|
||||
|
@ -454,7 +459,7 @@ class nsBaseHashtable
|
|||
* the result of the functor passed to this function. The functor is not
|
||||
* called if no insert takes place.
|
||||
*
|
||||
* \tparam F must return a value that is implicitly convertible to DataType
|
||||
* \tparam F must return a value that DataType is constructible from
|
||||
* \post HasEntry()
|
||||
*/
|
||||
template <typename F>
|
||||
|
@ -512,7 +517,7 @@ class nsBaseHashtable
|
|||
* value passed to this function. Otherwise, it updates the entry by the
|
||||
* value passed to this function.
|
||||
*
|
||||
* \tparam U DataType must be implicitly convertible (and assignable) from U
|
||||
* \tparam U DataType must be constructible and assignable from U
|
||||
* \post HasEntry()
|
||||
*/
|
||||
template <typename U>
|
||||
|
|
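The hashtable hunks above, together with the call sites earlier in this backout, all revert to the same idiom: look up a raw pointer with `Get`, and on a miss insert a `new`-allocated value with `Put`, which takes ownership of the allocation. The following is only an illustrative sketch of that restored idiom, not part of the commit; the key and value types are hypothetical, and it assumes the in-tree `nsClassHashtable.h`/`nsHashKeys.h` headers of this revision.

```cpp
#include "nsClassHashtable.h"
#include "nsHashKeys.h"
#include "nsTArray.h"

// Hypothetical table mapping a window id to a list of listener ids.
// Mirrors the get-or-create pattern restored throughout this backout.
static void AddListener(
    nsClassHashtable<nsUint64HashKey, nsTArray<int>>& aTable,
    uint64_t aWindowId, int aListenerId) {
  nsTArray<int>* list = aTable.Get(aWindowId);  // nullptr if absent
  if (!list) {
    list = new nsTArray<int>();  // the table takes ownership on Put
    aTable.Put(aWindowId, list);
  }
  list->AppendElement(aListenerId);
}
```

After `Put`, the table owns the heap allocation, which is why the restored call sites can hand it a bare `new` result.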
|
@ -58,6 +58,14 @@ class nsClassHashtable : public nsBaseHashtable<KeyClass, mozilla::UniquePtr<T>,
|
|||
template <typename... Args>
|
||||
UserDataType LookupOrAdd(KeyType aKey, Args&&... aConstructionArgs);
|
||||
|
||||
/**
|
||||
* Looks up aKey in the hash table. If it doesn't exist a new object of
|
||||
* KeyClass will be created (using the factory function provided, whose return
|
||||
* value must be convertible to UniquePtr<T>) and then returned.
|
||||
*/
|
||||
template <typename Factory>
|
||||
UserDataType LookupOrAddFromFactory(KeyType aKey, const Factory& aFactory);
|
||||
|
||||
/**
|
||||
* @copydoc nsBaseHashtable::Get
|
||||
* @param aData if the key doesn't exist, pData will be set to nullptr.
|
||||
|
@ -69,6 +77,16 @@ class nsClassHashtable : public nsBaseHashtable<KeyClass, mozilla::UniquePtr<T>,
|
|||
* @returns nullptr if the key is not present.
|
||||
*/
|
||||
UserDataType Get(KeyType aKey) const;
|
||||
|
||||
// For now, overload Put, rather than hiding it.
|
||||
using base_type::Put;
|
||||
|
||||
template <typename U, typename = std::enable_if_t<std::is_base_of_v<T, U>>>
|
||||
void Put(KeyType aKey, mozilla::UniquePtr<U>&& aData);
|
||||
|
||||
template <typename U, typename = std::enable_if_t<std::is_base_of_v<T, U>>>
|
||||
[[nodiscard]] bool Put(KeyType aKey, mozilla::UniquePtr<U>&& aData,
|
||||
const mozilla::fallible_t&);
|
||||
};
|
||||
|
||||
template <typename K, typename T>
|
||||
|
@ -94,13 +112,21 @@ template <class KeyClass, class T>
template <typename... Args>
T* nsClassHashtable<KeyClass, T>::LookupOrAdd(KeyType aKey,
                                              Args&&... aConstructionArgs) {
  return this
      ->GetOrInsertWith(std::move(aKey),
                        [&] {
                          return mozilla::MakeUnique<T>(
                              std::forward<Args>(aConstructionArgs)...);
                        })
      .get();
  return LookupOrAddFromFactory(std::move(aKey), [&] {
    return mozilla::MakeUnique<T>(std::forward<Args>(aConstructionArgs)...);
  });
}

template <class KeyClass, class T>
template <typename Factory>
T* nsClassHashtable<KeyClass, T>::LookupOrAddFromFactory(
    KeyType aKey, const Factory& aFactory) {
  auto count = this->Count();
  typename base_type::EntryType* ent = this->PutEntry(aKey);
  if (count != this->Count()) {
    ent->SetData(aFactory());
  }
  return ent->GetData().get();
}

template <class KeyClass, class T>
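The Count() comparison in LookupOrAddFromFactory above is how the implementation detects that PutEntry created a fresh entry and that the factory therefore has to run. A hedged sketch of the forwarding LookupOrAdd built on top of it; the key and constructor argument are made up:

  nsClassHashtable<nsCStringHashKey, TestUniChar> table;
  // Constructs TestUniChar(160) only if "nbsp" is not present yet.
  TestUniChar* c = table.LookupOrAdd("nbsp"_ns, 160u);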
@ -132,4 +158,29 @@ T* nsClassHashtable<KeyClass, T>::Get(KeyType aKey) const {
  return ent->GetData().get();
}

template <class KeyClass, class T>
template <typename U, typename>
void nsClassHashtable<KeyClass, T>::Put(KeyType aKey,
                                        mozilla::UniquePtr<U>&& aData) {
  if (!Put(aKey, std::move(aData), mozilla::fallible)) {
    NS_ABORT_OOM(this->mTable.EntrySize() * this->mTable.EntryCount());
  }
}

template <class KeyClass, class T>
template <typename U, typename>
bool nsClassHashtable<KeyClass, T>::Put(KeyType aKey,
                                        mozilla::UniquePtr<U>&& aData,
                                        const mozilla::fallible_t&) {
  typename base_type::EntryType* ent = this->PutEntry(aKey, mozilla::fallible);

  if (!ent) {
    return false;
  }

  ent->SetData(std::move(aData));

  return true;
}

#endif  // nsClassHashtable_h__
@ -23,7 +23,6 @@
#include <numeric>

using mozilla::MakeRefPtr;
using mozilla::MakeUnique;
using mozilla::UniquePtr;

namespace TestHashtables {
@ -402,32 +401,32 @@ struct NonDefaultConstructible_NonDefaultConstructible {
  using DataType = NonDefaultConstructible;
  using UserDataType = NonDefaultConstructible;

  static constexpr uint32_t kExpectedAddRefCnt_Contains = 2;
  static constexpr uint32_t kExpectedAddRefCnt_GetGeneration = 2;
  static constexpr uint32_t kExpectedAddRefCnt_Contains = 3;
  static constexpr uint32_t kExpectedAddRefCnt_GetGeneration = 3;
  static constexpr uint32_t kExpectedAddRefCnt_SizeOfExcludingThis = 3;
  static constexpr uint32_t kExpectedAddRefCnt_SizeOfIncludingThis = 3;
  static constexpr uint32_t kExpectedAddRefCnt_Count = 2;
  static constexpr uint32_t kExpectedAddRefCnt_IsEmpty = 2;
  static constexpr uint32_t kExpectedAddRefCnt_Get_OutputParam = 5;
  static constexpr uint32_t kExpectedAddRefCnt_MaybeGet = 5;
  static constexpr uint32_t kExpectedAddRefCnt_Put = 2;
  static constexpr uint32_t kExpectedAddRefCnt_Put_Fallible = 2;
  static constexpr uint32_t kExpectedAddRefCnt_Put_Rvalue = 2;
  static constexpr uint32_t kExpectedAddRefCnt_Put_Rvalue_Fallible = 2;
  static constexpr uint32_t kExpectedAddRefCnt_Remove = 2;
  static constexpr uint32_t kExpectedAddRefCnt_GetAndRemove = 3;
  static constexpr uint32_t kExpectedAddRefCnt_RemoveIf = 2;
  static constexpr uint32_t kExpectedAddRefCnt_Lookup = 2;
  static constexpr uint32_t kExpectedAddRefCnt_Lookup_Remove = 2;
  static constexpr uint32_t kExpectedAddRefCnt_Iter = 2;
  static constexpr uint32_t kExpectedAddRefCnt_ConstIter = 2;
  static constexpr uint32_t kExpectedAddRefCnt_begin_end = 2;
  static constexpr uint32_t kExpectedAddRefCnt_cbegin_cend = 2;
  static constexpr uint32_t kExpectedAddRefCnt_Clear = 2;
  static constexpr uint32_t kExpectedAddRefCnt_ShallowSizeOfExcludingThis = 2;
  static constexpr uint32_t kExpectedAddRefCnt_ShallowSizeOfIncludingThis = 2;
  static constexpr uint32_t kExpectedAddRefCnt_SwapElements = 2;
  static constexpr uint32_t kExpectedAddRefCnt_MarkImmutable = 2;
  static constexpr uint32_t kExpectedAddRefCnt_Count = 3;
  static constexpr uint32_t kExpectedAddRefCnt_IsEmpty = 3;
  static constexpr uint32_t kExpectedAddRefCnt_Get_OutputParam = 6;
  static constexpr uint32_t kExpectedAddRefCnt_MaybeGet = 6;
  static constexpr uint32_t kExpectedAddRefCnt_Put = 3;
  static constexpr uint32_t kExpectedAddRefCnt_Put_Fallible = 3;
  static constexpr uint32_t kExpectedAddRefCnt_Put_Rvalue = 3;
  static constexpr uint32_t kExpectedAddRefCnt_Put_Rvalue_Fallible = 3;
  static constexpr uint32_t kExpectedAddRefCnt_Remove = 3;
  static constexpr uint32_t kExpectedAddRefCnt_GetAndRemove = 4;
  static constexpr uint32_t kExpectedAddRefCnt_RemoveIf = 3;
  static constexpr uint32_t kExpectedAddRefCnt_Lookup = 3;
  static constexpr uint32_t kExpectedAddRefCnt_Lookup_Remove = 3;
  static constexpr uint32_t kExpectedAddRefCnt_Iter = 3;
  static constexpr uint32_t kExpectedAddRefCnt_ConstIter = 3;
  static constexpr uint32_t kExpectedAddRefCnt_begin_end = 3;
  static constexpr uint32_t kExpectedAddRefCnt_cbegin_cend = 3;
  static constexpr uint32_t kExpectedAddRefCnt_Clear = 3;
  static constexpr uint32_t kExpectedAddRefCnt_ShallowSizeOfExcludingThis = 3;
  static constexpr uint32_t kExpectedAddRefCnt_ShallowSizeOfIncludingThis = 3;
  static constexpr uint32_t kExpectedAddRefCnt_SwapElements = 3;
  static constexpr uint32_t kExpectedAddRefCnt_MarkImmutable = 3;
};

struct NonDefaultConstructible_MovingNonDefaultConstructible {
@ -684,7 +683,8 @@ TYPED_TEST_P(BaseHashtableTest, MaybeGet) {
  EXPECT_EQ(data.CharRef()->GetChar(), 42u);
}

TYPED_TEST_P(BaseHashtableTest, GetOrInsert_Default) {
TYPED_TEST_P(BaseHashtableTest, GetOrInsert) {
  // The GetOrInsert function can't support non-default-constructible DataType.
  if constexpr (std::is_default_constructible_v<typename TypeParam::DataType>) {
    auto table = MakeEmptyBaseHashtable<TypeParam>();
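For reference, a hedged sketch of what the surviving GetOrInsert test above exercises: an entry is default-constructed on a miss, which is why the guard skips non-default-constructible DataTypes. The concrete table type and the single-argument GetOrInsert(aKey) form are assumptions based on the comment above:

  nsBaseHashtable<nsUint32HashKey, uint32_t, uint32_t> table;
  uint32_t& slot = table.GetOrInsert(1);  // entry created on first use
  slot = 42;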
@ -696,48 +696,6 @@ TYPED_TEST_P(BaseHashtableTest, GetOrInsert_Default) {
  }
}

TYPED_TEST_P(BaseHashtableTest, GetOrInsert_NonDefault) {
  auto table = MakeEmptyBaseHashtable<TypeParam>();

  typename TypeParam::DataType& data = table.GetOrInsert(
      1, typename TypeParam::DataType{MakeRefPtr<TestUniCharRefCounted>(42)});
  EXPECT_NE(data.CharRef(), nullptr);
}

TYPED_TEST_P(BaseHashtableTest, GetOrInsert_NonDefault_AlreadyPresent) {
  auto table = MakeEmptyBaseHashtable<TypeParam>();

  typename TypeParam::DataType& data1 = table.GetOrInsert(
      1, typename TypeParam::DataType{MakeRefPtr<TestUniCharRefCounted>(42)});
  TestUniCharRefCounted* const address = data1.CharRef();
  typename TypeParam::DataType& data2 = table.GetOrInsert(
      1,
      typename TypeParam::DataType{MakeRefPtr<TestUniCharRefCounted>(42, 1)});
  EXPECT_EQ(&data1, &data2);
  EXPECT_EQ(address, data2.CharRef());
}

TYPED_TEST_P(BaseHashtableTest, GetOrInsertWith) {
  auto table = MakeEmptyBaseHashtable<TypeParam>();

  typename TypeParam::DataType& data = table.GetOrInsertWith(1, [] {
    return typename TypeParam::DataType{MakeRefPtr<TestUniCharRefCounted>(42)};
  });
  EXPECT_NE(data.CharRef(), nullptr);
}

TYPED_TEST_P(BaseHashtableTest, GetOrInsertWith_AlreadyPresent) {
  auto table = MakeEmptyBaseHashtable<TypeParam>();

  table.GetOrInsertWith(1, [] {
    return typename TypeParam::DataType{MakeRefPtr<TestUniCharRefCounted>(42)};
  });
  table.GetOrInsertWith(1, [] {
    ADD_FAILURE();
    return typename TypeParam::DataType{MakeRefPtr<TestUniCharRefCounted>(42)};
  });
}

TYPED_TEST_P(BaseHashtableTest, Put) {
  auto table = MakeEmptyBaseHashtable<TypeParam>();

@ -992,12 +950,10 @@ TYPED_TEST_P(BaseHashtableTest, MarkImmutable) {
REGISTER_TYPED_TEST_CASE_P(
    BaseHashtableTest, Contains, GetGeneration, SizeOfExcludingThis,
    SizeOfIncludingThis, Count, IsEmpty, Get_OutputParam, Get, MaybeGet,
    GetOrInsert_Default, GetOrInsert_NonDefault,
    GetOrInsert_NonDefault_AlreadyPresent, GetOrInsertWith,
    GetOrInsertWith_AlreadyPresent, Put, Put_Fallible, Put_Rvalue,
    Put_Rvalue_Fallible, Remove_OutputParam, Remove, GetAndRemove, RemoveIf,
    Lookup, Lookup_Remove, WithEntryHandle_NoOp,
    WithEntryHandle_NotFound_OrInsert, WithEntryHandle_NotFound_OrInsertFrom,
    GetOrInsert, Put, Put_Fallible, Put_Rvalue, Put_Rvalue_Fallible,
    Remove_OutputParam, Remove, GetAndRemove, RemoveIf, Lookup, Lookup_Remove,
    WithEntryHandle_NoOp, WithEntryHandle_NotFound_OrInsert,
    WithEntryHandle_NotFound_OrInsertFrom,
    WithEntryHandle_NotFound_OrInsertFrom_Exists,
    WithEntryHandle_NotFound_OrRemove, WithEntryHandle_NotFound_OrRemove_Exists,
    Iter, ConstIter, begin_end, cbegin_cend, Clear, ShallowSizeOfExcludingThis,
@ -1163,8 +1119,8 @@ TEST(Hashtables, ClassHashtable_RangeBasedFor)
  nsClassHashtable<nsCStringHashKey, TestUniChar> EntToUniClass(ENTITY_COUNT);

  for (auto& entity : gEntities) {
    EntToUniClass.Put(nsDependentCString(entity.mStr),
                      MakeUnique<TestUniChar>(entity.mUnicode));
    auto* temp = new TestUniChar(entity.mUnicode);
    EntToUniClass.Put(nsDependentCString(entity.mStr), temp);
  }

  // const range-based for
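The hunk above hands Put a raw pointer again; with the backed-out nsClassHashtable the table takes ownership of that pointer, so the caller must not delete it. A minimal sketch with a made-up key and code point:

  nsClassHashtable<nsCStringHashKey, TestUniChar> EntToUniClass(ENTITY_COUNT);
  auto* ch = new TestUniChar(160);
  EntToUniClass.Put("nbsp"_ns, ch);  // the table now owns |ch|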
@ -1464,7 +1420,7 @@ TEST(Hashtables, ClassHashtable_LookupOrAdd_NotPresent)
  EXPECT_EQ(42u, entry->GetChar());
}

TEST(Hashtables, ClassHashtable_GetOrInsertWith_Present)
TEST(Hashtables, ClassHashtable_LookupOrAddFromFactory_Present)
{
  nsClassHashtable<nsCStringHashKey, TestUniChar> EntToUniClass(ENTITY_COUNT);

@ -1473,17 +1429,17 @@ TEST(Hashtables, ClassHashtable_GetOrInsertWith_Present)
        mozilla::MakeUnique<TestUniCharDerived>(entity.mUnicode));
  }

  const auto& entry = EntToUniClass.GetOrInsertWith(
  auto* entry = EntToUniClass.LookupOrAddFromFactory(
      "uml"_ns, [] { return mozilla::MakeUnique<TestUniCharDerived>(42); });
  EXPECT_EQ(168u, entry->GetChar());
}

TEST(Hashtables, ClassHashtable_GetOrInsertWith_NotPresent)
TEST(Hashtables, ClassHashtable_LookupOrAddFromFactory_NotPresent)
{
  nsClassHashtable<nsCStringHashKey, TestUniChar> EntToUniClass(ENTITY_COUNT);

  // This is going to insert a TestUniCharDerived.
  const auto& entry = EntToUniClass.GetOrInsertWith(
  auto* entry = EntToUniClass.LookupOrAddFromFactory(
      "uml"_ns, [] { return mozilla::MakeUnique<TestUniCharDerived>(42); });
  EXPECT_EQ(42u, entry->GetChar());
}
@ -159,7 +159,7 @@ class DeadlockDetector {
   */
  void Add(const T* aResource) {
    PRAutoLock _(mLock);
    mOrdering.Put(aResource, MakeUnique<OrderingEntry>(aResource));
    mOrdering.Put(aResource, new OrderingEntry(aResource));
  }

  void Remove(const T* aResource) {