зеркало из https://github.com/mozilla/gecko-dev.git
Bug 1691894 - Make Put accept DataType instead of wrapping UserDataType. r=xpcom-reviewers,necko-reviewers,nika
Differential Revision: https://phabricator.services.mozilla.com/D104850
This commit is contained in:
Родитель
8973094ec1
Коммит
3c29a68440
|
@ -60,13 +60,13 @@ void nsChromeRegistryContent::RegisterPackage(const ChromePackage& aPackage) {
|
|||
if (NS_FAILED(rv)) return;
|
||||
}
|
||||
|
||||
PackageEntry* entry = new PackageEntry;
|
||||
UniquePtr<PackageEntry> entry = MakeUnique<PackageEntry>();
|
||||
entry->flags = aPackage.flags;
|
||||
entry->contentBaseURI = content;
|
||||
entry->localeBaseURI = locale;
|
||||
entry->skinBaseURI = skin;
|
||||
|
||||
mPackagesHash.Put(aPackage.package, entry);
|
||||
mPackagesHash.Put(aPackage.package, std::move(entry));
|
||||
}
|
||||
|
||||
void nsChromeRegistryContent::RegisterSubstitution(
|
||||
|
|
|
@ -36,8 +36,8 @@ void ChildProcessChannelListener::OnChannelReady(
|
|||
aResolver(rv);
|
||||
} else {
|
||||
mChannelArgs.Put(aIdentifier,
|
||||
{aLoadState, std::move(aStreamFilterEndpoints), aTiming,
|
||||
std::move(aResolver)});
|
||||
CallbackArgs{aLoadState, std::move(aStreamFilterEndpoints),
|
||||
aTiming, std::move(aResolver)});
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -933,8 +933,8 @@ nsresult ExternalResourceMap::AddExternalResource(nsIURI* aURI,
|
|||
}
|
||||
}
|
||||
|
||||
ExternalResource* newResource = new ExternalResource();
|
||||
mMap.Put(aURI, newResource);
|
||||
ExternalResource* newResource =
|
||||
mMap.Put(aURI, MakeUnique<ExternalResource>()).get();
|
||||
|
||||
newResource->mDocument = doc;
|
||||
newResource->mViewer = aViewer;
|
||||
|
|
|
@ -78,7 +78,7 @@ void PointerEventHandler::UpdateActivePointerState(WidgetMouseEvent* aEvent,
|
|||
// In this case we have to know information about available mouse pointers
|
||||
sActivePointersIds->Put(
|
||||
aEvent->pointerId,
|
||||
new PointerInfo(false, aEvent->mInputSource, true, nullptr));
|
||||
MakeUnique<PointerInfo>(false, aEvent->mInputSource, true, nullptr));
|
||||
|
||||
MaybeCacheSpoofedPointerID(aEvent->mInputSource, aEvent->pointerId);
|
||||
break;
|
||||
|
@ -90,7 +90,7 @@ void PointerEventHandler::UpdateActivePointerState(WidgetMouseEvent* aEvent,
|
|||
// nullptr, not sure if this also happens on real usage.
|
||||
sActivePointersIds->Put(
|
||||
pointerEvent->pointerId,
|
||||
new PointerInfo(
|
||||
MakeUnique<PointerInfo>(
|
||||
true, pointerEvent->mInputSource, pointerEvent->mIsPrimary,
|
||||
aTargetContent ? aTargetContent->OwnerDoc() : nullptr));
|
||||
MaybeCacheSpoofedPointerID(pointerEvent->mInputSource,
|
||||
|
@ -109,8 +109,8 @@ void PointerEventHandler::UpdateActivePointerState(WidgetMouseEvent* aEvent,
|
|||
MouseEvent_Binding::MOZ_SOURCE_TOUCH) {
|
||||
sActivePointersIds->Put(
|
||||
pointerEvent->pointerId,
|
||||
new PointerInfo(false, pointerEvent->mInputSource,
|
||||
pointerEvent->mIsPrimary, nullptr));
|
||||
MakeUnique<PointerInfo>(false, pointerEvent->mInputSource,
|
||||
pointerEvent->mIsPrimary, nullptr));
|
||||
} else {
|
||||
sActivePointersIds->Remove(pointerEvent->pointerId);
|
||||
}
|
||||
|
|
|
@ -100,13 +100,13 @@ void RemoteLazyInputStreamStorage::AddStream(nsIInputStream* aInputStream,
|
|||
uint64_t aChildID) {
|
||||
MOZ_ASSERT(aInputStream);
|
||||
|
||||
StreamData* data = new StreamData();
|
||||
UniquePtr<StreamData> data = MakeUnique<StreamData>();
|
||||
data->mInputStream = aInputStream;
|
||||
data->mChildID = aChildID;
|
||||
data->mSize = aSize;
|
||||
|
||||
mozilla::StaticMutexAutoLock lock(gMutex);
|
||||
mStorage.Put(aID, data);
|
||||
mStorage.Put(aID, std::move(data));
|
||||
}
|
||||
|
||||
nsCOMPtr<nsIInputStream> RemoteLazyInputStreamStorage::ForgetStream(
|
||||
|
|
|
@ -530,11 +530,12 @@ static void AddDataEntryInternal(const nsACString& aURI, T aObject,
|
|||
gDataTable = new nsClassHashtable<nsCStringHashKey, mozilla::dom::DataInfo>;
|
||||
}
|
||||
|
||||
mozilla::dom::DataInfo* info =
|
||||
new mozilla::dom::DataInfo(aObject, aPrincipal, aAgentClusterId);
|
||||
BlobURLsReporter::GetJSStackForBlob(info);
|
||||
mozilla::UniquePtr<mozilla::dom::DataInfo> info =
|
||||
mozilla::MakeUnique<mozilla::dom::DataInfo>(aObject, aPrincipal,
|
||||
aAgentClusterId);
|
||||
BlobURLsReporter::GetJSStackForBlob(info.get());
|
||||
|
||||
gDataTable->Put(aURI, info);
|
||||
gDataTable->Put(aURI, std::move(info));
|
||||
}
|
||||
|
||||
void BlobURLProtocolHandler::Init(void) {
|
||||
|
|
|
@ -2385,7 +2385,7 @@ void HTMLFormElement::AddToPastNamesMap(const nsAString& aName,
|
|||
// previous entry with the same name, if any.
|
||||
nsCOMPtr<nsIContent> node = do_QueryInterface(aChild);
|
||||
if (node) {
|
||||
mPastNameLookupTable.Put(aName, node);
|
||||
mPastNameLookupTable.Put(aName, ToSupports(node));
|
||||
node->SetFlags(MAY_BE_IN_PAST_NAMES_MAP);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -7886,24 +7886,19 @@ uint64_t ConnectionPool::Start(
|
|||
const bool databaseInfoIsNew = !dbInfo;
|
||||
|
||||
if (databaseInfoIsNew) {
|
||||
dbInfo = new DatabaseInfo(this, aDatabaseId);
|
||||
|
||||
MutexAutoLock lock(mDatabasesMutex);
|
||||
|
||||
mDatabases.Put(aDatabaseId, dbInfo);
|
||||
dbInfo =
|
||||
mDatabases.Put(aDatabaseId, MakeUnique<DatabaseInfo>(this, aDatabaseId))
|
||||
.get();
|
||||
}
|
||||
|
||||
auto& transactionInfo = [&]() -> TransactionInfo& {
|
||||
auto* transactionInfo = new TransactionInfo(
|
||||
*dbInfo, aBackgroundChildLoggingId, aDatabaseId, transactionId,
|
||||
aLoggingSerialNumber, aObjectStoreNames, aIsWriteTransaction,
|
||||
aTransactionOp);
|
||||
|
||||
MOZ_ASSERT(!mTransactions.Get(transactionId));
|
||||
mTransactions.Put(transactionId, transactionInfo);
|
||||
|
||||
return *transactionInfo;
|
||||
}();
|
||||
MOZ_ASSERT(!mTransactions.Contains(transactionId));
|
||||
auto& transactionInfo = *mTransactions.Put(
|
||||
transactionId, MakeUnique<TransactionInfo>(
|
||||
*dbInfo, aBackgroundChildLoggingId, aDatabaseId,
|
||||
transactionId, aLoggingSerialNumber, aObjectStoreNames,
|
||||
aIsWriteTransaction, aTransactionOp));
|
||||
|
||||
if (aIsWriteTransaction) {
|
||||
MOZ_ASSERT(dbInfo->mWriteTransactionCount < UINT32_MAX);
|
||||
|
@ -16775,10 +16770,13 @@ void OpenDatabaseOp::EnsureDatabaseActor() {
|
|||
info->mLiveDatabases.AppendElement(
|
||||
WrapNotNullUnchecked(mDatabase.unsafeGetRawPtr()));
|
||||
} else {
|
||||
info = new DatabaseActorInfo(
|
||||
mMetadata.clonePtr(),
|
||||
WrapNotNullUnchecked(mDatabase.unsafeGetRawPtr()));
|
||||
gLiveDatabaseHashtable->Put(mDatabaseId, info);
|
||||
// XXX Maybe use GetOrInsertWith above, to avoid a second lookup here?
|
||||
info = gLiveDatabaseHashtable
|
||||
->Put(mDatabaseId,
|
||||
MakeUnique<DatabaseActorInfo>(
|
||||
mMetadata.clonePtr(),
|
||||
WrapNotNullUnchecked(mDatabase.unsafeGetRawPtr())))
|
||||
.get();
|
||||
}
|
||||
|
||||
// Balanced in Database::CleanupMetadata().
|
||||
|
|
|
@ -1552,8 +1552,10 @@ mozilla::ipc::IPCResult BrowserChild::RecvRealMouseMoveEvent(
|
|||
mToBeDispatchedMouseData.Push(dispatchData.release());
|
||||
|
||||
// Put new data to replace the old one in the hash table.
|
||||
CoalescedMouseData* newData = new CoalescedMouseData();
|
||||
mCoalescedMouseData.Put(aEvent.pointerId, newData);
|
||||
CoalescedMouseData* newData =
|
||||
mCoalescedMouseData
|
||||
.Put(aEvent.pointerId, MakeUnique<CoalescedMouseData>())
|
||||
.get();
|
||||
newData->Coalesce(aEvent, aGuid, aInputBlockId);
|
||||
|
||||
// Dispatch all pending mouse events.
|
||||
|
|
|
@ -222,11 +222,11 @@ Result<Ok, nsresult> SharedMap::MaybeRebuild() {
|
|||
// indicate memory corruption, and are fatal.
|
||||
MOZ_RELEASE_ASSERT(!buffer.error());
|
||||
|
||||
// Note: Order of evaluation of function arguments is not guaranteed, so we
|
||||
// can't use entry.release() in place of entry.get() without entry->Name()
|
||||
// sometimes resulting in a null dereference.
|
||||
mEntries.Put(entry->Name(), entry.get());
|
||||
Unused << entry.release();
|
||||
// Note: While the order of evaluation of the arguments to Put doesn't
|
||||
// matter for this (the actual move will only happen within Put), to be
|
||||
// clear about this, we call entry->Name() before calling Put.
|
||||
const auto& name = entry->Name();
|
||||
mEntries.Put(name, std::move(entry));
|
||||
}
|
||||
|
||||
return Ok();
|
||||
|
|
|
@ -80,7 +80,7 @@ bool SharedStringMap::Find(const nsCString& aKey, size_t* aIndex) {
|
|||
|
||||
void SharedStringMapBuilder::Add(const nsCString& aKey,
|
||||
const nsString& aValue) {
|
||||
mEntries.Put(aKey, {mKeyTable.Add(aKey), mValueTable.Add(aValue)});
|
||||
mEntries.Put(aKey, Entry{mKeyTable.Add(aKey), mValueTable.Add(aValue)});
|
||||
}
|
||||
|
||||
Result<Ok, nsresult> SharedStringMapBuilder::Finalize(
|
||||
|
|
|
@ -7253,14 +7253,13 @@ void PrepareDatastoreOp::GetResponse(LSRequestResponse& aResponse) {
|
|||
|
||||
mDatastoreId = ++gLastDatastoreId;
|
||||
|
||||
auto preparedDatastore = MakeUnique<PreparedDatastore>(
|
||||
mDatastore, mContentParentId, Origin(), mDatastoreId,
|
||||
/* aForPreload */ mForPreload);
|
||||
|
||||
if (!gPreparedDatastores) {
|
||||
gPreparedDatastores = new PreparedDatastoreHashtable();
|
||||
}
|
||||
gPreparedDatastores->Put(mDatastoreId, preparedDatastore.get());
|
||||
const auto& preparedDatastore = gPreparedDatastores->Put(
|
||||
mDatastoreId, MakeUnique<PreparedDatastore>(
|
||||
mDatastore, mContentParentId, Origin(), mDatastoreId,
|
||||
/* aForPreload */ mForPreload));
|
||||
|
||||
if (mInvalidated) {
|
||||
preparedDatastore->Invalidate();
|
||||
|
@ -7268,8 +7267,6 @@ void PrepareDatastoreOp::GetResponse(LSRequestResponse& aResponse) {
|
|||
|
||||
mPreparedDatastoreRegistered.Flip();
|
||||
|
||||
Unused << preparedDatastore.release();
|
||||
|
||||
if (mForPreload) {
|
||||
LSRequestPreloadDatastoreResponse preloadDatastoreResponse;
|
||||
|
||||
|
|
|
@ -119,7 +119,7 @@ class GMPDiskStorage : public GMPStorage {
|
|||
continue;
|
||||
}
|
||||
|
||||
mRecords.Put(recordName, new Record(filename, recordName));
|
||||
mRecords.Put(recordName, MakeUnique<Record>(filename, recordName));
|
||||
}
|
||||
|
||||
return NS_OK;
|
||||
|
|
|
@ -150,9 +150,10 @@ bool GMPInfoFileParser::Init(nsIFile* aInfoFile) {
|
|||
ToLowerCase(key);
|
||||
key.Trim(" ");
|
||||
|
||||
nsCString* value = new nsCString(Substring(line, colon + 1));
|
||||
auto value = MakeUnique<nsCString>(Substring(line, colon + 1));
|
||||
value->Trim(" ");
|
||||
mValues.Put(key, value); // Hashtable assumes ownership of value.
|
||||
mValues.Put(key,
|
||||
std::move(value)); // Hashtable assumes ownership of value.
|
||||
}
|
||||
|
||||
return true;
|
||||
|
|
|
@ -31,9 +31,9 @@ extern LazyLogModule gMediaDecoderLog;
|
|||
using media::TimeUnit;
|
||||
|
||||
/** Decoder base class for Ogg-encapsulated streams. */
|
||||
OggCodecState* OggCodecState::Create(rlbox_sandbox_ogg* aSandbox,
|
||||
tainted_opaque_ogg<ogg_page*> aPage,
|
||||
uint32_t aSerial) {
|
||||
UniquePtr<OggCodecState> OggCodecState::Create(
|
||||
rlbox_sandbox_ogg* aSandbox, tainted_opaque_ogg<ogg_page*> aPage,
|
||||
uint32_t aSerial) {
|
||||
NS_ASSERTION(sandbox_invoke(*aSandbox, ogg_page_bos, aPage)
|
||||
.unverified_safe_because(RLBOX_SAFE_DEBUG_ASSERTION),
|
||||
"Only call on BOS page!");
|
||||
|
@ -69,8 +69,12 @@ OggCodecState* OggCodecState::Create(rlbox_sandbox_ogg* aSandbox,
|
|||
// Can't use MakeUnique here, OggCodecState is protected.
|
||||
codecState.reset(new OggCodecState(aSandbox, aPage, aSerial, false));
|
||||
}
|
||||
return codecState->OggCodecState::InternalInit() ? codecState.release()
|
||||
: nullptr;
|
||||
|
||||
if (!codecState->OggCodecState::InternalInit()) {
|
||||
codecState.reset();
|
||||
}
|
||||
|
||||
return codecState;
|
||||
}
|
||||
|
||||
OggCodecState::OggCodecState(rlbox_sandbox_ogg* aSandbox,
|
||||
|
@ -1559,7 +1563,7 @@ bool SkeletonState::DecodeIndex(ogg_packet* aPacket) {
|
|||
|
||||
int32_t keyPointsRead = keyPoints->Length();
|
||||
if (keyPointsRead > 0) {
|
||||
mIndex.Put(serialno, keyPoints.release());
|
||||
mIndex.Put(serialno, std::move(keyPoints));
|
||||
}
|
||||
|
||||
LOG(LogLevel::Debug, ("Loaded %d keypoints for Skeleton on stream %u",
|
||||
|
|
|
@ -118,9 +118,9 @@ class OggCodecState {
|
|||
|
||||
// Factory for creating nsCodecStates. Use instead of constructor.
|
||||
// aPage should be a beginning-of-stream page.
|
||||
static OggCodecState* Create(rlbox_sandbox_ogg* aSandbox,
|
||||
tainted_opaque_ogg<ogg_page*> aPage,
|
||||
uint32_t aSerial);
|
||||
static UniquePtr<OggCodecState> Create(rlbox_sandbox_ogg* aSandbox,
|
||||
tainted_opaque_ogg<ogg_page*> aPage,
|
||||
uint32_t aSerial);
|
||||
|
||||
virtual CodecType GetType() { return TYPE_UNKNOWN; }
|
||||
|
||||
|
|
|
@ -12,9 +12,10 @@ namespace mozilla {
|
|||
|
||||
OggCodecStore::OggCodecStore() : mMonitor("CodecStore") {}
|
||||
|
||||
void OggCodecStore::Add(uint32_t serial, OggCodecState* codecState) {
|
||||
OggCodecState* OggCodecStore::Add(uint32_t serial,
|
||||
UniquePtr<OggCodecState> codecState) {
|
||||
MonitorAutoLock mon(mMonitor);
|
||||
mCodecStates.Put(serial, codecState);
|
||||
return mCodecStates.Put(serial, std::move(codecState)).get();
|
||||
}
|
||||
|
||||
bool OggCodecStore::Contains(uint32_t serial) {
|
||||
|
|
|
@ -19,7 +19,7 @@ namespace mozilla {
|
|||
class OggCodecStore {
|
||||
public:
|
||||
OggCodecStore();
|
||||
void Add(uint32_t serial, OggCodecState* codecState);
|
||||
OggCodecState* Add(uint32_t serial, UniquePtr<OggCodecState> codecState);
|
||||
bool Contains(uint32_t serial);
|
||||
OggCodecState* Get(uint32_t serial);
|
||||
bool IsKnownStream(uint32_t aSerial);
|
||||
|
|
|
@ -510,9 +510,9 @@ nsresult OggDemuxer::ReadMetadata() {
|
|||
// We've not encountered a stream with this serial number before. Create
|
||||
// an OggCodecState to demux it, and map that to the OggCodecState
|
||||
// in mCodecStates.
|
||||
OggCodecState* codecState =
|
||||
OggCodecState::Create(mSandbox.get(), page.to_opaque(), serial);
|
||||
mCodecStore.Add(serial, codecState);
|
||||
OggCodecState* const codecState = mCodecStore.Add(
|
||||
serial,
|
||||
OggCodecState::Create(mSandbox.get(), page.to_opaque(), serial));
|
||||
bitstreams.AppendElement(codecState);
|
||||
serials.AppendElement(serial);
|
||||
}
|
||||
|
@ -685,7 +685,7 @@ bool OggDemuxer::ReadOggChain(const media::TimeUnit& aLastEndTime) {
|
|||
|
||||
OggCodecState* state;
|
||||
|
||||
mCodecStore.Add(serial, codecState.release());
|
||||
mCodecStore.Add(serial, std::move(codecState));
|
||||
state = mCodecStore.Get(serial);
|
||||
|
||||
NS_ENSURE_TRUE(state != nullptr, false);
|
||||
|
|
|
@ -145,7 +145,7 @@ class EMEDecryptor : public MediaDataDecoder,
|
|||
return;
|
||||
}
|
||||
|
||||
mDecrypts.Put(aSample, new DecryptPromiseRequestHolder());
|
||||
mDecrypts.Put(aSample, MakeUnique<DecryptPromiseRequestHolder>());
|
||||
mProxy->Decrypt(aSample)
|
||||
->Then(mThread, __func__, this, &EMEDecryptor::Decrypted,
|
||||
&EMEDecryptor::Decrypted)
|
||||
|
|
|
@ -71,8 +71,7 @@ class OriginKeyStore : public nsISupports {
|
|||
if (NS_WARN_IF(NS_FAILED(rv))) {
|
||||
return rv;
|
||||
}
|
||||
key = new OriginKey(salt);
|
||||
mKeys.Put(principalString, key);
|
||||
key = mKeys.Put(principalString, MakeUnique<OriginKey>(salt)).get();
|
||||
}
|
||||
if (aPersist && !key->mSecondsStamp) {
|
||||
key->mSecondsStamp = PR_Now() / PR_USEC_PER_SEC;
|
||||
|
@ -259,7 +258,7 @@ class OriginKeyStore : public nsISupports {
|
|||
if (NS_FAILED(rv)) {
|
||||
continue;
|
||||
}
|
||||
mKeys.Put(origin, new OriginKey(key, secondsstamp));
|
||||
mKeys.Put(origin, MakeUnique<OriginKey>(key, secondsstamp));
|
||||
}
|
||||
mPersistCount = mKeys.Count();
|
||||
return NS_OK;
|
||||
|
|
|
@ -104,11 +104,13 @@ bool MessagePortService::RequestEntangling(MessagePortParent* aParent,
|
|||
return false;
|
||||
}
|
||||
|
||||
data = new MessagePortServiceData(aParent->ID());
|
||||
mPorts.Put(aDestinationUUID, data);
|
||||
mPorts.Put(aDestinationUUID,
|
||||
MakeUnique<MessagePortServiceData>(aParent->ID()));
|
||||
|
||||
data = new MessagePortServiceData(aDestinationUUID);
|
||||
mPorts.Put(aParent->ID(), data);
|
||||
data = mPorts
|
||||
.Put(aParent->ID(),
|
||||
MakeUnique<MessagePortServiceData>(aDestinationUUID))
|
||||
.get();
|
||||
}
|
||||
|
||||
// This is a security check.
|
||||
|
|
|
@ -2082,7 +2082,8 @@ void PluginInstanceParent::SubclassPluginWindow(HWND aWnd) {
|
|||
mPluginWndProc = nullptr;
|
||||
// Note sPluginInstanceList wil delete 'this' if we do not remove
|
||||
// it on shutdown.
|
||||
sPluginInstanceList->Put((void*)mPluginHWND, this);
|
||||
sPluginInstanceList->Put((void*)mPluginHWND,
|
||||
UniquePtr<PluginInstanceParent>(this));
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -195,7 +195,7 @@ void ReportingHeader::ReportingFromChannel(nsIHttpChannel* aChannel) {
|
|||
}
|
||||
|
||||
// Here we override the previous data.
|
||||
mOrigins.Put(origin, client.release());
|
||||
mOrigins.Put(origin, std::move(client));
|
||||
|
||||
MaybeCreateCleanupTimer();
|
||||
}
|
||||
|
|
|
@ -1589,8 +1589,9 @@ ServiceWorkerManager::GetOrCreateJobQueue(const nsACString& aKey,
|
|||
// XXX we could use WithEntryHandle here to avoid a hashtable lookup, except
|
||||
// that leads to a false positive assertion, see bug 1370674 comment 7.
|
||||
if (!mRegistrationInfos.Get(aKey, &data)) {
|
||||
data = new RegistrationDataPerPrincipal();
|
||||
mRegistrationInfos.Put(aKey, data);
|
||||
data =
|
||||
mRegistrationInfos.Put(aKey, MakeUnique<RegistrationDataPerPrincipal>())
|
||||
.get();
|
||||
}
|
||||
|
||||
return data->mJobQueues
|
||||
|
|
|
@ -73,8 +73,8 @@ SessionStorageManagerBase::GetOriginRecord(
|
|||
OriginKeyHashTable* table;
|
||||
if (!mOATable.Get(aOriginAttrs, &table)) {
|
||||
if (aMakeIfNeeded) {
|
||||
table = new OriginKeyHashTable();
|
||||
mOATable.Put(aOriginAttrs, table);
|
||||
table =
|
||||
mOATable.Put(aOriginAttrs, MakeUnique<OriginKeyHashTable>()).get();
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
|
@ -83,13 +83,13 @@ SessionStorageManagerBase::GetOriginRecord(
|
|||
OriginRecord* originRecord;
|
||||
if (!table->Get(aOriginKey, &originRecord)) {
|
||||
if (aMakeIfNeeded) {
|
||||
originRecord = new OriginRecord();
|
||||
auto newOriginRecord = MakeUnique<OriginRecord>();
|
||||
if (aCloneFrom) {
|
||||
originRecord->mCache = aCloneFrom->Clone();
|
||||
newOriginRecord->mCache = aCloneFrom->Clone();
|
||||
} else {
|
||||
originRecord->mCache = new SessionStorageCache();
|
||||
newOriginRecord->mCache = new SessionStorageCache();
|
||||
}
|
||||
table->Put(aOriginKey, originRecord);
|
||||
originRecord = table->Put(aOriginKey, std::move(newOriginRecord)).get();
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
|
|
|
@ -304,7 +304,7 @@ void StorageDBThread::SyncPreload(LocalStorageCacheBridge* aCache,
|
|||
// need to be flushed first.
|
||||
// Schedule preload for this cache as the first operation.
|
||||
nsresult rv =
|
||||
InsertDBOp(new DBOperation(DBOperation::opPreloadUrgent, aCache));
|
||||
InsertDBOp(MakeUnique<DBOperation>(DBOperation::opPreloadUrgent, aCache));
|
||||
|
||||
// LoadWait exits after LoadDone of the cache has been called.
|
||||
if (NS_SUCCEEDED(rv)) {
|
||||
|
@ -330,12 +330,10 @@ void StorageDBThread::GetOriginsHavingData(nsTArray<nsCString>* aOrigins) {
|
|||
}
|
||||
}
|
||||
|
||||
nsresult StorageDBThread::InsertDBOp(StorageDBThread::DBOperation* aOperation) {
|
||||
nsresult StorageDBThread::InsertDBOp(
|
||||
UniquePtr<StorageDBThread::DBOperation> aOperation) {
|
||||
MonitorAutoLock monitor(mThreadObserver->GetMonitor());
|
||||
|
||||
// Sentinel to don't forget to delete the operation when we exit early.
|
||||
UniquePtr<StorageDBThread::DBOperation> opScope(aOperation);
|
||||
|
||||
if (NS_FAILED(mStatus)) {
|
||||
MonitorAutoUnlock unlock(mThreadObserver->GetMonitor());
|
||||
aOperation->Finalize(mStatus);
|
||||
|
@ -377,14 +375,11 @@ nsresult StorageDBThread::InsertDBOp(StorageDBThread::DBOperation* aOperation) {
|
|||
case DBOperation::opGetUsage:
|
||||
if (aOperation->Type() == DBOperation::opPreloadUrgent) {
|
||||
SetHigherPriority(); // Dropped back after urgent preload execution
|
||||
mPreloads.InsertElementAt(0, aOperation);
|
||||
mPreloads.InsertElementAt(0, aOperation.release());
|
||||
} else {
|
||||
mPreloads.AppendElement(aOperation);
|
||||
mPreloads.AppendElement(aOperation.release());
|
||||
}
|
||||
|
||||
// DB operation adopted, don't delete it.
|
||||
Unused << opScope.release();
|
||||
|
||||
// Immediately start executing this.
|
||||
monitor.Notify();
|
||||
break;
|
||||
|
@ -392,10 +387,7 @@ nsresult StorageDBThread::InsertDBOp(StorageDBThread::DBOperation* aOperation) {
|
|||
default:
|
||||
// Update operations are first collected, coalesced and then flushed
|
||||
// after a short time.
|
||||
mPendingTasks.Add(aOperation);
|
||||
|
||||
// DB operation adopted, don't delete it.
|
||||
Unused << opScope.release();
|
||||
mPendingTasks.Add(std::move(aOperation));
|
||||
|
||||
ScheduleFlush();
|
||||
break;
|
||||
|
@ -1308,14 +1300,13 @@ bool StorageDBThread::PendingOperations::CheckForCoalesceOpportunity(
|
|||
}
|
||||
|
||||
void StorageDBThread::PendingOperations::Add(
|
||||
StorageDBThread::DBOperation* aOperation) {
|
||||
UniquePtr<StorageDBThread::DBOperation> aOperation) {
|
||||
// Optimize: when a key to remove has never been written to disk
|
||||
// just bypass this operation. A key is new when an operation scheduled
|
||||
// to write it to the database is of type opAddItem.
|
||||
if (CheckForCoalesceOpportunity(aOperation, DBOperation::opAddItem,
|
||||
if (CheckForCoalesceOpportunity(aOperation.get(), DBOperation::opAddItem,
|
||||
DBOperation::opRemoveItem)) {
|
||||
mUpdates.Remove(aOperation->Target());
|
||||
delete aOperation;
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1323,7 +1314,7 @@ void StorageDBThread::PendingOperations::Add(
|
|||
// written to disk, keep type of the operation to store it at opAddItem.
|
||||
// This allows optimization to just forget adding a new key when
|
||||
// it is removed from the storage before flush.
|
||||
if (CheckForCoalesceOpportunity(aOperation, DBOperation::opAddItem,
|
||||
if (CheckForCoalesceOpportunity(aOperation.get(), DBOperation::opAddItem,
|
||||
DBOperation::opUpdateItem)) {
|
||||
aOperation->mType = DBOperation::opAddItem;
|
||||
}
|
||||
|
@ -1332,7 +1323,7 @@ void StorageDBThread::PendingOperations::Add(
|
|||
// remove/set/remove on a previously existing key we have to change
|
||||
// opAddItem to opUpdateItem on the new operation when there is opRemoveItem
|
||||
// pending for the key.
|
||||
if (CheckForCoalesceOpportunity(aOperation, DBOperation::opRemoveItem,
|
||||
if (CheckForCoalesceOpportunity(aOperation.get(), DBOperation::opRemoveItem,
|
||||
DBOperation::opAddItem)) {
|
||||
aOperation->mType = DBOperation::opUpdateItem;
|
||||
}
|
||||
|
@ -1344,7 +1335,7 @@ void StorageDBThread::PendingOperations::Add(
|
|||
case DBOperation::opUpdateItem:
|
||||
case DBOperation::opRemoveItem:
|
||||
// Override any existing operation for the target (=scope+key).
|
||||
mUpdates.Put(aOperation->Target(), aOperation);
|
||||
mUpdates.Put(aOperation->Target(), std::move(aOperation));
|
||||
break;
|
||||
|
||||
// Clear operations
|
||||
|
@ -1381,14 +1372,14 @@ void StorageDBThread::PendingOperations::Add(
|
|||
iter.Remove();
|
||||
}
|
||||
|
||||
mClears.Put(aOperation->Target(), aOperation);
|
||||
mClears.Put(aOperation->Target(), std::move(aOperation));
|
||||
break;
|
||||
|
||||
case DBOperation::opClearAll:
|
||||
// Drop simply everything, this is a super-operation.
|
||||
mUpdates.Clear();
|
||||
mClears.Clear();
|
||||
mClears.Put(aOperation->Target(), aOperation);
|
||||
mClears.Put(aOperation->Target(), std::move(aOperation));
|
||||
break;
|
||||
|
||||
default:
|
||||
|
|
|
@ -216,7 +216,7 @@ class StorageDBThread final {
|
|||
|
||||
// Method responsible for coalescing redundant update operations with the
|
||||
// same |Target()| or clear operations with the same or matching |Origin()|
|
||||
void Add(DBOperation* aOperation);
|
||||
void Add(UniquePtr<DBOperation> aOperation);
|
||||
|
||||
// True when there are some scheduled operations to flush on disk
|
||||
bool HasTasks() const;
|
||||
|
@ -330,7 +330,7 @@ class StorageDBThread final {
|
|||
|
||||
virtual void AsyncPreload(LocalStorageCacheBridge* aCache,
|
||||
bool aPriority = false) {
|
||||
InsertDBOp(new DBOperation(
|
||||
InsertDBOp(MakeUnique<DBOperation>(
|
||||
aPriority ? DBOperation::opPreloadUrgent : DBOperation::opPreload,
|
||||
aCache));
|
||||
}
|
||||
|
@ -339,45 +339,46 @@ class StorageDBThread final {
|
|||
bool aForce = false);
|
||||
|
||||
virtual void AsyncGetUsage(StorageUsageBridge* aUsage) {
|
||||
InsertDBOp(new DBOperation(DBOperation::opGetUsage, aUsage));
|
||||
InsertDBOp(MakeUnique<DBOperation>(DBOperation::opGetUsage, aUsage));
|
||||
}
|
||||
|
||||
virtual nsresult AsyncAddItem(LocalStorageCacheBridge* aCache,
|
||||
const nsAString& aKey,
|
||||
const nsAString& aValue) {
|
||||
return InsertDBOp(
|
||||
new DBOperation(DBOperation::opAddItem, aCache, aKey, aValue));
|
||||
MakeUnique<DBOperation>(DBOperation::opAddItem, aCache, aKey, aValue));
|
||||
}
|
||||
|
||||
virtual nsresult AsyncUpdateItem(LocalStorageCacheBridge* aCache,
|
||||
const nsAString& aKey,
|
||||
const nsAString& aValue) {
|
||||
return InsertDBOp(
|
||||
new DBOperation(DBOperation::opUpdateItem, aCache, aKey, aValue));
|
||||
return InsertDBOp(MakeUnique<DBOperation>(DBOperation::opUpdateItem, aCache,
|
||||
aKey, aValue));
|
||||
}
|
||||
|
||||
virtual nsresult AsyncRemoveItem(LocalStorageCacheBridge* aCache,
|
||||
const nsAString& aKey) {
|
||||
return InsertDBOp(new DBOperation(DBOperation::opRemoveItem, aCache, aKey));
|
||||
return InsertDBOp(
|
||||
MakeUnique<DBOperation>(DBOperation::opRemoveItem, aCache, aKey));
|
||||
}
|
||||
|
||||
virtual nsresult AsyncClear(LocalStorageCacheBridge* aCache) {
|
||||
return InsertDBOp(new DBOperation(DBOperation::opClear, aCache));
|
||||
return InsertDBOp(MakeUnique<DBOperation>(DBOperation::opClear, aCache));
|
||||
}
|
||||
|
||||
virtual void AsyncClearAll() {
|
||||
InsertDBOp(new DBOperation(DBOperation::opClearAll));
|
||||
InsertDBOp(MakeUnique<DBOperation>(DBOperation::opClearAll));
|
||||
}
|
||||
|
||||
virtual void AsyncClearMatchingOrigin(const nsACString& aOriginNoSuffix) {
|
||||
InsertDBOp(
|
||||
new DBOperation(DBOperation::opClearMatchingOrigin, aOriginNoSuffix));
|
||||
InsertDBOp(MakeUnique<DBOperation>(DBOperation::opClearMatchingOrigin,
|
||||
aOriginNoSuffix));
|
||||
}
|
||||
|
||||
virtual void AsyncClearMatchingOriginAttributes(
|
||||
const OriginAttributesPattern& aPattern) {
|
||||
InsertDBOp(new DBOperation(DBOperation::opClearMatchingOriginAttributes,
|
||||
aPattern));
|
||||
InsertDBOp(MakeUnique<DBOperation>(
|
||||
DBOperation::opClearMatchingOriginAttributes, aPattern));
|
||||
}
|
||||
|
||||
virtual void AsyncFlush();
|
||||
|
@ -443,7 +444,7 @@ class StorageDBThread final {
|
|||
|
||||
// Helper to direct an operation to one of the arrays above;
|
||||
// also checks IsOriginClearPending for preloads
|
||||
nsresult InsertDBOp(DBOperation* aOperation);
|
||||
nsresult InsertDBOp(UniquePtr<DBOperation> aOperation);
|
||||
|
||||
// Opens the database, first thing we do after start of the thread.
|
||||
nsresult OpenDatabaseConnection();
|
||||
|
|
|
@ -55,10 +55,12 @@ enum SVGTag {
|
|||
void SVGElementFactory::Init() {
|
||||
sTagAtomTable = new TagAtomTable(64);
|
||||
|
||||
#define SVG_TAG(_tag, _classname) \
|
||||
sTagAtomTable->Put(nsGkAtoms::_tag, NS_NewSVG##_classname##Element);
|
||||
#define SVG_FROM_PARSER_TAG(_tag, _classname) \
|
||||
sTagAtomTable->Put(nsGkAtoms::_tag, NS_NewSVG##_classname##Element);
|
||||
#define SVG_TAG(_tag, _classname) \
|
||||
sTagAtomTable->Put(nsGkAtoms::_tag, SVGContentCreatorFunction( \
|
||||
NS_NewSVG##_classname##Element));
|
||||
#define SVG_FROM_PARSER_TAG(_tag, _classname) \
|
||||
sTagAtomTable->Put(nsGkAtoms::_tag, SVGContentCreatorFunction( \
|
||||
NS_NewSVG##_classname##Element));
|
||||
#include "SVGTagList.h"
|
||||
#undef SVG_TAG
|
||||
#undef SVG_FROM_PARSER_TAG
|
||||
|
|
|
@ -619,12 +619,12 @@ PersistNodeFixup::PersistNodeFixup(WebBrowserPersistLocalDocument* aParent,
|
|||
NS_ENSURE_SUCCESS_VOID(rv);
|
||||
for (uint32_t i = 0; i < mapSize; ++i) {
|
||||
nsAutoCString urlFrom;
|
||||
auto* urlTo = new nsCString();
|
||||
auto urlTo = MakeUnique<nsCString>();
|
||||
|
||||
rv = aMap->GetURIMapping(i, urlFrom, *urlTo);
|
||||
MOZ_ASSERT(NS_SUCCEEDED(rv));
|
||||
if (NS_SUCCEEDED(rv)) {
|
||||
mMap.Put(urlFrom, urlTo);
|
||||
mMap.Put(urlFrom, std::move(urlTo));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -572,7 +572,7 @@ nsresult nsWebBrowserPersist::StartUpload(nsIInputStream* aInputStream,
|
|||
|
||||
// add this to the upload list
|
||||
nsCOMPtr<nsISupports> keyPtr = do_QueryInterface(destChannel);
|
||||
mUploadList.Put(keyPtr, new UploadData(aDestinationURI));
|
||||
mUploadList.Put(keyPtr, MakeUnique<UploadData>(aDestinationURI));
|
||||
|
||||
return NS_OK;
|
||||
}
|
||||
|
@ -1507,7 +1507,7 @@ nsresult nsWebBrowserPersist::SaveChannelInternal(nsIChannel* aChannel,
|
|||
MutexAutoLock lock(mOutputMapMutex);
|
||||
// Add the output transport to the output map with the channel as the key
|
||||
nsCOMPtr<nsISupports> keyPtr = do_QueryInterface(aChannel);
|
||||
mOutputMap.Put(keyPtr, new OutputData(aFile, mURI, aCalcFileExt));
|
||||
mOutputMap.Put(keyPtr, MakeUnique<OutputData>(aFile, mURI, aCalcFileExt));
|
||||
|
||||
return NS_OK;
|
||||
}
|
||||
|
@ -2424,7 +2424,7 @@ nsresult nsWebBrowserPersist::FixRedirectedChannelEntry(
|
|||
// Store data again with new channel unless told to ignore redirects.
|
||||
if (!(mPersistFlags & PERSIST_FLAGS_IGNORE_REDIRECTED_DATA)) {
|
||||
nsCOMPtr<nsISupports> keyPtr = do_QueryInterface(aNewChannel);
|
||||
mOutputMap.Put(keyPtr, outputData.release());
|
||||
mOutputMap.Put(keyPtr, std::move(outputData));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2743,7 +2743,7 @@ nsresult nsWebBrowserPersist::MakeAndStoreLocalFilenameInURIMap(
|
|||
|
||||
if (aNeedsPersisting) mCurrentThingsToPersist++;
|
||||
|
||||
mURIMap.Put(spec, data);
|
||||
mURIMap.Put(spec, UniquePtr<URIData>(data));
|
||||
if (aData) {
|
||||
*aData = data;
|
||||
}
|
||||
|
|
|
@ -194,7 +194,7 @@ nsresult nsXULPrototypeCache::PutScript(nsIURI* aURI,
|
|||
}
|
||||
#endif
|
||||
|
||||
mScriptTable.Put(aURI, aScriptObject);
|
||||
mScriptTable.Put(aURI, JS::Heap<JSScript*>{aScriptObject});
|
||||
|
||||
return NS_OK;
|
||||
}
|
||||
|
|
|
@ -415,9 +415,10 @@ mozilla::ipc::IPCResult CompositorBridgeChild::RecvSharedCompositorFrameMetrics(
|
|||
const mozilla::ipc::SharedMemoryBasic::Handle& metrics,
|
||||
const CrossProcessMutexHandle& handle, const LayersId& aLayersId,
|
||||
const uint32_t& aAPZCId) {
|
||||
SharedFrameMetricsData* data =
|
||||
new SharedFrameMetricsData(metrics, handle, aLayersId, aAPZCId);
|
||||
mFrameMetricsTable.Put(data->GetViewID(), data);
|
||||
auto data =
|
||||
MakeUnique<SharedFrameMetricsData>(metrics, handle, aLayersId, aAPZCId);
|
||||
const auto& viewID = data->GetViewID();
|
||||
mFrameMetricsTable.Put(viewID, std::move(data));
|
||||
return IPC_OK();
|
||||
}
|
||||
|
||||
|
|
|
@ -162,10 +162,10 @@ void AsyncImagePipelineManager::AddAsyncImagePipeline(
|
|||
MOZ_ASSERT(aImageHost);
|
||||
uint64_t id = wr::AsUint64(aPipelineId);
|
||||
|
||||
MOZ_ASSERT(!mAsyncImagePipelines.Get(id));
|
||||
AsyncImagePipeline* holder = new AsyncImagePipeline();
|
||||
MOZ_ASSERT(!mAsyncImagePipelines.Contains(id));
|
||||
auto holder = MakeUnique<AsyncImagePipeline>();
|
||||
holder->mImageHost = aImageHost;
|
||||
mAsyncImagePipelines.Put(id, holder);
|
||||
mAsyncImagePipelines.Put(id, std::move(holder));
|
||||
AddPipeline(aPipelineId, /* aWrBridge */ nullptr);
|
||||
}
|
||||
|
||||
|
|
|
@ -373,20 +373,17 @@ class BlurCache final : public nsExpirationTracker<BlurCacheData, 4> {
|
|||
return blur;
|
||||
}
|
||||
|
||||
// Returns true if we successfully register the blur in the cache, false
|
||||
// otherwise.
|
||||
bool RegisterEntry(BlurCacheData* aValue) {
|
||||
nsresult rv = AddObject(aValue);
|
||||
void RegisterEntry(UniquePtr<BlurCacheData> aValue) {
|
||||
nsresult rv = AddObject(aValue.get());
|
||||
if (NS_FAILED(rv)) {
|
||||
// We are OOM, and we cannot track this object. We don't want stall
|
||||
// entries in the hash table (since the expiration tracker is responsible
|
||||
// for removing the cache entries), so we avoid putting that entry in the
|
||||
// table, which is a good things considering we are short on memory
|
||||
// table, which is a good thing considering we are short on memory
|
||||
// anyway, we probably don't want to retain things.
|
||||
return false;
|
||||
return;
|
||||
}
|
||||
mHashEntries.Put(aValue->mKey, aValue);
|
||||
return true;
|
||||
mHashEntries.Put(aValue->mKey, std::move(aValue));
|
||||
}
|
||||
|
||||
protected:
|
||||
|
@ -446,13 +443,10 @@ static void CacheBlur(DrawTarget* aDT, const IntSize& aMinSize,
|
|||
const RectCornerRadii* aCornerRadii,
|
||||
const sRGBColor& aShadowColor,
|
||||
const IntMargin& aBlurMargin, SourceSurface* aBoxShadow) {
|
||||
BlurCacheKey key(aMinSize, aBlurRadius, aCornerRadii, aShadowColor,
|
||||
aDT->GetBackendType());
|
||||
BlurCacheData* data =
|
||||
new BlurCacheData(aBoxShadow, aBlurMargin, std::move(key));
|
||||
if (!gBlurCache->RegisterEntry(data)) {
|
||||
delete data;
|
||||
}
|
||||
gBlurCache->RegisterEntry(MakeUnique<BlurCacheData>(
|
||||
aBoxShadow, aBlurMargin,
|
||||
BlurCacheKey(aMinSize, aBlurRadius, aCornerRadii, aShadowColor,
|
||||
aDT->GetBackendType())));
|
||||
}
|
||||
|
||||
// Blurs a small surface and creates the colored box shadow.
|
||||
|
@ -1001,11 +995,9 @@ static void CacheInsetBlur(const IntSize& aMinOuterSize,
|
|||
BlurCacheKey key(aMinOuterSize, aMinInnerSize, aBlurRadius, aCornerRadii,
|
||||
aShadowColor, isInsetBlur, aBackendType);
|
||||
IntMargin blurMargin(0, 0, 0, 0);
|
||||
BlurCacheData* data =
|
||||
new BlurCacheData(aBoxShadow, blurMargin, std::move(key));
|
||||
if (!gBlurCache->RegisterEntry(data)) {
|
||||
delete data;
|
||||
}
|
||||
|
||||
gBlurCache->RegisterEntry(
|
||||
MakeUnique<BlurCacheData>(aBoxShadow, blurMargin, std::move(key)));
|
||||
}
|
||||
|
||||
already_AddRefed<SourceSurface> gfxAlphaBoxBlur::GetInsetBlur(
|
||||
|
|
|
@ -1876,10 +1876,12 @@ nsresult gfxDWriteFontList::GetFontSubstitutes() {
|
|||
}
|
||||
if (SharedFontList()->FindFamily(actualFontName,
|
||||
/*aPrimaryNameOnly*/ true)) {
|
||||
mSubstitutions.Put(substituteName, new nsCString(actualFontName));
|
||||
} else if (mSubstitutions.Get(actualFontName)) {
|
||||
mSubstitutions.Put(substituteName,
|
||||
new nsCString(*mSubstitutions.Get(actualFontName)));
|
||||
MakeUnique<nsCString>(actualFontName));
|
||||
} else if (mSubstitutions.Get(actualFontName)) {
|
||||
mSubstitutions.Put(
|
||||
substituteName,
|
||||
MakeUnique<nsCString>(*mSubstitutions.Get(actualFontName)));
|
||||
} else {
|
||||
mNonExistingFonts.AppendElement(substituteName);
|
||||
}
|
||||
|
@ -1925,7 +1927,8 @@ void gfxDWriteFontList::GetDirectWriteSubstitutes() {
|
|||
BuildKeyNameFromFontName(actualFontName);
|
||||
if (SharedFontList()->FindFamily(actualFontName,
|
||||
/*aPrimaryNameOnly*/ true)) {
|
||||
mSubstitutions.Put(substituteName, new nsCString(actualFontName));
|
||||
mSubstitutions.Put(substituteName,
|
||||
MakeUnique<nsCString>(actualFontName));
|
||||
} else {
|
||||
mNonExistingFonts.AppendElement(substituteName);
|
||||
}
|
||||
|
|
|
@ -638,8 +638,9 @@ void gfxFT2FontList::CollectInitData(const FontListEntry& aFLE,
|
|||
BuildKeyNameFromFontName(key);
|
||||
auto faceList = mFaceInitData.Get(key);
|
||||
if (!faceList) {
|
||||
faceList = new nsTArray<fontlist::Face::InitData>;
|
||||
mFaceInitData.Put(key, faceList);
|
||||
faceList =
|
||||
mFaceInitData.Put(key, MakeUnique<nsTArray<fontlist::Face::InitData>>())
|
||||
.get();
|
||||
mFamilyInitData.AppendElement(
|
||||
fontlist::Family::InitData{key, aFLE.familyName()});
|
||||
}
|
||||
|
@ -1735,7 +1736,7 @@ gfxFontEntry* gfxFT2FontList::LookupLocalFont(const nsACString& aFontName,
|
|||
|
||||
// if so, iterate over faces in this family to see if there is a match
|
||||
if (family.Equals(fullNameFamily, nsCaseInsensitiveCStringComparator)) {
|
||||
nsTArray<RefPtr<gfxFontEntry> >& fontList = fontFamily->GetFontList();
|
||||
nsTArray<RefPtr<gfxFontEntry>>& fontList = fontFamily->GetFontList();
|
||||
int index, len = fontList.Length();
|
||||
for (index = 0; index < len; index++) {
|
||||
gfxFontEntry* fe = fontList[index];
|
||||
|
|
|
@ -132,20 +132,17 @@ class GradientCache final : public nsExpirationTracker<GradientCacheData, 4> {
|
|||
return gradient;
|
||||
}
|
||||
|
||||
// Returns true if we successfully register the gradient in the cache, false
|
||||
// otherwise.
|
||||
bool RegisterEntry(GradientCacheData* aValue) {
|
||||
nsresult rv = AddObject(aValue);
|
||||
void RegisterEntry(UniquePtr<GradientCacheData> aValue) {
|
||||
nsresult rv = AddObject(aValue.get());
|
||||
if (NS_FAILED(rv)) {
|
||||
// We are OOM, and we cannot track this object. We don't want stall
|
||||
// entries in the hash table (since the expiration tracker is responsible
|
||||
// for removing the cache entries), so we avoid putting that entry in the
|
||||
// table, which is a good things considering we are short on memory
|
||||
// table, which is a good thing considering we are short on memory
|
||||
// anyway, we probably don't want to retain things.
|
||||
return false;
|
||||
return;
|
||||
}
|
||||
mHashEntries.Put(aValue->mKey, aValue);
|
||||
return true;
|
||||
mHashEntries.Put(aValue->mKey, std::move(aValue));
|
||||
}
|
||||
|
||||
protected:
|
||||
|
@ -190,11 +187,8 @@ already_AddRefed<GradientStops> gfxGradientCache::GetOrCreateGradientStops(
|
|||
if (!gs) {
|
||||
return nullptr;
|
||||
}
|
||||
GradientCacheData* cached = new GradientCacheData(
|
||||
gs, GradientCacheKey(aStops, aExtend, aDT->GetBackendType()));
|
||||
if (!gGradientCache->RegisterEntry(cached)) {
|
||||
delete cached;
|
||||
}
|
||||
gGradientCache->RegisterEntry(MakeUnique<GradientCacheData>(
|
||||
gs, GradientCacheKey(aStops, aExtend, aDT->GetBackendType())));
|
||||
}
|
||||
return gs.forget();
|
||||
}
|
||||
|
|
|
@ -570,8 +570,8 @@ Result<Ok, nsresult> ScriptPreloader::InitCacheInternal(
|
|||
script->mReadyToExecute = true;
|
||||
}
|
||||
|
||||
mScripts.Put(script->mCachePath, script.get());
|
||||
Unused << script.release();
|
||||
const auto& cachePath = script->mCachePath;
|
||||
mScripts.Put(cachePath, std::move(script));
|
||||
}
|
||||
|
||||
if (buf.error()) {
|
||||
|
|
|
@ -1311,7 +1311,7 @@ nsresult mozJSComponentLoader::Import(JSContext* aCx,
|
|||
return NS_ERROR_UNEXPECTED;
|
||||
}
|
||||
|
||||
mLocations.Put(newEntry->resolvedURL, new nsCString(info.Key()));
|
||||
mLocations.Put(newEntry->resolvedURL, MakeUnique<nsCString>(info.Key()));
|
||||
|
||||
RootedValue exception(aCx);
|
||||
{
|
||||
|
@ -1363,7 +1363,7 @@ nsresult mozJSComponentLoader::Import(JSContext* aCx,
|
|||
|
||||
// Cache this module for later
|
||||
if (newEntry) {
|
||||
mImports.Put(info.Key(), newEntry.release());
|
||||
mImports.Put(info.Key(), std::move(newEntry));
|
||||
}
|
||||
|
||||
return NS_OK;
|
||||
|
|
|
@ -479,7 +479,8 @@ void SharedStyleSheetCache::InsertIntoCompleteCacheIfNeeded(
|
|||
}
|
||||
|
||||
mCompleteSheets.Put(
|
||||
key, {aData.mExpirationTime, std::move(counters), std::move(sheet)});
|
||||
key, CompleteSheet{aData.mExpirationTime, std::move(counters),
|
||||
std::move(sheet)});
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1263,7 +1263,7 @@ nsresult nsOfflineCacheDevice::InitActiveCaches() {
|
|||
statement->GetUTF8String(1, clientID);
|
||||
|
||||
mActiveCaches.PutEntry(clientID);
|
||||
mActiveCachesByGroup.Put(group, new nsCString(clientID));
|
||||
mActiveCachesByGroup.Put(group, MakeUnique<nsCString>(clientID));
|
||||
|
||||
rv = statement->ExecuteStep(&hasRows);
|
||||
NS_ENSURE_SUCCESS(rv, rv);
|
||||
|
@ -2536,7 +2536,7 @@ nsresult nsOfflineCacheDevice::ActivateCache(const nsACString& group,
|
|||
|
||||
if (!clientID.IsEmpty()) {
|
||||
mActiveCaches.PutEntry(clientID);
|
||||
mActiveCachesByGroup.Put(group, new nsCString(clientID));
|
||||
mActiveCachesByGroup.Put(group, MakeUnique<nsCString>(clientID));
|
||||
}
|
||||
|
||||
return NS_OK;
|
||||
|
|
|
@ -1192,8 +1192,11 @@ void CacheStorageService::RecordMemoryOnlyEntry(CacheEntry* aEntry,
|
|||
return;
|
||||
}
|
||||
|
||||
entries = new CacheEntryTable(CacheEntryTable::MEMORY_ONLY);
|
||||
sGlobalEntryTables->Put(memoryStorageID, entries);
|
||||
entries =
|
||||
sGlobalEntryTables
|
||||
->Put(memoryStorageID,
|
||||
MakeUnique<CacheEntryTable>(CacheEntryTable::MEMORY_ONLY))
|
||||
.get();
|
||||
LOG((" new memory-only storage table for %s", memoryStorageID.get()));
|
||||
}
|
||||
|
||||
|
|
|
@ -272,11 +272,7 @@ nsPreflightCache::CacheEntry* nsPreflightCache::GetEntry(
|
|||
|
||||
// This is a new entry, allocate and insert into the table now so that any
|
||||
// failures don't cause items to be removed from a full cache.
|
||||
CacheEntry* newEntry = new CacheEntry(key);
|
||||
if (!newEntry) {
|
||||
NS_WARNING("Failed to allocate new cache entry!");
|
||||
return nullptr;
|
||||
}
|
||||
auto newEntry = MakeUnique<CacheEntry>(key);
|
||||
|
||||
NS_ASSERTION(mTable.Count() <= PREFLIGHT_CACHE_SIZE,
|
||||
"Something is borked, too many entries in the cache!");
|
||||
|
@ -310,10 +306,10 @@ nsPreflightCache::CacheEntry* nsPreflightCache::GetEntry(
|
|||
}
|
||||
}
|
||||
|
||||
mTable.Put(key, newEntry);
|
||||
mList.insertFront(newEntry);
|
||||
auto* newEntryWeakRef = mTable.Put(key, std::move(newEntry)).get();
|
||||
mList.insertFront(newEntryWeakRef);
|
||||
|
||||
return newEntry;
|
||||
return newEntryWeakRef;
|
||||
}
|
||||
|
||||
void nsPreflightCache::RemoveEntries(
|
||||
|
|
|
@ -122,14 +122,15 @@ nsresult nsHttpAuthCache::SetAuthEntry(const char* scheme, const char* host,
|
|||
|
||||
if (!node) {
|
||||
// create a new entry node and set the given entry
|
||||
node = new nsHttpAuthNode();
|
||||
LOG((" new nsHttpAuthNode %p for key='%s'", node, key.get()));
|
||||
auto node = UniquePtr<nsHttpAuthNode>(new nsHttpAuthNode);
|
||||
LOG((" new nsHttpAuthNode %p for key='%s'", node.get(), key.get()));
|
||||
rv = node->SetAuthEntry(path, realm, creds, challenge, ident, metadata);
|
||||
if (NS_FAILED(rv))
|
||||
delete node;
|
||||
else
|
||||
mDB.Put(key, node);
|
||||
return rv;
|
||||
if (NS_FAILED(rv)) {
|
||||
return rv;
|
||||
}
|
||||
|
||||
mDB.Put(key, std::move(node));
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
return node->SetAuthEntry(path, realm, creds, challenge, ident, metadata);
|
||||
|
|
|
@ -1865,8 +1865,9 @@ void nsHttpHandler::PrefsChanged(const char* pref) {
|
|||
nsAutoCString token{tokenSubstring};
|
||||
int32_t index = token.Find(";");
|
||||
if (index != kNotFound) {
|
||||
auto* map = new nsCString(Substring(token, index + 1));
|
||||
mAltSvcMappingTemptativeMap.Put(Substring(token, 0, index), map);
|
||||
mAltSvcMappingTemptativeMap.Put(
|
||||
Substring(token, 0, index),
|
||||
MakeUnique<nsCString>(Substring(token, index + 1)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -197,7 +197,7 @@ nsresult nsStreamConverterService::FindConverter(
|
|||
for (auto iter = mAdjacencyList.Iter(); !iter.Done(); iter.Next()) {
|
||||
const nsACString& key = iter.Key();
|
||||
MOZ_ASSERT(iter.UserData(), "no data in the table iteration");
|
||||
lBFSTable.Put(key, new BFSTableData(key));
|
||||
lBFSTable.Put(key, mozilla::MakeUnique<BFSTableData>(key));
|
||||
}
|
||||
|
||||
NS_ASSERTION(lBFSTable.Count() == vertexCount,
|
||||
|
|
|
@ -768,8 +768,8 @@ void NetlinkService::OnLinkMessage(struct nlmsghdr* aNlh) {
|
|||
if (!linkInfo) {
|
||||
LOG(("Creating new link [index=%u, name=%s, flags=%u, type=%u]",
|
||||
linkIndex, linkName.get(), link->GetFlags(), link->GetType()));
|
||||
linkInfo = new LinkInfo(std::move(link));
|
||||
mLinks.Put(linkIndex, linkInfo);
|
||||
linkInfo =
|
||||
mLinks.Put(linkIndex, MakeUnique<LinkInfo>(std::move(link))).get();
|
||||
} else {
|
||||
LOG(("Updating link [index=%u, name=%s, flags=%u, type=%u]", linkIndex,
|
||||
linkName.get(), link->GetFlags(), link->GetType()));
|
||||
|
@ -1049,7 +1049,7 @@ void NetlinkService::OnNeighborMessage(struct nlmsghdr* aNlh) {
|
|||
neigh->GetAsString(neighDbgStr);
|
||||
LOG(("Adding neighbor: %s", neighDbgStr.get()));
|
||||
}
|
||||
linkInfo->mNeighbors.Put(key, neigh.release());
|
||||
linkInfo->mNeighbors.Put(key, std::move(neigh));
|
||||
} else {
|
||||
if (LOG_ENABLED()) {
|
||||
nsAutoCString neighDbgStr;
|
||||
|
|
|
@ -210,7 +210,7 @@ AsyncBindingParams::BindByName(const nsACString& aName, nsIVariant* aValue) {
|
|||
RefPtr<Variant_base> variant = convertVariantToStorageVariant(aValue);
|
||||
if (!variant) return NS_ERROR_UNEXPECTED;
|
||||
|
||||
mNamedParameters.Put(aName, variant);
|
||||
mNamedParameters.Put(aName, nsCOMPtr<nsIVariant>{variant});
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
|
|
|
@ -662,9 +662,10 @@ nsresult NativeFileWatcherIOTask::AddPathRunnableMethod(
|
|||
nsresult rv = AddDirectoryToWatchList(resourceDesc.get());
|
||||
if (NS_SUCCEEDED(rv)) {
|
||||
// Add the resource pointer to both indexes.
|
||||
WatchedResourceDescriptor* resource = resourceDesc.release();
|
||||
mWatchedResourcesByPath.Put(wrappedParameters->mPath, resource);
|
||||
mWatchedResourcesByHandle.Put(resHandle, resource);
|
||||
mWatchedResourcesByHandle.Put(
|
||||
resHandle, mWatchedResourcesByPath
|
||||
.Put(wrappedParameters->mPath, std::move(resourceDesc))
|
||||
.get());
|
||||
|
||||
// Dispatch the success callback.
|
||||
nsresult rv = ReportSuccess(wrappedParameters->mSuccessCallbackHandle,
|
||||
|
|
|
@ -36,6 +36,7 @@ using mozilla::StaticAutoPtr;
|
|||
using mozilla::StaticMutex;
|
||||
using mozilla::StaticMutexAutoLock;
|
||||
using mozilla::TimeStamp;
|
||||
using mozilla::UniquePtr;
|
||||
using mozilla::Telemetry::ChildEventData;
|
||||
using mozilla::Telemetry::EventExtraEntry;
|
||||
using mozilla::Telemetry::LABELS_TELEMETRY_EVENT_RECORDING_ERROR;
|
||||
|
@ -538,7 +539,8 @@ void RegisterEvents(const StaticMutexAutoLock& lock, const nsACString& category,
|
|||
gDynamicEventInfo->AppendElement(eventInfos[i]);
|
||||
uint32_t eventId =
|
||||
eventExpired[i] ? kExpiredEventId : gDynamicEventInfo->Length() - 1;
|
||||
gEventNameIDMap.Put(eventName, new EventKey{eventId, true});
|
||||
gEventNameIDMap.Put(eventName,
|
||||
UniquePtr<EventKey>{new EventKey{eventId, true}});
|
||||
}
|
||||
|
||||
// If it is a builtin, add the category name in order to enable it later.
|
||||
|
@ -707,7 +709,8 @@ void TelemetryEvent::InitializeGlobalState(bool aCanRecordBase,
|
|||
eventId = kExpiredEventId;
|
||||
}
|
||||
|
||||
gEventNameIDMap.Put(UniqueEventName(info), new EventKey{eventId, false});
|
||||
gEventNameIDMap.Put(UniqueEventName(info),
|
||||
UniquePtr<EventKey>{new EventKey{eventId, false}});
|
||||
gCategoryNames.PutEntry(info.common_info.category());
|
||||
}
|
||||
|
||||
|
@ -1288,7 +1291,7 @@ nsresult TelemetryEvent::CreateSnapshots(uint32_t aDataset, bool aClear,
|
|||
gEventRecords.Clear();
|
||||
for (auto& pair : leftovers) {
|
||||
gEventRecords.Put(pair.first,
|
||||
new EventRecordArray(std::move(pair.second)));
|
||||
MakeUnique<EventRecordArray>(std::move(pair.second)));
|
||||
}
|
||||
leftovers.Clear();
|
||||
}
|
||||
|
|
|
@ -35,8 +35,10 @@ using base::CountHistogram;
|
|||
using base::FlagHistogram;
|
||||
using base::LinearHistogram;
|
||||
using mozilla::MakeTuple;
|
||||
using mozilla::MakeUnique;
|
||||
using mozilla::StaticMutex;
|
||||
using mozilla::StaticMutexAutoLock;
|
||||
using mozilla::UniquePtr;
|
||||
using mozilla::Telemetry::HistogramAccumulation;
|
||||
using mozilla::Telemetry::HistogramCount;
|
||||
using mozilla::Telemetry::HistogramID;
|
||||
|
@ -979,7 +981,6 @@ Histogram::Histogram(HistogramID histogramId, const HistogramInfo& info,
|
|||
return;
|
||||
}
|
||||
|
||||
base::Histogram* h;
|
||||
const int bucketsOffset = gHistogramBucketLowerBoundIndex[histogramId];
|
||||
|
||||
if (info.is_single_store()) {
|
||||
|
@ -988,8 +989,9 @@ Histogram::Histogram(HistogramID histogramId, const HistogramInfo& info,
|
|||
for (uint32_t i = 0; i < info.store_count; i++) {
|
||||
auto store = nsDependentCString(
|
||||
&gHistogramStringTable[gHistogramStoresTable[info.store_index + i]]);
|
||||
h = internal_CreateBaseHistogramInstance(info, bucketsOffset);
|
||||
mStorage.Put(store, h);
|
||||
mStorage.Put(store, UniquePtr<base::Histogram>(
|
||||
internal_CreateBaseHistogramInstance(
|
||||
info, bucketsOffset)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1131,7 +1133,7 @@ KeyedHistogram::KeyedHistogram(HistogramID id, const HistogramInfo& info,
|
|||
for (uint32_t i = 0; i < info.store_count; i++) {
|
||||
auto store = nsDependentCString(
|
||||
&gHistogramStringTable[gHistogramStoresTable[info.store_index + i]]);
|
||||
mStorage.Put(store, new KeyedHistogramMapType);
|
||||
mStorage.Put(store, MakeUnique<KeyedHistogramMapType>());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1165,16 +1167,16 @@ nsresult KeyedHistogram::GetHistogram(const nsCString& aStore,
|
|||
}
|
||||
|
||||
int bucketsOffset = gHistogramBucketLowerBoundIndex[mId];
|
||||
base::Histogram* h =
|
||||
internal_CreateBaseHistogramInstance(mHistogramInfo, bucketsOffset);
|
||||
auto h = UniquePtr<base::Histogram>{
|
||||
internal_CreateBaseHistogramInstance(mHistogramInfo, bucketsOffset)};
|
||||
if (!h) {
|
||||
return NS_ERROR_FAILURE;
|
||||
}
|
||||
|
||||
h->ClearFlags(base::Histogram::kUmaTargetedHistogramFlag);
|
||||
*histogram = h;
|
||||
*histogram = h.get();
|
||||
|
||||
bool inserted = histogramMap->Put(key, h, mozilla::fallible);
|
||||
bool inserted = histogramMap->Put(key, std::move(h), mozilla::fallible);
|
||||
if (MOZ_UNLIKELY(!inserted)) {
|
||||
return NS_ERROR_OUT_OF_MEMORY;
|
||||
}
|
||||
|
|
|
@ -38,6 +38,7 @@ using mozilla::Some;
|
|||
using mozilla::StaticAutoPtr;
|
||||
using mozilla::StaticMutex;
|
||||
using mozilla::StaticMutexAutoLock;
|
||||
using mozilla::UniquePtr;
|
||||
using mozilla::Telemetry::DynamicScalarDefinition;
|
||||
using mozilla::Telemetry::KeyedScalarAction;
|
||||
using mozilla::Telemetry::ProcessID;
|
||||
|
@ -1130,7 +1131,7 @@ ScalarResult KeyedScalar::GetScalarForKey(const StaticMutexAutoLock& locker,
|
|||
return ScalarResult::InvalidType;
|
||||
}
|
||||
|
||||
mScalarKeys.Put(utf8Key, scalar);
|
||||
mScalarKeys.Put(utf8Key, UniquePtr<ScalarBase>(scalar));
|
||||
|
||||
*aRet = scalar;
|
||||
return ScalarResult::Ok;
|
||||
|
@ -1547,7 +1548,7 @@ nsresult internal_GetScalarByEnum(const StaticMutexAutoLock& lock,
|
|||
return NS_ERROR_INVALID_ARG;
|
||||
}
|
||||
|
||||
scalarStorage->Put(aId.id, scalar);
|
||||
scalarStorage->Put(aId.id, UniquePtr<ScalarBase>(scalar));
|
||||
*aRet = scalar;
|
||||
return NS_OK;
|
||||
}
|
||||
|
@ -1822,7 +1823,7 @@ nsresult internal_GetKeyedScalarByEnum(const StaticMutexAutoLock& lock,
|
|||
return NS_ERROR_INVALID_ARG;
|
||||
}
|
||||
|
||||
scalarStorage->Put(aId.id, scalar);
|
||||
scalarStorage->Put(aId.id, UniquePtr<KeyedScalar>(scalar));
|
||||
*aRet = scalar;
|
||||
return NS_OK;
|
||||
}
|
||||
|
|
|
@ -186,7 +186,7 @@ void TableUpdateV4::NewPrefixes(int32_t aSize, const nsACString& aPrefixes) {
|
|||
aPrefixes.Length() / aSize));
|
||||
}
|
||||
|
||||
mPrefixesMap.Put(aSize, new nsCString(aPrefixes));
|
||||
mPrefixesMap.Put(aSize, MakeUnique<nsCString>(aPrefixes));
|
||||
}
|
||||
|
||||
nsresult TableUpdateV4::NewRemovalIndices(const uint32_t* aIndices,
|
||||
|
|
|
@ -516,7 +516,7 @@ VLPrefixSet::VLPrefixSet(const PrefixStringMap& aMap) : mCount(0) {
|
|||
uint32_t size = iter.Key();
|
||||
MOZ_ASSERT(iter.Data()->Length() % size == 0,
|
||||
"PrefixString must be a multiple of the prefix size.");
|
||||
mMap.Put(size, new PrefixString(*iter.Data(), size));
|
||||
mMap.Put(size, MakeUnique<PrefixString>(*iter.Data(), size));
|
||||
mCount += iter.Data()->Length() / size;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -116,7 +116,7 @@ nsresult VariableLengthPrefixSet::SetPrefixes(AddPrefixArray& aAddPrefixes,
|
|||
const char* buf = reinterpret_cast<const char*>(completions[i].buf);
|
||||
completionStr->Append(buf, COMPLETE_SIZE);
|
||||
}
|
||||
mVLPrefixSet.Put(COMPLETE_SIZE, completionStr.release());
|
||||
mVLPrefixSet.Put(COMPLETE_SIZE, std::move(completionStr));
|
||||
|
||||
return NS_OK;
|
||||
}
|
||||
|
@ -176,7 +176,7 @@ nsresult VariableLengthPrefixSet::SetPrefixes(PrefixStringMap& aPrefixMap) {
|
|||
continue;
|
||||
}
|
||||
|
||||
mVLPrefixSet.Put(iter.Key(), new nsCString(*iter.Data()));
|
||||
mVLPrefixSet.Put(iter.Key(), MakeUnique<nsCString>(*iter.Data()));
|
||||
}
|
||||
|
||||
return NS_OK;
|
||||
|
@ -203,12 +203,12 @@ nsresult VariableLengthPrefixSet::GetPrefixes(PrefixStringMap& aPrefixMap) {
|
|||
begin[i] = NativeEndian::swapToBigEndian(array[i]);
|
||||
}
|
||||
|
||||
aPrefixMap.Put(PREFIX_SIZE_FIXED, prefixes.release());
|
||||
aPrefixMap.Put(PREFIX_SIZE_FIXED, std::move(prefixes));
|
||||
}
|
||||
|
||||
// Copy variable-length prefix set
|
||||
for (auto iter = mVLPrefixSet.ConstIter(); !iter.Done(); iter.Next()) {
|
||||
aPrefixMap.Put(iter.Key(), new nsCString(*iter.Data()));
|
||||
aPrefixMap.Put(iter.Key(), MakeUnique<nsCString>(*iter.Data()));
|
||||
}
|
||||
|
||||
return NS_OK;
|
||||
|
@ -351,7 +351,7 @@ nsresult VariableLengthPrefixSet::LoadPrefixes(nsCOMPtr<nsIInputStream>& in) {
|
|||
NS_ENSURE_SUCCESS(rv, rv);
|
||||
NS_ENSURE_TRUE(read == stringLength, NS_ERROR_FAILURE);
|
||||
|
||||
mVLPrefixSet.Put(prefixSize, vlPrefixes.release());
|
||||
mVLPrefixSet.Put(prefixSize, std::move(vlPrefixes));
|
||||
totalPrefixes += prefixCount;
|
||||
LOG(("[%s] Loaded %u %u-byte prefixes", mName.get(), prefixCount,
|
||||
prefixSize));
|
||||
|
|
|
@ -875,7 +875,7 @@ nsresult nsUrlClassifierUtils::ReadProvidersFromPrefs(ProviderDictType& aDict) {
|
|||
nsTArray<nsCString> tables;
|
||||
Classifier::SplitTables(owningLists, tables);
|
||||
for (auto tableName : tables) {
|
||||
aDict.Put(tableName, new nsCString(provider));
|
||||
aDict.Put(tableName, MakeUnique<nsCString>(provider));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -128,7 +128,7 @@ nsresult PrefixArrayToPrefixStringMap(const _PrefixArray& aPrefixArray,
|
|||
uint32_t size = iter.Key();
|
||||
uint32_t count = iter.Data()->Length();
|
||||
|
||||
_Prefix* str = new _Prefix();
|
||||
auto str = MakeUnique<_Prefix>();
|
||||
str->SetLength(size * count);
|
||||
|
||||
char* dst = str->BeginWriting();
|
||||
|
@ -139,7 +139,7 @@ nsresult PrefixArrayToPrefixStringMap(const _PrefixArray& aPrefixArray,
|
|||
dst += size;
|
||||
}
|
||||
|
||||
aOut.Put(size, str);
|
||||
aOut.Put(size, std::move(str));
|
||||
}
|
||||
|
||||
return NS_OK;
|
||||
|
|
|
@ -229,7 +229,7 @@ NS_IMETHODIMP ContentHandlerService::GetTypeFromExtension(
|
|||
mHandlerServiceChild->SendGetTypeFromExtension(nsCString(aFileExtension),
|
||||
&type);
|
||||
_retval.Assign(type);
|
||||
mExtToTypeMap.Put(nsCString(aFileExtension), new nsCString(type));
|
||||
mExtToTypeMap.Put(nsCString(aFileExtension), MakeUnique<nsCString>(type));
|
||||
|
||||
return NS_OK;
|
||||
}
|
||||
|
|
|
@ -26,7 +26,7 @@ nsSystemStatusBarCocoa::AddItem(Element* aElement) {
|
|||
}
|
||||
|
||||
nsCOMPtr<nsISupports> keyPtr = aElement;
|
||||
mItems.Put(keyPtr, new StatusItem(menu));
|
||||
mItems.Put(keyPtr, mozilla::MakeUnique<StatusItem>(menu));
|
||||
|
||||
return NS_OK;
|
||||
|
||||
|
|
|
@ -38,7 +38,8 @@ already_AddRefed<nsIErrorService> nsErrorService::GetOrCreate() {
|
|||
NS_IMETHODIMP
|
||||
nsErrorService::RegisterErrorStringBundle(int16_t aErrorModule,
|
||||
const char* aStringBundleURL) {
|
||||
mErrorStringBundleURLMap.Put(aErrorModule, new nsCString(aStringBundleURL));
|
||||
mErrorStringBundleURLMap.Put(aErrorModule,
|
||||
MakeUnique<nsCString>(aStringBundleURL));
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
|
|
|
@ -241,7 +241,7 @@ nsresult nsINIParser::DeleteString(const char* aSection, const char* aKey) {
|
|||
if (!val->next) {
|
||||
mSections.Remove(aSection);
|
||||
} else {
|
||||
mSections.Put(aSection, val->next.release());
|
||||
mSections.Put(aSection, std::move(val->next));
|
||||
delete val;
|
||||
}
|
||||
return NS_OK;
|
||||
|

@ -283,7 +283,7 @@ nsresult nsINIParser::RenameSection(const char* aSection,

mozilla::UniquePtr<INIValue> val;
if (mSections.Remove(aSection, &val)) {
mSections.Put(aNewName, val.release());
mSections.Put(aNewName, std::move(val));
} else {
return NS_ERROR_FAILURE;
}

@ -307,8 +307,10 @@ static BloatEntry* GetBloatEntry(const char* aTypeName,
EnsureBloatView();
BloatEntry* entry = gBloatView->Get(aTypeName);
if (!entry && aInstanceSize > 0) {
entry = new BloatEntry(aTypeName, aInstanceSize);
gBloatView->Put(aTypeName, entry);
entry =
gBloatView
->Put(aTypeName, MakeUnique<BloatEntry>(aTypeName, aInstanceSize))
.get();
} else {
MOZ_ASSERT(
aInstanceSize == 0 || entry->GetClassSize() == aInstanceSize,
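The rewritten call above also relies on the other half of the new signature: the infallible Put returns a reference to the stored DataType, so inserting and taking the owned pointer collapse into one expression. A sketch with an illustrative value type that is not from the patch:

    #include "nsClassHashtable.h"
    #include "nsHashKeys.h"
    #include "mozilla/UniquePtr.h"

    struct Widget {
      void Init() {}
    };

    static Widget* InsertAndUse(nsClassHashtable<nsUint32HashKey, Widget>& aTable) {
      // Put returns DataType& (here UniquePtr<Widget>&), so .get() yields the
      // freshly stored raw pointer without a second lookup.
      Widget* widget = aTable.Put(1, mozilla::MakeUnique<Widget>()).get();
      widget->Init();
      return widget;
    }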

@ -511,9 +511,11 @@ void nsCategoryManager::AddCategoryEntry(const nsACString& aCategoryName,

if (!category) {
// That category doesn't exist yet; let's make it.
category = CategoryNode::Create(&mArena);

mTable.Put(MaybeStrdup(aCategoryName, &mArena), category);
category =
mTable
.Put(MaybeStrdup(aCategoryName, &mArena),
UniquePtr<CategoryNode>{CategoryNode::Create(&mArena)})
.get();
}
}

@ -216,45 +216,29 @@ class nsBaseHashtable
}

/**
* Put a new value for the associated key
* @param aKey the key to put
* @param aData the new data
* If it does not yet, inserts a new entry with the handle's key and the
* value passed to this function. Otherwise, it updates the entry by the
* value passed to this function.
*
* \tparam U DataType must be implicitly convertible (and assignable) from U
* \post HasEntry()
* \param aKey the key to put
* \param aData the new data
*/
void Put(KeyType aKey, const UserDataType& aData) {
WithEntryHandle(aKey, [&aData](auto entryHandle) {
entryHandle.InsertOrUpdate(Converter::Wrap(aData));
template <typename U>
DataType& Put(KeyType aKey, U&& aData) {
return WithEntryHandle(aKey, [&aData](auto entryHandle) -> DataType& {
return entryHandle.InsertOrUpdate(std::forward<U>(aData));
});
}

[[nodiscard]] bool Put(KeyType aKey, const UserDataType& aData,
const fallible_t& aFallible) {
template <typename U>
[[nodiscard]] bool Put(KeyType aKey, U&& aData, const fallible_t& aFallible) {
return WithEntryHandle(aKey, aFallible, [&aData](auto maybeEntryHandle) {
if (!maybeEntryHandle) {
return false;
}
maybeEntryHandle->InsertOrUpdate(Converter::Wrap(aData));
return true;
});
}

/**
* Put a new value for the associated key
* @param aKey the key to put
* @param aData the new data
*/
void Put(KeyType aKey, UserDataType&& aData) {
WithEntryHandle(aKey, [&aData](auto entryHandle) {
entryHandle.InsertOrUpdate(Converter::Wrap(std::move(aData)));
});
}

[[nodiscard]] bool Put(KeyType aKey, UserDataType&& aData,
const fallible_t& aFallible) {
return WithEntryHandle(aKey, aFallible, [&aData](auto maybeEntryHandle) {
if (!maybeEntryHandle) {
return false;
}
maybeEntryHandle->InsertOrUpdate(Converter::Wrap(std::move(aData)));
maybeEntryHandle->InsertOrUpdate(std::forward<U>(aData));
return true;
});
}
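With the four UserDataType overloads collapsed into the two forwarding templates above, callers see one infallible Put that returns the stored DataType by reference and one fallible Put that reports allocation failure instead of aborting. A usage sketch against a plain nsDataHashtable; the table and values are illustrative, not from the patch:

    #include "nsDataHashtable.h"
    #include "nsDebug.h"
    #include "nsHashKeys.h"
    #include "nsString.h"

    static void PutOverloadsSketch() {
      nsDataHashtable<nsUint32HashKey, nsCString> table;

      // Infallible: insert-or-update, then keep working with the stored value.
      nsCString& stored = table.Put(1, nsCString("hello"));
      stored.AppendLiteral(" world");

      // Fallible: same forwarding signature plus the fallible_t tag.
      if (!table.Put(2, nsCString("other"), mozilla::fallible)) {
        NS_WARNING("Put failed under OOM");
      }
    }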

@ -470,7 +454,7 @@ class nsBaseHashtable
* the result of the functor passed to this function. The functor is not
* called if no insert takes place.
*
* \tparam F must return a value that DataType is constructible from
* \tparam F must return a value that is implicitly convertible to DataType
* \post HasEntry()
*/
template <typename F>

@ -528,7 +512,7 @@ class nsBaseHashtable
* value passed to this function. Otherwise, it updates the entry by the
* value passed to this function.
*
* \tparam U DataType must be constructible and assignable from U
* \tparam U DataType must be implicitly convertible (and assignable) from U
* \post HasEntry()
*/
template <typename U>

@ -77,16 +77,6 @@ class nsClassHashtable : public nsBaseHashtable<KeyClass, mozilla::UniquePtr<T>,
* @returns nullptr if the key is not present.
*/
UserDataType Get(KeyType aKey) const;

// For now, overload Put, rather than hiding it.
using base_type::Put;

template <typename U, typename = std::enable_if_t<std::is_base_of_v<T, U>>>
void Put(KeyType aKey, mozilla::UniquePtr<U>&& aData);

template <typename U, typename = std::enable_if_t<std::is_base_of_v<T, U>>>
[[nodiscard]] bool Put(KeyType aKey, mozilla::UniquePtr<U>&& aData,
const mozilla::fallible_t&);
};

template <typename K, typename T>

@ -158,29 +148,4 @@ T* nsClassHashtable<KeyClass, T>::Get(KeyType aKey) const {
return ent->GetData().get();
}

template <class KeyClass, class T>
template <typename U, typename>
void nsClassHashtable<KeyClass, T>::Put(KeyType aKey,
mozilla::UniquePtr<U>&& aData) {
if (!Put(aKey, std::move(aData), mozilla::fallible)) {
NS_ABORT_OOM(this->mTable.EntrySize() * this->mTable.EntryCount());
}
}

template <class KeyClass, class T>
template <typename U, typename>
bool nsClassHashtable<KeyClass, T>::Put(KeyType aKey,
mozilla::UniquePtr<U>&& aData,
const mozilla::fallible_t&) {
typename base_type::EntryType* ent = this->PutEntry(aKey, mozilla::fallible);

if (!ent) {
return false;
}

ent->SetData(std::move(aData));

return true;
}

#endif // nsClassHashtable_h__
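With these definitions gone, nsClassHashtable no longer declares any Put of its own; the inherited template accepts a UniquePtr<U> directly as long as it converts to the stored UniquePtr<T>, which keeps the derived-to-base case working. A sketch with illustrative types that are not from the patch:

    #include "nsClassHashtable.h"
    #include "nsDebug.h"
    #include "nsHashKeys.h"
    #include "mozilla/UniquePtr.h"

    struct Base {
      virtual ~Base() = default;
    };
    struct Derived : Base {};

    static void ClassHashtableSketch() {
      nsClassHashtable<nsUint32HashKey, Base> table;

      // The inherited template Put takes the UniquePtr<Derived> and converts it
      // to the stored UniquePtr<Base>.
      table.Put(1, mozilla::MakeUnique<Derived>());

      // The fallible flavour is the inherited template as well.
      if (!table.Put(2, mozilla::MakeUnique<Base>(), mozilla::fallible)) {
        NS_WARNING("Put failed under OOM");
      }
    }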

@ -23,6 +23,7 @@
#include <numeric>

using mozilla::MakeRefPtr;
using mozilla::MakeUnique;
using mozilla::UniquePtr;

namespace TestHashtables {

@ -401,32 +402,32 @@ struct NonDefaultConstructible_NonDefaultConstructible {
using DataType = NonDefaultConstructible;
using UserDataType = NonDefaultConstructible;

static constexpr uint32_t kExpectedAddRefCnt_Contains = 3;
static constexpr uint32_t kExpectedAddRefCnt_GetGeneration = 3;
static constexpr uint32_t kExpectedAddRefCnt_Contains = 2;
static constexpr uint32_t kExpectedAddRefCnt_GetGeneration = 2;
static constexpr uint32_t kExpectedAddRefCnt_SizeOfExcludingThis = 3;
static constexpr uint32_t kExpectedAddRefCnt_SizeOfIncludingThis = 3;
static constexpr uint32_t kExpectedAddRefCnt_Count = 3;
static constexpr uint32_t kExpectedAddRefCnt_IsEmpty = 3;
static constexpr uint32_t kExpectedAddRefCnt_Get_OutputParam = 6;
static constexpr uint32_t kExpectedAddRefCnt_MaybeGet = 6;
static constexpr uint32_t kExpectedAddRefCnt_Put = 3;
static constexpr uint32_t kExpectedAddRefCnt_Put_Fallible = 3;
static constexpr uint32_t kExpectedAddRefCnt_Put_Rvalue = 3;
static constexpr uint32_t kExpectedAddRefCnt_Put_Rvalue_Fallible = 3;
static constexpr uint32_t kExpectedAddRefCnt_Remove = 3;
static constexpr uint32_t kExpectedAddRefCnt_GetAndRemove = 4;
static constexpr uint32_t kExpectedAddRefCnt_RemoveIf = 3;
static constexpr uint32_t kExpectedAddRefCnt_Lookup = 3;
static constexpr uint32_t kExpectedAddRefCnt_Lookup_Remove = 3;
static constexpr uint32_t kExpectedAddRefCnt_Iter = 3;
static constexpr uint32_t kExpectedAddRefCnt_ConstIter = 3;
static constexpr uint32_t kExpectedAddRefCnt_begin_end = 3;
static constexpr uint32_t kExpectedAddRefCnt_cbegin_cend = 3;
static constexpr uint32_t kExpectedAddRefCnt_Clear = 3;
static constexpr uint32_t kExpectedAddRefCnt_ShallowSizeOfExcludingThis = 3;
static constexpr uint32_t kExpectedAddRefCnt_ShallowSizeOfIncludingThis = 3;
static constexpr uint32_t kExpectedAddRefCnt_SwapElements = 3;
static constexpr uint32_t kExpectedAddRefCnt_MarkImmutable = 3;
static constexpr uint32_t kExpectedAddRefCnt_Count = 2;
static constexpr uint32_t kExpectedAddRefCnt_IsEmpty = 2;
static constexpr uint32_t kExpectedAddRefCnt_Get_OutputParam = 5;
static constexpr uint32_t kExpectedAddRefCnt_MaybeGet = 5;
static constexpr uint32_t kExpectedAddRefCnt_Put = 2;
static constexpr uint32_t kExpectedAddRefCnt_Put_Fallible = 2;
static constexpr uint32_t kExpectedAddRefCnt_Put_Rvalue = 2;
static constexpr uint32_t kExpectedAddRefCnt_Put_Rvalue_Fallible = 2;
static constexpr uint32_t kExpectedAddRefCnt_Remove = 2;
static constexpr uint32_t kExpectedAddRefCnt_GetAndRemove = 3;
static constexpr uint32_t kExpectedAddRefCnt_RemoveIf = 2;
static constexpr uint32_t kExpectedAddRefCnt_Lookup = 2;
static constexpr uint32_t kExpectedAddRefCnt_Lookup_Remove = 2;
static constexpr uint32_t kExpectedAddRefCnt_Iter = 2;
static constexpr uint32_t kExpectedAddRefCnt_ConstIter = 2;
static constexpr uint32_t kExpectedAddRefCnt_begin_end = 2;
static constexpr uint32_t kExpectedAddRefCnt_cbegin_cend = 2;
static constexpr uint32_t kExpectedAddRefCnt_Clear = 2;
static constexpr uint32_t kExpectedAddRefCnt_ShallowSizeOfExcludingThis = 2;
static constexpr uint32_t kExpectedAddRefCnt_ShallowSizeOfIncludingThis = 2;
static constexpr uint32_t kExpectedAddRefCnt_SwapElements = 2;
static constexpr uint32_t kExpectedAddRefCnt_MarkImmutable = 2;
};

struct NonDefaultConstructible_MovingNonDefaultConstructible {

@ -1162,8 +1163,8 @@ TEST(Hashtables, ClassHashtable_RangeBasedFor)
nsClassHashtable<nsCStringHashKey, TestUniChar> EntToUniClass(ENTITY_COUNT);

for (auto& entity : gEntities) {
auto* temp = new TestUniChar(entity.mUnicode);
EntToUniClass.Put(nsDependentCString(entity.mStr), temp);
EntToUniClass.Put(nsDependentCString(entity.mStr),
MakeUnique<TestUniChar>(entity.mUnicode));
}

// const range-based for

@ -159,7 +159,7 @@ class DeadlockDetector {
*/
void Add(const T* aResource) {
PRAutoLock _(mLock);
mOrdering.Put(aResource, new OrderingEntry(aResource));
mOrdering.Put(aResource, MakeUnique<OrderingEntry>(aResource));
}

void Remove(const T* aResource) {