2014-06-30 19:39:45 +04:00
|
|
|
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
|
|
|
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
2012-05-21 15:12:37 +04:00
|
|
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
|
|
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
2005-08-12 00:47:03 +04:00
|
|
|
|
2015-05-21 07:25:55 +03:00
|
|
|
#include <new>
|
2005-08-12 00:47:03 +04:00
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
2015-09-16 06:49:53 +03:00
|
|
|
#include "PLDHashTable.h"
|
2012-03-03 02:20:44 +04:00
|
|
|
#include "mozilla/HashFunctions.h"
|
2013-11-13 04:22:38 +04:00
|
|
|
#include "mozilla/MathAlgorithms.h"
|
2016-08-23 01:40:10 +03:00
|
|
|
#include "mozilla/OperatorNewExtensions.h"
|
2012-09-28 01:44:47 +04:00
|
|
|
#include "nsAlgorithm.h"
|
2017-07-07 23:54:16 +03:00
|
|
|
#include "nsPointerHashKeys.h"
|
2012-10-26 17:32:10 +04:00
|
|
|
#include "mozilla/Likely.h"
|
2013-06-23 16:03:39 +04:00
|
|
|
#include "mozilla/MemoryReporting.h"
|
2014-03-03 09:12:46 +04:00
|
|
|
#include "mozilla/ChaosMode.h"
|
2005-08-12 00:47:03 +04:00
|
|
|
|
2015-05-21 09:11:35 +03:00
|
|
|
using namespace mozilla;
|
|
|
|
|
2006-04-18 22:30:50 +04:00
|
|
|
#ifdef DEBUG
|
|
|
|
|
2015-05-21 09:11:35 +03:00
|
|
|
class AutoReadOp
|
|
|
|
{
|
|
|
|
Checker& mChk;
|
|
|
|
public:
|
|
|
|
explicit AutoReadOp(Checker& aChk) : mChk(aChk) { mChk.StartReadOp(); }
|
|
|
|
~AutoReadOp() { mChk.EndReadOp(); }
|
|
|
|
};
|
2006-04-18 22:30:50 +04:00
|
|
|
|
2015-05-21 09:11:35 +03:00
|
|
|
class AutoWriteOp
|
|
|
|
{
|
|
|
|
Checker& mChk;
|
|
|
|
public:
|
|
|
|
explicit AutoWriteOp(Checker& aChk) : mChk(aChk) { mChk.StartWriteOp(); }
|
|
|
|
~AutoWriteOp() { mChk.EndWriteOp(); }
|
|
|
|
};
|
|
|
|
|
|
|
|
class AutoIteratorRemovalOp
|
|
|
|
{
|
|
|
|
Checker& mChk;
|
|
|
|
public:
|
|
|
|
explicit AutoIteratorRemovalOp(Checker& aChk)
|
|
|
|
: mChk(aChk)
|
|
|
|
{
|
|
|
|
mChk.StartIteratorRemovalOp();
|
|
|
|
}
|
|
|
|
~AutoIteratorRemovalOp() { mChk.EndIteratorRemovalOp(); }
|
|
|
|
};
|
|
|
|
|
|
|
|
class AutoDestructorOp
|
|
|
|
{
|
|
|
|
Checker& mChk;
|
|
|
|
public:
|
|
|
|
explicit AutoDestructorOp(Checker& aChk)
|
|
|
|
: mChk(aChk)
|
|
|
|
{
|
|
|
|
mChk.StartDestructorOp();
|
|
|
|
}
|
|
|
|
~AutoDestructorOp() { mChk.EndDestructorOp(); }
|
|
|
|
};
|
|
|
|
|
|
|
|
#endif
|
2012-03-03 02:20:44 +04:00
|
|
|
|
2015-09-15 00:23:47 +03:00
|
|
|
// Hash callback for tables keyed on NUL-terminated C strings: hashes the
// string's characters (not the pointer).
/* static */ PLDHashNumber
PLDHashTable::HashStringKey(const void* aKey)
{
  return HashString(static_cast<const char*>(aKey));
}
|
|
|
|
|
2015-09-15 00:23:47 +03:00
|
|
|
// Hash callback that hashes the key pointer's value itself, via the shared
// nsPtrHashKey helper.
/* static */ PLDHashNumber
PLDHashTable::HashVoidPtrKeyStub(const void* aKey)
{
  return nsPtrHashKey<void>::HashKey(aKey);
}
|
|
|
|
|
2015-09-15 00:23:47 +03:00
|
|
|
/* static */ bool
|
2016-03-16 07:33:44 +03:00
|
|
|
PLDHashTable::MatchEntryStub(const PLDHashEntryHdr* aEntry, const void* aKey)
|
2005-08-12 00:47:03 +04:00
|
|
|
{
|
2014-06-27 05:35:39 +04:00
|
|
|
const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry;
|
2005-08-12 00:47:03 +04:00
|
|
|
|
2014-06-27 05:35:39 +04:00
|
|
|
return stub->key == aKey;
|
2005-08-12 00:47:03 +04:00
|
|
|
}
|
|
|
|
|
2015-09-15 00:23:47 +03:00
|
|
|
/* static */ bool
|
2016-03-16 07:33:44 +03:00
|
|
|
PLDHashTable::MatchStringKey(const PLDHashEntryHdr* aEntry, const void* aKey)
|
2005-08-12 00:47:03 +04:00
|
|
|
{
|
2014-06-27 05:35:39 +04:00
|
|
|
const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry;
|
2005-08-12 00:47:03 +04:00
|
|
|
|
2015-07-24 09:13:11 +03:00
|
|
|
// XXX tolerate null keys on account of sloppy Mozilla callers.
|
2014-06-27 05:35:39 +04:00
|
|
|
return stub->key == aKey ||
|
|
|
|
(stub->key && aKey &&
|
|
|
|
strcmp((const char*)stub->key, (const char*)aKey) == 0);
|
2005-08-12 00:47:03 +04:00
|
|
|
}
|
|
|
|
|
2015-09-15 00:23:47 +03:00
|
|
|
// Move callback for entries that can be relocated with a raw byte copy
// (i.e. entries that hold no pointers into themselves).
/* static */ void
PLDHashTable::MoveEntryStub(PLDHashTable* aTable,
                            const PLDHashEntryHdr* aFrom,
                            PLDHashEntryHdr* aTo)
{
  memcpy(aTo, aFrom, aTable->mEntrySize);
}
|
|
|
|
|
2015-09-15 00:23:47 +03:00
|
|
|
// Clear callback that simply zeroes the whole entry. Zeroing the header's
// mKeyHash marks the slot as free.
/* static */ void
PLDHashTable::ClearEntryStub(PLDHashTable* aTable, PLDHashEntryHdr* aEntry)
{
  memset(aEntry, 0, aTable->mEntrySize);
}
|
|
|
|
|
2015-09-15 00:23:47 +03:00
|
|
|
// Default ops for tables whose entries are plain PLDHashEntryStub: hash the
// key pointer itself, match by pointer identity, move with memcpy, clear
// with memset. The final callback slot is left null (presumably the
// entry-initialization hook -- confirm against PLDHashTableOps' declaration).
static const PLDHashTableOps gStubOps = {
  PLDHashTable::HashVoidPtrKeyStub,
  PLDHashTable::MatchEntryStub,
  PLDHashTable::MoveEntryStub,
  PLDHashTable::ClearEntryStub,
  nullptr
};
|
|
|
|
|
2015-09-15 00:23:47 +03:00
|
|
|
// Accessor for the shared stub ops table above; returns a pointer to
// file-static storage, so the result is valid for the life of the process.
/* static */ const PLDHashTableOps*
PLDHashTable::StubOps()
{
  return &gStubOps;
}
|
|
|
|
|
2013-10-22 02:36:45 +04:00
|
|
|
// Compute the byte size of an entry store holding |aCapacity| entries of
// |aEntrySize| bytes each. Writes the (possibly wrapped) 32-bit product to
// |*aNbytes|; returns false iff the true product overflows 32 bits.
static bool
SizeOfEntryStore(uint32_t aCapacity, uint32_t aEntrySize, uint32_t* aNbytes)
{
  const uint64_t product = uint64_t(aCapacity) * uint64_t(aEntrySize);
  *aNbytes = uint32_t(product);
  return (product >> 32) == 0; // false on overflow
}
|
|
|
|
|
2015-07-24 09:13:11 +03:00
|
|
|
// Compute max and min load numbers (entry counts). We have a secondary max
|
|
|
|
// that allows us to overload a table reasonably if it cannot be grown further
|
|
|
|
// (i.e. if ChangeTable() fails). The table slows down drastically if the
|
|
|
|
// secondary max is too close to 1, but 0.96875 gives only a slight slowdown
|
|
|
|
// while allowing 1.3x more elements.
|
2014-08-06 17:31:21 +04:00
|
|
|
// Maximum entry count before the table should grow: 75% of capacity.
static inline uint32_t
MaxLoad(uint32_t aCapacity)
{
  return aCapacity - aCapacity / 4; // == aCapacity * 0.75
}
|
|
|
|
// Secondary (overload) limit used when the table cannot be grown further:
// 96.875% of capacity.
static inline uint32_t
MaxLoadOnGrowthFailure(uint32_t aCapacity)
{
  return aCapacity - aCapacity / 32; // == aCapacity * 0.96875
}
|
|
|
|
// Minimum entry count below which the table may shrink: 25% of capacity.
static inline uint32_t
MinLoad(uint32_t aCapacity)
{
  return aCapacity / 4; // == aCapacity * 0.25
}
|
|
|
|
|
2015-06-10 23:54:06 +03:00
|
|
|
// Compute the minimum capacity (and the Log2 of that capacity) for a table
|
|
|
|
// containing |aLength| elements while respecting the following contraints:
|
|
|
|
// - table must be at most 75% full;
|
|
|
|
// - capacity must be a power of two;
|
|
|
|
// - capacity cannot be too small.
|
|
|
|
static inline void
|
|
|
|
BestCapacity(uint32_t aLength, uint32_t* aCapacityOut,
|
|
|
|
uint32_t* aLog2CapacityOut)
|
2005-08-12 00:47:03 +04:00
|
|
|
{
|
2014-08-06 17:31:21 +04:00
|
|
|
// Compute the smallest capacity allowing |aLength| elements to be inserted
|
|
|
|
// without rehashing.
|
2015-05-21 07:25:55 +03:00
|
|
|
uint32_t capacity = (aLength * 4 + (3 - 1)) / 3; // == ceil(aLength * 4 / 3)
|
2015-07-21 03:06:38 +03:00
|
|
|
if (capacity < PLDHashTable::kMinCapacity) {
|
|
|
|
capacity = PLDHashTable::kMinCapacity;
|
2014-06-27 05:35:39 +04:00
|
|
|
}
|
|
|
|
|
2015-05-21 07:25:55 +03:00
|
|
|
// Round up capacity to next power-of-two.
|
2015-06-10 23:54:06 +03:00
|
|
|
uint32_t log2 = CeilingLog2(capacity);
|
2014-08-06 17:31:21 +04:00
|
|
|
capacity = 1u << log2;
|
2015-07-21 03:06:38 +03:00
|
|
|
MOZ_ASSERT(capacity <= PLDHashTable::kMaxCapacity);
|
2015-05-21 07:25:55 +03:00
|
|
|
|
2015-06-10 23:54:06 +03:00
|
|
|
*aCapacityOut = capacity;
|
|
|
|
*aLog2CapacityOut = log2;
|
|
|
|
}
|
|
|
|
|
2015-07-21 03:06:38 +03:00
|
|
|
// Compute the hash-shift (kPLDHashNumberBits - log2(capacity)) for a new
// table of |aEntrySize|-byte entries expected to hold |aLength| elements.
// Deliberately crashes if the requested length, or the resulting entry-store
// size, is too large; this runs from the constructor's initializer list, so
// crashing is the only way to reject bad parameters.
/* static */ MOZ_ALWAYS_INLINE uint32_t
PLDHashTable::HashShift(uint32_t aEntrySize, uint32_t aLength)
{
  if (aLength > kMaxInitialLength) {
    MOZ_CRASH("Initial length is too large");
  }

  uint32_t capacity, log2;
  BestCapacity(aLength, &capacity, &log2);

  // Reject capacities whose entry store would overflow a 32-bit byte count.
  uint32_t nbytes;
  if (!SizeOfEntryStore(capacity, aEntrySize, &nbytes)) {
    MOZ_CRASH("Initial entry store size is too large");
  }

  // Compute the hashShift value.
  return kPLDHashNumberBits - log2;
}
|
|
|
|
|
2015-06-02 11:58:58 +03:00
|
|
|
// Construct an empty table. |aOps| supplies the hash/match/move/clear
// callbacks (wrapped here for record/replay support), |aEntrySize| is the
// per-entry size in bytes, and |aLength| is the anticipated element count
// used by HashShift() to size the table. mEntryStore starts out empty; no
// storage is allocated by the constructor itself.
PLDHashTable::PLDHashTable(const PLDHashTableOps* aOps, uint32_t aEntrySize,
                           uint32_t aLength)
  : mOps(recordreplay::GeneratePLDHashTableCallbacks(aOps))
  , mEntryStore()
  , mGeneration(0)
  , mHashShift(HashShift(aEntrySize, aLength))
  , mEntrySize(aEntrySize)
  , mEntryCount(0)
  , mRemovedCount(0)
#ifdef DEBUG
  , mChecker()
#endif
{
  // An entry size greater than 0xff is unlikely, but let's check anyway. If
  // you hit this, your hashtable would waste lots of space for unused entries
  // and you should change your hash table's entries to pointers.
  // (mEntrySize is a narrow field, so a too-large aEntrySize fails this
  // round-trip comparison.)
  if (aEntrySize != uint32_t(mEntrySize)) {
    MOZ_CRASH("Entry size is too large");
  }
}
|
|
|
|
|
2015-06-02 11:58:58 +03:00
|
|
|
// Move-assignment: destroys |this|, reconstructs it empty with |aOther|'s
// ops and entry size, then steals |aOther|'s storage and counters, leaving
// |aOther| empty so its destruction is a no-op.
PLDHashTable&
PLDHashTable::operator=(PLDHashTable&& aOther)
{
  if (this == &aOther) {
    return *this;
  }

  // |mOps| and |mEntrySize| are required to stay the same, they're
  // conceptually part of the type -- indeed, if PLDHashTable was a templated
  // type like nsTHashtable, they *would* be part of the type -- so it only
  // makes sense to assign in cases where they match. An exception is when we
  // are recording or replaying the execution, in which case custom ops are
  // generated for each table.
  MOZ_RELEASE_ASSERT(mOps == aOther.mOps || !mOps || recordreplay::IsRecordingOrReplaying());
  MOZ_RELEASE_ASSERT(mEntrySize == aOther.mEntrySize || !mEntrySize);

  // Reconstruct |this|. Unwrap aOther's ops first because the constructor
  // below re-wraps them for record/replay.
  const PLDHashTableOps* ops = recordreplay::UnwrapPLDHashTableCallbacks(aOther.mOps);
  this->~PLDHashTable();
  new (KnownNotNull, this) PLDHashTable(ops, aOther.mEntrySize, 0);

  // Move non-const pieces over.
  mHashShift = std::move(aOther.mHashShift);
  mEntryCount = std::move(aOther.mEntryCount);
  mRemovedCount = std::move(aOther.mRemovedCount);
  mEntryStore.Set(aOther.mEntryStore.Get(), &mGeneration);
#ifdef DEBUG
  mChecker = std::move(aOther.mChecker);
#endif

  recordreplay::MovePLDHashTableContents(aOther.mOps, mOps);

  // Clear up |aOther| so its destruction will be a no-op and it reports being
  // empty.
  {
#ifdef DEBUG
    // NOTE(review): this guards the mutation of |aOther| with |this->mChecker|
    // (which was just move-assigned from aOther's checker) rather than
    // |aOther.mChecker| -- confirm this is intentional.
    AutoDestructorOp op(mChecker);
#endif
    aOther.mEntryCount = 0;
    aOther.mEntryStore.Set(nullptr, &aOther.mGeneration);
  }

  return *this;
}
|
|
|
|
|
2015-07-21 03:15:00 +03:00
|
|
|
// First hash: the top log2(capacity) bits of |aHash0|, used as the initial
// probe index into the entry store.
PLDHashNumber
PLDHashTable::Hash1(PLDHashNumber aHash0) const
{
  return aHash0 >> mHashShift;
}
|
|
|
|
|
|
|
|
// Second hash for double hashing: produces the (odd) probe-sequence stride
// in |aHash2Out| and the table's index mask in |aSizeMaskOut|.
void
PLDHashTable::Hash2(PLDHashNumber aHash0,
                    uint32_t& aHash2Out, uint32_t& aSizeMaskOut) const
{
  uint32_t sizeLog2 = kPLDHashNumberBits - mHashShift;
  uint32_t sizeMask = (PLDHashNumber(1) << sizeLog2) - 1;
  aSizeMaskOut = sizeMask;

  // The incoming aHash0 always has the low bit unset (since we leave it
  // free for the collision flag), and should have reasonably random
  // data in the other 31 bits. We used the high bits of aHash0 for
  // Hash1, so we use the low bits here. If the table size is large,
  // the bits we use may overlap, but that's still more random than
  // filling with 0s.
  //
  // Double hashing needs the second hash code to be relatively prime to table
  // size, so we simply make hash2 odd.
  //
  // This also conveniently covers up the fact that we have the low bit
  // unset since aHash0 has the low bit unset.
  aHash2Out = (aHash0 & sizeMask) | 1;
}
|
2005-08-12 00:47:03 +04:00
|
|
|
|
2015-07-24 09:13:11 +03:00
|
|
|
// Reserve mKeyHash 0 for free entries and 1 for removed-entry sentinels. Note
|
|
|
|
// that a removed-entry sentinel need be stored only if the removed entry had
|
|
|
|
// a colliding entry added after it. Therefore we can use 1 as the collision
|
|
|
|
// flag in addition to the removed-entry sentinel value. Multiplicative hash
|
|
|
|
// uses the high order bits of mKeyHash, so this least-significant reservation
|
|
|
|
// should not hurt the hash function's effectiveness much.
|
2005-08-12 00:47:03 +04:00
|
|
|
|
2015-07-24 09:13:11 +03:00
|
|
|
// Match an entry's mKeyHash against an unstored one computed from a key.
|
2015-07-21 03:15:00 +03:00
|
|
|
// Compare an entry's stored mKeyHash -- with the collision-flag bit masked
// off -- against an unstored hash computed from a key.
/* static */ bool
PLDHashTable::MatchEntryKeyhash(const PLDHashEntryHdr* aEntry,
                                const PLDHashNumber aKeyHash)
{
  return (aEntry->mKeyHash & ~kCollisionFlag) == aKeyHash;
}
|
|
|
|
|
2015-07-24 09:13:11 +03:00
|
|
|
// Compute the address of the indexed entry in table.
|
2015-07-21 03:15:00 +03:00
|
|
|
// Compute the address of the entry at |aIndex| in the (byte-addressed) entry
// store. The const_cast lets this const method hand back a mutable entry
// pointer, mirroring the non-const accessors that call it.
PLDHashEntryHdr*
PLDHashTable::AddressEntry(uint32_t aIndex) const
{
  return const_cast<PLDHashEntryHdr*>(
    reinterpret_cast<const PLDHashEntryHdr*>(
      mEntryStore.Get() + aIndex * mEntrySize));
}
|
2015-01-27 03:02:05 +03:00
|
|
|
|
2015-05-21 07:25:55 +03:00
|
|
|
// Destructor: runs the ops' clearEntry hook on every live entry, then
// releases the record/replay callback wrapper. The entry storage itself is
// freed by ~EntryStore afterwards.
PLDHashTable::~PLDHashTable()
{
#ifdef DEBUG
  AutoDestructorOp op(mChecker);
#endif

  // A table whose storage was never allocated (or was already stolen by a
  // move) has nothing to clear.
  if (!mEntryStore.Get()) {
    recordreplay::DestroyPLDHashTableCallbacks(mOps);
    return;
  }

  // Clear any remaining live entries.
  char* entryAddr = mEntryStore.Get();
  char* entryLimit = entryAddr + Capacity() * mEntrySize;
  while (entryAddr < entryLimit) {
    PLDHashEntryHdr* entry = (PLDHashEntryHdr*)entryAddr;
    if (EntryIsLive(entry)) {
      mOps->clearEntry(this, entry);
    }
    entryAddr += mEntrySize;
  }

  recordreplay::DestroyPLDHashTableCallbacks(mOps);

  // Entry storage is freed last, by ~EntryStore().
}
|
|
|
|
|
2015-05-19 05:16:06 +03:00
|
|
|
// Empty the table and re-initialize it sized for |aLength| elements, by
// destroying it in place and placement-new-constructing a fresh table with
// the same (unwrapped) ops and entry size.
void
PLDHashTable::ClearAndPrepareForLength(uint32_t aLength)
{
  // Get these values before the destructor clobbers them.
  const PLDHashTableOps* ops = recordreplay::UnwrapPLDHashTableCallbacks(mOps);
  uint32_t entrySize = mEntrySize;

  this->~PLDHashTable();
  new (KnownNotNull, this) PLDHashTable(ops, entrySize, aLength);
}
|
|
|
|
|
|
|
|
// Empty the table, resetting it to the default initial length.
void
PLDHashTable::Clear()
{
  ClearAndPrepareForLength(kDefaultInitialLength);
}
|
|
|
|
|
2016-06-22 09:44:40 +03:00
|
|
|
// If |Reason| is |ForAdd|, the return value is always non-null and it may be
|
|
|
|
// a previously-removed entry. If |Reason| is |ForSearchOrRemove|, the return
|
|
|
|
// value is null on a miss, and will never be a previously-removed entry on a
|
|
|
|
// hit. This distinction is a bit grotty but this function is hot enough that
|
2018-07-24 04:09:35 +03:00
|
|
|
// these differences are worthwhile. (It's also hot enough that
|
|
|
|
// MOZ_ALWAYS_INLINE makes a significant difference.)
|
2015-01-30 07:18:28 +03:00
|
|
|
// Core double-hashing probe loop, shared by lookup, add, and remove (the
// |Reason| template parameter selects the variant at compile time).
template <PLDHashTable::SearchReason Reason>
MOZ_ALWAYS_INLINE PLDHashEntryHdr*
PLDHashTable::SearchTable(const void* aKey, PLDHashNumber aKeyHash) const
{
  MOZ_ASSERT(mEntryStore.Get());
  NS_ASSERTION(!(aKeyHash & kCollisionFlag),
               "!(aKeyHash & kCollisionFlag)");

  // Compute the primary hash address.
  PLDHashNumber hash1 = Hash1(aKeyHash);
  PLDHashEntryHdr* entry = AddressEntry(hash1);

  // Miss: return space for a new entry.
  if (EntryIsFree(entry)) {
    return (Reason == ForAdd) ? entry : nullptr;
  }

  // Hit: return entry.
  PLDHashMatchEntry matchEntry = mOps->matchEntry;
  if (MatchEntryKeyhash(entry, aKeyHash) &&
      matchEntry(entry, aKey)) {
    return entry;
  }

  // Collision: double hash.
  PLDHashNumber hash2;
  uint32_t sizeMask;
  Hash2(aKeyHash, hash2, sizeMask);

  // Save the first removed entry pointer so Add() can recycle it. (Only used
  // if Reason==ForAdd.)
  PLDHashEntryHdr* firstRemoved = nullptr;

  for (;;) {
    // When adding, mark probed entries with the collision flag -- but only
    // until the first removed slot is found: a new entry will be placed in
    // |firstRemoved|, so entries probed past it are not on the path to the
    // new entry and need no flag from this search.
    if (Reason == ForAdd && !firstRemoved) {
      if (MOZ_UNLIKELY(EntryIsRemoved(entry))) {
        firstRemoved = entry;
      } else {
        entry->mKeyHash |= kCollisionFlag;
      }
    }

    // Step to the next probe position; the subtraction keeps the sequence
    // within the table via the size mask.
    hash1 -= hash2;
    hash1 &= sizeMask;

    entry = AddressEntry(hash1);
    if (EntryIsFree(entry)) {
      // Miss: for adds, prefer recycling the first removed slot seen.
      return (Reason == ForAdd) ? (firstRemoved ? firstRemoved : entry)
                                : nullptr;
    }

    if (MatchEntryKeyhash(entry, aKeyHash) &&
        matchEntry(entry, aKey)) {
      return entry;
    }
  }

  // NOTREACHED
  return nullptr;
}
|
|
|
|
|
2015-07-24 09:13:11 +03:00
|
|
|
// This is a copy of SearchTable(), used by ChangeTable(), hardcoded to
|
2016-06-22 09:44:40 +03:00
|
|
|
// 1. assume |Reason| is |ForAdd|,
|
2015-07-24 09:13:11 +03:00
|
|
|
// 2. assume that |aKey| will never match an existing entry, and
|
|
|
|
// 3. assume that no entries have been removed from the current table
|
|
|
|
// structure.
|
|
|
|
// Avoiding the need for |aKey| means we can avoid needing a way to map entries
|
|
|
|
// to keys, which means callers can use complex key types more easily.
|
2016-02-01 22:27:57 +03:00
|
|
|
MOZ_ALWAYS_INLINE PLDHashEntryHdr*
PLDHashTable::FindFreeEntry(PLDHashNumber aKeyHash) const
{
  MOZ_ASSERT(mEntryStore.Get());
  NS_ASSERTION(!(aKeyHash & kCollisionFlag),
               "!(aKeyHash & kCollisionFlag)");

  // Compute the primary hash address.
  PLDHashNumber hash1 = Hash1(aKeyHash);
  PLDHashEntryHdr* entry = AddressEntry(hash1);

  // Miss: return space for a new entry.
  if (EntryIsFree(entry)) {
    return entry;
  }

  // Collision: double hash.
  PLDHashNumber hash2;
  uint32_t sizeMask;
  Hash2(aKeyHash, hash2, sizeMask);

  for (;;) {
    // A freshly built table has no removed entries (assumption 3 in the
    // comment above), so every occupied slot we probe past gets flagged as
    // part of a collision chain.
    NS_ASSERTION(!EntryIsRemoved(entry),
                 "!EntryIsRemoved(entry)");
    entry->mKeyHash |= kCollisionFlag;

    hash1 -= hash2;
    hash1 &= sizeMask;

    entry = AddressEntry(hash1);
    if (EntryIsFree(entry)) {
      return entry;
    }
  }

  // NOTREACHED
}
|
|
|
|
|
2014-08-26 03:56:33 +04:00
|
|
|
// Grow or shrink the entry store by a factor of 2^|aDeltaLog2|, rehashing all
// live entries into the new store. Removed-entry tombstones are dropped and
// mRemovedCount is reset to zero. Returns false (leaving the table untouched)
// if the new capacity would exceed kMaxCapacity, if the byte-size computation
// would overflow, or if allocation fails.
bool
PLDHashTable::ChangeTable(int32_t aDeltaLog2)
{
  MOZ_ASSERT(mEntryStore.Get());

  // Look, but don't touch, until we succeed in getting new entry store.
  int32_t oldLog2 = kPLDHashNumberBits - mHashShift;
  int32_t newLog2 = oldLog2 + aDeltaLog2;
  uint32_t newCapacity = 1u << newLog2;
  if (newCapacity > kMaxCapacity) {
    return false;
  }

  uint32_t nbytes;
  if (!SizeOfEntryStore(newCapacity, mEntrySize, &nbytes)) {
    return false; // overflowed
  }

  // calloc() gives zeroed storage, i.e. every entry starts out free.
  char* newEntryStore = (char*)calloc(1, nbytes);
  if (!newEntryStore) {
    return false;
  }

  // We can't fail from here on, so update table parameters.
  mHashShift = kPLDHashNumberBits - newLog2;
  mRemovedCount = 0;

  // Assign the new entry store to table. Bumping the generation (via Set)
  // invalidates any outstanding entry pointers.
  char* oldEntryStore;
  char* oldEntryAddr;
  oldEntryAddr = oldEntryStore = mEntryStore.Get();
  mEntryStore.Set(newEntryStore, &mGeneration);
  PLDHashMoveEntry moveEntry = mOps->moveEntry;

  // Copy only live entries, leaving removed ones behind.
  uint32_t oldCapacity = 1u << oldLog2;
  for (uint32_t i = 0; i < oldCapacity; ++i) {
    PLDHashEntryHdr* oldEntry = (PLDHashEntryHdr*)oldEntryAddr;
    if (EntryIsLive(oldEntry)) {
      // Strip the collision flag; FindFreeEntry() re-derives chain flags
      // for the new store as it probes.
      const PLDHashNumber key = oldEntry->mKeyHash & ~kCollisionFlag;
      PLDHashEntryHdr* newEntry = FindFreeEntry(key);
      NS_ASSERTION(EntryIsFree(newEntry), "EntryIsFree(newEntry)");
      moveEntry(this, oldEntry, newEntry);
      newEntry->mKeyHash = key;
    }
    oldEntryAddr += mEntrySize;
  }

  free(oldEntryStore);
  return true;
}
|
|
|
|
|
2015-01-23 02:43:18 +03:00
|
|
|
// Hash |aKey| via the client's hashKey op and scramble the result, then
// adjust it so it can never collide with the values this table reserves:
// the free/removed sentinels (0 and 1) and the collision-flag bit.
MOZ_ALWAYS_INLINE PLDHashNumber
PLDHashTable::ComputeKeyHash(const void* aKey) const
{
  MOZ_ASSERT(mEntryStore.Get());

  PLDHashNumber hash = mozilla::ScrambleHashCode(mOps->hashKey(aKey));

  // 0 and 1 mark free and removed entries; move such codes out of the
  // reserved range.
  if (hash < 2) {
    hash -= 2;
  }

  // The collision flag lives in the key-hash field but is never part of a
  // key's hash, so clear it.
  return hash & ~kCollisionFlag;
}
|
2005-08-12 00:47:03 +04:00
|
|
|
|
2015-05-21 10:34:25 +03:00
|
|
|
// Look up |aKey| without modifying the table. Returns the matching live
// entry, or null if the key is absent (including the case where no entry
// storage has ever been allocated).
PLDHashEntryHdr*
PLDHashTable::Search(const void* aKey) const
{
#ifdef DEBUG
  AutoReadOp op(mChecker);
#endif

  // A table that has never had an entry added has no storage to search.
  if (!mEntryStore.Get()) {
    return nullptr;
  }

  return SearchTable<ForSearchOrRemove>(aKey, ComputeKeyHash(aKey));
}
|
|
|
|
|
2015-09-15 00:23:12 +03:00
|
|
|
// Fallible Add(): return the entry for |aKey|, creating it if necessary.
// Returns the existing live entry if the key is already present. Returns
// null on allocation failure, or when growth fails and the table is past
// its secondary load limit.
PLDHashEntryHdr*
PLDHashTable::Add(const void* aKey, const mozilla::fallible_t&)
{
#ifdef DEBUG
  AutoWriteOp op(mChecker);
#endif

  // Allocate the entry storage if it hasn't already been allocated.
  if (!mEntryStore.Get()) {
    uint32_t nbytes;
    // We already checked this in the constructor, so it must still be true.
    MOZ_RELEASE_ASSERT(SizeOfEntryStore(CapacityFromHashShift(), mEntrySize,
                                        &nbytes));
    // calloc() zeroes the store, so every entry starts out free.
    mEntryStore.Set((char*)calloc(1, nbytes), &mGeneration);
    if (!mEntryStore.Get()) {
      return nullptr;
    }
  }

  // If alpha is >= .75, grow or compress the table. If aKey is already in the
  // table, we may grow once more than necessary, but only if we are on the
  // edge of being overloaded.
  uint32_t capacity = Capacity();
  if (mEntryCount + mRemovedCount >= MaxLoad(capacity)) {
    // Compress if a quarter or more of all entries are removed.
    int deltaLog2;
    if (mRemovedCount >= capacity >> 2) {
      deltaLog2 = 0;  // same capacity, but tombstones are purged
    } else {
      deltaLog2 = 1;  // double the capacity
    }

    // Grow or compress the table. If ChangeTable() fails, allow overloading up
    // to the secondary max. Once we hit the secondary max, return null.
    if (!ChangeTable(deltaLog2) &&
        mEntryCount + mRemovedCount >= MaxLoadOnGrowthFailure(capacity)) {
      return nullptr;
    }
  }

  // Look for entry after possibly growing, so we don't have to add it,
  // then skip it while growing the table and re-add it after.
  PLDHashNumber keyHash = ComputeKeyHash(aKey);
  PLDHashEntryHdr* entry = SearchTable<ForAdd>(aKey, keyHash);
  if (!EntryIsLive(entry)) {
    // Initialize the entry, indicating that it's no longer free.
    if (EntryIsRemoved(entry)) {
      // Recycling a tombstone: it was on a probe chain, so keep the
      // collision flag set.
      mRemovedCount--;
      keyHash |= kCollisionFlag;
    }
    if (mOps->initEntry) {
      mOps->initEntry(entry, aKey);
    }
    entry->mKeyHash = keyHash;
    mEntryCount++;
  }

  return entry;
}
|
|
|
|
|
2015-09-15 00:23:12 +03:00
|
|
|
// Infallible Add(): like the fallible overload, but any failure aborts the
// process via NS_ABORT_OOM instead of returning null.
PLDHashEntryHdr*
PLDHashTable::Add(const void* aKey)
{
  PLDHashEntryHdr* entry = Add(aKey, fallible);
  if (!entry) {
    if (!mEntryStore.Get()) {
      // We OOM'd while allocating the initial entry storage.
      uint32_t nbytes;
      (void) SizeOfEntryStore(CapacityFromHashShift(), mEntrySize, &nbytes);
      NS_ABORT_OOM(nbytes);
    } else {
      // We failed to resize the existing entry storage, either due to OOM or
      // because we exceeded the maximum table capacity or size; report it as
      // an OOM. The multiplication by 2 gets us the size we tried to allocate,
      // which is double the current size.
      NS_ABORT_OOM(2 * EntrySize() * EntryCount());
    }
  }
  return entry;
}
|
|
|
|
|
2015-09-15 00:23:24 +03:00
|
|
|
void
|
2015-01-16 03:01:28 +03:00
|
|
|
PLDHashTable::Remove(const void* aKey)
|
|
|
|
{
|
2015-05-21 09:11:35 +03:00
|
|
|
#ifdef DEBUG
|
|
|
|
AutoWriteOp op(mChecker);
|
|
|
|
#endif
|
2015-01-23 02:43:18 +03:00
|
|
|
|
2015-07-27 05:57:23 +03:00
|
|
|
PLDHashEntryHdr* entry = mEntryStore.Get()
|
|
|
|
? SearchTable<ForSearchOrRemove>(aKey,
|
|
|
|
ComputeKeyHash(aKey))
|
|
|
|
: nullptr;
|
2015-01-30 07:18:28 +03:00
|
|
|
if (entry) {
|
2015-07-24 07:43:48 +03:00
|
|
|
RawRemove(entry);
|
2015-09-08 05:20:12 +03:00
|
|
|
ShrinkIfAppropriate();
|
2015-01-23 02:43:18 +03:00
|
|
|
}
|
2015-01-16 03:01:28 +03:00
|
|
|
}
|
|
|
|
|
2015-09-08 05:20:12 +03:00
|
|
|
// Remove an entry already located by the caller (e.g. via Search()),
// then shrink the table if it has become sufficiently underloaded.
void
PLDHashTable::RemoveEntry(PLDHashEntryHdr* aEntry)
{
#ifdef DEBUG
  AutoWriteOp op(mChecker);
#endif

  RawRemove(aEntry);
  ShrinkIfAppropriate();
}
|
|
|
|
|
2015-09-15 00:23:26 +03:00
|
|
|
void
|
2014-08-26 03:56:33 +04:00
|
|
|
PLDHashTable::RawRemove(PLDHashEntryHdr* aEntry)
|
|
|
|
{
|
2015-05-21 09:11:35 +03:00
|
|
|
// Unfortunately, we can only do weak checking here. That's because
|
|
|
|
// RawRemove() can be called legitimately while an Enumerate() call is
|
|
|
|
// active, which doesn't fit well into how Checker's mState variable works.
|
|
|
|
MOZ_ASSERT(mChecker.IsWritable());
|
2015-02-12 07:24:33 +03:00
|
|
|
|
2015-07-27 05:57:23 +03:00
|
|
|
MOZ_ASSERT(mEntryStore.Get());
|
2014-08-26 03:56:33 +04:00
|
|
|
|
2015-09-08 05:20:12 +03:00
|
|
|
MOZ_ASSERT(EntryIsLive(aEntry), "EntryIsLive(aEntry)");
|
2014-06-27 05:35:39 +04:00
|
|
|
|
2015-07-24 09:13:11 +03:00
|
|
|
// Load keyHash first in case clearEntry() goofs it.
|
2015-01-29 08:33:38 +03:00
|
|
|
PLDHashNumber keyHash = aEntry->mKeyHash;
|
2015-01-20 03:34:44 +03:00
|
|
|
mOps->clearEntry(this, aEntry);
|
2015-07-21 03:15:00 +03:00
|
|
|
if (keyHash & kCollisionFlag) {
|
|
|
|
MarkEntryRemoved(aEntry);
|
2014-08-26 04:29:14 +04:00
|
|
|
mRemovedCount++;
|
2014-06-27 05:35:39 +04:00
|
|
|
} else {
|
2015-07-21 03:15:00 +03:00
|
|
|
MarkEntryFree(aEntry);
|
2014-06-27 05:35:39 +04:00
|
|
|
}
|
2014-08-26 04:29:14 +04:00
|
|
|
mEntryCount--;
|
2005-08-12 00:47:03 +04:00
|
|
|
}
|
|
|
|
|
2015-06-11 02:36:02 +03:00
|
|
|
// Shrink or compress if a quarter or more of all entries are removed, or if the
|
|
|
|
// table is underloaded according to the minimum alpha, and is not minimal-size
|
|
|
|
// already.
|
|
|
|
void
|
|
|
|
PLDHashTable::ShrinkIfAppropriate()
|
|
|
|
{
|
|
|
|
uint32_t capacity = Capacity();
|
|
|
|
if (mRemovedCount >= capacity >> 2 ||
|
2015-07-21 03:06:38 +03:00
|
|
|
(capacity > kMinCapacity && mEntryCount <= MinLoad(capacity))) {
|
2015-06-11 02:36:02 +03:00
|
|
|
uint32_t log2;
|
|
|
|
BestCapacity(mEntryCount, &capacity, &log2);
|
|
|
|
|
2018-07-26 11:52:46 +03:00
|
|
|
int32_t deltaLog2 = log2 - (kPLDHashNumberBits - mHashShift);
|
2015-06-11 02:36:02 +03:00
|
|
|
MOZ_ASSERT(deltaLog2 <= 0);
|
|
|
|
|
|
|
|
(void) ChangeTable(deltaLog2);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-07-30 08:28:20 +03:00
|
|
|
// Measure the heap size of the entry store only. "Shallow" because any
// memory owned by the entries themselves is not (and cannot be) measured
// here; clients must add that separately.
size_t
PLDHashTable::ShallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
#ifdef DEBUG
  AutoReadOp op(mChecker);
#endif

  return aMallocSizeOf(mEntryStore.Get());
}
|
|
|
|
|
2011-11-28 07:03:14 +04:00
|
|
|
// Like ShallowSizeOfExcludingThis(), but also counts the PLDHashTable
// object itself.
size_t
PLDHashTable::ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + ShallowSizeOfExcludingThis(aMallocSizeOf);
}
|
|
|
|
|
2015-06-12 07:19:53 +03:00
|
|
|
// Move constructor: steal |aOther|'s iteration state and neuter it so its
// destructor performs no shrink and no checker bookkeeping.
PLDHashTable::Iterator::Iterator(Iterator&& aOther)
  : mTable(aOther.mTable)
  , mStart(aOther.mStart)
  , mLimit(aOther.mLimit)
  , mCurrent(aOther.mCurrent)
  , mNexts(aOther.mNexts)
  , mNextsLimit(aOther.mNextsLimit)
  , mHaveRemoved(aOther.mHaveRemoved)
{
  // No need to change |mChecker| here.
  aOther.mTable = nullptr;
  aOther.mStart = nullptr;
  aOther.mLimit = nullptr;
  aOther.mCurrent = nullptr;
  aOther.mNexts = 0;
  aOther.mNextsLimit = 0;
  aOther.mHaveRemoved = false;
}
|
|
|
|
|
2015-07-07 08:02:26 +03:00
|
|
|
// Construct an iterator over |aTable|, positioned at the first live entry
// (if any). Holds a read op on the table's checker for its whole lifetime
// (DEBUG builds only).
PLDHashTable::Iterator::Iterator(PLDHashTable* aTable)
  : mTable(aTable)
  , mStart(mTable->mEntryStore.Get())
  , mLimit(mTable->mEntryStore.Get() + mTable->Capacity() * mTable->mEntrySize)
  , mCurrent(mTable->mEntryStore.Get())
  , mNexts(0)
  , mNextsLimit(mTable->EntryCount())
  , mHaveRemoved(false)
{
#ifdef DEBUG
  mTable->mChecker.StartReadOp();
#endif

  if (ChaosMode::isActive(ChaosFeature::HashTableIteration) &&
      mTable->Capacity() > 0) {
    // Start iterating at a random entry. It would be even more chaotic to
    // iterate in fully random order, but that's harder.
    mCurrent += ChaosMode::randomUint32LessThan(mTable->Capacity()) *
                mTable->mEntrySize;
  }

  // Advance to the first live entry, if there is one.
  if (!Done()) {
    while (IsOnNonLiveEntry()) {
      MoveToNextEntry();
    }
  }
}
|
|
|
|
|
2015-06-12 07:19:53 +03:00
|
|
|
// Destructor: if any entries were removed during iteration, give the table
// a chance to shrink now that iteration is over, then release the read op
// taken in the constructor (DEBUG builds only).
PLDHashTable::Iterator::~Iterator()
{
  // A moved-from iterator has a null mTable and owes no cleanup.
  if (!mTable) {
    return;
  }

  if (mHaveRemoved) {
    mTable->ShrinkIfAppropriate();
  }
#ifdef DEBUG
  mTable->mChecker.EndReadOp();
#endif
}
|
|
|
|
|
2015-06-12 07:19:53 +03:00
|
|
|
// True if the iterator currently points at a free or removed slot rather
// than a live entry. Must not be called once iteration is done.
MOZ_ALWAYS_INLINE bool
PLDHashTable::Iterator::IsOnNonLiveEntry() const
{
  MOZ_ASSERT(!Done());
  return !EntryIsLive(reinterpret_cast<PLDHashEntryHdr*>(mCurrent));
}
|
|
|
|
|
|
|
|
// Step the cursor to the next slot, wrapping from the end of the store back
// to its start (needed because Chaos Mode may begin iteration mid-store).
MOZ_ALWAYS_INLINE void
PLDHashTable::Iterator::MoveToNextEntry()
{
  mCurrent += mTable->mEntrySize;
  if (mCurrent == mLimit) {
    mCurrent = mStart; // Wrap-around. Possible due to Chaos Mode.
  }
}
|
|
|
|
|
2015-06-12 07:19:53 +03:00
|
|
|
void
|
|
|
|
PLDHashTable::Iterator::Next()
|
|
|
|
{
|
|
|
|
MOZ_ASSERT(!Done());
|
2014-08-28 20:29:23 +04:00
|
|
|
|
2015-07-03 10:27:27 +03:00
|
|
|
mNexts++;
|
|
|
|
|
|
|
|
// Advance to the next live entry, if there is one.
|
|
|
|
if (!Done()) {
|
|
|
|
do {
|
|
|
|
MoveToNextEntry();
|
|
|
|
} while (IsOnNonLiveEntry());
|
|
|
|
}
|
2014-08-28 20:29:23 +04:00
|
|
|
}
|
|
|
|
|
2015-06-11 03:04:07 +03:00
|
|
|
// Remove the entry the iterator currently points at. The table is not
// shrunk here; that is deferred to the destructor (via mHaveRemoved) so the
// iteration itself stays valid.
void
PLDHashTable::Iterator::Remove()
{
  mTable->RawRemove(Get());
  mHaveRemoved = true;
}
|
|
|
|
|
2009-01-10 19:28:16 +03:00
|
|
|
#ifdef DEBUG
|
2015-09-15 00:23:27 +03:00
|
|
|
// DEBUG-only: mark the table non-writable so the checker asserts on any
// subsequent mutating operation.
void
PLDHashTable::MarkImmutable()
{
  mChecker.SetNonWritable();
}
|
2009-01-10 19:28:16 +03:00
|
|
|
#endif
|