Bug 581875 - use js::HashSet in JSAtomState (r=igor)

This commit is contained in:
Luke Wagner 2010-07-26 21:09:23 -07:00
Parent 95b6da52e3
Commit 45696f175e
4 changed files with 182 additions and 269 deletions

View file

@@ -283,81 +283,38 @@ const char js_close_str[] = "close";
 const char js_send_str[] = "send";
 #endif
 
-/*
- * JSAtomState.stringAtoms hashtable entry. To support pinned and interned
- * string atoms, we use the lowest bits of the keyAndFlags field to store
- * ATOM_PINNED and ATOM_INTERNED flags.
- */
-typedef struct JSAtomHashEntry {
-    JSDHashEntryHdr hdr;
-    jsuword         keyAndFlags;
-} JSAtomHashEntry;
-
-#define ATOM_ENTRY_FLAG_MASK (ATOM_PINNED | ATOM_INTERNED)
-
-JS_STATIC_ASSERT(ATOM_ENTRY_FLAG_MASK < JS_GCTHING_ALIGN);
-
 /*
  * Helper macros to access and modify JSAtomHashEntry.
  */
-#define TO_ATOM_ENTRY(hdr)              ((JSAtomHashEntry *) hdr)
-#define ATOM_ENTRY_KEY(entry)                                                 \
-    ((JSString *)((entry)->keyAndFlags & ~ATOM_ENTRY_FLAG_MASK))
-#define ATOM_ENTRY_FLAGS(entry)                                               \
-    ((uintN)((entry)->keyAndFlags & ATOM_ENTRY_FLAG_MASK))
-#define INIT_ATOM_ENTRY(entry, key)                                           \
-    ((void)((entry)->keyAndFlags = (jsuword)(key)))
-#define ADD_ATOM_ENTRY_FLAGS(entry, flags)                                    \
-    ((void)((entry)->keyAndFlags |= (jsuword)(flags)))
-#define CLEAR_ATOM_ENTRY_FLAGS(entry, flags)                                  \
-    ((void)((entry)->keyAndFlags &= ~(jsuword)(flags)))
-
-static JSDHashNumber
-HashString(JSDHashTable *table, const void *key);
-
-static JSBool
-MatchString(JSDHashTable *table, const JSDHashEntryHdr *hdr, const void *key);
-
-static const JSDHashTableOps StringHashOps = {
-    JS_DHashAllocTable,
-    JS_DHashFreeTable,
-    HashString,
-    MatchString,
-    JS_DHashMoveEntryStub,
-    JS_DHashClearEntryStub,
-    JS_DHashFinalizeStub,
-    NULL
-};
-
-#define IS_INITIALIZED_STATE(state) ((state)->stringAtoms.ops != NULL)
-
-static JSDHashNumber
-HashString(JSDHashTable *table, const void *key)
+inline AtomEntryType
+StringToInitialAtomEntry(JSString *str)
 {
-    return js_HashString((JSString *)key);
+    return (AtomEntryType) str;
 }
 
-static JSBool
-MatchString(JSDHashTable *table, const JSDHashEntryHdr *hdr, const void *key)
+inline uintN
+AtomEntryFlags(AtomEntryType entry)
 {
-    JSAtomHashEntry *entry = TO_ATOM_ENTRY(hdr);
-
-    if (entry->keyAndFlags == 0) {
-        /*
-         * This happens when js_AtomizeString adds a new hash entry and
-         * releases the lock but before it takes the lock the second time to
-         * initialize keyAndFlags for the entry.
-         *
-         * We always return false for such entries so JS_DHashTableOperate
-         * never finds them. We clean them during GC's sweep phase.
-         *
-         * It means that with a contested lock or when GC is triggered outside
-         * the lock we may end up adding two entries, but this is a price for
-         * simpler code.
-         */
-        return JS_FALSE;
-    }
-    return js_EqualStrings(ATOM_ENTRY_KEY(entry), (JSString *)key);
+    return (uintN) (entry & ATOM_ENTRY_FLAG_MASK);
 }
 
+/*
+ * Conceptually, we have compressed a HashMap<JSAtom *, uint> into a
+ * HashMap<size_t>. Here, we promise that we are only changing the "value" of
+ * the HashMap entry, so the const_cast is safe.
+ */
+
+inline void
+AddAtomEntryFlags(const AtomEntryType &entry, uintN flags)
+{
+    const_cast<AtomEntryType &>(entry) |= flags;
+}
+
+inline void
+ClearAtomEntryFlags(const AtomEntryType &entry, uintN flags)
+{
+    const_cast<AtomEntryType &>(entry) &= ~AtomEntryType(flags);
+}
+
 /*
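The helpers above work by pointer tagging: ATOM_PINNED and ATOM_INTERNED live in the low bits of the JSString pointer itself, which is safe because GC things are aligned to at least JS_GCTHING_ALIGN, so those bits of any valid key are zero. The const_cast is needed because HashSet only hands out const references to its elements; flipping flag bits leaves the masked key, and hence the entry's hash, unchanged. A minimal standalone sketch of the same tagging scheme, using hypothetical names (Str, Entry) rather than the real SpiderMonkey types:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-ins for JSString / AtomEntryType; not SpiderMonkey code.
    struct alignas(8) Str { const char *chars; };  // 8-byte alignment frees the 3 low bits
    typedef uintptr_t Entry;

    static const Entry FLAG_PINNED   = 0x1;
    static const Entry FLAG_INTERNED = 0x2;
    static const Entry FLAG_MASK     = FLAG_PINNED | FLAG_INTERNED;

    // Like StringToInitialAtomEntry: a fresh entry is just the pointer, flags clear.
    static Entry makeEntry(Str *s)   { return reinterpret_cast<Entry>(s); }
    static Str  *entryKey(Entry e)   { return reinterpret_cast<Str *>(e & ~FLAG_MASK); }
    static Entry entryFlags(Entry e) { return e & FLAG_MASK; }

    int main() {
        Str s = { "hello" };
        Entry e = makeEntry(&s);
        e |= FLAG_PINNED;                     // AddAtomEntryFlags: tag without moving the pointer
        assert(entryKey(e) == &s);            // masking the tag off recovers the key
        assert(entryFlags(e) == FLAG_PINNED);
        e &= ~FLAG_INTERNED;                  // ClearAtomEntryFlags is the complement mask
        std::printf("%s\n", entryKey(e)->chars);
        return 0;
    }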
@@ -373,50 +330,23 @@ js_InitAtomState(JSRuntime *rt)
 {
     JSAtomState *state = &rt->atomState;
 
-    /*
-     * The caller must zero the state before calling this function.
-     */
-    JS_ASSERT(!state->stringAtoms.ops);
-
-    if (!JS_DHashTableInit(&state->stringAtoms, &StringHashOps,
-                           NULL, sizeof(JSAtomHashEntry),
-                           JS_DHASH_DEFAULT_CAPACITY(JS_STRING_HASH_COUNT))) {
-        state->stringAtoms.ops = NULL;
-        return JS_FALSE;
-    }
+    JS_ASSERT(!state->atoms.initialized());
+    if (!state->atoms.init(JS_STRING_HASH_COUNT))
+        return false;
 
 #ifdef JS_THREADSAFE
     js_InitLock(&state->lock);
 #endif
-    JS_ASSERT(IS_INITIALIZED_STATE(state));
+    JS_ASSERT(state->atoms.initialized());
     return JS_TRUE;
 }
 
-static JSDHashOperator
-js_string_uninterner(JSDHashTable *table, JSDHashEntryHdr *hdr,
-                     uint32 number, void *arg)
-{
-    JSAtomHashEntry *entry = TO_ATOM_ENTRY(hdr);
-    JSRuntime *rt = (JSRuntime *)arg;
-    JSString *str;
-
-    /*
-     * Any string entry that remains at this point must be initialized, as the
-     * last GC should clean any uninitialized ones.
-     */
-    JS_ASSERT(entry->keyAndFlags != 0);
-    str = ATOM_ENTRY_KEY(entry);
-
-    js_FinalizeStringRT(rt, str);
-    return JS_DHASH_NEXT;
-}
-
 void
 js_FinishAtomState(JSRuntime *rt)
 {
     JSAtomState *state = &rt->atomState;
 
-    if (!IS_INITIALIZED_STATE(state)) {
+    if (!state->atoms.initialized()) {
         /*
          * We are called with uninitialized state when JS_NewRuntime fails and
          * calls JS_DestroyRuntime on a partially initialized runtime.
@@ -424,15 +354,14 @@ js_FinishAtomState(JSRuntime *rt)
         return;
     }
 
-    JS_DHashTableEnumerate(&state->stringAtoms, js_string_uninterner, rt);
-    JS_DHashTableFinish(&state->stringAtoms);
+    for (AtomSet::Range r = state->atoms.all(); !r.empty(); r.popFront()) {
+        JSString *str = AtomEntryToKey(r.front());
+        js_FinalizeStringRT(rt, str);
+    }
 
 #ifdef JS_THREADSAFE
     js_FinishLock(&state->lock);
 #endif
-#ifdef DEBUG
-    memset(state, JS_FREE_PATTERN, sizeof *state);
-#endif
 }
 
 JSBool
@@ -456,91 +385,50 @@ js_InitCommonAtoms(JSContext *cx)
     return JS_TRUE;
 }
 
-static JSDHashOperator
-js_atom_unpinner(JSDHashTable *table, JSDHashEntryHdr *hdr,
-                 uint32 number, void *arg)
-{
-    CLEAR_ATOM_ENTRY_FLAGS(TO_ATOM_ENTRY(hdr), ATOM_PINNED);
-    return JS_DHASH_NEXT;
-}
-
 void
 js_FinishCommonAtoms(JSContext *cx)
 {
     cx->runtime->emptyString = NULL;
     JSAtomState *state = &cx->runtime->atomState;
-    JS_DHashTableEnumerate(&state->stringAtoms, js_atom_unpinner, NULL);
+
+    for (AtomSet::Range r = state->atoms.all(); !r.empty(); r.popFront())
+        ClearAtomEntryFlags(r.front(), ATOM_PINNED);
+
 #ifdef DEBUG
     memset(COMMON_ATOMS_START(state), JS_FREE_PATTERN,
            ATOM_OFFSET_LIMIT - ATOM_OFFSET_START);
 #endif
 }
 
-static JSDHashOperator
-js_locked_atom_tracer(JSDHashTable *table, JSDHashEntryHdr *hdr,
-                      uint32 number, void *arg)
-{
-    JSAtomHashEntry *entry = TO_ATOM_ENTRY(hdr);
-    JSTracer *trc = (JSTracer *)arg;
-
-    if (entry->keyAndFlags == 0) {
-        /* Ignore uninitialized entries during tracing. */
-        return JS_DHASH_NEXT;
-    }
-    JS_SET_TRACING_INDEX(trc, "locked_atom", (size_t)number);
-    Mark(trc, ATOM_ENTRY_KEY(entry), JSTRACE_STRING);
-    return JS_DHASH_NEXT;
-}
-
-static JSDHashOperator
-js_pinned_atom_tracer(JSDHashTable *table, JSDHashEntryHdr *hdr,
-                      uint32 number, void *arg)
-{
-    JSAtomHashEntry *entry = TO_ATOM_ENTRY(hdr);
-    JSTracer *trc = (JSTracer *)arg;
-    uintN flags = ATOM_ENTRY_FLAGS(entry);
-
-    if (flags & (ATOM_PINNED | ATOM_INTERNED)) {
-        JS_SET_TRACING_INDEX(trc,
-                             flags & ATOM_PINNED
-                             ? "pinned_atom"
-                             : "interned_atom",
-                             (size_t)number);
-        Mark(trc, ATOM_ENTRY_KEY(entry), JSTRACE_STRING);
-    }
-    return JS_DHASH_NEXT;
-}
-
 void
 js_TraceAtomState(JSTracer *trc)
 {
     JSRuntime *rt = trc->context->runtime;
     JSAtomState *state = &rt->atomState;
 
-    if (rt->gcKeepAtoms)
-        JS_DHashTableEnumerate(&state->stringAtoms, js_locked_atom_tracer, trc);
-    else
-        JS_DHashTableEnumerate(&state->stringAtoms, js_pinned_atom_tracer, trc);
-}
+#ifdef DEBUG
+    size_t number = 0;
+#endif
 
-static JSDHashOperator
-js_atom_sweeper(JSDHashTable *table, JSDHashEntryHdr *hdr,
-                uint32 number, void *arg)
-{
-    JSAtomHashEntry *entry = TO_ATOM_ENTRY(hdr);
-
-    /* Remove uninitialized entries. */
-    if (entry->keyAndFlags == 0)
-        return JS_DHASH_REMOVE;
-
-    if (ATOM_ENTRY_FLAGS(entry) & (ATOM_PINNED | ATOM_INTERNED)) {
-        /* Pinned or interned key cannot be finalized. */
-        JS_ASSERT(!js_IsAboutToBeFinalized(ATOM_ENTRY_KEY(entry)));
-    } else if (js_IsAboutToBeFinalized(ATOM_ENTRY_KEY(entry))) {
-        /* Remove entries with things about to be GC'ed. */
-        return JS_DHASH_REMOVE;
+    if (rt->gcKeepAtoms) {
+        for (AtomSet::Range r = state->atoms.all(); !r.empty(); r.popFront()) {
+            JS_SET_TRACING_INDEX(trc, "locked_atom", number++);
+            MarkString(trc, AtomEntryToKey(r.front()));
+        }
+    } else {
+        for (AtomSet::Range r = state->atoms.all(); !r.empty(); r.popFront()) {
+            AtomEntryType entry = r.front();
+            uintN flags = AtomEntryFlags(entry);
+            if (flags & (ATOM_PINNED | ATOM_INTERNED)) {
+                JS_SET_TRACING_INDEX(trc,
+                                     flags & ATOM_PINNED
+                                     ? "pinned_atom"
+                                     : "interned_atom",
+                                     number++);
+                MarkString(trc, AtomEntryToKey(entry));
+            }
+        }
     }
-    return JS_DHASH_NEXT;
 }
 
 void
@@ -548,25 +436,20 @@ js_SweepAtomState(JSContext *cx)
 {
     JSAtomState *state = &cx->runtime->atomState;
 
-    JS_DHashTableEnumerate(&state->stringAtoms, js_atom_sweeper, NULL);
-
-    /*
-     * Optimize for simplicity and mutate table generation numbers even if the
-     * sweeper has not removed any entries.
-     */
-    state->stringAtoms.generation++;
+    for (AtomSet::Enum e(state->atoms); !e.empty(); e.popFront()) {
+        AtomEntryType entry = e.front();
+        if (AtomEntryFlags(entry) & (ATOM_PINNED | ATOM_INTERNED)) {
+            /* Pinned or interned key cannot be finalized. */
+            JS_ASSERT(!js_IsAboutToBeFinalized(AtomEntryToKey(entry)));
+        } else if (js_IsAboutToBeFinalized(AtomEntryToKey(entry))) {
+            e.removeFront();
+        }
+    }
 }
 
 JSAtom *
 js_AtomizeString(JSContext *cx, JSString *str, uintN flags)
 {
-    JSAtom *atom;
-    JSAtomState *state;
-    JSDHashTable *table;
-    JSAtomHashEntry *entry;
-    JSString *key;
-    uint32 gen;
-
     JS_ASSERT(!(flags & ~(ATOM_PINNED|ATOM_INTERNED|ATOM_TMPSTR|ATOM_NOCOPY)));
     JS_ASSERT_IF(flags & ATOM_NOCOPY, flags & ATOM_TMPSTR);
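js_SweepAtomState now iterates with AtomSet::Enum rather than AtomSet::Range because only Enum permits removing the current element mid-walk (removeFront), which is exactly what sweeping dead strings requires. The same shape with a standard container, as a rough sketch (std::unordered_set standing in for AtomSet, isDead() for js_IsAboutToBeFinalized):

    #include <cstdio>
    #include <unordered_set>

    static bool isDead(int v) { return v % 2 == 0; }  // stand-in for js_IsAboutToBeFinalized

    int main() {
        std::unordered_set<int> atoms = {1, 2, 3, 4, 5};
        // Sweep: erase dead entries in place. erase() returns the next iterator,
        // playing the role of Enum::removeFront() followed by popFront().
        for (auto it = atoms.begin(); it != atoms.end(); ) {
            if (isDead(*it))
                it = atoms.erase(it);
            else
                ++it;
        }
        for (int v : atoms)
            std::printf("%d\n", v);  // only the odd (live) entries remain
        return 0;
    }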
@@ -602,30 +485,30 @@ js_AtomizeString(JSContext *cx, JSString *str, uintN flags)
         }
     }
 
-    state = &cx->runtime->atomState;
-    table = &state->stringAtoms;
+    JSAtomState *state = &cx->runtime->atomState;
+    AtomSet &atoms = state->atoms;
 
     JS_LOCK(cx, &state->lock);
-    entry = TO_ATOM_ENTRY(JS_DHashTableOperate(table, str, JS_DHASH_ADD));
-    if (!entry)
-        goto failed_hash_add;
+    AtomSet::AddPtr p = atoms.lookupForAdd(str);
 
     /* Hashing the string should have flattened it if it was a rope. */
     JS_ASSERT(str->isFlat() || str->isDependent());
-    if (entry->keyAndFlags != 0) {
-        key = ATOM_ENTRY_KEY(entry);
+
+    JSString *key;
+    if (p) {
+        key = AtomEntryToKey(*p);
     } else {
         /*
-         * We created a new hashtable entry. Unless str is already allocated
-         * from the GC heap and flat, we have to release state->lock as
-         * string construction is a complex operation. For example, it can
-         * trigger GC which may rehash the table and make the entry invalid.
+         * Unless str is already allocated from the GC heap and flat, we have
+         * to release state->lock as string construction is a complex
+         * operation. For example, it can trigger GC which may rehash the table
+         * and make the entry invalid.
         */
-        ++table->generation;
         if (!(flags & ATOM_TMPSTR) && str->isFlat()) {
             str->flatClearMutable();
             key = str;
+            atoms.add(p, StringToInitialAtomEntry(key));
         } else {
-            gen = table->generation;
             JS_UNLOCK(cx, &state->lock);
 
             if (flags & ATOM_TMPSTR) {
@@ -649,36 +532,22 @@ js_AtomizeString(JSContext *cx, JSString *str, uintN flags)
         }
 
         JS_LOCK(cx, &state->lock);
-        if (table->generation == gen) {
-            JS_ASSERT(entry->keyAndFlags == 0);
-        } else {
-            entry = TO_ATOM_ENTRY(JS_DHashTableOperate(table, key,
-                                                       JS_DHASH_ADD));
-            if (!entry)
-                goto failed_hash_add;
-            if (entry->keyAndFlags != 0) {
-                key = ATOM_ENTRY_KEY(entry);
-                goto finish;
-            }
-            ++table->generation;
+        if (!atoms.relookupOrAdd(p, str, StringToInitialAtomEntry(key))) {
+            JS_UNLOCK(cx, &state->lock);
+            JS_ReportOutOfMemory(cx); /* SystemAllocPolicy does not report */
+            return NULL;
         }
-        INIT_ATOM_ENTRY(entry, key);
         key->flatSetAtomized();
     }
 
-  finish:
-    ADD_ATOM_ENTRY_FLAGS(entry, flags & (ATOM_PINNED | ATOM_INTERNED));
+    AddAtomEntryFlags(*p, flags & (ATOM_PINNED | ATOM_INTERNED));
+
     JS_ASSERT(key->isAtomized());
-    atom = STRING_TO_ATOM(key);
+    JSAtom *atom = STRING_TO_ATOM(key);
     cx->weakRoots.lastAtom = atom;
     JS_UNLOCK(cx, &state->lock);
     return atom;
-
-  failed_hash_add:
-    JS_UNLOCK(cx, &state->lock);
-    JS_ReportOutOfMemory(cx);
-    return NULL;
 }
 
 JSAtom *
@@ -735,7 +604,6 @@ js_GetExistingStringAtom(JSContext *cx, const jschar *chars, size_t length)
 {
     JSString str, *str2;
     JSAtomState *state;
-    JSDHashEntryHdr *hdr;
 
     if (length == 1) {
         jschar c = *chars;
@@ -747,63 +615,41 @@ js_GetExistingStringAtom(JSContext *cx, const jschar *chars, size_t length)
     state = &cx->runtime->atomState;
     JS_LOCK(cx, &state->lock);
-    hdr = JS_DHashTableOperate(&state->stringAtoms, &str, JS_DHASH_LOOKUP);
-    str2 = JS_DHASH_ENTRY_IS_BUSY(hdr)
-           ? ATOM_ENTRY_KEY(TO_ATOM_ENTRY(hdr))
-           : NULL;
+    AtomSet::Ptr p = state->atoms.lookup(&str);
+    str2 = p ? AtomEntryToKey(*p) : NULL;
     JS_UNLOCK(cx, &state->lock);
 
     return str2 ? STRING_TO_ATOM(str2) : NULL;
 }
 
 #ifdef DEBUG
 
-static JSDHashOperator
-atom_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr,
-            uint32 number, void *arg)
-{
-    JSAtomHashEntry *entry = TO_ATOM_ENTRY(hdr);
-    FILE *fp = (FILE *)arg;
-    JSString *key;
-    uintN flags;
-
-    fprintf(fp, "%3u %08x ", number, (uintN)entry->hdr.keyHash);
-    if (entry->keyAndFlags == 0) {
-        fputs("<uninitialized>", fp);
-    } else {
-        key = ATOM_ENTRY_KEY(entry);
-        js_FileEscapedString(fp, key, '"');
-        flags = ATOM_ENTRY_FLAGS(entry);
-        if (flags != 0) {
-            fputs((flags & (ATOM_PINNED | ATOM_INTERNED))
-                  ? " pinned | interned"
-                  : (flags & ATOM_PINNED) ? " pinned" : " interned",
-                  fp);
-        }
-    }
-    putc('\n', fp);
-    return JS_DHASH_NEXT;
-}
-
 JS_FRIEND_API(void)
 js_DumpAtoms(JSContext *cx, FILE *fp)
 {
     JSAtomState *state = &cx->runtime->atomState;
 
-    fprintf(fp, "stringAtoms table contents:\n");
-    JS_DHashTableEnumerate(&state->stringAtoms, atom_dumper, fp);
-#ifdef JS_DHASHMETER
-    JS_DHashTableDumpMeter(&state->stringAtoms, atom_dumper, fp);
-#endif
-    putc('\n', fp);
-
-    fprintf(fp, "doubleAtoms table contents:\n");
-#ifdef JS_DHASHMETER
-    JS_DHashTableDumpMeter(&state->doubleAtoms, atom_dumper, fp);
-#endif
+    fprintf(fp, "atoms table contents:\n");
+    size_t number = 0;
+    for (AtomSet::Range r = state->atoms.all(); !r.empty(); r.popFront()) {
+        AtomEntryType entry = r.front();
+        fprintf(fp, "%3u ", number++);
+        if (entry == 0) {
+            fputs("<uninitialized>", fp);
+        } else {
+            JSString *key = AtomEntryToKey(entry);
+            js_FileEscapedString(fp, key, '"');
+            uintN flags = AtomEntryFlags(entry);
+            if (flags != 0) {
+                fputs((flags & (ATOM_PINNED | ATOM_INTERNED))
+                      ? " pinned | interned"
+                      : (flags & ATOM_PINNED) ? " pinned" : " interned",
+                      fp);
+            }
+        }
+        putc('\n', fp);
+    }
     putc('\n', fp);
 }
 
 #endif
 
 static JSHashNumber
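The rewritten js_AtomizeString above keeps the old locking protocol: probe under the lock with lookupForAdd, release the lock for string construction (which may GC and resize the table, invalidating the AddPtr), then retake the lock and relookupOrAdd so an entry racily inserted by another thread is reused rather than duplicated. A compressed sketch of that dance with standard types (makeValue is a hypothetical stand-in for the unlocked string-construction step):

    #include <mutex>
    #include <string>
    #include <unordered_map>

    static std::mutex lock;
    static std::unordered_map<std::string, int> table;

    // Stand-in for the slow, potentially table-mutating work done unlocked.
    static int makeValue(const std::string &k) { return (int) k.size(); }

    int atomize(const std::string &key) {
        std::unique_lock<std::mutex> guard(lock);
        auto it = table.find(key);           // lookupForAdd, under the lock
        if (it != table.end())
            return it->second;
        guard.unlock();                      // construction must not hold the lock
        int v = makeValue(key);
        guard.lock();
        // relookupOrAdd: if another thread inserted key while we were unlocked,
        // emplace() returns the existing entry instead of adding a duplicate.
        return table.emplace(key, v).first->second;
    }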

View file

@@ -45,11 +45,12 @@
 #include <stddef.h>
 #include "jsversion.h"
 #include "jstypes.h"
-#include "jshash.h" /* Added by JSIFY */
-#include "jsdhash.h"
+#include "jshash.h"
+#include "jshashtable.h"
 #include "jsapi.h"
 #include "jsprvtd.h"
 #include "jspubtd.h"
+#include "jsstr.h"
 #include "jslock.h"
 #include "jsvalue.h"
@@ -258,8 +259,43 @@ struct JSAtomMap {
     jsatomid length;              /* count of (to-be-)indexed atoms */
 };
 
-struct JSAtomState {
-    JSDHashTable        stringAtoms;    /* hash table with shared strings */
+namespace js {
+
+#define ATOM_ENTRY_FLAG_MASK (ATOM_PINNED | ATOM_INTERNED)
+
+JS_STATIC_ASSERT(ATOM_ENTRY_FLAG_MASK < JS_GCTHING_ALIGN);
+
+typedef uintptr_t AtomEntryType;
+
+static JS_ALWAYS_INLINE JSString *
+AtomEntryToKey(AtomEntryType entry)
+{
+    JS_ASSERT(entry != 0);
+    return (JSString *)(entry & ~ATOM_ENTRY_FLAG_MASK);
+}
+
+struct AtomHasher
+{
+    typedef JSString *Lookup;
+
+    static HashNumber hash(JSString *str) {
+        return js_HashString(str);
+    }
+
+    static bool match(AtomEntryType entry, JSString *lookup) {
+        return entry ? js_EqualStrings(AtomEntryToKey(entry), lookup) : false;
+    }
+};
+
+typedef HashSet<AtomEntryType, AtomHasher, SystemAllocPolicy> AtomSet;
+
+} /* namespace js */
+
+struct JSAtomState
+{
+    /* We cannot rely on constructors/destructors, so do it manually. */
+    js::AtomSet         atoms;
+
 #ifdef JS_THREADSAFE
     JSThinLock          lock;
 #endif
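AtomHasher is an instance of the hashing-policy concept that jshashtable.h expects: a Lookup type plus static hash() and match() functions. That split is what lets AtomSet store tagged AtomEntryType words while callers look up plain JSString pointers. The closest standard-library analogue is C++20 heterogeneous lookup; a rough sketch with owned std::string entries and std::string_view lookups:

    #include <cassert>
    #include <string>
    #include <string_view>
    #include <unordered_set>

    // Transparent hasher/equality: like AtomHasher, they accept the cheap Lookup
    // type directly, so a lookup never has to construct a stored entry.
    struct SvHash {
        using is_transparent = void;
        size_t operator()(std::string_view s) const {
            return std::hash<std::string_view>{}(s);
        }
    };
    struct SvEq {
        using is_transparent = void;
        bool operator()(std::string_view a, std::string_view b) const { return a == b; }
    };

    int main() {
        std::unordered_set<std::string, SvHash, SvEq> set;
        set.insert("atom");
        // Heterogeneous find: no temporary std::string is built, just as
        // AtomSet::lookup takes a JSString * rather than an AtomEntryType.
        assert(set.find(std::string_view("atom")) != set.end());
        return 0;
    }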

View file

@@ -519,6 +519,13 @@ Mark(JSTracer *trc, void *thing, uint32 kind, const char *name)
     Mark(trc, thing, kind);
 }
 
+static inline void
+MarkString(JSTracer *trc, JSString *str)
+{
+    JS_ASSERT(str);
+    Mark(trc, str, JSTRACE_STRING);
+}
+
 static inline void
 MarkString(JSTracer *trc, JSString *str, const char *name)
 {

View file

@@ -847,8 +847,8 @@ class HashMap
      * N.B. The caller must ensure that no mutating hash table operations
      * occur between a pair of |lookupForAdd| and |add| calls. To avoid
      * looking up the key a second time, the caller may use the more efficient
-     * relookupOrAdd method. That method relookups the map if necessary and
-     * inserts the new value only if the key still does not exist. For
+     * relookupOrAdd method. This method reuses part of the hashing computation
+     * to more efficiently insert the key if it has not been added. For
      * example, a mutation-handling version of the previous example:
      *
      * HM::AddPtr p = h.lookupForAdd(3);
@@ -1026,13 +1026,37 @@ class HashSet
      * assert(*p == 3);   // p acts like a pointer to int
      *
      * Also see the definition of AddPtr in HashTable above.
+     *
+     * N.B. The caller must ensure that no mutating hash table operations
+     * occur between a pair of |lookupForAdd| and |add| calls. To avoid
+     * looking up the key a second time, the caller may use the more efficient
+     * relookupOrAdd method. This method reuses part of the hashing computation
+     * to more efficiently insert the key if it has not been added. For
+     * example, a mutation-handling version of the previous example:
+     *
+     * HS::AddPtr p = h.lookupForAdd(3);
+     * if (!p) {
+     *   call_that_may_mutate_h();
+     *   if (!h.relookupOrAdd(p, 3, 3))
+     *     return false;
+     * }
+     * assert(*p == 3);
+     *
+     * Note that relookupOrAdd(p,l,t) performs Lookup using l and adds the
+     * entry t, where the caller ensures match(l,t).
      */
     typedef typename Impl::AddPtr AddPtr;
     AddPtr lookupForAdd(const Lookup &l) const {
         return impl.lookupForAdd(l);
     }
 
-    bool add(AddPtr &p, const T &t) { return impl.add(p, t); }
+    bool add(AddPtr &p, const T &t) {
+        return impl.add(p, t);
+    }
+
+    bool relookupOrAdd(AddPtr &p, const Lookup &l, const T &t) {
+        return impl.relookupOrAdd(p, l, t);
+    }
 
     /*
      * |all()| returns a Range containing |count()| elements:
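Putting the pieces together, the AddPtr protocol documented above can be exercised on its own. A sketch against js::HashSet as it appears in this patch (the init() call and the default hasher/allocator template arguments are assumptions about the surrounding header, which the diff does not show):

    #include "jshashtable.h"  // assumed to provide js::HashSet

    // Insert |v| unless it is already present: the lookupForAdd/add pattern above.
    static bool
    ensureMember(js::HashSet<int> &set, int v)
    {
        js::HashSet<int>::AddPtr p = set.lookupForAdd(v);
        if (p)
            return true;       // already present; *p is the existing element
        return set.add(p, v);  // false only on OOM, as in js_AtomizeString
    }

If the set can be mutated between the lookup and the insert, relookupOrAdd(p, v, v) replaces the final add(), exactly as the new comment block describes.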