Bug 581875 - use js::HashSet in JSAtomState (r=igor)

This commit is contained in:
Luke Wagner 2010-07-26 21:09:23 -07:00
Parent 95b6da52e3
Commit 45696f175e
4 changed files with 182 additions and 269 deletions

View file

@@ -283,81 +283,38 @@ const char js_close_str[] = "close";
const char js_send_str[] = "send";
#endif
/*
* JSAtomState.stringAtoms hashtable entry. To support pinned and interned
* string atoms, we use the lowest bits of the keyAndFlags field to store
* ATOM_PINNED and ATOM_INTERNED flags.
*/
typedef struct JSAtomHashEntry {
JSDHashEntryHdr hdr;
jsuword keyAndFlags;
} JSAtomHashEntry;
#define ATOM_ENTRY_FLAG_MASK (ATOM_PINNED | ATOM_INTERNED)
JS_STATIC_ASSERT(ATOM_ENTRY_FLAG_MASK < JS_GCTHING_ALIGN);
/*
* Helper macros to access and modify JSAtomHashEntry.
*/
#define TO_ATOM_ENTRY(hdr) ((JSAtomHashEntry *) hdr)
#define ATOM_ENTRY_KEY(entry) \
((JSString *)((entry)->keyAndFlags & ~ATOM_ENTRY_FLAG_MASK))
#define ATOM_ENTRY_FLAGS(entry) \
((uintN)((entry)->keyAndFlags & ATOM_ENTRY_FLAG_MASK))
#define INIT_ATOM_ENTRY(entry, key) \
((void)((entry)->keyAndFlags = (jsuword)(key)))
#define ADD_ATOM_ENTRY_FLAGS(entry, flags) \
((void)((entry)->keyAndFlags |= (jsuword)(flags)))
#define CLEAR_ATOM_ENTRY_FLAGS(entry, flags) \
((void)((entry)->keyAndFlags &= ~(jsuword)(flags)))
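Both the old macros above and the new AtomEntryType code below rely on the same trick: GC-thing pointers are sufficiently aligned that their low bits are always zero, so ATOM_PINNED and ATOM_INTERNED can ride along inside the key word itself (this is what the JS_STATIC_ASSERT against JS_GCTHING_ALIGN checks). A minimal standalone C++ sketch of the scheme, using a hypothetical FakeString in place of JSString:

    #include <cassert>
    #include <cstdint>

    // Stand-in for JSString: 8-byte alignment guarantees the low three
    // bits of any pointer to it are zero and thus free to carry flags.
    struct alignas(8) FakeString { int c; };

    static const uintptr_t PINNED = 0x1, INTERNED = 0x2;
    static const uintptr_t FLAG_MASK = PINNED | INTERNED;

    int main() {
        FakeString s;
        uintptr_t entry = reinterpret_cast<uintptr_t>(&s);  // INIT_ATOM_ENTRY
        assert((entry & FLAG_MASK) == 0);                   // alignment invariant
        entry |= PINNED;                                    // ADD_ATOM_ENTRY_FLAGS
        assert(reinterpret_cast<FakeString *>(entry & ~FLAG_MASK) == &s);  // ATOM_ENTRY_KEY
        entry &= ~PINNED;                                   // CLEAR_ATOM_ENTRY_FLAGS
        return 0;
    }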
static JSDHashNumber
HashString(JSDHashTable *table, const void *key);
static JSBool
MatchString(JSDHashTable *table, const JSDHashEntryHdr *hdr, const void *key);
static const JSDHashTableOps StringHashOps = {
JS_DHashAllocTable,
JS_DHashFreeTable,
HashString,
MatchString,
JS_DHashMoveEntryStub,
JS_DHashClearEntryStub,
JS_DHashFinalizeStub,
NULL
};
#define IS_INITIALIZED_STATE(state) ((state)->stringAtoms.ops != NULL)
static JSDHashNumber
HashString(JSDHashTable *table, const void *key)
{
return js_HashString((JSString *)key);
}
static JSBool
MatchString(JSDHashTable *table, const JSDHashEntryHdr *hdr, const void *key)
{
JSAtomHashEntry *entry = TO_ATOM_ENTRY(hdr);
if (entry->keyAndFlags == 0) {
/*
* This happens when js_AtomizeString adds a new hash entry and
* releases the lock but before it takes the lock the second time to
* initialize keyAndFlags for the entry.
*
* We always return false for such entries so JS_DHashTableOperate
* never finds them. We clean them during GC's sweep phase.
*
* It means that with a contested lock or when GC is triggered outside
* the lock we may end up adding two entries, but this is a price for
* simpler code.
*/
return JS_FALSE;
}
return js_EqualStrings(ATOM_ENTRY_KEY(entry), (JSString *)key);
}
inline AtomEntryType
StringToInitialAtomEntry(JSString *str)
{
return (AtomEntryType) str;
}
inline uintN
AtomEntryFlags(AtomEntryType entry)
{
return (uintN) (entry & ATOM_ENTRY_FLAG_MASK);
}
/*
* Conceptually, we have compressed a HashMap<JSAtom *, uint> into a
* HashSet<size_t>. Here, we promise that we are only changing the "value" of
* the HashMap entry, so the const_cast is safe.
*/
inline void
AddAtomEntryFlags(const AtomEntryType &entry, uintN flags)
{
const_cast<AtomEntryType &>(entry) |= flags;
}
inline void
ClearAtomEntryFlags(const AtomEntryType &entry, uintN flags)
{
const_cast<AtomEntryType &>(entry) &= ~flags;
}
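The const_cast above is sound only because the flag bits never participate in hashing or matching: AtomHasher (in jsatom.h below) masks them off, so flipping flags in place cannot move an entry to a different bucket. The same idea, shown standalone against std::unordered_set rather than the patch's HashSet (hypothetical names):

    #include <cassert>
    #include <cstdint>
    #include <unordered_set>

    static const uintptr_t FLAG_MASK = 0x3;

    // Hash and equality ignore the low flag bits, so mutating those bits
    // in place never changes an element's bucket or its identity.
    struct MaskedHash {
        size_t operator()(uintptr_t e) const { return size_t(e & ~FLAG_MASK); }
    };
    struct MaskedEq {
        bool operator()(uintptr_t a, uintptr_t b) const {
            return (a & ~FLAG_MASK) == (b & ~FLAG_MASK);
        }
    };

    int main() {
        std::unordered_set<uintptr_t, MaskedHash, MaskedEq> set;
        auto it = set.insert(0x1000).first;
        const_cast<uintptr_t &>(*it) |= 0x1;  // AddAtomEntryFlags equivalent
        assert(set.count(0x1000) == 1);       // still found under the untagged key
        return 0;
    }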
/*
@@ -373,50 +330,23 @@ js_InitAtomState(JSRuntime *rt)
{
JSAtomState *state = &rt->atomState;
/*
* The caller must zero the state before calling this function.
*/
JS_ASSERT(!state->stringAtoms.ops);
if (!JS_DHashTableInit(&state->stringAtoms, &StringHashOps,
NULL, sizeof(JSAtomHashEntry),
JS_DHASH_DEFAULT_CAPACITY(JS_STRING_HASH_COUNT))) {
state->stringAtoms.ops = NULL;
return JS_FALSE;
}
JS_ASSERT(!state->atoms.initialized());
if (!state->atoms.init(JS_STRING_HASH_COUNT))
return false;
#ifdef JS_THREADSAFE
js_InitLock(&state->lock);
#endif
JS_ASSERT(IS_INITIALIZED_STATE(state));
JS_ASSERT(state->atoms.initialized());
return JS_TRUE;
}
static JSDHashOperator
js_string_uninterner(JSDHashTable *table, JSDHashEntryHdr *hdr,
uint32 number, void *arg)
{
JSAtomHashEntry *entry = TO_ATOM_ENTRY(hdr);
JSRuntime *rt = (JSRuntime *)arg;
JSString *str;
/*
* Any string entry that remains at this point must be initialized, as the
* last GC should clean any uninitialized ones.
*/
JS_ASSERT(entry->keyAndFlags != 0);
str = ATOM_ENTRY_KEY(entry);
js_FinalizeStringRT(rt, str);
return JS_DHASH_NEXT;
}
void
js_FinishAtomState(JSRuntime *rt)
{
JSAtomState *state = &rt->atomState;
if (!IS_INITIALIZED_STATE(state)) {
if (!state->atoms.initialized()) {
/*
* We are called with uninitialized state when JS_NewRuntime fails and
* calls JS_DestroyRuntime on a partially initialized runtime.
@@ -424,15 +354,14 @@ js_FinishAtomState(JSRuntime *rt)
return;
}
JS_DHashTableEnumerate(&state->stringAtoms, js_string_uninterner, rt);
JS_DHashTableFinish(&state->stringAtoms);
for (AtomSet::Range r = state->atoms.all(); !r.empty(); r.popFront()) {
JSString *str = AtomEntryToKey(r.front());
js_FinalizeStringRT(rt, str);
}
#ifdef JS_THREADSAFE
js_FinishLock(&state->lock);
#endif
#ifdef DEBUG
memset(state, JS_FREE_PATTERN, sizeof *state);
#endif
}
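js_FinishAtomState shows the read-only traversal idiom used throughout the patch: all() returns a Range that is consumed with empty()/front()/popFront(). A sketch against the HashSet interface introduced here (illustrative only; it merely counts entries):

    // Count the entries of an AtomSet via a read-only Range.
    size_t countAtoms(js::AtomSet &atoms) {
        size_t n = 0;
        for (js::AtomSet::Range r = atoms.all(); !r.empty(); r.popFront())
            ++n;  // r.front() would yield the current AtomEntryType
        return n;
    }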
JSBool
@@ -456,91 +385,50 @@ js_InitCommonAtoms(JSContext *cx)
return JS_TRUE;
}
static JSDHashOperator
js_atom_unpinner(JSDHashTable *table, JSDHashEntryHdr *hdr,
uint32 number, void *arg)
{
CLEAR_ATOM_ENTRY_FLAGS(TO_ATOM_ENTRY(hdr), ATOM_PINNED);
return JS_DHASH_NEXT;
}
void
js_FinishCommonAtoms(JSContext *cx)
{
cx->runtime->emptyString = NULL;
JSAtomState *state = &cx->runtime->atomState;
JS_DHashTableEnumerate(&state->stringAtoms, js_atom_unpinner, NULL);
for (AtomSet::Range r = state->atoms.all(); !r.empty(); r.popFront())
ClearAtomEntryFlags(r.front(), ATOM_PINNED);
#ifdef DEBUG
memset(COMMON_ATOMS_START(state), JS_FREE_PATTERN,
ATOM_OFFSET_LIMIT - ATOM_OFFSET_START);
#endif
}
static JSDHashOperator
js_locked_atom_tracer(JSDHashTable *table, JSDHashEntryHdr *hdr,
uint32 number, void *arg)
{
JSAtomHashEntry *entry = TO_ATOM_ENTRY(hdr);
JSTracer *trc = (JSTracer *)arg;
if (entry->keyAndFlags == 0) {
/* Ignore uninitialized entries during tracing. */
return JS_DHASH_NEXT;
}
JS_SET_TRACING_INDEX(trc, "locked_atom", (size_t)number);
Mark(trc, ATOM_ENTRY_KEY(entry), JSTRACE_STRING);
return JS_DHASH_NEXT;
}
static JSDHashOperator
js_pinned_atom_tracer(JSDHashTable *table, JSDHashEntryHdr *hdr,
uint32 number, void *arg)
{
JSAtomHashEntry *entry = TO_ATOM_ENTRY(hdr);
JSTracer *trc = (JSTracer *)arg;
uintN flags = ATOM_ENTRY_FLAGS(entry);
if (flags & (ATOM_PINNED | ATOM_INTERNED)) {
JS_SET_TRACING_INDEX(trc,
flags & ATOM_PINNED
? "pinned_atom"
: "interned_atom",
(size_t)number);
Mark(trc, ATOM_ENTRY_KEY(entry), JSTRACE_STRING);
}
return JS_DHASH_NEXT;
}
void
js_TraceAtomState(JSTracer *trc)
{
JSRuntime *rt = trc->context->runtime;
JSAtomState *state = &rt->atomState;
if (rt->gcKeepAtoms)
JS_DHashTableEnumerate(&state->stringAtoms, js_locked_atom_tracer, trc);
else
JS_DHashTableEnumerate(&state->stringAtoms, js_pinned_atom_tracer, trc);
#ifdef DEBUG
size_t number = 0;
#endif
if (rt->gcKeepAtoms) {
for (AtomSet::Range r = state->atoms.all(); !r.empty(); r.popFront()) {
JS_SET_TRACING_INDEX(trc, "locked_atom", number++);
MarkString(trc, AtomEntryToKey(r.front()));
}
} else {
for (AtomSet::Range r = state->atoms.all(); !r.empty(); r.popFront()) {
AtomEntryType entry = r.front();
uintN flags = AtomEntryFlags(entry);
if (flags & (ATOM_PINNED | ATOM_INTERNED)) {
JS_SET_TRACING_INDEX(trc,
flags & ATOM_PINNED
? "pinned_atom"
: "interned_atom",
number++);
MarkString(trc, AtomEntryToKey(entry));
}
}
}
}
static JSDHashOperator
js_atom_sweeper(JSDHashTable *table, JSDHashEntryHdr *hdr,
uint32 number, void *arg)
{
JSAtomHashEntry *entry = TO_ATOM_ENTRY(hdr);
/* Remove uninitialized entries. */
if (entry->keyAndFlags == 0)
return JS_DHASH_REMOVE;
if (ATOM_ENTRY_FLAGS(entry) & (ATOM_PINNED | ATOM_INTERNED)) {
/* Pinned or interned key cannot be finalized. */
JS_ASSERT(!js_IsAboutToBeFinalized(ATOM_ENTRY_KEY(entry)));
} else if (js_IsAboutToBeFinalized(ATOM_ENTRY_KEY(entry))) {
/* Remove entries with things about to be GC'ed. */
return JS_DHASH_REMOVE;
}
return JS_DHASH_NEXT;
}
void
@@ -548,25 +436,20 @@ js_SweepAtomState(JSContext *cx)
{
JSAtomState *state = &cx->runtime->atomState;
JS_DHashTableEnumerate(&state->stringAtoms, js_atom_sweeper, NULL);
/*
* Optimize for simplicity and mutate table generation numbers even if the
* sweeper has not removed any entries.
*/
state->stringAtoms.generation++;
for (AtomSet::Enum e(state->atoms); !e.empty(); e.popFront()) {
AtomEntryType entry = e.front();
if (AtomEntryFlags(entry) & (ATOM_PINNED | ATOM_INTERNED)) {
/* Pinned or interned key cannot be finalized. */
JS_ASSERT(!js_IsAboutToBeFinalized(AtomEntryToKey(entry)));
} else if (js_IsAboutToBeFinalized(AtomEntryToKey(entry))) {
e.removeFront();
}
}
}
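Note the switch from Range to Enum here: a Range is read-only, while an Enum also permits removing the element currently at the front, which is exactly what sweeping needs. A hedged sketch of the pattern, with a hypothetical isDead() standing in for js_IsAboutToBeFinalized:

    // Enum allows removeFront() during iteration; Range does not.
    void sweepSketch(js::AtomSet &atoms) {
        for (js::AtomSet::Enum e(atoms); !e.empty(); e.popFront()) {
            js::AtomEntryType entry = e.front();
            if (isDead(entry))    // hypothetical liveness predicate
                e.removeFront();  // table invariants are restored when |e| dies
        }
    }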
JSAtom *
js_AtomizeString(JSContext *cx, JSString *str, uintN flags)
{
JSAtom *atom;
JSAtomState *state;
JSDHashTable *table;
JSAtomHashEntry *entry;
JSString *key;
uint32 gen;
JS_ASSERT(!(flags & ~(ATOM_PINNED|ATOM_INTERNED|ATOM_TMPSTR|ATOM_NOCOPY)));
JS_ASSERT_IF(flags & ATOM_NOCOPY, flags & ATOM_TMPSTR);
@@ -602,30 +485,30 @@ js_AtomizeString(JSContext *cx, JSString *str, uintN flags)
}
}
state = &cx->runtime->atomState;
table = &state->stringAtoms;
JSAtomState *state = &cx->runtime->atomState;
AtomSet &atoms = state->atoms;
JS_LOCK(cx, &state->lock);
entry = TO_ATOM_ENTRY(JS_DHashTableOperate(table, str, JS_DHASH_ADD));
if (!entry)
goto failed_hash_add;
AtomSet::AddPtr p = atoms.lookupForAdd(str);
/* Hashing the string should have flattened it if it was a rope. */
JS_ASSERT(str->isFlat() || str->isDependent());
if (entry->keyAndFlags != 0) {
key = ATOM_ENTRY_KEY(entry);
JSString *key;
if (p) {
key = AtomEntryToKey(*p);
} else {
/*
* We created a new hashtable entry. Unless str is already allocated
* from the GC heap and flat, we have to release state->lock as
* string construction is a complex operation. For example, it can
* trigger GC which may rehash the table and make the entry invalid.
* Unless str is already allocated from the GC heap and flat, we have
* to release state->lock as string construction is a complex
* operation. For example, it can trigger GC which may rehash the table
* and make the entry invalid.
*/
++table->generation;
if (!(flags & ATOM_TMPSTR) && str->isFlat()) {
str->flatClearMutable();
key = str;
atoms.add(p, StringToInitialAtomEntry(key));
} else {
gen = table->generation;
JS_UNLOCK(cx, &state->lock);
if (flags & ATOM_TMPSTR) {
@@ -649,36 +532,22 @@ js_AtomizeString(JSContext *cx, JSString *str, uintN flags)
}
JS_LOCK(cx, &state->lock);
if (table->generation == gen) {
JS_ASSERT(entry->keyAndFlags == 0);
} else {
entry = TO_ATOM_ENTRY(JS_DHashTableOperate(table, key,
JS_DHASH_ADD));
if (!entry)
goto failed_hash_add;
if (entry->keyAndFlags != 0) {
key = ATOM_ENTRY_KEY(entry);
goto finish;
}
++table->generation;
if (!atoms.relookupOrAdd(p, str, StringToInitialAtomEntry(key))) {
JS_UNLOCK(cx, &state->lock);
JS_ReportOutOfMemory(cx); /* SystemAllocPolicy does not report */
return NULL;
}
}
INIT_ATOM_ENTRY(entry, key);
key->flatSetAtomized();
}
finish:
ADD_ATOM_ENTRY_FLAGS(entry, flags & (ATOM_PINNED | ATOM_INTERNED));
AddAtomEntryFlags(*p, flags & (ATOM_PINNED | ATOM_INTERNED));
JS_ASSERT(key->isAtomized());
atom = STRING_TO_ATOM(key);
JSAtom *atom = STRING_TO_ATOM(key);
cx->weakRoots.lastAtom = atom;
JS_UNLOCK(cx, &state->lock);
return atom;
failed_hash_add:
JS_UNLOCK(cx, &state->lock);
JS_ReportOutOfMemory(cx);
return NULL;
}
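The lookupForAdd/relookupOrAdd pair replaces the old generation-counter protocol: the lock may be dropped for a GC-triggering allocation, and relookupOrAdd revalidates the AddPtr (re-doing the lookup only if the table changed) before inserting. The skeleton of the locking dance above, with a hypothetical copyToHeap() standing in for the string-construction details:

    JS_LOCK(cx, &state->lock);
    js::AtomSet::AddPtr p = atoms.lookupForAdd(str);
    if (!p) {
        JS_UNLOCK(cx, &state->lock);          // allocation below may GC/rehash
        JSString *key = copyToHeap(cx, str);  // hypothetical; may trigger GC
        if (!key)
            return NULL;
        JS_LOCK(cx, &state->lock);
        if (!atoms.relookupOrAdd(p, key, js::StringToInitialAtomEntry(key))) {
            JS_UNLOCK(cx, &state->lock);
            JS_ReportOutOfMemory(cx);         // SystemAllocPolicy reports nothing
            return NULL;
        }
    }
    JS_UNLOCK(cx, &state->lock);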
JSAtom *
@@ -735,7 +604,6 @@ js_GetExistingStringAtom(JSContext *cx, const jschar *chars, size_t length)
{
JSString str, *str2;
JSAtomState *state;
JSDHashEntryHdr *hdr;
if (length == 1) {
jschar c = *chars;
@@ -747,63 +615,41 @@ js_GetExistingStringAtom(JSContext *cx, const jschar *chars, size_t length)
state = &cx->runtime->atomState;
JS_LOCK(cx, &state->lock);
hdr = JS_DHashTableOperate(&state->stringAtoms, &str, JS_DHASH_LOOKUP);
str2 = JS_DHASH_ENTRY_IS_BUSY(hdr)
? ATOM_ENTRY_KEY(TO_ATOM_ENTRY(hdr))
: NULL;
AtomSet::Ptr p = state->atoms.lookup(&str);
str2 = p ? AtomEntryToKey(*p) : NULL;
JS_UNLOCK(cx, &state->lock);
return str2 ? STRING_TO_ATOM(str2) : NULL;
}
#ifdef DEBUG
static JSDHashOperator
atom_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr,
uint32 number, void *arg)
{
JSAtomHashEntry *entry = TO_ATOM_ENTRY(hdr);
FILE *fp = (FILE *)arg;
JSString *key;
uintN flags;
fprintf(fp, "%3u %08x ", number, (uintN)entry->hdr.keyHash);
if (entry->keyAndFlags == 0) {
fputs("<uninitialized>", fp);
} else {
key = ATOM_ENTRY_KEY(entry);
js_FileEscapedString(fp, key, '"');
flags = ATOM_ENTRY_FLAGS(entry);
if (flags != 0) {
fputs((flags & (ATOM_PINNED | ATOM_INTERNED))
? " pinned | interned"
: (flags & ATOM_PINNED) ? " pinned" : " interned",
fp);
}
}
putc('\n', fp);
return JS_DHASH_NEXT;
}
JS_FRIEND_API(void)
js_DumpAtoms(JSContext *cx, FILE *fp)
{
JSAtomState *state = &cx->runtime->atomState;
fprintf(fp, "stringAtoms table contents:\n");
JS_DHashTableEnumerate(&state->stringAtoms, atom_dumper, fp);
#ifdef JS_DHASHMETER
JS_DHashTableDumpMeter(&state->stringAtoms, atom_dumper, fp);
#endif
putc('\n', fp);
fprintf(fp, "doubleAtoms table contents:\n");
#ifdef JS_DHASHMETER
JS_DHashTableDumpMeter(&state->doubleAtoms, atom_dumper, fp);
#endif
fprintf(fp, "atoms table contents:\n");
size_t number = 0;
for (AtomSet::Range r = state->atoms.all(); !r.empty(); r.popFront()) {
AtomEntryType entry = r.front();
fprintf(fp, "%3u ", number++);
if (entry == 0) {
fputs("<uninitialized>", fp);
} else {
JSString *key = AtomEntryToKey(entry);
js_FileEscapedString(fp, key, '"');
uintN flags = AtomEntryFlags(entry);
if (flags != 0) {
fputs((flags & (ATOM_PINNED | ATOM_INTERNED))
? " pinned | interned"
: (flags & ATOM_PINNED) ? " pinned" : " interned",
fp);
}
}
putc('\n', fp);
}
putc('\n', fp);
}
#endif
static JSHashNumber

View file

@@ -45,11 +45,12 @@
#include <stddef.h>
#include "jsversion.h"
#include "jstypes.h"
#include "jshash.h" /* Added by JSIFY */
#include "jsdhash.h"
#include "jshash.h"
#include "jshashtable.h"
#include "jsapi.h"
#include "jsprvtd.h"
#include "jspubtd.h"
#include "jsstr.h"
#include "jslock.h"
#include "jsvalue.h"
@@ -258,8 +259,43 @@ struct JSAtomMap {
jsatomid length; /* count of (to-be-)indexed atoms */
};
struct JSAtomState {
JSDHashTable stringAtoms; /* hash table with shared strings */
namespace js {
#define ATOM_ENTRY_FLAG_MASK (ATOM_PINNED | ATOM_INTERNED)
JS_STATIC_ASSERT(ATOM_ENTRY_FLAG_MASK < JS_GCTHING_ALIGN);
typedef uintptr_t AtomEntryType;
static JS_ALWAYS_INLINE JSString *
AtomEntryToKey(AtomEntryType entry)
{
JS_ASSERT(entry != 0);
return (JSString *)(entry & ~ATOM_ENTRY_FLAG_MASK);
}
struct AtomHasher
{
typedef JSString *Lookup;
static HashNumber hash(JSString *str) {
return js_HashString(str);
}
static bool match(AtomEntryType entry, JSString *lookup) {
return entry ? js_EqualStrings(AtomEntryToKey(entry), lookup) : false;
}
};
typedef HashSet<AtomEntryType, AtomHasher, SystemAllocPolicy> AtomSet;
} /* namespace js */
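AtomHasher illustrates the flexibility being bought here: the stored type (a tagged AtomEntryType word) and the Lookup type (JSString *) may differ, provided hash() and match() agree on what equality means. A hypothetical policy of the same shape, for a set of tagged pointers queried by plain pointer:

    struct Thing;  // any sufficiently aligned type; only used via pointer

    struct TaggedPtrHasher {
        typedef Thing *Lookup;                         // query type
        static js::HashNumber hash(Thing *l) {
            return js::HashNumber(uintptr_t(l) >> 3);  // assumed pointer hash
        }
        static bool match(uintptr_t entry, Thing *l) {
            return (entry & ~uintptr_t(3)) == uintptr_t(l);  // ignore tag bits
        }
    };
    typedef js::HashSet<uintptr_t, TaggedPtrHasher, js::SystemAllocPolicy> TaggedPtrSet;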
struct JSAtomState
{
/* We cannot rely on constructors/destructors, so do it manually. */
js::AtomSet atoms;
#ifdef JS_THREADSAFE
JSThinLock lock;
#endif

View file

@@ -519,6 +519,13 @@ Mark(JSTracer *trc, void *thing, uint32 kind, const char *name)
Mark(trc, thing, kind);
}
static inline void
MarkString(JSTracer *trc, JSString *str)
{
JS_ASSERT(str);
Mark(trc, str, JSTRACE_STRING);
}
static inline void
MarkString(JSTracer *trc, JSString *str, const char *name)
{

View file

@@ -847,8 +847,8 @@ class HashMap
* N.B. The caller must ensure that no mutating hash table operations
* occur between a pair of |lookupForAdd| and |add| calls. To avoid
* looking up the key a second time, the caller may use the more efficient
* relookupOrAdd method. That method relookups the map if necessary and
* inserts the new value only if the key still does not exist. For
* relookupOrAdd method. This method reuses part of the hashing computation
* to more efficiently insert the key if it has not been added. For
* example, a mutation-handling version of the previous example:
*
* HM::AddPtr p = h.lookupForAdd(3);
@@ -1026,13 +1026,37 @@ class HashSet
* assert(*p == 3); // p acts like a pointer to int
*
* Also see the definition of AddPtr in HashTable above.
*
* N.B. The caller must ensure that no mutating hash table operations
* occur between a pair of |lookupForAdd| and |add| calls. To avoid
* looking up the key a second time, the caller may use the more efficient
* relookupOrAdd method. This method reuses part of the hashing computation
* to more efficiently insert the key if it has not been added. For
* example, a mutation-handling version of the previous example:
*
* HS::AddPtr p = h.lookupForAdd(3);
* if (!p) {
* call_that_may_mutate_h();
* if (!h.relookupOrAdd(p, 3, 3))
* return false;
* }
* assert(*p == 3);
*
* Note that relookupOrAdd(p,l,t) performs Lookup using l and adds the
* entry t, where the caller ensures match(l,t).
*/
typedef typename Impl::AddPtr AddPtr;
AddPtr lookupForAdd(const Lookup &l) const {
return impl.lookupForAdd(l);
}
bool add(AddPtr &p, const T &t) { return impl.add(p, t); }
bool add(AddPtr &p, const T &t) {
return impl.add(p, t);
}
bool relookupOrAdd(AddPtr &p, const Lookup &l, const T &t) {
return impl.relookupOrAdd(p, l, t);
}
/*
* |all()| returns a Range containing |count()| elements: