- Add a generation number to JSDHashTable that counts entryStore changes due
  to grows, shrinks, and compresses.  This helps JS_DHashTableOperate callers
  who hold returned entry pointers to validate those pointers and avoid having
  to re-lookup an entry by its key.
- Balance that addition by removing JSDHashTable.sizeMask, which is induced by
  JSDHashTable.sizeLog2 at the cost of two typically single-cycle instructions.
- Use JSDHashTable.generation in jsobj.c to avoid unsafely dereferencing an
  entry pointer held across calls to JSClass.resolve from js_LookupProperty,
  which may recur and add entries to cx->resolving, growing that table and
  invalidating entry pointers held by earlier js_LookupProperty activations.
(bug 78121, r=jst@netscape.com, sr=jband@netscape.com, a=asa@mozilla.org)
This commit is contained in:
brendan%mozilla.org 2001-05-25 03:05:38 +00:00
Parent 7647565c82
Commit 769e09a93e
5 changed files with 38 additions and 21 deletions

View file

@ -183,9 +183,9 @@ JS_DHashTableInit(JSDHashTable *table, JSDHashTableOps *ops, void *data,
capacity = JS_BIT(log2);
table->hashShift = JS_DHASH_BITS - log2;
table->sizeLog2 = log2;
table->sizeMask = JS_BITMASK(log2);
table->entrySize = entrySize;
table->entryCount = table->removedCount = 0;
table->generation = 0;
nbytes = capacity * entrySize;
table->entryStore = ops->allocTable(table, nbytes);
@ -244,9 +244,10 @@ static JSDHashEntryHdr *
SearchTable(JSDHashTable *table, const void *key, JSDHashNumber keyHash)
{
JSDHashNumber hash1, hash2;
int hashShift;
int hashShift, sizeLog2;
JSDHashEntryHdr *entry;
JSDHashMatchEntry matchEntry;
uint32 sizeMask;
METER(table->stats.searches++);
@ -269,11 +270,13 @@ SearchTable(JSDHashTable *table, const void *key, JSDHashNumber keyHash)
}
/* Collision: double hash. */
hash2 = HASH2(keyHash, table->sizeLog2, hashShift);
sizeLog2 = table->sizeLog2;
hash2 = HASH2(keyHash, sizeLog2, hashShift);
sizeMask = JS_BITMASK(sizeLog2);
do {
METER(table->stats.steps++);
hash1 -= hash2;
hash1 &= table->sizeMask;
hash1 &= sizeMask;
entry = ADDRESS_ENTRY(table, hash1);
if (JS_DHASH_ENTRY_IS_FREE(entry)) {
METER(table->stats.misses++);
@ -311,8 +314,8 @@ ChangeTable(JSDHashTable *table, int deltaLog2)
/* We can't fail from here on, so update table parameters. */
table->hashShift = JS_DHASH_BITS - newLog2;
table->sizeLog2 = newLog2;
table->sizeMask = JS_BITMASK(newLog2);
table->removedCount = 0;
table->generation++;
/* Assign the new entry store to table. */
memset(newEntryStore, 0, nbytes);
@ -482,7 +485,7 @@ JS_DHashTableDumpMeter(JSDHashTable *table, JSDHashEnumerator dump, FILE *fp)
{
char *entryAddr;
uint32 entrySize, entryCount;
uint32 i, tableSize, chainLen, maxChainLen, chainCount;
uint32 i, tableSize, sizeMask, chainLen, maxChainLen, chainCount;
JSDHashNumber hash1, hash2, saveHash1, maxChainHash1, maxChainHash2;
double sqsum, mean, variance, sigma;
JSDHashEntryHdr *entry, *probe;
@ -490,6 +493,7 @@ JS_DHashTableDumpMeter(JSDHashTable *table, JSDHashEnumerator dump, FILE *fp)
entryAddr = table->entryStore;
entrySize = table->entrySize;
tableSize = JS_BIT(table->sizeLog2);
sizeMask = JS_BITMASK(table->sizeLog2);
chainCount = maxChainLen = 0;
hash2 = 0;
sqsum = 0;
@ -510,7 +514,7 @@ JS_DHashTableDumpMeter(JSDHashTable *table, JSDHashEnumerator dump, FILE *fp)
do {
chainLen++;
hash1 -= hash2;
hash1 &= table->sizeMask;
hash1 &= sizeMask;
probe = ADDRESS_ENTRY(table, hash1);
} while (probe != entry);
}
@ -565,7 +569,7 @@ JS_DHashTableDumpMeter(JSDHashTable *table, JSDHashEnumerator dump, FILE *fp)
if (dump(table, entry, i++, fp) != JS_DHASH_NEXT)
break;
hash1 -= hash2;
hash1 &= table->sizeMask;
hash1 &= sizeMask;
entry = ADDRESS_ENTRY(table, hash1);
} while (JS_DHASH_ENTRY_IS_BUSY(entry));
}

View file

@ -153,7 +153,9 @@ struct JSDHashEntryHdr {
* Note a qualitative difference between chaining and double hashing: under
* chaining, entry addresses are stable across table shrinks and grows. With
* double hashing, you can't safely hold an entry pointer and use it after an
* ADD or REMOVE operation.
* ADD or REMOVE operation, unless you sample table->generation before adding
* or removing, and compare the sample after, dereferencing the entry pointer
* only if table->generation has not changed.
*
* The moral of this story: there is no one-size-fits-all hash table scheme,
* but for small table entry size, and assuming entry address stability is not
@ -164,10 +166,10 @@ struct JSDHashTable {
void *data; /* ops- and instance-specific data */
int16 hashShift; /* multiplicative hash shift */
int16 sizeLog2; /* log2(table size) */
uint32 sizeMask; /* JS_BITMASK(log2(table size)) */
uint32 entrySize; /* number of bytes in an entry */
uint32 entryCount; /* number of entries in table */
uint32 removedCount; /* removed entry sentinels in table */
uint32 generation; /* entry storage generation number */
char *entryStore; /* entry storage */
#ifdef JS_DHASHMETER
struct JSDHashStats {

View file

@ -2052,6 +2052,7 @@ js_LookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
JSResolvingKey key;
JSDHashTable *table;
JSDHashEntryHdr *entry;
uint32 generation;
JSNewResolveOp newresolve;
uintN flags;
uint32 format;
@ -2110,6 +2111,7 @@ js_LookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
return JS_FALSE;
}
((JSResolvingEntry *)entry)->key = key;
generation = table->generation;
/* Null *propp here so we can test it at cleanup: safely. */
*propp = NULL;
@ -2189,7 +2191,10 @@ js_LookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
}
cleanup:
JS_DHashTableRawRemove(table, entry);
if (table->generation == generation)
JS_DHashTableRawRemove(table, entry);
else
JS_DHashTableOperate(table, &key, JS_DHASH_REMOVE);
if (table->entryCount == 0) {
cx->resolving = NULL;
JS_DHashTableDestroy(table);

View file

@ -184,9 +184,9 @@ PL_DHashTableInit(PLDHashTable *table, PLDHashTableOps *ops, void *data,
capacity = PR_BIT(log2);
table->hashShift = PL_DHASH_BITS - log2;
table->sizeLog2 = log2;
table->sizeMask = PR_BITMASK(log2);
table->entrySize = entrySize;
table->entryCount = table->removedCount = 0;
table->generation = 0;
nbytes = capacity * entrySize;
table->entryStore = ops->allocTable(table, nbytes);
@ -245,9 +245,10 @@ static PLDHashEntryHdr *
SearchTable(PLDHashTable *table, const void *key, PLDHashNumber keyHash)
{
PLDHashNumber hash1, hash2;
int hashShift;
int hashShift, sizeLog2;
PLDHashEntryHdr *entry;
PLDHashMatchEntry matchEntry;
PRUint32 sizeMask;
METER(table->stats.searches++);
@ -270,11 +271,13 @@ SearchTable(PLDHashTable *table, const void *key, PLDHashNumber keyHash)
}
/* Collision: double hash. */
hash2 = HASH2(keyHash, table->sizeLog2, hashShift);
sizeLog2 = table->sizeLog2;
hash2 = HASH2(keyHash, sizeLog2, hashShift);
sizeMask = PR_BITMASK(sizeLog2);
do {
METER(table->stats.steps++);
hash1 -= hash2;
hash1 &= table->sizeMask;
hash1 &= sizeMask;
entry = ADDRESS_ENTRY(table, hash1);
if (PL_DHASH_ENTRY_IS_FREE(entry)) {
METER(table->stats.misses++);
@ -312,8 +315,8 @@ ChangeTable(PLDHashTable *table, int deltaLog2)
/* We can't fail from here on, so update table parameters. */
table->hashShift = PL_DHASH_BITS - newLog2;
table->sizeLog2 = newLog2;
table->sizeMask = PR_BITMASK(newLog2);
table->removedCount = 0;
table->generation++;
/* Assign the new entry store to table. */
memset(newEntryStore, 0, nbytes);
@ -483,7 +486,7 @@ PL_DHashTableDumpMeter(PLDHashTable *table, PLDHashEnumerator dump, FILE *fp)
{
char *entryAddr;
PRUint32 entrySize, entryCount;
PRUint32 i, tableSize, chainLen, maxChainLen, chainCount;
PRUint32 i, tableSize, sizeMask, chainLen, maxChainLen, chainCount;
PLDHashNumber hash1, hash2, saveHash1, maxChainHash1, maxChainHash2;
double sqsum, mean, variance, sigma;
PLDHashEntryHdr *entry, *probe;
@ -491,6 +494,7 @@ PL_DHashTableDumpMeter(PLDHashTable *table, PLDHashEnumerator dump, FILE *fp)
entryAddr = table->entryStore;
entrySize = table->entrySize;
tableSize = PR_BIT(table->sizeLog2);
sizeMask = PR_BITMASK(table->sizeLog2);
chainCount = maxChainLen = 0;
hash2 = 0;
sqsum = 0;
@ -511,7 +515,7 @@ PL_DHashTableDumpMeter(PLDHashTable *table, PLDHashEnumerator dump, FILE *fp)
do {
chainLen++;
hash1 -= hash2;
hash1 &= table->sizeMask;
hash1 &= sizeMask;
probe = ADDRESS_ENTRY(table, hash1);
} while (probe != entry);
}
@ -566,7 +570,7 @@ PL_DHashTableDumpMeter(PLDHashTable *table, PLDHashEnumerator dump, FILE *fp)
if (dump(table, entry, i++, fp) != PL_DHASH_NEXT)
break;
hash1 -= hash2;
hash1 &= table->sizeMask;
hash1 &= sizeMask;
entry = ADDRESS_ENTRY(table, hash1);
} while (PL_DHASH_ENTRY_IS_BUSY(entry));
}

View file

@ -154,7 +154,9 @@ struct PLDHashEntryHdr {
* Note a qualitative difference between chaining and double hashing: under
* chaining, entry addresses are stable across table shrinks and grows. With
* double hashing, you can't safely hold an entry pointer and use it after an
* ADD or REMOVE operation.
* ADD or REMOVE operation, unless you sample table->generation before adding
* or removing, and compare the sample after, dereferencing the entry pointer
* only if table->generation has not changed.
*
* The moral of this story: there is no one-size-fits-all hash table scheme,
* but for small table entry size, and assuming entry address stability is not
@ -165,10 +167,10 @@ struct PLDHashTable {
void *data; /* ops- and instance-specific data */
PRInt16 hashShift; /* multiplicative hash shift */
PRInt16 sizeLog2; /* log2(table size) */
PRUint32 sizeMask; /* PR_BITMASK(log2(table size)) */
PRUint32 entrySize; /* number of bytes in an entry */
PRUint32 entryCount; /* number of entries in table */
PRUint32 removedCount; /* removed entry sentinels in table */
PRUint32 generation; /* entry storage generation number */
char *entryStore; /* entry storage */
#ifdef PL_DHASHMETER
struct PLDHashStats {