Bug 475474 - Maintain globalSlots per global, not just one per JIT instance, r=danderson.

Graydon Hoare, 2009-01-29 15:59:52 -08:00
Parent: 6c157b6873
Commit: 4f11cfcaf5
4 changed files with 216 additions and 153 deletions
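In short, the patch replaces the monitor-wide globalShape/globalSlots pair with a small fixed table of per-global states: each GlobalState entry pairs a global object's shape with its own interned-slot list, every TreeInfo now carries the globalShape and globalSlots it was recorded against, and fragments are keyed by (ip, globalShape) so peers for different globals no longer collide. Below is a minimal standalone sketch of the find-or-allocate lookup this introduces; it is an illustration only, with invented names (MonitorSketch, GlobalStateSketch), not the SpiderMonkey code itself.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    static const size_t N_GLOBAL_STATES = 4;    // mirrors MONITOR_N_GLOBAL_STATES

    struct GlobalStateSketch {
        uint32_t shape = uint32_t(-1);          // (uint32)-1 marks an unused entry
        std::vector<uint16_t> slots;            // interned global slot numbers
    };

    struct MonitorSketch {
        GlobalStateSketch states[N_GLOBAL_STATES];

        // Find the entry tracking this global shape, claiming a free entry if
        // none matches yet; return nullptr when every entry already tracks some
        // other shape (the case where the real code flushes the JIT cache).
        GlobalStateSketch* lookup(uint32_t globalShape) {
            for (size_t i = 0; i < N_GLOBAL_STATES; ++i) {
                GlobalStateSketch& s = states[i];
                if (s.shape == uint32_t(-1))
                    s.shape = globalShape;      // allocate: first unused entry
                if (s.shape == globalShape)
                    return &s;                  // found (or just allocated)
            }
            return nullptr;                     // table full: caller must flush
        }
    };

An active recorder bypasses this table: it uses the globalShape/globalSlots stored in its TreeInfo and only checks that the live global's shape still matches, which is what the reworked js_CheckGlobalObjectShape in the diff does.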

View file

@@ -111,6 +111,12 @@ typedef Queue<uint16> SlotList;
#define FRAGMENT_TABLE_SIZE 512
struct VMFragment;
#define MONITOR_N_GLOBAL_STATES 4
struct GlobalState {
uint32 globalShape;
CLS(SlotList) globalSlots;
};
/*
* Trace monitor. Every JSThread (if JS_THREADSAFE) or JSRuntime (if not
* JS_THREADSAFE) has an associated trace monitor that keeps track of loop
@@ -127,12 +133,12 @@ typedef struct JSTraceMonitor {
CLS(nanojit::LirBuffer) lirbuf;
CLS(nanojit::Fragmento) fragmento;
CLS(TraceRecorder) recorder;
uint32 globalShape;
CLS(SlotList) globalSlots;
jsval *reservedDoublePool;
jsval *reservedDoublePoolPtr;
struct GlobalState globalStates[MONITOR_N_GLOBAL_STATES];
struct VMFragment* vmfragments[FRAGMENT_TABLE_SIZE];
JSBool needFlush;
/*
* reservedObjects is a linked list (via fslots[0]) of preallocated JSObjects.

View file

@@ -3097,9 +3097,8 @@ js_TraceTraceMonitor(JSTracer *trc, JSTraceMonitor *tm)
if (IS_GC_MARKING_TRACER(trc)) {
tm->reservedDoublePoolPtr = tm->reservedDoublePool;
/* Make sure the global shape changes and will force a flush
of the code cache. */
tm->globalShape = -1;
tm->needFlush = JS_TRUE;
/* Keep the reserved objects. */
for (JSObject *obj = tm->reservedObjects; obj; obj = JSVAL_TO_OBJECT(obj->fslots[0])) {
uint8 *flagp = GetGCThingFlags(obj);

View file

@@ -223,7 +223,7 @@ static avmplus::AvmCore* core = &s_core;
#ifdef JS_JIT_SPEW
void
js_DumpPeerStability(JSTraceMonitor* tm, const void* ip);
js_DumpPeerStability(JSTraceMonitor* tm, const void* ip, uint32 globalShape);
#endif
/* We really need a better way to configure the JIT. Shaver, where is my fancy JIT object? */
@@ -243,7 +243,7 @@ static Oracle oracle;
/* Blacklists the root peer fragment at a fragment's PC. This is so blacklisting stays at the
top of the peer list and not scattered around. */
void
js_BlacklistPC(JSTraceMonitor* tm, Fragment* frag);
js_BlacklistPC(JSTraceMonitor* tm, Fragment* frag, uint32 globalShape);
Tracker::Tracker()
{
@@ -540,17 +540,14 @@ getVMFragment(JSTraceMonitor* tm, const void *ip, uint32 globalShape)
return vf;
}
// FIXME: remove the default parameters for globalShape when we're
// actually keying by it.
static Fragment*
getLoop(JSTraceMonitor* tm, const void *ip, uint32 globalShape = 0)
getLoop(JSTraceMonitor* tm, const void *ip, uint32 globalShape)
{
return getVMFragment(tm, ip, globalShape);
}
static Fragment*
getAnchor(JSTraceMonitor* tm, const void *ip, uint32 globalShape = 0)
getAnchor(JSTraceMonitor* tm, const void *ip, uint32 globalShape)
{
LirBufWriter writer(tm->lirbuf);
char *fragmem = (char*) writer.skip(sizeof(VMFragment))->payload();
@@ -1227,10 +1224,10 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _frag
globalObj_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, globalObj)), "globalObj");
/* If we came from exit, we might not have enough global types. */
if (JS_TRACE_MONITOR(cx).globalSlots->length() > ti->globalSlots()) {
if (ti->globalSlots->length() > ti->nGlobalTypes()) {
ti->typeMap.captureMissingGlobalTypes(cx,
*JS_TRACE_MONITOR(cx).globalSlots,
ti->stackSlots);
*(ti->globalSlots),
ti->nStackTypes);
}
/* read into registers all values on the stack and all globals we know so far */
@@ -1778,7 +1775,7 @@ TraceRecorder::import(TreeInfo* treeInfo, LIns* sp, unsigned stackSlots, unsigne
didn't provide, this is always safe to do. */
uint8* globalTypeMap = typeMap + stackSlots;
unsigned length = treeInfo->globalSlots();
unsigned length = treeInfo->nGlobalTypes();
/* This is potentially the typemap of the side exit and thus shorter than the tree's
global type map. */
@@ -1786,10 +1783,10 @@ TraceRecorder::import(TreeInfo* treeInfo, LIns* sp, unsigned stackSlots, unsigne
mergeTypeMaps(&globalTypeMap/*out param*/, &ngslots/*out param*/,
treeInfo->globalTypeMap(), length,
(uint8*)alloca(sizeof(uint8) * length));
JS_ASSERT(ngslots == treeInfo->globalSlots());
JS_ASSERT(ngslots == treeInfo->nGlobalTypes());
/* the first time we compile a tree this will be empty as we add entries lazily */
uint16* gslots = traceMonitor->globalSlots->data();
uint16* gslots = treeInfo->globalSlots->data();
uint8* m = globalTypeMap;
FORALL_GLOBAL_SLOTS(cx, ngslots, gslots,
import(gp_ins, nativeGlobalOffset(vp), vp, *m, vpname, vpnum, NULL);
@@ -1812,12 +1809,9 @@ TraceRecorder::lazilyImportGlobalSlot(unsigned slot)
jsval* vp = &STOBJ_GET_SLOT(globalObj, slot);
if (known(vp))
return true; /* we already have it */
unsigned index = traceMonitor->globalSlots->length();
/* If this the first global we are adding, remember the shape of the global object. */
if (index == 0)
traceMonitor->globalShape = OBJ_SHAPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain));
unsigned index = treeInfo->globalSlots->length();
/* Add the slot to the list of interned global slots. */
traceMonitor->globalSlots->add(slot);
treeInfo->globalSlots->add(slot);
uint8 type = getCoercedType(*vp);
if ((type == JSVAL_INT) && oracle.isGlobalSlotUndemotable(cx, slot))
type = JSVAL_DOUBLE;
@@ -1985,9 +1979,9 @@ JS_REQUIRES_STACK bool
TraceRecorder::adjustCallerTypes(Fragment* f)
{
JSTraceMonitor* tm = traceMonitor;
uint16* gslots = tm->globalSlots->data();
unsigned ngslots = tm->globalSlots->length();
JS_ASSERT(ngslots == treeInfo->globalSlots());
uint16* gslots = treeInfo->globalSlots->data();
unsigned ngslots = treeInfo->globalSlots->length();
JS_ASSERT(ngslots == treeInfo->nGlobalTypes());
TreeInfo* ti = (TreeInfo*)f->vmprivate;
bool ok = true;
uint8* map = ti->globalTypeMap();
@@ -2004,7 +1998,7 @@ TraceRecorder::adjustCallerTypes(Fragment* f)
}
++m;
);
JS_ASSERT(unsigned(m - map) == ti->globalSlots());
JS_ASSERT(unsigned(m - map) == ti->nGlobalTypes());
map = ti->stackTypeMap();
m = map;
FORALL_SLOTS_IN_PENDING_FRAMES(cx, 0,
@@ -2025,7 +2019,7 @@ TraceRecorder::adjustCallerTypes(Fragment* f)
}
++m;
);
JS_ASSERT(unsigned(m - map) == ti->stackSlots);
JS_ASSERT(unsigned(m - map) == ti->nStackTypes);
JS_ASSERT(f == f->root);
return ok;
}
@@ -2120,7 +2114,7 @@ TraceRecorder::snapshot(ExitType exitType)
trackNativeStackUse(stackSlots + 1);
/* Capture the type map into a temporary location. */
unsigned ngslots = traceMonitor->globalSlots->length();
unsigned ngslots = treeInfo->globalSlots->length();
unsigned typemap_size = (stackSlots + ngslots) * sizeof(uint8);
uint8* typemap = (uint8*)alloca(typemap_size);
uint8* m = typemap;
@@ -2128,7 +2122,7 @@ TraceRecorder::snapshot(ExitType exitType)
/* Determine the type of a store by looking at the current type of the actual value the
interpreter is using. For numbers we have to check what kind of store we used last
(integer or double) to figure out what the side exit show reflect in its typemap. */
FORALL_SLOTS(cx, ngslots, traceMonitor->globalSlots->data(), callDepth,
FORALL_SLOTS(cx, ngslots, treeInfo->globalSlots->data(), callDepth,
*m++ = determineSlotType(vp);
);
JS_ASSERT(unsigned(m - typemap) == ngslots + stackSlots);
@@ -2324,9 +2318,9 @@ TraceRecorder::deduceTypeStability(Fragment* root_peer, Fragment** stable_peer,
{
uint8* m;
uint8* typemap;
unsigned ngslots = traceMonitor->globalSlots->length();
uint16* gslots = traceMonitor->globalSlots->data();
JS_ASSERT(ngslots == treeInfo->globalSlots());
unsigned ngslots = treeInfo->globalSlots->length();
uint16* gslots = treeInfo->globalSlots->data();
JS_ASSERT(ngslots == treeInfo->nGlobalTypes());
if (stable_peer)
*stable_peer = NULL;
@@ -2400,14 +2394,15 @@ checktype_fail_1:
continue;
ti = (TreeInfo*)f->vmprivate;
/* Don't allow varying stack depths */
if ((ti->stackSlots != treeInfo->stackSlots) ||
(ti->typeMap.length() != treeInfo->typeMap.length()))
if ((ti->nStackTypes != treeInfo->nStackTypes) ||
(ti->typeMap.length() != treeInfo->typeMap.length()) ||
(ti->globalSlots->length() != treeInfo->globalSlots->length()))
continue;
stage_count = 0;
success = false;
m = ti->globalTypeMap();
FORALL_GLOBAL_SLOTS(cx, traceMonitor->globalSlots->length(), traceMonitor->globalSlots->data(),
FORALL_GLOBAL_SLOTS(cx, treeInfo->globalSlots->length(), treeInfo->globalSlots->data(),
if (!checkType(*vp, *m, stage_vals[stage_count], stage_ins[stage_count], stage_count))
goto checktype_fail_2;
++m;
@@ -2444,7 +2439,7 @@ checktype_fail_2:
*/
if (demote && fragment->kind == LoopTrace) {
typemap = m = treeInfo->globalTypeMap();
FORALL_GLOBAL_SLOTS(cx, traceMonitor->globalSlots->length(), traceMonitor->globalSlots->data(),
FORALL_GLOBAL_SLOTS(cx, treeInfo->globalSlots->length(), treeInfo->globalSlots->data(),
if (*m == JSVAL_INT) {
JS_ASSERT(isNumber(*vp));
if (!isPromoteInt(get(vp)))
@@ -2494,7 +2489,7 @@ TraceRecorder::compile(JSTraceMonitor* tm)
Fragmento* fragmento = tm->fragmento;
if (treeInfo->maxNativeStackSlots >= MAX_NATIVE_STACK_SLOTS) {
debug_only_v(printf("Trace rejected: excessive stack use.\n"));
js_BlacklistPC(tm, fragment);
js_BlacklistPC(tm, fragment, treeInfo->globalShape);
return;
}
++treeInfo->branchCount;
@@ -2506,7 +2501,7 @@ TraceRecorder::compile(JSTraceMonitor* tm)
if (fragmento->assm()->error() == nanojit::OutOMem)
return;
if (fragmento->assm()->error() != nanojit::None) {
js_BlacklistPC(tm, fragment);
js_BlacklistPC(tm, fragment, treeInfo->globalShape);
return;
}
if (anchor)
@@ -2531,7 +2526,7 @@ static bool
js_JoinPeersIfCompatible(Fragmento* frago, Fragment* stableFrag, TreeInfo* stableTree,
VMSideExit* exit)
{
JS_ASSERT(exit->numStackSlots == stableTree->stackSlots);
JS_ASSERT(exit->numStackSlots == stableTree->nStackTypes);
/* Must have a matching type unstable exit. */
if ((exit->numGlobalSlots + exit->numStackSlots != stableTree->typeMap.length()) ||
@@ -2563,14 +2558,14 @@ TraceRecorder::closeLoop(JSTraceMonitor* tm, bool& demote)
if (callDepth != 0) {
debug_only_v(printf("Stack depth mismatch, possible recursion\n");)
js_BlacklistPC(tm, fragment);
js_BlacklistPC(tm, fragment, treeInfo->globalShape);
trashSelf = true;
return false;
}
JS_ASSERT(exit->numStackSlots == treeInfo->stackSlots);
JS_ASSERT(exit->numStackSlots == treeInfo->nStackTypes);
peer_root = getLoop(traceMonitor, fragment->root->ip);
peer_root = getLoop(traceMonitor, fragment->root->ip, treeInfo->globalShape);
JS_ASSERT(peer_root != NULL);
stable = deduceTypeStability(peer_root, &peer, demote);
@@ -2652,8 +2647,8 @@ TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, Fragment* peer_root)
Fragment* peer;
uint8* t1, *t2;
UnstableExit* uexit, **unext;
uint32* stackDemotes = (uint32*)alloca(sizeof(uint32) * treeInfo->stackSlots);
uint32* globalDemotes = (uint32*)alloca(sizeof(uint32) * treeInfo->globalSlots());
uint32* stackDemotes = (uint32*)alloca(sizeof(uint32) * treeInfo->nStackTypes);
uint32* globalDemotes = (uint32*)alloca(sizeof(uint32) * treeInfo->nGlobalTypes());
for (peer = peer_root; peer != NULL; peer = peer->peer) {
if (!peer->code())
@@ -2699,7 +2694,7 @@ TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, Fragment* peer_root)
for (unsigned i = 0; i < stackCount; i++)
oracle.markStackSlotUndemotable(cx, stackDemotes[i]);
for (unsigned i = 0; i < globalCount; i++)
oracle.markGlobalSlotUndemotable(cx, tm->globalSlots->data()[globalDemotes[i]]);
oracle.markGlobalSlotUndemotable(cx, ti->globalSlots->data()[globalDemotes[i]]);
JS_ASSERT(peer == uexit->fragment->root);
if (fragment == peer)
trashSelf = true;
@@ -2720,7 +2715,7 @@ TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, Fragment* peer_root)
}
}
debug_only_v(js_DumpPeerStability(traceMonitor, peer_root->ip);)
debug_only_v(js_DumpPeerStability(traceMonitor, peer_root->ip, treeInfo->globalShape);)
}
/* Emit an always-exit guard and compile the tree (used for break statements. */
@@ -2731,7 +2726,7 @@ TraceRecorder::endLoop(JSTraceMonitor* tm)
if (callDepth != 0) {
debug_only_v(printf("Stack depth mismatch, possible recursion\n");)
js_BlacklistPC(tm, fragment);
js_BlacklistPC(tm, fragment, treeInfo->globalShape);
trashSelf = true;
return;
}
@@ -2742,7 +2737,7 @@ TraceRecorder::endLoop(JSTraceMonitor* tm)
if (tm->fragmento->assm()->error() != nanojit::None)
return;
joinEdgesToEntry(tm->fragmento, getLoop(tm, fragment->root->ip));
joinEdgesToEntry(tm->fragmento, getLoop(tm, fragment->root->ip, treeInfo->globalShape));
debug_only_v(printf("recording completed at %s:%u@%u via endLoop\n",
cx->fp->script->filename,
@@ -2984,16 +2979,57 @@ js_DeleteRecorder(JSContext* cx)
* Checks whether the shape of the global object has changed.
*/
static inline bool
js_CheckGlobalObjectShape(JSContext* cx, JSTraceMonitor* tm, JSObject* globalObj)
js_CheckGlobalObjectShape(JSContext* cx, JSTraceMonitor* tm, JSObject* globalObj,
uint32 *shape=NULL, SlotList** slots=NULL)
{
/* Check the global shape. */
if (OBJ_SHAPE(globalObj) != tm->globalShape) {
AUDIT(globalShapeMismatchAtEntry);
debug_only_v(printf("Global shape mismatch (%u vs. %u), flushing cache.\n",
OBJ_SHAPE(globalObj), tm->globalShape);)
if (tm->needFlush) {
tm->needFlush = JS_FALSE;
return false;
}
return true;
uint32 globalShape = OBJ_SHAPE(globalObj);
if (tm->recorder) {
TreeInfo* ti = tm->recorder->getTreeInfo();
/* Check the global shape matches the recorder's treeinfo's shape. */
if (globalShape != ti->globalShape) {
AUDIT(globalShapeMismatchAtEntry);
debug_only_v(printf("Global shape mismatch (%u vs. %u), flushing cache.\n",
globalShape, ti->globalShape);)
return false;
}
if (shape)
*shape = globalShape;
if (slots)
*slots = ti->globalSlots;
return true;
}
/* No recorder, search for a tracked global-state (or allocate one). */
for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
GlobalState &state = tm->globalStates[i];
if (state.globalShape == -1) {
state.globalShape = globalShape;
JS_ASSERT(state.globalSlots);
JS_ASSERT(state.globalSlots->length() == 0);
}
if (tm->globalStates[i].globalShape == globalShape) {
if (shape)
*shape = globalShape;
if (slots)
*slots = state.globalSlots;
return true;
}
}
/* No currently-tracked-global found and no room to allocate, abort. */
AUDIT(globalShapeMismatchAtEntry);
debug_only_v(printf("No global slotlist for global shape %u, flushing cache.\n",
globalShape));
return false;
}
static JS_REQUIRES_STACK bool
@@ -3233,7 +3269,8 @@ js_dumpMap(TypeMap const & tm) {
#endif
JS_REQUIRES_STACK bool
js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, Fragment* outer)
js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, Fragment* outer,
uint32 globalShape, SlotList* globalSlots)
{
JS_ASSERT(f->root == f);
@@ -3256,7 +3293,7 @@ js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, Fragment* outer)
while (f->code() && f->peer)
f = f->peer;
if (f->code())
f = getAnchor(&JS_TRACE_MONITOR(cx), f->root->ip);
f = getAnchor(&JS_TRACE_MONITOR(cx), f->root->ip, globalShape);
if (!f) {
js_FlushJITCache(cx);
@@ -3276,18 +3313,17 @@ js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, Fragment* outer)
JS_ASSERT(!f->code() && !f->vmprivate);
/* setup the VM-private treeInfo structure for this fragment */
TreeInfo* ti = new (&gc) TreeInfo(f);
TreeInfo* ti = new (&gc) TreeInfo(f, globalShape, globalSlots);
/* capture the coerced type of each active slot in the type map */
SlotList& globalSlots = *tm->globalSlots;
ti->typeMap.captureTypes(cx, globalSlots, 0/*callDepth*/);
ti->stackSlots = ti->typeMap.length() - globalSlots.length();
ti->typeMap.captureTypes(cx, *globalSlots, 0/*callDepth*/);
ti->nStackTypes = ti->typeMap.length() - globalSlots->length();
/* Check for duplicate entry type maps. This is always wrong and hints at trace explosion
since we are trying to stabilize something without properly connecting peer edges. */
#ifdef DEBUG
TreeInfo* ti_other;
for (Fragment* peer = getLoop(tm, f->root->ip); peer != NULL; peer = peer->peer) {
for (Fragment* peer = getLoop(tm, f->root->ip, globalShape); peer != NULL; peer = peer->peer) {
if (!peer->code() || peer == f)
continue;
ti_other = (TreeInfo*)peer->vmprivate;
@@ -3297,7 +3333,7 @@ js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, Fragment* outer)
#endif
/* determine the native frame layout at the entry point */
unsigned entryNativeStackSlots = ti->stackSlots;
unsigned entryNativeStackSlots = ti->nStackTypes;
JS_ASSERT(entryNativeStackSlots == js_NativeStackSlots(cx, 0/*callDepth*/));
ti->nativeStackBase = (entryNativeStackSlots -
(cx->fp->regs->sp - StackBase(cx->fp))) * sizeof(double);
@@ -3307,8 +3343,8 @@ js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, Fragment* outer)
/* recording primary trace */
if (!js_StartRecorder(cx, NULL, f, ti,
ti->stackSlots,
tm->globalSlots->length(),
ti->nStackTypes,
ti->globalSlots->length(),
ti->typeMap.data(), NULL, outer)) {
return false;
}
@@ -3316,14 +3352,14 @@ js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, Fragment* outer)
return true;
}
JS_REQUIRES_STACK static inline bool isSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot)
JS_REQUIRES_STACK static inline bool
isSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot)
{
if (slot < ti->stackSlots)
if (slot < ti->nStackTypes)
return oracle.isStackSlotUndemotable(cx, slot);
JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
uint16* gslots = tm->globalSlots->data();
return oracle.isGlobalSlotUndemotable(cx, gslots[slot - ti->stackSlots]);
uint16* gslots = ti->globalSlots->data();
return oracle.isGlobalSlotUndemotable(cx, gslots[slot - ti->nStackTypes]);
}
JS_REQUIRES_STACK static bool
@@ -3331,6 +3367,7 @@ js_AttemptToStabilizeTree(JSContext* cx, VMSideExit* exit, Fragment* outer)
{
JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
Fragment* from = exit->from->root;
TreeInfo* from_ti = (TreeInfo*)from->vmprivate;
JS_ASSERT(exit->from->root->code());
@@ -3343,28 +3380,22 @@ js_AttemptToStabilizeTree(JSContext* cx, VMSideExit* exit, Fragment* outer)
m = getGlobalTypeMap(exit);
for (unsigned i = 0; i < exit->numGlobalSlots; i++) {
if (m[i] == JSVAL_DOUBLE)
oracle.markGlobalSlotUndemotable(cx, tm->globalSlots->data()[i]);
oracle.markGlobalSlotUndemotable(cx, from_ti->globalSlots->data()[i]);
}
/* If this exit does not have enough globals, there might exist a peer with more globals that we
* can join to.
*/
uint8* m2;
Fragment* f;
TreeInfo* ti;
bool matched;
bool undemote;
bool bound = false;
unsigned int checkSlots;
for (f = from->first; f != NULL; f = f->peer) {
for (Fragment* f = from->first; f != NULL; f = f->peer) {
if (!f->code())
continue;
ti = (TreeInfo*)f->vmprivate;
JS_ASSERT(exit->numStackSlots == ti->stackSlots);
TreeInfo* ti = (TreeInfo*)f->vmprivate;
JS_ASSERT(exit->numStackSlots == ti->nStackTypes);
/* Check the minimum number of slots that need to be compared. */
checkSlots = JS_MIN(exit->numStackSlots + exit->numGlobalSlots, ti->typeMap.length());
unsigned checkSlots = JS_MIN(exit->numStackSlots + exit->numGlobalSlots, ti->typeMap.length());
m = getFullTypeMap(exit);
m2 = ti->typeMap.data();
uint8* m2 = ti->typeMap.data();
/* Analyze the exit typemap against the peer typemap.
* Two conditions are important:
* 1) Typemaps are identical: these peers can be attached.
@@ -3372,8 +3403,8 @@ js_AttemptToStabilizeTree(JSContext* cx, VMSideExit* exit, Fragment* outer)
* In this case, the original tree must be trashed because it
* will never connect to any peer.
*/
matched = true;
undemote = false;
bool matched = true;
bool undemote = false;
for (uint32 i = 0; i < checkSlots; i++) {
/* If the types are equal we're okay. */
if (m[i] == m2[i])
@@ -3390,18 +3421,19 @@ js_AttemptToStabilizeTree(JSContext* cx, VMSideExit* exit, Fragment* outer)
}
}
if (matched) {
JS_ASSERT(from_ti->globalSlots == ti->globalSlots);
JS_ASSERT(from_ti->nStackTypes == ti->nStackTypes);
/* Capture missing globals on both trees and link the fragments together. */
if (from != f) {
ti->dependentTrees.addUnique(from);
ti->typeMap.captureMissingGlobalTypes(cx, *tm->globalSlots, ti->stackSlots);
ti->typeMap.captureMissingGlobalTypes(cx, *ti->globalSlots, ti->nStackTypes);
}
ti = (TreeInfo*)from->vmprivate;
ti->typeMap.captureMissingGlobalTypes(cx, *tm->globalSlots, ti->stackSlots);
from_ti->typeMap.captureMissingGlobalTypes(cx, *from_ti->globalSlots, from_ti->nStackTypes);
exit->target = f;
tm->fragmento->assm()->patch(exit);
/* Now erase this exit from the unstable exit list. */
UnstableExit** tail = &ti->unstableExits;
for (UnstableExit* uexit = ti->unstableExits; uexit != NULL; uexit = uexit->next) {
UnstableExit** tail = &from_ti->unstableExits;
for (UnstableExit* uexit = from_ti->unstableExits; uexit != NULL; uexit = uexit->next) {
if (uexit->exit == exit) {
*tail = uexit->next;
delete uexit;
@@ -3411,7 +3443,7 @@ js_AttemptToStabilizeTree(JSContext* cx, VMSideExit* exit, Fragment* outer)
tail = &uexit->next;
}
JS_ASSERT(bound);
debug_only_v(js_DumpPeerStability(tm, f->ip);)
debug_only_v(js_DumpPeerStability(tm, f->ip, from_ti->globalShape);)
break;
} else if (undemote) {
/* The original tree is unconnectable, so trash it. */
@@ -3423,7 +3455,7 @@ js_AttemptToStabilizeTree(JSContext* cx, VMSideExit* exit, Fragment* outer)
if (bound)
return false;
return js_RecordTree(cx, tm, from->first, outer);
return js_RecordTree(cx, tm, from->first, outer, from_ti->globalShape, from_ti->globalSlots);
}
static JS_REQUIRES_STACK bool
@@ -3504,6 +3536,9 @@ js_CloseLoop(JSContext* cx)
bool demote = false;
Fragment* f = r->getFragment();
TreeInfo* ti = r->getTreeInfo();
uint32 globalShape = ti->globalShape;
SlotList* globalSlots = ti->globalSlots;
r->closeLoop(tm, demote);
/*
@@ -3517,7 +3552,7 @@ js_CloseLoop(JSContext* cx)
* compiler again here since we didn't return to the loop header.
*/
if (demote && !walkedOutOfLoop)
return js_RecordTree(cx, tm, f, NULL);
return js_RecordTree(cx, tm, f, NULL, globalShape, globalSlots);
return false;
}
@@ -3531,6 +3566,7 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount)
}
#endif
JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
TreeInfo* ti = r->getTreeInfo();
/* Process deep abort requests. */
if (r->wasDeepAborted()) {
js_AbortRecording(cx, "deep abort requested");
@@ -3540,7 +3576,7 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount)
if (r->isLoopHeader(cx))
return js_CloseLoop(cx);
/* does this branch go to an inner loop? */
Fragment* f = getLoop(&JS_TRACE_MONITOR(cx), cx->fp->regs->pc);
Fragment* f = getLoop(&JS_TRACE_MONITOR(cx), cx->fp->regs->pc, ti->globalShape);
Fragment* peer_root = f;
if (nesting_enabled && f) {
@@ -3554,7 +3590,9 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount)
/* Make sure the shape of the global object still matches (this might flush
the JIT cache). */
JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
if (!js_CheckGlobalObjectShape(cx, tm, globalObj)) {
uint32 globalShape = -1;
SlotList* globalSlots = NULL;
if (!js_CheckGlobalObjectShape(cx, tm, globalObj, &globalShape, &globalSlots)) {
js_AbortRecording(cx, "Couldn't call inner tree (prep failed)");
return false;
}
@@ -3576,7 +3614,7 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount)
AUDIT(noCompatInnerTrees);
debug_only_v(printf("No compatible inner tree (%p).\n", f);)
Fragment* old = getLoop(tm, tm->recorder->getFragment()->root->ip);
Fragment* old = getLoop(tm, tm->recorder->getFragment()->root->ip, ti->globalShape);
if (old == NULL)
old = tm->recorder->getFragment();
js_AbortRecording(cx, "No compatible inner tree");
@@ -3586,13 +3624,13 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount)
oracle.resetHits(old->ip);
f = empty;
if (!f) {
f = getAnchor(tm, cx->fp->regs->pc);
f = getAnchor(tm, cx->fp->regs->pc, globalShape);
if (!f) {
js_FlushJITCache(cx);
return false;
}
}
return js_RecordTree(cx, tm, f, old);
return js_RecordTree(cx, tm, f, old, globalShape, globalSlots);
}
r->prepareTreeCall(f);
@@ -3615,13 +3653,13 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount)
return true;
case UNSTABLE_LOOP_EXIT:
/* abort recording so the inner loop can become type stable. */
old = getLoop(tm, tm->recorder->getFragment()->root->ip);
old = getLoop(tm, tm->recorder->getFragment()->root->ip, ti->globalShape);
js_AbortRecording(cx, "Inner tree is trying to stabilize, abort outer recording");
oracle.resetHits(old->ip);
return js_AttemptToStabilizeTree(cx, lr, old);
case BRANCH_EXIT:
/* abort recording the outer tree, extend the inner tree */
old = getLoop(tm, tm->recorder->getFragment()->root->ip);
old = getLoop(tm, tm->recorder->getFragment()->root->ip, ti->globalShape);
js_AbortRecording(cx, "Inner tree is trying to grow, abort outer recording");
oracle.resetHits(old->ip);
return js_AttemptToExtendTree(cx, lr, NULL, old);
@@ -3696,8 +3734,8 @@ TraceRecorder::findNestedCompatiblePeer(Fragment* f, Fragment** empty)
demote = NULL;
tm = &JS_TRACE_MONITOR(cx);
unsigned int ngslots = tm->globalSlots->length();
uint16* gslots = tm->globalSlots->data();
unsigned int ngslots = treeInfo->globalSlots->length();
uint16* gslots = treeInfo->globalSlots->data();
/* We keep a maximum tally - we want to select the peer most likely to work so we don't keep
* recording.
@@ -3717,8 +3755,8 @@ TraceRecorder::findNestedCompatiblePeer(Fragment* f, Fragment** empty)
debug_only_v(printf("checking nested types %p: ", f);)
if (ngslots > ti->globalSlots())
ti->typeMap.captureMissingGlobalTypes(cx, *tm->globalSlots, ti->stackSlots);
if (ngslots > ti->nGlobalTypes())
ti->typeMap.captureMissingGlobalTypes(cx, *ti->globalSlots, ti->nStackTypes);
uint8* m = ti->typeMap.data();
@@ -3767,22 +3805,19 @@ check_fail:
static JS_REQUIRES_STACK bool
js_CheckEntryTypes(JSContext* cx, TreeInfo* ti)
{
JSTraceMonitor* tm;
unsigned int ngslots = ti->globalSlots->length();
uint16* gslots = ti->globalSlots->data();
tm = &JS_TRACE_MONITOR(cx);
unsigned int ngslots = tm->globalSlots->length();
uint16* gslots = tm->globalSlots->data();
JS_ASSERT(ti->nStackTypes == js_NativeStackSlots(cx, 0));
JS_ASSERT(ti->stackSlots == js_NativeStackSlots(cx, 0));
if (ngslots > ti->globalSlots())
ti->typeMap.captureMissingGlobalTypes(cx, *tm->globalSlots, ti->stackSlots);
if (ngslots > ti->nGlobalTypes())
ti->typeMap.captureMissingGlobalTypes(cx, *ti->globalSlots, ti->nStackTypes);
uint8* m = ti->typeMap.data();
JS_ASSERT(ti->typeMap.length() == js_NativeStackSlots(cx, 0) + ngslots);
JS_ASSERT(ti->typeMap.length() == ti->stackSlots + ngslots);
JS_ASSERT(ti->globalSlots() == ngslots);
JS_ASSERT(ti->typeMap.length() == ti->nStackTypes + ngslots);
JS_ASSERT(ti->nGlobalTypes() == ngslots);
FORALL_SLOTS(cx, ngslots, gslots, 0,
debug_only_v(printf("%s%d=", vpname, vpnum);)
JS_ASSERT(*m != 0xCD);
@@ -3832,15 +3867,15 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
TreeInfo* ti = (TreeInfo*)f->vmprivate;
unsigned ngslots = tm->globalSlots->length();
uint16* gslots = tm->globalSlots->data();
unsigned ngslots = ti->globalSlots->length();
uint16* gslots = ti->globalSlots->data();
unsigned globalFrameSize = STOBJ_NSLOTS(globalObj);
double* global = (double*)alloca((globalFrameSize+1) * sizeof(double));
double stack_buffer[MAX_NATIVE_STACK_SLOTS];
double* stack = stack_buffer;
/* Make sure the global object is sane. */
JS_ASSERT(!ngslots || (OBJ_SHAPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain)) == tm->globalShape));
JS_ASSERT(!ngslots || (OBJ_SHAPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain)) == ti->globalShape));
/* Make sure our caller replenished the double pool. */
JS_ASSERT(tm->reservedDoublePoolPtr >= tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS);
@@ -3867,7 +3902,7 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
ti->maxNativeStackSlots,
f->code());)
JS_ASSERT(ti->globalSlots() == ngslots);
JS_ASSERT(ti->nGlobalTypes() == ngslots);
if (ngslots)
BuildNativeGlobalFrame(cx, ngslots, gslots, ti->globalTypeMap(), global);
@@ -4036,13 +4071,13 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
information from the entry type-map. See also comment in the constructor of TraceRecorder
why this is always safe to do. */
unsigned exit_gslots = innermost->numGlobalSlots;
JS_ASSERT(ngslots == ti->globalSlots());
JS_ASSERT(ngslots == ti->nGlobalTypes());
JS_ASSERT(ngslots >= exit_gslots);
uint8* globalTypeMap = getGlobalTypeMap(innermost);
if (exit_gslots < ngslots)
mergeTypeMaps(&globalTypeMap, &exit_gslots, ti->globalTypeMap(), ngslots,
(uint8*)alloca(sizeof(uint8) * ngslots));
JS_ASSERT(exit_gslots == ti->globalSlots());
JS_ASSERT(exit_gslots == ti->nGlobalTypes());
/* write back interned globals */
FlushNativeGlobalFrame(cx, exit_gslots, gslots, globalTypeMap, global);
@@ -4096,7 +4131,10 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount)
/* Make sure the shape of the global object still matches (this might flush the JIT cache). */
JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
if (!js_CheckGlobalObjectShape(cx, tm, globalObj))
uint32 globalShape = -1;
SlotList* globalSlots = NULL;
if (!js_CheckGlobalObjectShape(cx, tm, globalObj, &globalShape, &globalSlots))
js_FlushJITCache(cx);
jsbytecode* pc = cx->fp->regs->pc;
@@ -4107,9 +4145,9 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount)
return false;
}
Fragment* f = getLoop(tm, pc);
Fragment* f = getLoop(tm, pc, globalShape);
if (!f)
f = getAnchor(tm, pc);
f = getAnchor(tm, pc, globalShape);
if (!f) {
js_FlushJITCache(cx);
@@ -4123,7 +4161,7 @@ monitor_loop:
if (oracle.hit(pc) >= HOTLOOP) {
/* We can give RecordTree the root peer. If that peer is already taken, it will
walk the peer list and find us a free slot or allocate a new tree if needed. */
return js_RecordTree(cx, tm, f->first, NULL);
return js_RecordTree(cx, tm, f->first, NULL, globalShape, globalSlots);
}
/* Threshold not reached yet. */
return false;
@@ -4262,10 +4300,10 @@ TraceRecorder::monitorRecording(JSContext* cx, TraceRecorder* tr, JSOp op)
/* If used on a loop trace, blacklists the root peer instead of the given fragment. */
void
js_BlacklistPC(JSTraceMonitor* tm, Fragment* frag)
js_BlacklistPC(JSTraceMonitor* tm, Fragment* frag, uint32 globalShape)
{
if (frag->kind == LoopTrace)
frag = getLoop(tm, frag->ip);
frag = getLoop(tm, frag->ip, globalShape);
oracle.blacklist(frag->ip);
}
@@ -4290,11 +4328,12 @@ js_AbortRecording(JSContext* cx, const char* reason)
return;
}
JS_ASSERT(!f->vmprivate);
js_BlacklistPC(tm, f);
uint32 globalShape = tm->recorder->getTreeInfo()->globalShape;
js_BlacklistPC(tm, f, globalShape);
Fragment* outer = tm->recorder->getOuterToBlacklist();
/* Give outer two chances to stabilize before we start blacklisting. */
if (outer != NULL && outer->recordAttempts >= 2)
js_BlacklistPC(tm, outer);
js_BlacklistPC(tm, outer, globalShape);
/*
* If js_DeleteRecorder flushed the code cache, we can't rely on f any more.
@@ -4361,7 +4400,7 @@ js_InitJIT(JSTraceMonitor *tm)
}
#endif
if (!tm->fragmento) {
JS_ASSERT(!tm->globalSlots && !tm->reservedDoublePool);
JS_ASSERT(!tm->reservedDoublePool);
Fragmento* fragmento = new (&gc) Fragmento(core, 24);
verbose_only(fragmento->labels = new (&gc) LabelMap(core, NULL);)
tm->fragmento = fragmento;
@@ -4369,7 +4408,11 @@ js_InitJIT(JSTraceMonitor *tm)
#ifdef DEBUG
tm->lirbuf->names = new (&gc) LirNameMap(&gc, NULL, tm->fragmento->labels);
#endif
tm->globalSlots = new (&gc) SlotList();
for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
tm->globalStates[i].globalShape = -1;
JS_ASSERT(!tm->globalStates[i].globalSlots);
tm->globalStates[i].globalSlots = new (&gc) SlotList();
}
tm->reservedDoublePoolPtr = tm->reservedDoublePool = new jsval[MAX_NATIVE_STACK_SLOTS];
memset(tm->vmfragments, 0, sizeof(tm->vmfragments));
}
@@ -4403,7 +4446,7 @@ js_FinishJIT(JSTraceMonitor *tm)
}
#endif
if (tm->fragmento != NULL) {
JS_ASSERT(tm->globalSlots && tm->reservedDoublePool);
JS_ASSERT(tm->reservedDoublePool);
verbose_only(delete tm->fragmento->labels;)
#ifdef DEBUG
delete tm->lirbuf->names;
@@ -4413,8 +4456,10 @@ js_FinishJIT(JSTraceMonitor *tm)
tm->lirbuf = NULL;
delete tm->fragmento;
tm->fragmento = NULL;
delete tm->globalSlots;
tm->globalSlots = NULL;
for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
JS_ASSERT(tm->globalStates[i].globalSlots);
delete tm->globalStates[i].globalSlots;
}
delete[] tm->reservedDoublePool;
tm->reservedDoublePool = tm->reservedDoublePoolPtr = NULL;
}
@@ -4480,10 +4525,10 @@ js_FlushJITCache(JSContext* cx)
#endif
tm->lirbuf->rewind();
memset(tm->vmfragments, 0, sizeof(tm->vmfragments));
}
if (cx->fp) {
tm->globalShape = OBJ_SHAPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain));
tm->globalSlots->clear();
for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
tm->globalStates[i].globalShape = -1;
tm->globalStates[i].globalSlots->clear();
}
}
oracle.clearHitCounts();
}
@@ -5787,7 +5832,7 @@ TraceRecorder::guardElemOp(JSObject* obj, LIns* obj_ins, jsid id, size_t op_offs
return false;
uint32 shape = OBJ_SHAPE(obj);
if (JSID_IS_ATOM(id) && shape == traceMonitor->globalShape)
if (JSID_IS_ATOM(id) && shape == treeInfo->globalShape)
ABORT_TRACE("elem op probably aliases global");
JSObject* pobj;
@@ -9088,23 +9133,23 @@ TraceRecorder::record_JSOP_HOLE()
#ifdef JS_JIT_SPEW
/* Prints information about entry typemaps and unstable exits for all peers at a PC */
void
js_DumpPeerStability(JSTraceMonitor* tm, const void* ip)
js_DumpPeerStability(JSTraceMonitor* tm, const void* ip, uint32 globalShape)
{
Fragment* f;
TreeInfo* ti;
bool looped = false;
unsigned length = 0;
for (f = getLoop(tm, ip); f != NULL; f = f->peer) {
for (f = getLoop(tm, ip, globalShape); f != NULL; f = f->peer) {
if (!f->vmprivate)
continue;
printf("fragment %p:\nENTRY: ", f);
ti = (TreeInfo*)f->vmprivate;
if (looped)
JS_ASSERT(ti->stackSlots == length);
for (unsigned i = 0; i < ti->stackSlots; i++)
JS_ASSERT(ti->nStackTypes == length);
for (unsigned i = 0; i < ti->nStackTypes; i++)
printf("S%d ", ti->stackTypeMap()[i]);
for (unsigned i = 0; i < ti->globalSlots(); i++)
for (unsigned i = 0; i < ti->nGlobalTypes(); i++)
printf("G%d ", ti->globalTypeMap()[i]);
printf("\n");
UnstableExit* uexit = ti->unstableExits;
@@ -9118,7 +9163,7 @@ js_DumpPeerStability(JSTraceMonitor* tm, const void* ip)
printf("\n");
uexit = uexit->next;
}
length = ti->stackSlots;
length = ti->nStackTypes;
looped = true;
}
}

View file

@@ -274,22 +274,35 @@ public:
ptrdiff_t nativeStackBase;
unsigned maxCallDepth;
TypeMap typeMap;
unsigned stackSlots;
unsigned nStackTypes;
uint32 globalShape;
SlotList* globalSlots;
Queue<nanojit::Fragment*> dependentTrees;
unsigned branchCount;
Queue<VMSideExit*> sideExits;
UnstableExit* unstableExits;
TreeInfo(nanojit::Fragment* _fragment) : unstableExits(NULL) {
fragment = _fragment;
}
TreeInfo(nanojit::Fragment* _fragment,
uint32 _globalShape,
SlotList* _globalSlots)
: fragment(_fragment),
script(NULL),
maxNativeStackSlots(0),
nativeStackBase(0),
maxCallDepth(0),
nStackTypes(0),
globalShape(_globalShape),
globalSlots(_globalSlots),
branchCount(0),
unstableExits(NULL)
{}
~TreeInfo();
inline unsigned globalSlots() {
return typeMap.length() - stackSlots;
inline unsigned nGlobalTypes() {
return typeMap.length() - nStackTypes;
}
inline uint8* globalTypeMap() {
return typeMap.data() + stackSlots;
return typeMap.data() + nStackTypes;
}
inline uint8* stackTypeMap() {
return typeMap.data();