diff --git a/config/autoconf.mk.in b/config/autoconf.mk.in index f46c76edcc8..51f26fba0bf 100644 --- a/config/autoconf.mk.in +++ b/config/autoconf.mk.in @@ -297,6 +297,7 @@ MOZ_OPTIMIZE_LDFLAGS = @MOZ_OPTIMIZE_LDFLAGS@ MOZ_OPTIMIZE_SIZE_TWEAK = @MOZ_OPTIMIZE_SIZE_TWEAK@ MOZ_RTTI_FLAGS_ON = @_MOZ_RTTI_FLAGS_ON@ +MOZ_EXCEPTIONS_FLAGS_ON = @_MOZ_EXCEPTIONS_FLAGS_ON@ MOZ_PROFILE_GUIDED_OPTIMIZE_DISABLE = @MOZ_PROFILE_GUIDED_OPTIMIZE_DISABLE@ PROFILE_GEN_CFLAGS = @PROFILE_GEN_CFLAGS@ diff --git a/configure.in b/configure.in index 81a14fd3c90..f743c752cca 100644 --- a/configure.in +++ b/configure.in @@ -7330,6 +7330,8 @@ else _MOZ_EXCEPTIONS_FLAGS=$_MOZ_EXCEPTIONS_FLAGS_OFF fi +AC_SUBST(_MOZ_EXCEPTIONS_FLAGS_ON) + # Irix & OSF native compilers do not like exception declarations # when exceptions are disabled if test -n "$MIPSPRO_CXX" -o -n "$COMPAQ_CXX" -o -n "$VACPP"; then diff --git a/js/src/Makefile.in b/js/src/Makefile.in index 00bf8509ca0..5bba9ce489b 100644 --- a/js/src/Makefile.in +++ b/js/src/Makefile.in @@ -144,6 +144,7 @@ CPPSRCS = \ jsscope.cpp \ jsscript.cpp \ jsstr.cpp \ + jstask.cpp \ jsutil.cpp \ jsxdrapi.cpp \ jsxml.cpp \ @@ -200,6 +201,7 @@ INSTALLED_HEADERS = \ jsscript.h \ jsstaticcheck.h \ jsstr.h \ + jstask.h \ jstracer.h \ jstypes.h \ jsutil.h \ diff --git a/js/src/config/autoconf.mk.in b/js/src/config/autoconf.mk.in index 0ff86b97013..9f5f9cfed8e 100644 --- a/js/src/config/autoconf.mk.in +++ b/js/src/config/autoconf.mk.in @@ -162,6 +162,7 @@ MOZ_OPTIMIZE_LDFLAGS = @MOZ_OPTIMIZE_LDFLAGS@ MOZ_OPTIMIZE_SIZE_TWEAK = @MOZ_OPTIMIZE_SIZE_TWEAK@ MOZ_RTTI_FLAGS_ON = @_MOZ_RTTI_FLAGS_ON@ +MOZ_EXCEPTIONS_FLAGS_ON = @_MOZ_EXCEPTIONS_FLAGS_ON@ MOZ_PROFILE_GUIDED_OPTIMIZE_DISABLE = @MOZ_PROFILE_GUIDED_OPTIMIZE_DISABLE@ PROFILE_GEN_CFLAGS = @PROFILE_GEN_CFLAGS@ diff --git a/js/src/configure.in b/js/src/configure.in index dc68057bd1b..73c0e03ea51 100644 --- a/js/src/configure.in +++ b/js/src/configure.in @@ -4742,6 +4742,8 @@ else _MOZ_EXCEPTIONS_FLAGS=$_MOZ_EXCEPTIONS_FLAGS_OFF fi +AC_SUBST(_MOZ_EXCEPTIONS_FLAGS_ON) + # Irix & OSF native compilers do not like exception declarations # when exceptions are disabled if test -n "$MIPSPRO_CXX" -o -n "$COMPAQ_CXX" -o -n "$VACPP"; then diff --git a/js/src/jsapi.cpp b/js/src/jsapi.cpp index 93d3a2e542a..dd3ab2e8355 100644 --- a/js/src/jsapi.cpp +++ b/js/src/jsapi.cpp @@ -79,6 +79,7 @@ #include "jsscope.h" #include "jsscript.h" #include "jsstr.h" +#include "jstask.h" #include "jstracer.h" #include "jsdbgapi.h" #include "prmjtime.h" @@ -448,7 +449,7 @@ JS_AddArgumentFormatter(JSContext *cx, const char *format, goto out; mpp = &map->next; } - map = (JSArgumentFormatMap *) JS_malloc(cx, sizeof *map); + map = (JSArgumentFormatMap *) cx->malloc(sizeof *map); if (!map) return JS_FALSE; map->format = format; @@ -471,7 +472,7 @@ JS_RemoveArgumentFormatter(JSContext *cx, const char *format) while ((map = *mpp) != NULL) { if (map->length == length && !strcmp(map->format, format)) { *mpp = map->next; - JS_free(cx, map); + cx->free(map); return; } mpp = &map->next; @@ -773,7 +774,7 @@ JS_NewRuntime(uint32 maxbytes) } #endif /* DEBUG */ - rt = (JSRuntime *) malloc(sizeof(JSRuntime)); + rt = (JSRuntime *) js_malloc(sizeof(JSRuntime)); if (!rt) return NULL; @@ -817,6 +818,9 @@ JS_NewRuntime(uint32 maxbytes) rt->debuggerLock = JS_NEW_LOCK(); if (!rt->debuggerLock) goto bad; + rt->deallocatorThread = new JSBackgroundThread(); + if (!rt->deallocatorThread || !rt->deallocatorThread->init()) + goto bad; #endif if (!js_InitPropertyTree(rt)) goto bad; @@ -886,9 +890,13 
@@ JS_DestroyRuntime(JSRuntime *rt) JS_DESTROY_CONDVAR(rt->titleSharingDone); if (rt->debuggerLock) JS_DESTROY_LOCK(rt->debuggerLock); + if (rt->deallocatorThread) { + rt->deallocatorThread->cancel(); + delete rt->deallocatorThread; + } #endif js_FinishPropertyTree(rt); - free(rt); + js_free(rt); } JS_PUBLIC_API(void) @@ -1328,7 +1336,7 @@ JS_InitStandardClasses(JSContext *cx, JSObject *obj) /* Define a top-level property 'undefined' with the undefined value. */ atom = cx->runtime->atomState.typeAtoms[JSTYPE_VOID]; if (!OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), JSVAL_VOID, - JS_PropertyStub, JS_PropertyStub, JSPROP_PERMANENT, + JS_PropertyStub, JS_PropertyStub, JSPROP_PERMANENT, NULL)) { return JS_FALSE; } @@ -1535,7 +1543,7 @@ JS_ResolveStandardClass(JSContext *cx, JSObject *obj, jsval id, if (idstr == ATOM_TO_STRING(atom)) { *resolved = JS_TRUE; return OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), JSVAL_VOID, - JS_PropertyStub, JS_PropertyStub, + JS_PropertyStub, JS_PropertyStub, JSPROP_PERMANENT, NULL); } @@ -1630,7 +1638,7 @@ JS_EnumerateStandardClasses(JSContext *cx, JSObject *obj) atom = rt->atomState.typeAtoms[JSTYPE_VOID]; if (!AlreadyHasOwnProperty(cx, obj, atom) && !OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), JSVAL_VOID, - JS_PropertyStub, JS_PropertyStub, JSPROP_PERMANENT, + JS_PropertyStub, JS_PropertyStub, JSPROP_PERMANENT, NULL)) { return JS_FALSE; } @@ -1653,7 +1661,7 @@ NewIdArray(JSContext *cx, jsint length) JSIdArray *ida; ida = (JSIdArray *) - JS_malloc(cx, offsetof(JSIdArray, vector) + length * sizeof(jsval)); + cx->malloc(offsetof(JSIdArray, vector) + length * sizeof(jsval)); if (ida) ida->length = length; return ida; @@ -1831,41 +1839,19 @@ JS_ComputeThis(JSContext *cx, jsval *vp) JS_PUBLIC_API(void *) JS_malloc(JSContext *cx, size_t nbytes) { - void *p; - - JS_ASSERT(nbytes != 0); - if (nbytes == 0) - nbytes = 1; - - p = malloc(nbytes); - if (!p) { - JS_ReportOutOfMemory(cx); - return NULL; - } - cx->updateMallocCounter(nbytes); - - return p; + return cx->malloc(nbytes); } JS_PUBLIC_API(void *) JS_realloc(JSContext *cx, void *p, size_t nbytes) { - void *orig = p; - p = realloc(p, nbytes); - if (!p) { - JS_ReportOutOfMemory(cx); - return NULL; - } - if (!orig) - cx->updateMallocCounter(nbytes); - return p; + return cx->realloc(p, nbytes); } JS_PUBLIC_API(void) JS_free(JSContext *cx, void *p) { - if (p) - free(p); + return cx->free(p); } JS_PUBLIC_API(char *) @@ -1875,7 +1861,7 @@ JS_strdup(JSContext *cx, const char *s) void *p; n = strlen(s) + 1; - p = JS_malloc(cx, n); + p = cx->malloc(n); if (!p) return NULL; return (char *)memcpy(p, s, n); @@ -2260,7 +2246,7 @@ DumpNotify(JSTracer *trc, void *thing, uint32 kind) edgeNameSize = strlen(edgeName) + 1; node = (JSHeapDumpNode *) - JS_malloc(cx, offsetof(JSHeapDumpNode, edgeName) + edgeNameSize); + cx->malloc(offsetof(JSHeapDumpNode, edgeName) + edgeNameSize); if (!node) { dtrc->ok = JS_FALSE; return; @@ -2412,7 +2398,7 @@ JS_DumpHeap(JSContext *cx, FILE *fp, void* startThing, uint32 startKind, for (;;) { next = node->next; parent = node->parent; - JS_free(cx, node); + cx->free(node); node = next; if (node) break; @@ -2679,7 +2665,7 @@ JS_SetScriptStackQuota(JSContext *cx, size_t quota) JS_PUBLIC_API(void) JS_DestroyIdArray(JSContext *cx, JSIdArray *ida) { - JS_free(cx, ida); + cx->free(ida); } JS_PUBLIC_API(JSBool) @@ -3001,7 +2987,7 @@ DefinePropertyById(JSContext *cx, JSObject *obj, jsid id, jsval value, attrs, flags, tinyid, NULL); } return OBJ_DEFINE_PROPERTY(cx, obj, id, value, getter, setter, attrs, 
- NULL); + NULL); } static JSBool @@ -3720,7 +3706,7 @@ JS_HasUCProperty(JSContext *cx, JSObject *obj, JSProperty *prop; CHECK_REQUEST(cx); - ok = LookupUCProperty(cx, obj, name, namelen, + ok = LookupUCProperty(cx, obj, name, namelen, JSRESOLVE_QUALIFIED | JSRESOLVE_DETECTING, &obj2, &prop); if (ok) { @@ -4072,13 +4058,13 @@ prop_iter_trace(JSTracer *trc, JSObject *obj) /* Native case: just mark the next property to visit. */ sprop = (JSScopeProperty *) JSVAL_TO_PRIVATE(v); if (sprop) - TRACE_SCOPE_PROPERTY(trc, sprop); + sprop->trace(trc); } else { /* Non-native case: mark each id in the JSIdArray private. */ ida = (JSIdArray *) JSVAL_TO_PRIVATE(v); for (i = 0, n = ida->length; i < n; i++) { id = ida->vector[i]; - TRACE_ID(trc, id); + js_TraceId(trc, id); } } } @@ -4644,7 +4630,7 @@ JS_CompileScript(JSContext *cx, JSObject *obj, if (!chars) return NULL; script = JS_CompileUCScript(cx, obj, chars, length, filename, lineno); - JS_free(cx, chars); + cx->free(chars); return script; } @@ -4663,7 +4649,7 @@ JS_CompileScriptForPrincipals(JSContext *cx, JSObject *obj, return NULL; script = JS_CompileUCScriptForPrincipals(cx, obj, principals, chars, length, filename, lineno); - JS_free(cx, chars); + cx->free(chars); return script; } @@ -4748,7 +4734,7 @@ JS_BufferIsCompilableUnit(JSContext *cx, JSObject *obj, JS_SetErrorReporter(cx, older); } } - JS_free(cx, chars); + cx->free(chars); JS_RestoreExceptionState(cx, exnState); return result; } @@ -4857,7 +4843,7 @@ JS_CompileFunction(JSContext *cx, JSObject *obj, const char *name, return NULL; fun = JS_CompileUCFunction(cx, obj, name, nargs, argnames, chars, length, filename, lineno); - JS_free(cx, chars); + cx->free(chars); return fun; } @@ -4878,7 +4864,7 @@ JS_CompileFunctionForPrincipals(JSContext *cx, JSObject *obj, fun = JS_CompileUCFunctionForPrincipals(cx, obj, principals, name, nargs, argnames, chars, length, filename, lineno); - JS_free(cx, chars); + cx->free(chars); return fun; } @@ -5088,7 +5074,7 @@ JS_EvaluateScript(JSContext *cx, JSObject *obj, if (!chars) return JS_FALSE; ok = JS_EvaluateUCScript(cx, obj, chars, length, filename, lineno, rval); - JS_free(cx, chars); + cx->free(chars); return ok; } @@ -5110,7 +5096,7 @@ JS_EvaluateScriptForPrincipals(JSContext *cx, JSObject *obj, return JS_FALSE; ok = JS_EvaluateUCScriptForPrincipals(cx, obj, principals, chars, length, filename, lineno, rval); - JS_free(cx, chars); + cx->free(chars); return ok; } @@ -5197,7 +5183,7 @@ JS_SetOperationCallback(JSContext *cx, JSOperationCallback callback) { #ifdef JS_THREADSAFE JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread)); -#endif +#endif JSOperationCallback old = cx->operationCallback; cx->operationCallback = callback; return old; @@ -5319,13 +5305,13 @@ JS_NewString(JSContext *cx, char *bytes, size_t nbytes) /* Free chars (but not bytes, which caller frees on error) if we fail. */ str = js_NewString(cx, chars, length); if (!str) { - JS_free(cx, chars); + cx->free(chars); return NULL; } /* Hand off bytes to the deflated string cache, if possible. 
*/ if (!js_SetStringBytes(cx, str, bytes, nbytes)) - JS_free(cx, bytes); + cx->free(bytes); return str; } @@ -5341,7 +5327,7 @@ JS_NewStringCopyN(JSContext *cx, const char *s, size_t n) return NULL; str = js_NewString(cx, js, n); if (!str) - JS_free(cx, js); + cx->free(js); return str; } @@ -5361,7 +5347,7 @@ JS_NewStringCopyZ(JSContext *cx, const char *s) return NULL; str = js_NewString(cx, js, n); if (!str) - JS_free(cx, js); + cx->free(js); return str; } @@ -5449,7 +5435,7 @@ JS_GetStringChars(JSString *str) if (str->isDependent()) { n = str->dependentLength(); size = (n + 1) * sizeof(jschar); - s = (jschar *) malloc(size); + s = (jschar *) js_malloc(size); if (s) { memcpy(s, str->dependentChars(), n * sizeof *s); s[n] = 0; @@ -5727,7 +5713,7 @@ JS_NewRegExpObject(JSContext *cx, char *bytes, size_t length, uintN flags) if (!chars) return NULL; obj = js_NewRegExpObject(cx, NULL, chars, length, flags); - JS_free(cx, chars); + cx->free(chars); return obj; } @@ -5857,7 +5843,7 @@ JS_SaveExceptionState(JSContext *cx) JSExceptionState *state; CHECK_REQUEST(cx); - state = (JSExceptionState *) JS_malloc(cx, sizeof(JSExceptionState)); + state = (JSExceptionState *) cx->malloc(sizeof(JSExceptionState)); if (state) { state->throwing = JS_GetPendingException(cx, &state->exception); if (state->throwing && JSVAL_IS_GCTHING(state->exception)) @@ -5886,7 +5872,7 @@ JS_DropExceptionState(JSContext *cx, JSExceptionState *state) if (state) { if (state->throwing && JSVAL_IS_GCTHING(state->exception)) JS_RemoveRoot(cx, &state->exception); - JS_free(cx, state); + cx->free(state); } } diff --git a/js/src/jsapi.h b/js/src/jsapi.h index d03b13d83a6..2a6a422331d 100644 --- a/js/src/jsapi.h +++ b/js/src/jsapi.h @@ -54,35 +54,122 @@ JS_BEGIN_EXTERN_C /* * Type tags stored in the low bits of a jsval. */ -#define JSVAL_OBJECT 0x0 /* untagged reference to object */ -#define JSVAL_INT 0x1 /* tagged 31-bit integer value */ -#define JSVAL_DOUBLE 0x2 /* tagged reference to double */ -#define JSVAL_STRING 0x4 /* tagged reference to string */ -#define JSVAL_BOOLEAN 0x6 /* tagged boolean value */ +typedef enum jsvaltag { + JSVAL_OBJECT = 0x0, /* untagged reference to object */ + JSVAL_INT = 0x1, /* tagged 31-bit integer value */ + JSVAL_DOUBLE = 0x2, /* tagged reference to double */ + JSVAL_STRING = 0x4, /* tagged reference to string */ + JSVAL_BOOLEAN = 0x6 /* tagged boolean value */ +} jsvaltag; + +#define JSVAL_OBJECT ((jsvaltag)0x0) +#define JSVAL_INT ((jsvaltag)0x1) +#define JSVAL_DOUBLE ((jsvaltag)0x2) +#define JSVAL_STRING ((jsvaltag)0x4) +#define JSVAL_BOOLEAN ((jsvaltag)0x6) /* Type tag bitfield length and derived macros. */ #define JSVAL_TAGBITS 3 #define JSVAL_TAGMASK JS_BITMASK(JSVAL_TAGBITS) -#define JSVAL_TAG(v) ((v) & JSVAL_TAGMASK) -#define JSVAL_SETTAG(v,t) ((v) | (t)) -#define JSVAL_CLRTAG(v) ((v) & ~(jsval)JSVAL_TAGMASK) #define JSVAL_ALIGN JS_BIT(JSVAL_TAGBITS) +/* Not a function, because we have static asserts that use it */ +#define JSVAL_TAG(v) ((jsvaltag)((v) & JSVAL_TAGMASK)) + +/* Not a function, because we have static asserts that use it */ +#define JSVAL_SETTAG(v, t) ((v) | (t)) + +static JS_ALWAYS_INLINE jsval +JSVAL_CLRTAG(jsval v) +{ + return v & ~(jsval)JSVAL_TAGMASK; +} + +/* + * Well-known JS values. The extern'd variables are initialized when the + * first JSContext is created by JS_NewContext (see below). 
+ */ +#define JSVAL_NULL ((jsval) 0) +#define JSVAL_ZERO INT_TO_JSVAL(0) +#define JSVAL_ONE INT_TO_JSVAL(1) +#define JSVAL_FALSE PSEUDO_BOOLEAN_TO_JSVAL(JS_FALSE) +#define JSVAL_TRUE PSEUDO_BOOLEAN_TO_JSVAL(JS_TRUE) +#define JSVAL_VOID PSEUDO_BOOLEAN_TO_JSVAL(2) + +/* + * A pseudo-boolean is a 29-bit (for 32-bit jsval) or 61-bit (for 64-bit jsval) + * value other than 0 or 1 encoded as a jsval whose tag is JSVAL_BOOLEAN. + * + * JSVAL_VOID happens to be defined as a jsval encoding a pseudo-boolean, but + * embedders MUST NOT rely on this. All other possible pseudo-boolean values + * are implementation-reserved and MUST NOT be constructed by any embedding of + * SpiderMonkey. + */ +#define JSVAL_TO_PSEUDO_BOOLEAN(v) ((JSBool) ((v) >> JSVAL_TAGBITS)) +#define PSEUDO_BOOLEAN_TO_JSVAL(b) \ + JSVAL_SETTAG((jsval) (b) << JSVAL_TAGBITS, JSVAL_BOOLEAN) + /* Predicates for type testing. */ -#define JSVAL_IS_OBJECT(v) (JSVAL_TAG(v) == JSVAL_OBJECT) -#define JSVAL_IS_NUMBER(v) (JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v)) -#define JSVAL_IS_INT(v) ((v) & JSVAL_INT) -#define JSVAL_IS_DOUBLE(v) (JSVAL_TAG(v) == JSVAL_DOUBLE) -#define JSVAL_IS_STRING(v) (JSVAL_TAG(v) == JSVAL_STRING) -#define JSVAL_IS_BOOLEAN(v) (((v) & ~((jsval)1 << JSVAL_TAGBITS)) == \ - JSVAL_BOOLEAN) -#define JSVAL_IS_NULL(v) ((v) == JSVAL_NULL) -#define JSVAL_IS_VOID(v) ((v) == JSVAL_VOID) -#define JSVAL_IS_PRIMITIVE(v) (!JSVAL_IS_OBJECT(v) || JSVAL_IS_NULL(v)) +static JS_ALWAYS_INLINE JSBool +JSVAL_IS_OBJECT(jsval v) +{ + return JSVAL_TAG(v) == JSVAL_OBJECT; +} + +static JS_ALWAYS_INLINE JSBool +JSVAL_IS_INT(jsval v) +{ + return v & JSVAL_INT; +} + +static JS_ALWAYS_INLINE JSBool +JSVAL_IS_DOUBLE(jsval v) +{ + return JSVAL_TAG(v) == JSVAL_DOUBLE; +} + +static JS_ALWAYS_INLINE JSBool +JSVAL_IS_NUMBER(jsval v) +{ + return JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v); +} + +static JS_ALWAYS_INLINE JSBool +JSVAL_IS_STRING(jsval v) +{ + return JSVAL_TAG(v) == JSVAL_STRING; +} + +static JS_ALWAYS_INLINE JSBool +JSVAL_IS_BOOLEAN(jsval v) +{ + return (v & ~((jsval)1 << JSVAL_TAGBITS)) == JSVAL_BOOLEAN; +} + +static JS_ALWAYS_INLINE JSBool +JSVAL_IS_NULL(jsval v) +{ + return v == JSVAL_NULL; +} + +static JS_ALWAYS_INLINE JSBool +JSVAL_IS_VOID(jsval v) +{ + return v == JSVAL_VOID; +} + +static JS_ALWAYS_INLINE JSBool +JSVAL_IS_PRIMITIVE(jsval v) +{ + return !JSVAL_IS_OBJECT(v) || JSVAL_IS_NULL(v); +} /* Objects, strings, and doubles are GC'ed. 
*/ -#define JSVAL_IS_GCTHING(v) (!((v) & JSVAL_INT) && \ - JSVAL_TAG(v) != JSVAL_BOOLEAN) +static JS_ALWAYS_INLINE JSBool +JSVAL_IS_GCTHING(jsval v) +{ + return !(v & JSVAL_INT) && JSVAL_TAG(v) != JSVAL_BOOLEAN; +} static JS_ALWAYS_INLINE void * JSVAL_TO_GCTHING(jsval v) @@ -145,36 +232,18 @@ STRING_TO_JSVAL(JSString *str) #define JSVAL_INT_POW2(n) ((jsval)1 << (n)) #define JSVAL_INT_MIN (-JSVAL_INT_POW2(30)) #define JSVAL_INT_MAX (JSVAL_INT_POW2(30) - 1) + +/* Not a function, because we have static asserts that use it */ #define INT_FITS_IN_JSVAL(i) ((jsuint)(i) - (jsuint)JSVAL_INT_MIN <= \ (jsuint)(JSVAL_INT_MAX - JSVAL_INT_MIN)) +/* Not a function, because we have static asserts that use it */ +/* FIXME: Bug 506721, since that means we can't assert JSVAL_IS_INT(v) */ #define JSVAL_TO_INT(v) ((jsint)(v) >> 1) + +/* Not a function, because we have static asserts that use it */ +/* FIXME: Bug 506721, since that means we can't assert INT_FITS_IN_JSVAL(i) */ #define INT_TO_JSVAL(i) (((jsval)(i) << 1) | JSVAL_INT) -/* - * A pseudo-boolean is a 29-bit (for 32-bit jsval) or 61-bit (for 64-bit jsval) - * value other than 0 or 1 encoded as a jsval whose tag is JSVAL_BOOLEAN. - * - * JSVAL_VOID happens to be defined as a jsval encoding a pseudo-boolean, but - * embedders MUST NOT rely on this. All other possible pseudo-boolean values - * are implementation-reserved and MUST NOT be constructed by any embedding of - * SpiderMonkey. - */ -#define JSVAL_TO_PSEUDO_BOOLEAN(v) ((JSBool) ((v) >> JSVAL_TAGBITS)) -#define PSEUDO_BOOLEAN_TO_JSVAL(b) \ - JSVAL_SETTAG((jsval) (b) << JSVAL_TAGBITS, JSVAL_BOOLEAN) - -/* - * Well-known JS values. The extern'd variables are initialized when the - * first JSContext is created by JS_NewContext (see below). - */ -#define JSVAL_NULL ((jsval) 0) -#define JSVAL_ZERO INT_TO_JSVAL(0) -#define JSVAL_ONE INT_TO_JSVAL(1) -#define JSVAL_FALSE PSEUDO_BOOLEAN_TO_JSVAL(JS_FALSE) -#define JSVAL_TRUE PSEUDO_BOOLEAN_TO_JSVAL(JS_TRUE) -#define JSVAL_VOID PSEUDO_BOOLEAN_TO_JSVAL(2) - - /* Convert between boolean and jsval, asserting that inputs are valid. */ static JS_ALWAYS_INLINE JSBool JSVAL_TO_BOOLEAN(jsval v) @@ -2229,14 +2298,14 @@ JS_CallFunctionValue(JSContext *cx, JSObject *obj, jsval fval, uintN argc, * These functions allow setting an operation callback that will be called * from the thread the context is associated with some time after any thread * triggered the callback using JS_TriggerOperationCallback(cx). - * + * * In a threadsafe build the engine internally triggers operation callbacks * under certain circumstances (i.e. GC and title transfer) to force the - * context to yield its current request, which the engine always + * context to yield its current request, which the engine always * automatically does immediately prior to calling the callback function. * The embedding should thus not rely on callbacks being triggered through * the external API only. - * + * * Important note: Additional callbacks can occur inside the callback handler * if it re-enters the JS engine. The embedding must ensure that the callback * is disconnected before attempting such re-entry. 
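/*
 * Illustrative sketch, not part of the patch: how the inline predicates that
 * jsapi.h now defines interact with the jsval tag scheme. Integers claim the
 * low bit, while object, double, string, and boolean values use the full
 * three-bit tag. The concrete constants in the comments assume a 32-bit
 * jsval; the names used are exactly the macros and inline functions declared
 * above, and a plain assert stands in for whatever check a caller would use.
 */
#include "jsapi.h"
#include <assert.h>

static void
jsval_tagging_example(void)
{
    jsval v = INT_TO_JSVAL(42);     /* (42 << 1) | JSVAL_INT == 0x55 */
    assert(JSVAL_IS_INT(v));        /* low bit set, so this is a tagged int */
    assert(JSVAL_TO_INT(v) == 42);  /* arithmetic right shift undoes the tag */
    assert(!JSVAL_IS_GCTHING(v));   /* ints and booleans are never GC things */

    jsval b = JSVAL_TRUE;           /* pseudo-boolean: (JS_TRUE << JSVAL_TAGBITS) | JSVAL_BOOLEAN */
    assert(JSVAL_IS_BOOLEAN(b));
    assert(JSVAL_TO_BOOLEAN(b) == JS_TRUE);
}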
diff --git a/js/src/jsarena.cpp b/js/src/jsarena.cpp index 254d6801e1d..68407af0a63 100644 --- a/js/src/jsarena.cpp +++ b/js/src/jsarena.cpp @@ -160,12 +160,12 @@ JS_ArenaAllocate(JSArenaPool *pool, size_t nb) if (pool->quotap) { if (gross > *pool->quotap) return NULL; - b = (JSArena *) malloc(gross); + b = (JSArena *) js_malloc(gross); if (!b) return NULL; *pool->quotap -= gross; } else { - b = (JSArena *) malloc(gross); + b = (JSArena *) js_malloc(gross); if (!b) return NULL; } @@ -227,12 +227,12 @@ JS_ArenaRealloc(JSArenaPool *pool, void *p, size_t size, size_t incr) growth = gross - (a->limit - (jsuword) a); if (growth > *pool->quotap) return NULL; - a = (JSArena *) realloc(a, gross); + a = (JSArena *) js_realloc(a, gross); if (!a) return NULL; *pool->quotap -= growth; } else { - a = (JSArena *) realloc(a, gross); + a = (JSArena *) js_realloc(a, gross); if (!a) return NULL; } @@ -315,7 +315,7 @@ FreeArenaList(JSArenaPool *pool, JSArena *head) *pool->quotap += a->limit - (jsuword) a; JS_CLEAR_ARENA(a); JS_COUNT_ARENA(pool,--); - free(a); + js_free(a); } while ((a = *ap) != NULL); pool->current = head; @@ -354,7 +354,7 @@ JS_FinishArenaPool(JSArenaPool *pool) JSArenaStats *stats, **statsp; if (pool->stats.name) { - free(pool->stats.name); + js_free(pool->stats.name); pool->stats.name = NULL; } for (statsp = &arena_stats_list; (stats = *statsp) != 0; diff --git a/js/src/jsarray.cpp b/js/src/jsarray.cpp index 2d121e3ddc4..f4b1039efab 100644 --- a/js/src/jsarray.cpp +++ b/js/src/jsarray.cpp @@ -314,7 +314,7 @@ ResizeSlots(JSContext *cx, JSObject *obj, uint32 oldsize, uint32 size) if (size == 0) { if (obj->dslots) { - JS_free(cx, obj->dslots - 1); + cx->free(obj->dslots - 1); obj->dslots = NULL; } return JS_TRUE; @@ -330,7 +330,7 @@ ResizeSlots(JSContext *cx, JSObject *obj, uint32 oldsize, uint32 size) } slots = obj->dslots ? 
     obj->dslots - 1 : NULL;
-    newslots = (jsval *) JS_realloc(cx, slots, (size + 1) * sizeof(jsval));
+    newslots = (jsval *) cx->realloc(slots, (size + 1) * sizeof(jsval));
     if (!newslots)
         return JS_FALSE;
@@ -1099,7 +1099,7 @@ array_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
             if (obj->dslots[i] == JSVAL_HOLE) {
                 if (!ii) {
                     ii = (JSIndexIterState *)
-                         JS_malloc(cx, offsetof(JSIndexIterState, holes) +
+                         cx->malloc(offsetof(JSIndexIterState, holes) +
                                    JS_BITMAP_SIZE(capacity));
                     if (!ii)
                         return JS_FALSE;
@@ -1116,7 +1116,7 @@ array_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
                 break;
             }
             ii = (JSIndexIterState *)
-                 JS_malloc(cx, offsetof(JSIndexIterState, holes));
+                 cx->malloc(offsetof(JSIndexIterState, holes));
             if (!ii)
                 return JS_FALSE;
             ii->hasHoles = JS_FALSE;
@@ -1157,7 +1157,7 @@ array_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
         if (JSVAL_TAG(*statep) != JSVAL_BOOLEAN) {
             JS_ASSERT((*statep & INDEX_ITER_TAG) == INDEX_ITER_TAG);
             ii = (JSIndexIterState *) (*statep & ~INDEX_ITER_TAG);
-            JS_free(cx, ii);
+            cx->free(ii);
         }
         *statep = JSVAL_NULL;
         break;
@@ -1188,7 +1188,7 @@ static void
 array_finalize(JSContext *cx, JSObject *obj)
 {
     if (obj->dslots)
-        JS_free(cx, obj->dslots - 1);
+        cx->free(obj->dslots - 1);
     obj->dslots = NULL;
 }
@@ -1336,7 +1336,7 @@ BufferToString(JSContext *cx, JSTempVector<jschar> &buf, jsval *rval)
     jschar *chars = buf.extractRawBuffer();
     JSString *str = js_NewString(cx, chars, length);
     if (!str) {
-        JS_free(cx, chars);
+        cx->free(chars);
         return JS_FALSE;
     }
     *rval = STRING_TO_JSVAL(str);
@@ -1368,7 +1368,7 @@ array_toSource(JSContext *cx, uintN argc, jsval *vp)
     JSBool ok = JS_TRUE;
     /*
-     * This object will take responsibility for the jschar buffer until the 
+     * This object will take responsibility for the jschar buffer until the
      * buffer is transferred to the returned JSString.
      */
     JSTempVector<jschar> buf(cx);
@@ -1392,7 +1392,7 @@ array_toSource(JSContext *cx, uintN argc, jsval *vp)
         if (!(ok = buf.pushBack(arr, arr + 3)))
             goto done;
         if (sharpchars)
-            JS_free(cx, sharpchars);
+            cx->free(sharpchars);
         goto make_string;
     }
 #endif
@@ -1520,7 +1520,7 @@ array_toString_sub(JSContext *cx, JSObject *obj, JSBool locale,
     }
     /*
-     * This object will take responsibility for the jschar buffer until the 
+     * This object will take responsibility for the jschar buffer until the
      * buffer is transferred to the returned JSString.
      */
     JSTempVector<jschar> buf(cx);
@@ -2151,7 +2151,7 @@ array_sort(JSContext *cx, uintN argc, jsval *vp)
             return JS_FALSE;
         }
 #endif
-    vec = (jsval *) JS_malloc(cx, 2 * (size_t) len * sizeof(jsval));
+    vec = (jsval *) cx->malloc(2 * (size_t) len * sizeof(jsval));
     if (!vec)
         return JS_FALSE;
@@ -2280,8 +2280,8 @@ array_sort(JSContext *cx, uintN argc, jsval *vp)
         } while (i != 0);
         JS_ASSERT(tvr.u.array == vec);
-        vec = (jsval *) JS_realloc(cx, vec,
-                                   4 * (size_t) newlen * sizeof(jsval));
+        vec = (jsval *) cx->realloc(vec,
+                                    4 * (size_t) newlen * sizeof(jsval));
         if (!vec) {
             vec = tvr.u.array;
             ok = JS_FALSE;
@@ -2342,7 +2342,7 @@ array_sort(JSContext *cx, uintN argc, jsval *vp)
   out:
     JS_POP_TEMP_ROOT(cx, &tvr);
-    JS_free(cx, vec);
+    cx->free(vec);
     if (!ok)
         return JS_FALSE;
@@ -3507,7 +3507,7 @@ js_ArrayInfo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
         if (JSVAL_IS_PRIMITIVE(argv[i]) ||
             !OBJ_IS_ARRAY(cx, (array = JSVAL_TO_OBJECT(argv[i])))) {
             fprintf(stderr, "%s: not array\n", bytes);
-            JS_free(cx, bytes);
+            cx->free(bytes);
             continue;
         }
         fprintf(stderr, "%s: %s (len %lu", bytes,
@@ -3519,7 +3519,7 @@ js_ArrayInfo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
                     js_DenseArrayCapacity(array));
         }
         fputs(")\n", stderr);
-        JS_free(cx, bytes);
+        cx->free(bytes);
     }
     return JS_TRUE;
 }
diff --git a/js/src/jsarray.h b/js/src/jsarray.h
index 4244a6017ad..52a365fe3e9 100644
--- a/js/src/jsarray.h
+++ b/js/src/jsarray.h
@@ -207,6 +207,10 @@ JSBool
 js_GetDenseArrayElementValue(JSContext *cx, JSObject *obj, JSProperty *prop,
                              jsval *vp);
+/* Array constructor native. Exposed only so the JIT can know its address. */
+JSBool
+js_Array(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval);
+
 JS_END_EXTERN_C
 #endif /* jsarray_h___ */
diff --git a/js/src/jsatom.cpp b/js/src/jsatom.cpp
index 6a921d9b0b9..8943f22f224 100644
--- a/js/src/jsatom.cpp
+++ b/js/src/jsatom.cpp
@@ -783,7 +783,7 @@ js_Atomize(JSContext *cx, const char *bytes, size_t length, uintN flags)
         str.initFlat(chars, inflatedLength);
     atom = js_AtomizeString(cx, &str, ATOM_TMPSTR | flags);
     if (chars != inflated && str.flatChars())
-        JS_free(cx, chars);
+        cx->free(chars);
     return atom;
 }
diff --git a/js/src/jsbuiltins.cpp b/js/src/jsbuiltins.cpp
index 961e1133127..1f1408bf460 100644
--- a/js/src/jsbuiltins.cpp
+++ b/js/src/jsbuiltins.cpp
@@ -129,7 +129,7 @@ js_BoxInt32(JSContext* cx, int32 i)
     if (!js_NewDoubleInRootedValue(cx, d, &v))
         return JSVAL_ERROR_COOKIE;
     return v;
-} 
+}
 JS_DEFINE_CALLINFO_2(extern, JSVAL, js_BoxInt32, CONTEXT, INT32, 1, 1)
 jsdouble FASTCALL
@@ -242,8 +242,6 @@ JSBool FASTCALL
 js_AddProperty(JSContext* cx, JSObject* obj, JSScopeProperty* sprop)
 {
     JS_ASSERT(OBJ_IS_NATIVE(obj));
-    JS_ASSERT(SPROP_HAS_STUB_SETTER(sprop));
-
     JS_LOCK_OBJ(cx, obj);
     JSScope* scope = OBJ_SCOPE(obj);
diff --git a/js/src/jsbuiltins.h b/js/src/jsbuiltins.h
index baef4f07be6..197cd580d14 100644
--- a/js/src/jsbuiltins.h
+++ b/js/src/jsbuiltins.h
@@ -86,7 +86,7 @@ struct JSTraceableNative {
     const nanojit::CallInfo *builtin;
     const char              *prefix;
     const char              *argtypes;
-    uintN                   flags;  /* JSTNErrType | JSTN_UNBOX_AFTER | JSTN_MORE | 
+    uintN                   flags;  /* JSTNErrType | JSTN_UNBOX_AFTER | JSTN_MORE |
                                        JSTN_CONSTRUCTOR */
 };
@@ -117,7 +117,7 @@ struct JSTraceableNative {
 #endif
 /*
- * Supported types for builtin functions. 
+ * Supported types for builtin functions.
  *
  * Types with -- for the two string fields are not permitted as argument types
  * in JS_DEFINE_TRCINFO.
@@ -165,7 +165,7 @@ struct JSTraceableNative {
  * trace.
If an exception is pending, it is thrown; otherwise, we assume the * builtin had no side effects and retry the current bytecode in the * interpreter. - * + * * So a builtin must not return a value indicating failure after causing side * effects (such as reporting an error), without setting an exception pending. * The operation would be retried, despite the first attempt's observable @@ -187,6 +187,7 @@ struct JSTraceableNative { #define _JS_CTYPE_JSVAL _JS_JSVAL_CTYPE( _JS_PTR, "","v", INFALLIBLE) #define _JS_CTYPE_JSVAL_RETRY _JS_JSVAL_CTYPE( _JS_PTR, --, --, FAIL_COOKIE) #define _JS_CTYPE_JSVAL_FAIL _JS_JSVAL_CTYPE( _JS_PTR, --, --, FAIL_STATUS) +#define _JS_CTYPE_JSID _JS_CTYPE(jsid, _JS_PTR, --, --, INFALLIBLE) #define _JS_CTYPE_BOOL _JS_CTYPE(JSBool, _JS_I32, "","i", INFALLIBLE) #define _JS_CTYPE_BOOL_RETRY _JS_CTYPE(JSBool, _JS_I32, --, --, FAIL_VOID) #define _JS_CTYPE_BOOL_FAIL _JS_CTYPE(JSBool, _JS_I32, --, --, FAIL_STATUS) @@ -414,6 +415,9 @@ js_BooleanOrUndefinedToNumber(JSContext* cx, int32 unboxed); extern JS_FRIEND_API(void) js_SetTraceableNativeFailed(JSContext *cx); +extern jsdouble FASTCALL +js_dmod(jsdouble a, jsdouble b); + #else #define JS_DEFINE_CALLINFO_1(linkage, rt, op, at0, cse, fold) @@ -444,6 +448,10 @@ JS_DECLARE_CALLINFO(js_ArrayCompPush) JS_DECLARE_CALLINFO(js_AllocFlatClosure) JS_DECLARE_CALLINFO(js_PutArguments) +/* Defined in jsfun.cpp. */ +JS_DECLARE_CALLINFO(js_SetCallVar) +JS_DECLARE_CALLINFO(js_SetCallArg) + /* Defined in jsnum.cpp. */ JS_DECLARE_CALLINFO(js_NumberToString) diff --git a/js/src/jscntxt.cpp b/js/src/jscntxt.cpp index 9ce2db55eb7..42b579b363b 100644 --- a/js/src/jscntxt.cpp +++ b/js/src/jscntxt.cpp @@ -142,7 +142,7 @@ static JSThread * NewThread(jsword id) { JS_ASSERT(js_CurrentThreadId() == id); - JSThread *thread = (JSThread *) calloc(1, sizeof(JSThread)); + JSThread *thread = (JSThread *) js_calloc(sizeof(JSThread)); if (!thread) return NULL; JS_INIT_CLIST(&thread->contextList); @@ -158,7 +158,7 @@ DestroyThread(JSThread *thread) JS_ASSERT(JS_CLIST_IS_EMPTY(&thread->contextList)); JS_ASSERT(!thread->titleToShare); FinishThreadData(&thread->data); - free(thread); + js_free(thread); } JSBool @@ -370,7 +370,7 @@ js_NewContext(JSRuntime *rt, size_t stackChunkSize) * runtime list. After that it can be accessed from another thread via * js_ContextIterator. */ - cx = (JSContext *) calloc(1, sizeof *cx); + cx = (JSContext *) js_calloc(sizeof *cx); if (!cx) return NULL; @@ -743,14 +743,14 @@ FreeContext(JSContext *cx) JS_FinishArenaPool(&cx->tempPool); if (cx->lastMessage) - free(cx->lastMessage); + js_free(cx->lastMessage); /* Remove any argument formatters. */ map = cx->argumentFormatMap; while (map) { JSArgumentFormatMap *temp = map; map = map->next; - JS_free(cx, temp); + cx->free(temp); } /* Destroy the busy array table. */ @@ -769,13 +769,13 @@ FreeContext(JSContext *cx) if (lrs) { while ((lrc = lrs->topChunk) != &lrs->firstChunk) { lrs->topChunk = lrc->down; - JS_free(cx, lrc); + cx->free(lrc); } - JS_free(cx, lrs); + cx->free(lrs); } /* Finally, free cx itself. 
*/ - free(cx); + js_free(cx); } JSBool @@ -819,7 +819,7 @@ js_NextActiveContext(JSRuntime *rt, JSContext *cx) return cx; #else return js_ContextIterator(rt, JS_FALSE, &iter); -#endif +#endif } #ifdef JS_THREADSAFE @@ -1013,7 +1013,7 @@ js_EnterLocalRootScope(JSContext *cx) lrs = cx->localRootStack; if (!lrs) { - lrs = (JSLocalRootStack *) JS_malloc(cx, sizeof *lrs); + lrs = (JSLocalRootStack *) cx->malloc(sizeof *lrs); if (!lrs) return JS_FALSE; lrs->scopeMark = JSLRS_NULL_MARK; @@ -1056,7 +1056,7 @@ js_LeaveLocalRootScopeWithResult(JSContext *cx, jsval rval) lrc = lrs->topChunk; JS_ASSERT(lrc != &lrs->firstChunk); lrs->topChunk = lrc->down; - JS_free(cx, lrc); + cx->free(lrc); --n; } @@ -1096,10 +1096,10 @@ js_LeaveLocalRootScopeWithResult(JSContext *cx, jsval rval) */ if (mark == 0) { cx->localRootStack = NULL; - JS_free(cx, lrs); + cx->free(lrs); } else if (m == 0) { lrs->topChunk = lrc->down; - JS_free(cx, lrc); + cx->free(lrc); } } @@ -1158,7 +1158,7 @@ js_ForgetLocalRoot(JSContext *cx, jsval v) JS_ASSERT(n != 0); JS_ASSERT(lrc != &lrs->firstChunk); lrs->topChunk = lrc->down; - JS_free(cx, lrc); + cx->free(lrc); } } @@ -1187,7 +1187,7 @@ js_PushLocalRoot(JSContext *cx, JSLocalRootStack *lrs, jsval v) * After lrs->firstChunk, trying to index at a power-of-two chunk * boundary: need a new chunk. */ - lrc = (JSLocalRootChunk *) JS_malloc(cx, sizeof *lrc); + lrc = (JSLocalRootChunk *) cx->malloc(sizeof *lrc); if (!lrc) return -1; lrc->down = lrs->topChunk; @@ -1380,8 +1380,8 @@ js_ReportErrorVA(JSContext *cx, uintN flags, const char *format, va_list ap) } ReportError(cx, message, &report); - free(message); - JS_free(cx, ucmessage); + js_free(message); + cx->free(ucmessage); return warning; } @@ -1432,7 +1432,7 @@ js_ExpandErrorArguments(JSContext *cx, JSErrorCallback callback, * pointers later. */ reportp->messageArgs = (const jschar **) - JS_malloc(cx, sizeof(jschar *) * (argCount + 1)); + cx->malloc(sizeof(jschar *) * (argCount + 1)); if (!reportp->messageArgs) return JS_FALSE; reportp->messageArgs[argCount] = NULL; @@ -1476,9 +1476,9 @@ js_ExpandErrorArguments(JSContext *cx, JSErrorCallback callback, * is used once and only once in the expansion !!! 
*/ reportp->ucmessage = out = (jschar *) - JS_malloc(cx, (expandedLength + 1) * sizeof(jschar)); + cx->malloc((expandedLength + 1) * sizeof(jschar)); if (!out) { - JS_free (cx, buffer); + cx->free(buffer); goto error; } while (*fmt) { @@ -1498,7 +1498,7 @@ js_ExpandErrorArguments(JSContext *cx, JSErrorCallback callback, } JS_ASSERT(expandedArgs == argCount); *out = 0; - JS_free (cx, buffer); + cx->free(buffer); *messagep = js_DeflateString(cx, reportp->ucmessage, (size_t)(out - reportp->ucmessage)); @@ -1527,7 +1527,7 @@ js_ExpandErrorArguments(JSContext *cx, JSErrorCallback callback, const char *defaultErrorMessage = "No error message available for error number %d"; size_t nbytes = strlen(defaultErrorMessage) + 16; - *messagep = (char *)JS_malloc(cx, nbytes); + *messagep = (char *)cx->malloc(nbytes); if (!*messagep) goto error; JS_snprintf(*messagep, nbytes, defaultErrorMessage, errorNumber); @@ -1540,17 +1540,17 @@ error: if (charArgs) { i = 0; while (reportp->messageArgs[i]) - JS_free(cx, (void *)reportp->messageArgs[i++]); + cx->free((void *)reportp->messageArgs[i++]); } - JS_free(cx, (void *)reportp->messageArgs); + cx->free((void *)reportp->messageArgs); reportp->messageArgs = NULL; } if (reportp->ucmessage) { - JS_free(cx, (void *)reportp->ucmessage); + cx->free((void *)reportp->ucmessage); reportp->ucmessage = NULL; } if (*messagep) { - JS_free(cx, (void *)*messagep); + cx->free((void *)*messagep); *messagep = NULL; } return JS_FALSE; @@ -1581,7 +1581,7 @@ js_ReportErrorNumberVA(JSContext *cx, uintN flags, JSErrorCallback callback, ReportError(cx, message, &report); if (message) - JS_free(cx, message); + cx->free(message); if (report.messageArgs) { /* * js_ExpandErrorArguments owns its messageArgs only if it had to @@ -1590,12 +1590,12 @@ js_ReportErrorNumberVA(JSContext *cx, uintN flags, JSErrorCallback callback, if (charArgs) { int i = 0; while (report.messageArgs[i]) - JS_free(cx, (void *)report.messageArgs[i++]); + cx->free((void *)report.messageArgs[i++]); } - JS_free(cx, (void *)report.messageArgs); + cx->free((void *)report.messageArgs); } if (report.ucmessage) - JS_free(cx, (void *)report.ucmessage); + cx->free((void *)report.ucmessage); return warning; } @@ -1609,7 +1609,7 @@ js_ReportErrorAgain(JSContext *cx, const char *message, JSErrorReport *reportp) return; if (cx->lastMessage) - free(cx->lastMessage); + js_free(cx->lastMessage); cx->lastMessage = JS_strdup(cx, message); if (!cx->lastMessage) return; @@ -1667,7 +1667,7 @@ js_ReportIsNullOrUndefined(JSContext *cx, intN spindex, jsval v, js_null_str, NULL); } - JS_free(cx, bytes); + cx->free(bytes); return ok; } @@ -1690,7 +1690,7 @@ js_ReportMissingArg(JSContext *cx, jsval *vp, uintN arg) JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_MISSING_FUN_ARG, argbuf, bytes ? bytes : ""); - JS_free(cx, bytes); + cx->free(bytes); } JSBool @@ -1709,7 +1709,7 @@ js_ReportValueErrorFlags(JSContext *cx, uintN flags, const uintN errorNumber, ok = JS_ReportErrorFlagsAndNumber(cx, flags, js_GetErrorMessage, NULL, errorNumber, bytes, arg1, arg2); - JS_free(cx, bytes); + cx->free(bytes); return ok; } @@ -1738,10 +1738,10 @@ JSBool js_InvokeOperationCallback(JSContext *cx) { JS_ASSERT(cx->operationCallbackFlag); - + /* * Reset the callback flag first, then yield. If another thread is racing - * us here we will accumulate another callback request which will be + * us here we will accumulate another callback request which will be * serviced at the next opportunity. 
*/ cx->operationCallbackFlag = 0; @@ -1755,7 +1755,7 @@ js_InvokeOperationCallback(JSContext *cx) */ if (cx->runtime->gcIsNeeded) js_GC(cx, GC_NORMAL); -#ifdef JS_THREADSAFE +#ifdef JS_THREADSAFE else JS_YieldRequest(cx); #endif diff --git a/js/src/jscntxt.h b/js/src/jscntxt.h index 091a2529a39..c3d2465472e 100644 --- a/js/src/jscntxt.h +++ b/js/src/jscntxt.h @@ -57,6 +57,7 @@ #include "jsregexp.h" #include "jsutil.h" #include "jsarray.h" +#include "jstask.h" JS_BEGIN_EXTERN_C @@ -255,6 +256,13 @@ struct JSThreadData { * locks on each JS_malloc. */ size_t gcMallocBytes; + +#ifdef JS_THREADSAFE + /* + * Deallocator task for this thread. + */ + JSFreePointerListTask *deallocatorTask; +#endif }; #ifdef JS_THREADSAFE @@ -391,7 +399,18 @@ struct JSRuntime { JSPackedBool gcPoke; JSPackedBool gcRunning; JSPackedBool gcRegenShapes; - uint8 gcPadding; + + /* + * During gc, if rt->gcRegenShapes && + * (scope->flags & JSScope::SHAPE_REGEN) == rt->gcRegenShapesScopeFlag, + * then the scope's shape has already been regenerated during this GC. + * To avoid having to sweep JSScopes, the bit's meaning toggles with each + * shape-regenerating GC. + * + * FIXME Once scopes are GC'd (bug 505004), this will be obsolete. + */ + uint8 gcRegenShapesScopeFlag; + #ifdef JS_GC_ZEAL jsrefcount gcZeal; #endif @@ -688,6 +707,26 @@ struct JSRuntime { void setGCTriggerFactor(uint32 factor); void setGCLastBytes(size_t lastBytes); + + inline void* malloc(size_t bytes) { + return ::js_malloc(bytes); + } + + inline void* calloc(size_t bytes) { + return ::js_calloc(bytes); + } + + inline void* realloc(void* p, size_t bytes) { + return ::js_realloc(p, bytes); + } + + inline void free(void* p) { + ::js_free(p); + } + +#ifdef JS_THREADSAFE + JSBackgroundThread *deallocatorThread; +#endif }; /* Common macros to access thread-local caches in JSThread or JSRuntime. */ @@ -1039,16 +1078,86 @@ struct JSContext { jsval *nativeVp; #endif +#ifdef JS_THREADSAFE + inline void createDeallocatorTask() { + JSThreadData* tls = JS_THREAD_DATA(this); + JS_ASSERT(!tls->deallocatorTask); + if (runtime->deallocatorThread && !runtime->deallocatorThread->busy()) + tls->deallocatorTask = new JSFreePointerListTask(); + } + + inline void submitDeallocatorTask() { + JSThreadData* tls = JS_THREAD_DATA(this); + if (tls->deallocatorTask) { + runtime->deallocatorThread->schedule(tls->deallocatorTask); + tls->deallocatorTask = NULL; + } + } +#endif + /* Call this after succesful malloc of memory for GC-related things. */ - inline void - updateMallocCounter(size_t nbytes) - { + inline void updateMallocCounter(size_t nbytes) { size_t *pbytes, bytes; pbytes = &JS_THREAD_DATA(this)->gcMallocBytes; bytes = *pbytes; *pbytes = (size_t(-1) - bytes <= nbytes) ? 
size_t(-1) : bytes + nbytes; } + + inline void* malloc(size_t bytes) { + JS_ASSERT(bytes != 0); + void *p = runtime->malloc(bytes); + if (!p) { + JS_ReportOutOfMemory(this); + return NULL; + } + updateMallocCounter(bytes); + return p; + } + + inline void* calloc(size_t bytes) { + JS_ASSERT(bytes != 0); + void *p = runtime->calloc(bytes); + if (!p) { + JS_ReportOutOfMemory(this); + return NULL; + } + updateMallocCounter(bytes); + return p; + } + + inline void* realloc(void* p, size_t bytes) { + void *orig = p; + p = runtime->realloc(p, bytes); + if (!p) { + JS_ReportOutOfMemory(this); + return NULL; + } + if (!orig) + updateMallocCounter(bytes); + return p; + } + +#ifdef JS_THREADSAFE + inline void free(void* p) { + if (!p) + return; + if (thread) { + JSFreePointerListTask* task = JS_THREAD_DATA(this)->deallocatorTask; + if (task) { + task->add(p); + return; + } + } + runtime->free(p); + } +#else + inline void free(void* p) { + if (!p) + return; + runtime->free(p); + } +#endif }; #ifdef JS_THREADSAFE diff --git a/js/src/jsdate.cpp b/js/src/jsdate.cpp index c6a5f870a57..9eaa85e60fc 100644 --- a/js/src/jsdate.cpp +++ b/js/src/jsdate.cpp @@ -1961,7 +1961,7 @@ date_toSource(JSContext *cx, uintN argc, jsval *vp) str = JS_NewString(cx, bytes, strlen(bytes)); if (!str) { - free(bytes); + js_free(bytes); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); diff --git a/js/src/jsdate.h b/js/src/jsdate.h index 85e49320422..238994e2e09 100644 --- a/js/src/jsdate.h +++ b/js/src/jsdate.h @@ -124,6 +124,10 @@ typedef uint32 JSIntervalTime; extern JS_FRIEND_API(JSIntervalTime) js_IntervalNow(); +/* Date constructor native. Exposed only so the JIT can know its address. */ +JSBool +js_Date(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval); + JS_END_EXTERN_C #endif /* jsdate_h___ */ diff --git a/js/src/jsdbgapi.cpp b/js/src/jsdbgapi.cpp index 92f35ed6c40..fd6af3ee7dc 100644 --- a/js/src/jsdbgapi.cpp +++ b/js/src/jsdbgapi.cpp @@ -123,7 +123,7 @@ js_UntrapScriptCode(JSContext *cx, JSScript *script) continue; nbytes += (sn - notes + 1) * sizeof *sn; - code = (jsbytecode *) JS_malloc(cx, nbytes); + code = (jsbytecode *) cx->malloc(nbytes); if (!code) break; memcpy(code, script->code, nbytes); @@ -155,12 +155,12 @@ JS_SetTrap(JSContext *cx, JSScript *script, jsbytecode *pc, } else { sample = rt->debuggerMutations; DBG_UNLOCK(rt); - trap = (JSTrap *) JS_malloc(cx, sizeof *trap); + trap = (JSTrap *) cx->malloc(sizeof *trap); if (!trap) return JS_FALSE; trap->closure = NULL; if(!js_AddRoot(cx, &trap->closure, "trap->closure")) { - JS_free(cx, trap); + cx->free(trap); return JS_FALSE; } DBG_LOCK(rt); @@ -184,7 +184,7 @@ JS_SetTrap(JSContext *cx, JSScript *script, jsbytecode *pc, DBG_UNLOCK(rt); if (junk) { js_RemoveRoot(rt, &junk->closure); - JS_free(cx, junk); + cx->free(junk); } return JS_TRUE; } @@ -213,7 +213,7 @@ DestroyTrapAndUnlock(JSContext *cx, JSTrap *trap) DBG_UNLOCK(cx->runtime); js_RemoveRoot(cx->runtime, &trap->closure); - JS_free(cx, trap); + cx->free(trap); } JS_PUBLIC_API(void) @@ -413,7 +413,7 @@ DropWatchPointAndUnlock(JSContext *cx, JSWatchPoint *wp, uintN flag) } } - JS_free(cx, wp); + cx->free(wp); return ok; } @@ -434,7 +434,7 @@ js_TraceWatchPoints(JSTracer *trc, JSObject *obj) &wp->links != &rt->watchPointList; wp = (JSWatchPoint *)wp->links.next) { if (wp->object == obj) { - TRACE_SCOPE_PROPERTY(trc, wp->sprop); + wp->sprop->trace(trc); if ((wp->sprop->attrs & JSPROP_SETTER) && wp->setter) { JS_CALL_OBJECT_TRACER(trc, js_CastAsObject(wp->setter), "wp->setter"); @@ -619,7 +619,7 
@@ js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
         if (nslots <= JS_ARRAY_LENGTH(smallv)) {
             argv = smallv;
         } else {
-            argv = (jsval *) JS_malloc(cx, nslots * sizeof(jsval));
+            argv = (jsval *) cx->malloc(nslots * sizeof(jsval));
             if (!argv) {
                 DBG_LOCK(rt);
                 DropWatchPointAndUnlock(cx, wp, JSWP_HELD);
@@ -651,7 +651,7 @@ js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
                 JSFUN_HEAVYWEIGHT_TEST(fun->flags) && !js_GetCallObject(cx, &frame)) {
                 if (argv != smallv)
-                    JS_free(cx, argv);
+                    cx->free(argv);
                 DBG_LOCK(rt);
                 DropWatchPointAndUnlock(cx, wp, JSWP_HELD);
                 return JS_FALSE;
@@ -679,7 +679,7 @@ js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
             cx->fp = frame.down;
             if (argv != smallv)
-                JS_free(cx, argv);
+                cx->free(argv);
         }
     }
     DBG_LOCK(rt);
@@ -825,7 +825,7 @@ JS_SetWatchPoint(JSContext *cx, JSObject *obj, jsval idval,
             goto out;
         }
-        wp = (JSWatchPoint *) JS_malloc(cx, sizeof *wp);
+        wp = (JSWatchPoint *) cx->malloc(sizeof *wp);
         if (!wp) {
             ok = JS_FALSE;
             goto out;
@@ -1343,7 +1343,7 @@ JS_EvaluateInStackFrame(JSContext *cx, JSStackFrame *fp,
     length = (uintN) len;
     ok = JS_EvaluateUCInStackFrame(cx, fp, chars, length, filename, lineno, rval);
-    JS_free(cx, chars);
+    cx->free(chars);
     return ok;
 }
@@ -1469,7 +1469,7 @@ JS_GetPropertyDescArray(JSContext *cx, JSObject *obj, JSPropertyDescArray *pda)
     }
     n = scope->entryCount;
-    pd = (JSPropertyDesc *) JS_malloc(cx, (size_t)n * sizeof(JSPropertyDesc));
+    pd = (JSPropertyDesc *) cx->malloc((size_t)n * sizeof(JSPropertyDesc));
     if (!pd)
         return JS_FALSE;
     i = 0;
@@ -1511,7 +1511,7 @@ JS_PutPropertyDescArray(JSContext *cx, JSPropertyDescArray *pda)
         if (pd[i].flags & JSPD_ALIAS)
             js_RemoveRoot(cx->runtime, &pd[i].alias);
     }
-    JS_free(cx, pd);
+    cx->free(pd);
 }
 /************************************************************************/
@@ -1884,7 +1884,7 @@ js_DumpCallgrind(JSContext *cx, JSObject *obj,
         cstr = js_DeflateString(cx, str->chars(), str->length());
         if (cstr) {
             CALLGRIND_DUMP_STATS_AT(cstr);
-            JS_free(cx, cstr);
+            cx->free(cstr);
             return JS_TRUE;
         }
     }
@@ -1962,7 +1962,7 @@ js_StartVtune(JSContext *cx, JSObject *obj,
     status = VTStartSampling(&params);
     if (params.tb5Filename != default_filename)
-        JS_free(cx, params.tb5Filename);
+        cx->free(params.tb5Filename);
     if (status != 0) {
         if (status == VTAPI_MULTIPLE_RUNS)
diff --git a/js/src/jsdhash.cpp b/js/src/jsdhash.cpp
index 8252b4b651b..c8b9db09dc0 100644
--- a/js/src/jsdhash.cpp
+++ b/js/src/jsdhash.cpp
@@ -111,13 +111,13 @@ JS_PUBLIC_API(void *)
 JS_DHashAllocTable(JSDHashTable *table, uint32 nbytes)
 {
-    return malloc(nbytes);
+    return js_malloc(nbytes);
 }
 JS_PUBLIC_API(void)
 JS_DHashFreeTable(JSDHashTable *table, void *ptr)
 {
-    free(ptr);
+    js_free(ptr);
 }
 JS_PUBLIC_API(JSDHashNumber)
@@ -180,7 +180,7 @@ JS_DHashFreeStringKey(JSDHashTable *table, JSDHashEntryHdr *entry)
 {
     const JSDHashEntryStub *stub = (const JSDHashEntryStub *)entry;
-    free((void *) stub->key);
+    js_free((void *) stub->key);
     memset(entry, 0, table->entrySize);
 }
@@ -212,11 +212,11 @@ JS_NewDHashTable(const JSDHashTableOps *ops, void *data, uint32 entrySize,
 {
     JSDHashTable *table;
-    table = (JSDHashTable *) malloc(sizeof *table);
+    table = (JSDHashTable *) js_malloc(sizeof *table);
     if (!table)
         return NULL;
     if (!JS_DHashTableInit(table, ops, data, entrySize, capacity)) {
-        free(table);
+        js_free(table);
         return NULL;
     }
     return table;
@@ -226,7 +226,7 @@ JS_PUBLIC_API(void)
 JS_DHashTableDestroy(JSDHashTable *table)
 {
     JS_DHashTableFinish(table);
-    free(table);
+    js_free(table);
 }
 JS_PUBLIC_API(JSBool)
diff
--git a/js/src/jsdtoa.cpp b/js/src/jsdtoa.cpp index b1197ac043d..111cb5dec9d 100644 --- a/js/src/jsdtoa.cpp +++ b/js/src/jsdtoa.cpp @@ -368,7 +368,7 @@ JS_dtobasestr(int base, double dinput) JS_ASSERT(base >= 2 && base <= 36); dval(d) = dinput; - buffer = (char*) malloc(DTOBASESTR_BUFFER_SIZE); + buffer = (char*) js_malloc(DTOBASESTR_BUFFER_SIZE); if (buffer) { p = buffer; if (dval(d) < 0.0 @@ -412,7 +412,7 @@ JS_dtobasestr(int base, double dinput) nomem1: Bfree(b); UNLOCK_DTOA(); - free(buffer); + js_free(buffer); return NULL; } do { @@ -449,7 +449,7 @@ JS_dtobasestr(int base, double dinput) Bfree(mlo); Bfree(mhi); UNLOCK_DTOA(); - free(buffer); + js_free(buffer); return NULL; } JS_ASSERT(e < 0); diff --git a/js/src/jsemit.cpp b/js/src/jsemit.cpp index f591e844b6f..5105d8bba49 100644 --- a/js/src/jsemit.cpp +++ b/js/src/jsemit.cpp @@ -112,10 +112,10 @@ JSCodeGenerator::~JSCodeGenerator() /* NB: non-null only after OOM. */ if (spanDeps) - JS_free(compiler->context, spanDeps); + compiler->context->free(spanDeps); if (upvarMap.vector) - JS_free(compiler->context, upvarMap.vector); + compiler->context->free(upvarMap.vector); } static ptrdiff_t @@ -549,7 +549,7 @@ AddSpanDep(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc, jsbytecode *pc2, if ((index & (index - 1)) == 0 && (!(sdbase = cg->spanDeps) || index >= SPANDEPS_MIN)) { size = sdbase ? SPANDEPS_SIZE(index) : SPANDEPS_SIZE_MIN / 2; - sdbase = (JSSpanDep *) JS_realloc(cx, sdbase, size + size); + sdbase = (JSSpanDep *) cx->realloc(sdbase, size + size); if (!sdbase) return JS_FALSE; cg->spanDeps = sdbase; @@ -1165,7 +1165,7 @@ OptimizeSpanDeps(JSContext *cx, JSCodeGenerator *cg) * can span top-level statements, because JS lacks goto. */ size = SPANDEPS_SIZE(JS_BIT(JS_CeilingLog2(cg->numSpanDeps))); - JS_free(cx, cg->spanDeps); + cx->free(cg->spanDeps); cg->spanDeps = NULL; FreeJumpTargets(cg, cg->jumpTargets); cg->jumpTargets = NULL; @@ -1899,7 +1899,7 @@ MakeUpvarForEval(JSParseNode *pn, JSCodeGenerator *cg) JS_ASSERT(ALE_INDEX(ale) <= length); if (ALE_INDEX(ale) == length) { length = 2 * JS_MAX(2, length); - vector = (uint32 *) JS_realloc(cx, vector, length * sizeof *vector); + vector = (uint32 *) cx->realloc(vector, length * sizeof *vector); if (!vector) return false; cg->upvarMap.vector = vector; @@ -2197,7 +2197,7 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn) if (!vector) { uint32 length = cg->lexdeps.count; - vector = (uint32 *) calloc(length, sizeof *vector); + vector = (uint32 *) js_calloc(length * sizeof *vector); if (!vector) { JS_ReportOutOfMemory(cx); return JS_FALSE; @@ -3144,9 +3144,8 @@ EmitSwitch(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn, /* Just grab 8K for the worst-case bitmap. 
*/ intmap_bitlen = JS_BIT(16); intmap = (jsbitmap *) - JS_malloc(cx, - (JS_BIT(16) >> JS_BITS_PER_WORD_LOG2) - * sizeof(jsbitmap)); + cx->malloc((JS_BIT(16) >> JS_BITS_PER_WORD_LOG2) + * sizeof(jsbitmap)); if (!intmap) { JS_ReportOutOfMemory(cx); return JS_FALSE; @@ -3163,7 +3162,7 @@ EmitSwitch(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn, release: if (intmap && intmap != intmap_space) - JS_free(cx, intmap); + cx->free(intmap); if (!ok) return JS_FALSE; @@ -3307,7 +3306,7 @@ EmitSwitch(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn, */ if (tableLength != 0) { tableSize = (size_t)tableLength * sizeof *table; - table = (JSParseNode **) JS_malloc(cx, tableSize); + table = (JSParseNode **) cx->malloc(tableSize); if (!table) return JS_FALSE; memset(table, 0, tableSize); @@ -3475,7 +3474,7 @@ EmitSwitch(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn, out: if (table) - JS_free(cx, table); + cx->free(table); if (ok) { ok = js_PopStatementCG(cx, cg); diff --git a/js/src/jsexn.cpp b/js/src/jsexn.cpp index 34d9312cce8..a83d658a4e6 100644 --- a/js/src/jsexn.cpp +++ b/js/src/jsexn.cpp @@ -164,7 +164,7 @@ CopyErrorReport(JSContext *cx, JSErrorReport *report) */ mallocSize = sizeof(JSErrorReport) + argsArraySize + argsCopySize + ucmessageSize + uclinebufSize + linebufSize + filenameSize; - cursor = (uint8 *)JS_malloc(cx, mallocSize); + cursor = (uint8 *)cx->malloc(mallocSize); if (!cursor) return NULL; @@ -301,7 +301,7 @@ InitExnPrivate(JSContext *cx, JSObject *exnObject, JSString *message, js_ReportAllocationOverflow(cx); return JS_FALSE; } - priv = (JSExnPrivate *)JS_malloc(cx, size); + priv = (JSExnPrivate *)cx->malloc(size); if (!priv) return JS_FALSE; @@ -417,8 +417,8 @@ exn_finalize(JSContext *cx, JSObject *obj) priv = GetExnPrivate(cx, obj); if (priv) { if (priv->errorReport) - JS_free(cx, priv->errorReport); - JS_free(cx, priv); + cx->free(priv->errorReport); + cx->free(priv); } } @@ -586,7 +586,7 @@ StackTraceToString(JSContext *cx, JSExnPrivate *priv) if (stackmax >= STACK_LENGTH_LIMIT) \ goto done; \ stackmax = stackmax ? 2 * stackmax : 64; \ - ptr_ = JS_realloc(cx, stackbuf, (stackmax+1) * sizeof(jschar)); \ + ptr_ = cx->realloc(stackbuf, (stackmax+1) * sizeof(jschar)); \ if (!ptr_) \ goto bad; \ stackbuf = (jschar *) ptr_; \ @@ -608,7 +608,7 @@ StackTraceToString(JSContext *cx, JSExnPrivate *priv) goto done; \ } \ stackmax = JS_BIT(JS_CeilingLog2(stacklen + length_)); \ - ptr_ = JS_realloc(cx, stackbuf, (stackmax+1) * sizeof(jschar)); \ + ptr_ = cx->realloc(stackbuf, (stackmax+1) * sizeof(jschar)); \ if (!ptr_) \ goto bad; \ stackbuf = (jschar *) ptr_; \ @@ -659,7 +659,7 @@ StackTraceToString(JSContext *cx, JSExnPrivate *priv) * don't use JS_realloc here; simply let the oversized allocation * be owned by the string in that rare case. */ - void *shrunk = JS_realloc(cx, stackbuf, (stacklen+1) * sizeof(jschar)); + void *shrunk = cx->realloc(stackbuf, (stacklen+1) * sizeof(jschar)); if (shrunk) stackbuf = (jschar *) shrunk; } @@ -671,7 +671,7 @@ StackTraceToString(JSContext *cx, JSExnPrivate *priv) bad: if (stackbuf) - JS_free(cx, stackbuf); + cx->free(stackbuf); return NULL; } @@ -800,7 +800,7 @@ exn_toString(JSContext *cx, uintN argc, jsval *vp) name_length = name->length(); message_length = message->length(); length = (name_length ? 
name_length + 2 : 0) + message_length; - cp = chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar)); + cp = chars = (jschar *) cx->malloc((length + 1) * sizeof(jschar)); if (!chars) return JS_FALSE; @@ -815,7 +815,7 @@ exn_toString(JSContext *cx, uintN argc, jsval *vp) result = js_NewString(cx, chars, length); if (!result) { - JS_free(cx, chars); + cx->free(chars); return JS_FALSE; } } else { @@ -915,7 +915,7 @@ exn_toSource(JSContext *cx, uintN argc, jsval *vp) } } - cp = chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar)); + cp = chars = (jschar *) cx->malloc((length + 1) * sizeof(jschar)); if (!chars) { ok = JS_FALSE; goto out; @@ -955,7 +955,7 @@ exn_toSource(JSContext *cx, uintN argc, jsval *vp) result = js_NewString(cx, chars, length); if (!result) { - JS_free(cx, chars); + cx->free(chars); ok = JS_FALSE; goto out; } diff --git a/js/src/jsfile.cpp b/js/src/jsfile.cpp index b3d618275dd..8228c44add6 100644 --- a/js/src/jsfile.cpp +++ b/js/src/jsfile.cpp @@ -296,7 +296,7 @@ static char* js_combinePath(JSContext *cx, const char *base, const char *name) { int len = strlen(base); - char* result = JS_malloc(cx, len + strlen(name) + 2); + char* result = cx->malloc(len + strlen(name) + 2); if (!result) return NULL; @@ -335,7 +335,7 @@ js_fileBaseName(JSContext *cx, const char *pathname) } /* Allocate and copy. */ - result = JS_malloc(cx, aux - index + 1); + result = cx->malloc(aux - index + 1); if (!result) return NULL; strncpy(result, pathname + index + 1, aux - index); @@ -366,7 +366,7 @@ js_fileDirectoryName(JSContext *cx, const char *pathname) if (cp < pathname && end != pathname) { /* There were just /s, return the root. */ - result = JS_malloc(cx, 1 + 1); /* The separator + trailing NUL. */ + result = cx->malloc(1 + 1); /* The separator + trailing NUL. */ result[0] = FILESEPARATOR; result[1] = '\0'; return result; @@ -388,7 +388,7 @@ js_fileDirectoryName(JSContext *cx, const char *pathname) } pathsize = end - pathname + 1; - result = JS_malloc(cx, pathsize + 1); + result = cx->malloc(pathsize + 1); if (!result) return NULL; @@ -401,7 +401,7 @@ js_fileDirectoryName(JSContext *cx, const char *pathname) /* Return everything up to and including the seperator. */ pathsize = cp - pathname + 1; - result = JS_malloc(cx, pathsize + 1); + result = cx->malloc(pathsize + 1); if (!result) return NULL; @@ -462,7 +462,7 @@ js_canonicalPath(JSContext *cx, char *oldpath) while (j >= 0 && path[j] == ' ') j--; - tmp = JS_malloc(cx, j-i+2); + tmp = cx->malloc(j-i+2); if (!tmp) return NULL; @@ -478,7 +478,7 @@ js_canonicalPath(JSContext *cx, char *oldpath) /* file:// support. 
      */
     if (!strncmp(path, URL_PREFIX, strlen(URL_PREFIX))) {
         tmp = js_canonicalPath(cx, path + strlen(URL_PREFIX));
-        JS_free(cx, path);
+        cx->free(path);
         return tmp;
     }
@@ -486,7 +486,7 @@
         tmp = js_absolutePath(cx, path);
         if (!tmp)
             return NULL;
-        JS_free(cx, path);
+        cx->free(path);
         path = tmp;
     }
@@ -505,7 +505,7 @@
                 back--;
             } else {
                 tmp = result;
-                result = JS_malloc(cx, strlen(base) + 1 + strlen(tmp) + 1);
+                result = cx->malloc(strlen(base) + 1 + strlen(tmp) + 1);
                 if (!result)
                     goto out;
@@ -516,18 +516,18 @@
                 result[c + 1] = '\0';
                 strcat(result, tmp);
             }
-            JS_free(cx, tmp);
+            cx->free(tmp);
         }
     }
-    JS_free(cx, current);
-    JS_free(cx, base);
+    cx->free(current);
+    cx->free(base);
     current = dir;
     base = js_fileBaseName(cx, current);
     dir = js_fileDirectoryName(cx, current);
     }
     tmp = result;
-    result = JS_malloc(cx, strlen(dir)+1+strlen(tmp)+1);
+    result = cx->malloc(strlen(dir) + 1 + strlen(tmp) + 1);
     if (!result)
         goto out;
@@ -543,13 +543,13 @@
 out:
     if (tmp)
-        JS_free(cx, tmp);
+        cx->free(tmp);
     if (dir)
-        JS_free(cx, dir);
+        cx->free(dir);
     if (base)
-        JS_free(cx, base);
+        cx->free(base);
     if (current)
-        JS_free(cx, current);
+        cx->free(current);
     return result;
 }
@@ -753,7 +753,7 @@ js_FileHasOption(JSContext *cx, const char *oldoptions, const char *name)
             break;
         current = comma + 1;
     }
-    JS_free(cx, options);
+    cx->free(options);
     return found;
 }
@@ -838,20 +838,20 @@ js_FileRead(JSContext *cx, JSFile *file, jschar *buf, int32 len, int32 mode)
     switch (mode) {
       case ASCII:
-        aux = (unsigned char*)JS_malloc(cx, len);
+        aux = (unsigned char*)cx->malloc(len);
         if (!aux)
             return 0;
         count = js_BufferedRead(file, aux, len);
         if (count == -1) {
-            JS_free(cx, aux);
+            cx->free(aux);
             return 0;
         }
         for (i = 0; i < len; i++)
             buf[i] = (jschar)aux[i];
-        JS_free(cx, aux);
+        cx->free(aux);
         break;
       case UTF8:
@@ -977,7 +977,7 @@ js_FileWrite(JSContext *cx, JSFile *file, jschar *buf, int32 len, int32 mode)
     switch (mode) {
      case ASCII:
-        aux = (unsigned char*)JS_malloc(cx, len);
+        aux = (unsigned char*)cx->malloc(len);
        if (!aux)
            return 0;
@@ -989,21 +989,21 @@ js_FileWrite(JSContext *cx, JSFile *file, jschar *buf, int32 len, int32 mode)
             : fwrite(aux, 1, len, file->nativehandle);
         if (count==-1) {
-            JS_free(cx, aux);
+            cx->free(aux);
             return 0;
         }
-        JS_free(cx, aux);
+        cx->free(aux);
         break;
       case UTF8:
-        utfbuf = (unsigned char*)JS_malloc(cx, len*3);
+        utfbuf = (unsigned char*)cx->malloc(len*3);
         if (!utfbuf)
             return 0;
         i = 0;
         for (count = 0;count<len;count++) {
             j = js_OneUcs4ToUtf8Char(utfbuf+i, buf[count]);
             if (j==-1) {
-                JS_free(cx, utfbuf);
+                cx->free(utfbuf);
                 return 0;
             }
             i+=j;
@@ -1013,10 +1013,10 @@
             : fwrite(utfbuf, 1, i, file->nativehandle);
         if (j<i) {
-            JS_free(cx, utfbuf);
+            cx->free(utfbuf);
             return 0;
         }
-        JS_free(cx, utfbuf);
+        cx->free(utfbuf);
         break;
       case UCS2:
@@ -1179,13 +1179,13 @@ js_parent(JSContext *cx, JSFile *file, jsval *resultp)
     } else {
         JSObject *obj = js_NewFileObject(cx, str);
         if (!obj) {
-            JS_free(cx, str);
+            cx->free(str);
             return JS_FALSE;
         }
         *resultp = OBJECT_TO_JSVAL(obj);
     }
-    JS_free(cx, str);
+    cx->free(str);
     return JS_TRUE;
 }
@@ -1206,7 +1206,7 @@ js_name(JSContext *cx, JSFile *file, jsval *vp)
     str = JS_NewString(cx, name, strlen(name));
     if (!str) {
-        JS_free(cx, name);
+        cx->free(name);
         return JS_FALSE;
     }
@@ -1353,7 +1353,7 @@ file_open(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
         pipemode[i++] = '\0';
         file->nativehandle = POPEN(&file->path[1], pipemode);
     } else
if(file->path[len-1] == PIPE_SYMBOL) { - char *command = JS_malloc(cx, len); + char *command = cx->malloc(len); strncpy(command, file->path, len-1); command[len-1] = '\0'; @@ -1364,7 +1364,7 @@ file_open(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) #endif pipemode[i++] = '\0'; file->nativehandle = POPEN(command, pipemode); - JS_free(cx, command); + cx->free(command); } /* set the flags */ file->isNative = JS_TRUE; @@ -1377,7 +1377,7 @@ file_open(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) } js_ResetBuffers(file); - JS_free(cx, mode); + cx->free(mode); mode = NULL; /* Set the open flag and return result */ @@ -1396,7 +1396,7 @@ good: out: if(mode) - JS_free(cx, mode); + cx->free(mode); return JS_FALSE; } @@ -1511,13 +1511,13 @@ file_copyTo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) goto out; } - buffer = JS_malloc(cx, size); + buffer = cx->malloc(size); count = INT_TO_JSVAL(PR_Read(file->handle, buffer, size)); /* reading panic */ if (count!=size) { - JS_free(cx, buffer); + cx->free(buffer); JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, JSFILEMSG_COPY_READ_ERROR, file->path); goto out; @@ -1527,13 +1527,13 @@ file_copyTo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) /* writing panic */ if (count!=size) { - JS_free(cx, buffer); + cx->free(buffer); JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, JSFILEMSG_COPY_WRITE_ERROR, file->path); goto out; } - JS_free(cx, buffer); + cx->free(buffer); if(!fileInitiallyOpen){ if(!file_close(cx, obj, 0, NULL, rval)) goto out; @@ -1577,7 +1577,7 @@ file_renameTo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval if (PR_Rename(file->path, dest)==PR_SUCCESS){ /* copy the new filename */ - JS_free(cx, file->path); + cx->free(file->path); file->path = dest; *rval = JSVAL_TRUE; return JS_TRUE; @@ -1729,17 +1729,17 @@ file_read(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) /* want = (want>262144)?262144:want; * arbitrary size limitation */ - buf = JS_malloc(cx, want*sizeof buf[0]); + buf = cx->malloc(want*sizeof buf[0]); if (!buf) goto out; count = js_FileRead(cx, file, buf, want, file->type); if (count>0) { str = JS_NewUCStringCopyN(cx, buf, count); *rval = STRING_TO_JSVAL(str); - JS_free(cx, buf); + cx->free(buf); return JS_TRUE; } else { - JS_free(cx, buf); + cx->free(buf); goto out; } out: @@ -1760,7 +1760,7 @@ file_readln(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) SECURITY_CHECK(cx, NULL, "readln", file); JSFILE_CHECK_READ; - buf = JS_malloc(cx, MAX_LINE_LENGTH * sizeof data); + buf = cx->malloc(MAX_LINE_LENGTH * sizeof data); if (!buf) return JS_FALSE; @@ -1792,8 +1792,7 @@ file_readln(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) default: if (--room < 0) { - tmp = JS_realloc(cx, buf, - (offset + MAX_LINE_LENGTH) * sizeof data); + tmp = cx->realloc(buf, (offset + MAX_LINE_LENGTH) * sizeof data); if (!tmp) goto out; @@ -1814,7 +1813,7 @@ eof: done: buf[offset] = 0; - tmp = JS_realloc(cx, buf, (offset + 1) * sizeof data); + tmp = cx->realloc(buf, (offset + 1) * sizeof data); if (!tmp) goto out; @@ -1827,7 +1826,7 @@ done: out: if (buf) - JS_free(cx, buf); + cx->free(buf); return JS_FALSE; } @@ -1980,7 +1979,7 @@ file_list(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) filePath = js_combinePath(cx, file->path, (char*)entry->name); eachFile = js_NewFileObject(cx, filePath); - JS_free(cx, filePath); + cx->free(filePath); if (!eachFile){ 
JS_ReportWarning(cx, "File %s cannot be retrieved", filePath); continue; @@ -2017,7 +2016,7 @@ file_mkdir(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) char *dir = js_fileDirectoryName(cx, file->path); JSObject *dirObj = js_NewFileObject(cx, dir); - JS_free(cx, dir); + cx->free(dir); /* call file_mkdir with the right set of parameters if needed */ if (file_mkdir(cx, dirObj, argc, argv, rval)) @@ -2031,12 +2030,12 @@ file_mkdir(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) fullName = js_combinePath(cx, file->path, dirName); if (PR_MkDir(fullName, 0755)==PR_SUCCESS){ *rval = JSVAL_TRUE; - JS_free(cx, fullName); + cx->free(fullName); return JS_TRUE; }else{ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, JSFILEMSG_OP_FAILED, "mkdir", fullName); - JS_free(cx, fullName); + cx->free(fullName); goto out; } } @@ -2077,7 +2076,7 @@ file_toURL(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) return JS_FALSE; str = js_NewString(cx, urlChars, len); if (!str) { - JS_free(cx, urlChars); + cx->free(urlChars); return JS_FALSE; } *rval = STRING_TO_JSVAL(str); @@ -2104,9 +2103,9 @@ file_finalize(JSContext *cx, JSObject *obj) } if (file->path) - JS_free(cx, file->path); + cx->free(file->path); - JS_free(cx, file); + cx->free(file); } } @@ -2118,7 +2117,7 @@ file_init(JSContext *cx, JSObject *obj, char *bytes) { JSFile *file; - file = JS_malloc(cx, sizeof *file); + file = cx->malloc(sizeof *file); if (!file) return NULL; memset(file, 0 , sizeof *file); @@ -2130,7 +2129,7 @@ file_init(JSContext *cx, JSObject *obj, char *bytes) if (!JS_SetPrivate(cx, obj, file)) { JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, JSFILEMSG_CANNOT_SET_PRIVATE_FILE, file->path); - JS_free(cx, file); + cx->free(file); return NULL; } @@ -2176,7 +2175,7 @@ js_NewFileObjectFromFILE(JSContext *cx, FILE *nativehandle, char *filename, /* free result of RESOLVE_PATH from file_init. */ JS_ASSERT(file->path != NULL); - JS_free(cx, file->path); + cx->free(file->path); file->path = strdup(filename); file->isOpen = open; @@ -2399,7 +2398,7 @@ file_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) case FILE_MODE: SECURITY_CHECK(cx, NULL, "mode", file); JSFILE_CHECK_OPEN("mode"); - bytes = JS_malloc(cx, MODE_SIZE); + bytes = cx->malloc(MODE_SIZE); bytes[0] = '\0'; flag = JS_FALSE; @@ -2439,7 +2438,7 @@ file_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) flag = JS_TRUE; } *vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, bytes)); - JS_free(cx, bytes); + cx->free(bytes); break; case FILE_CREATED: SECURITY_CHECK(cx, NULL, "creationTime", file); @@ -2575,7 +2574,7 @@ file_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) bytes = js_combinePath(cx, file->path, prop_name); *vp = OBJECT_TO_JSVAL(js_NewFileObject(cx, bytes)); PR_CloseDir(dir); - JS_free(cx, bytes); + cx->free(bytes); return !JSVAL_IS_NULL(*vp); } } @@ -2717,10 +2716,10 @@ js_InitFileClass(JSContext *cx, JSObject* obj) /* Define CURRENTDIR property. 
We are doing this to get a slash at the end of the current dir */ afile = js_NewFileObject(cx, CURRENT_DIR); - currentdir = JS_malloc(cx, MAX_PATH_LENGTH); - currentdir = getcwd(currentdir, MAX_PATH_LENGTH); + currentdir = cx->malloc(MAX_PATH_LENGTH); + currentdir = getcwd(currentdir, MAX_PATH_LENGTH); afile = js_NewFileObject(cx, currentdir); - JS_free(cx, currentdir); + cx->free(currentdir); vp = OBJECT_TO_JSVAL(afile); JS_DefinePropertyWithTinyId(cx, ctor, CURRENTDIR_PROPERTY, 0, vp, JS_PropertyStub, file_currentDirSetter, diff --git a/js/src/jsfun.cpp b/js/src/jsfun.cpp index c3076d055a0..d4b02a1cb22 100644 --- a/js/src/jsfun.cpp +++ b/js/src/jsfun.cpp @@ -136,7 +136,7 @@ MarkArgDeleted(JSContext *cx, JSStackFrame *fp, uintN slot) bitmap = (jsbitmap *) &bmapint; } else { nbytes = JS_HOWMANY(nbits, JS_BITS_PER_WORD) * sizeof(jsbitmap); - bitmap = (jsbitmap *) JS_malloc(cx, nbytes); + bitmap = (jsbitmap *) cx->malloc(nbytes); if (!bitmap) return JS_FALSE; memset(bitmap, 0, nbytes); @@ -311,7 +311,7 @@ js_PutArgsObject(JSContext *cx, JSStackFrame *fp) if (!JSVAL_IS_VOID(bmapval)) { JS_SetReservedSlot(cx, argsobj, 0, JSVAL_VOID); if (fp->argc > JSVAL_INT_BITS) - JS_free(cx, JSVAL_TO_PRIVATE(bmapval)); + cx->free(JSVAL_TO_PRIVATE(bmapval)); } /* @@ -1133,7 +1133,7 @@ js_GetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval *vp) return CallPropertyOp(cx, obj, id, vp, JSCPK_ARG, JS_FALSE); } -static JSBool +JSBool SetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval *vp) { return CallPropertyOp(cx, obj, id, vp, JSCPK_ARG, JS_TRUE); @@ -1154,12 +1154,27 @@ js_GetCallVarChecked(JSContext *cx, JSObject *obj, jsid id, jsval *vp) return CheckForEscapingClosure(cx, obj, vp); } -static JSBool +JSBool SetCallVar(JSContext *cx, JSObject *obj, jsid id, jsval *vp) { return CallPropertyOp(cx, obj, id, vp, JSCPK_VAR, JS_TRUE); } +JSBool JS_FASTCALL +js_SetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval v) +{ + return CallPropertyOp(cx, obj, id, &v, JSCPK_ARG, JS_TRUE); +} + +JSBool JS_FASTCALL +js_SetCallVar(JSContext *cx, JSObject *obj, jsid id, jsval v) +{ + return CallPropertyOp(cx, obj, id, &v, JSCPK_VAR, JS_TRUE); +} + +JS_DEFINE_CALLINFO_4(extern, BOOL, js_SetCallArg, CONTEXT, OBJECT, JSID, JSVAL, 0, 0) +JS_DEFINE_CALLINFO_4(extern, BOOL, js_SetCallVar, CONTEXT, OBJECT, JSID, JSVAL, 0, 0) + static JSBool call_resolve(JSContext *cx, JSObject *obj, jsval idval, uintN flags, JSObject **objp) @@ -2743,10 +2758,10 @@ FreeLocalNameHash(JSContext *cx, JSLocalNameMap *map) for (dup = map->lastdup; dup; dup = next) { next = dup->link; - JS_free(cx, dup); + cx->free(dup); } JS_DHashTableFinish(&map->names); - JS_free(cx, map); + cx->free(map); } static JSBool @@ -2774,7 +2789,7 @@ HashLocalName(JSContext *cx, JSLocalNameMap *map, JSAtom *name, if (entry->name) { JS_ASSERT(entry->name == name); JS_ASSERT(entry->localKind == JSLOCAL_ARG); - dup = (JSNameIndexPair *) JS_malloc(cx, sizeof *dup); + dup = (JSNameIndexPair *) cx->malloc(sizeof *dup); if (!dup) return JS_FALSE; dup->name = entry->name; @@ -2820,7 +2835,7 @@ js_AddLocal(JSContext *cx, JSFunction *fun, JSAtom *atom, JSLocalKind kind) if (n > 1) { array = fun->u.i.names.array; } else { - array = (jsuword *) JS_malloc(cx, MAX_ARRAY_LOCALS * sizeof *array); + array = (jsuword *) cx->malloc(MAX_ARRAY_LOCALS * sizeof *array); if (!array) return JS_FALSE; array[0] = fun->u.i.names.taggedAtom; @@ -2845,7 +2860,7 @@ js_AddLocal(JSContext *cx, JSFunction *fun, JSAtom *atom, JSLocalKind kind) } } else if (n == MAX_ARRAY_LOCALS) { array = 
fun->u.i.names.array; - map = (JSLocalNameMap *) JS_malloc(cx, sizeof *map); + map = (JSLocalNameMap *) cx->malloc(sizeof *map); if (!map) return JS_FALSE; if (!JS_DHashTableInit(&map->names, JS_DHashGetStubOps(), @@ -2853,7 +2868,7 @@ js_AddLocal(JSContext *cx, JSFunction *fun, JSAtom *atom, JSLocalKind kind) JS_DHASH_DEFAULT_CAPACITY(MAX_ARRAY_LOCALS * 2))) { JS_ReportOutOfMemory(cx); - JS_free(cx, map); + cx->free(map); return JS_FALSE; } @@ -2886,7 +2901,7 @@ js_AddLocal(JSContext *cx, JSFunction *fun, JSAtom *atom, JSLocalKind kind) * to replace fun->u.i.names with the built map. */ fun->u.i.names.map = map; - JS_free(cx, array); + cx->free(array); } else { if (*indexp == JS_BITMASK(16)) { JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, @@ -3108,7 +3123,7 @@ DestroyLocalNames(JSContext *cx, JSFunction *fun) if (n <= 1) return; if (n <= MAX_ARRAY_LOCALS) - JS_free(cx, fun->u.i.names.array); + cx->free(fun->u.i.names.array); else FreeLocalNameHash(cx, fun->u.i.names.map); } @@ -3124,8 +3139,8 @@ js_FreezeLocalNames(JSContext *cx, JSFunction *fun) n = fun->nargs + fun->u.i.nvars + fun->u.i.nupvars; if (2 <= n && n < MAX_ARRAY_LOCALS) { /* Shrink over-allocated array ignoring realloc failures. */ - array = (jsuword *) JS_realloc(cx, fun->u.i.names.array, - n * sizeof *array); + array = (jsuword *) cx->realloc(fun->u.i.names.array, + n * sizeof *array); if (array) fun->u.i.names.array = array; } diff --git a/js/src/jsfun.h b/js/src/jsfun.h index 6be8fb2a097..cbf76932d6c 100644 --- a/js/src/jsfun.h +++ b/js/src/jsfun.h @@ -280,7 +280,25 @@ extern JSBool js_GetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval *vp); extern JSBool -js_GetCallVar(JSContext *cx, JSObject *obj, jsval id, jsval *vp); +js_GetCallVar(JSContext *cx, JSObject *obj, jsid id, jsval *vp); + +extern JSBool +SetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval *vp); + +extern JSBool +SetCallVar(JSContext *cx, JSObject *obj, jsid id, jsval *vp); + +/* + * js_SetCallArg and js_SetCallVar are extern fastcall copies of the setter + * functions. These versions are required in order to set call vars from traces. + * The normal versions must not be fastcall because they are stored in the + * property ops map. 
+ */ +extern JSBool JS_FASTCALL +js_SetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval v); + +extern JSBool JS_FASTCALL +js_SetCallVar(JSContext *cx, JSObject *obj, jsid id, jsval v); /* * Slower version of js_GetCallVar used when call_resolve detects an attempt to diff --git a/js/src/jsgc.cpp b/js/src/jsgc.cpp index b2ee47f5a0c..d4a31a249df 100644 --- a/js/src/jsgc.cpp +++ b/js/src/jsgc.cpp @@ -76,6 +76,7 @@ #include "jsscript.h" #include "jsstaticcheck.h" #include "jsstr.h" +#include "jstask.h" #include "jstracer.h" #if JS_HAS_XML_SUPPORT @@ -722,7 +723,7 @@ FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info) { if (table->array) { JS_ASSERT(table->count > 0); - free(table->array); + js_free(table->array); table->array = NULL; table->count = 0; } @@ -756,8 +757,8 @@ AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info, if (capacity > (size_t)-1 / sizeof table->array[0]) goto bad; } - array = (void **) realloc(table->array, - capacity * sizeof table->array[0]); + array = (void **) js_realloc(table->array, + capacity * sizeof table->array[0]); if (!array) goto bad; #ifdef DEBUG @@ -796,11 +797,11 @@ ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info, array = table->array; JS_ASSERT(array); if (capacity == 0) { - free(array); + js_free(array); table->array = NULL; return; } - array = (void **) realloc(array, capacity * sizeof array[0]); + array = (void **) js_realloc(array, capacity * sizeof array[0]); if (array) table->array = array; } @@ -881,7 +882,7 @@ NewGCChunk(void) * * bytes to ensure that we always have room to store the gap. */ - p = malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT); + p = js_malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT); if (!p) return 0; @@ -913,11 +914,11 @@ DestroyGCChunk(jsuword chunk) #endif #if HAS_POSIX_MEMALIGN - free((void *) chunk); + js_free((void *) chunk); #else /* See comments in NewGCChunk. */ JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE); - free((void *) (chunk - *GetMallocedChunkGapPtr(chunk))); + js_free((void *) (chunk - *GetMallocedChunkGapPtr(chunk))); #endif } @@ -3042,7 +3043,7 @@ js_TraceContext(JSTracer *trc, JSContext *acx) tvr->u.trace(trc, tvr); break; case JSTVU_SPROP: - TRACE_SCOPE_PROPERTY(trc, tvr->u.sprop); + tvr->u.sprop->trace(trc); break; case JSTVU_WEAK_ROOTS: TraceWeakRoots(trc, tvr->u.weakRoots); @@ -3270,7 +3271,10 @@ js_FinalizeStringRT(JSRuntime *rt, JSString *str, intN type, JSContext *cx) JS_ASSERT(type < 0); rt->unitStrings[*chars] = NULL; } else if (type < 0) { - free(chars); + if (cx) + cx->free(chars); + else + rt->free(chars); } else { JS_ASSERT((uintN) type < JS_ARRAY_LENGTH(str_finalizers)); finalizer = str_finalizers[type]; @@ -3519,6 +3523,7 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind) */ if (rt->shapeGen & SHAPE_OVERFLOW_BIT) { rt->gcRegenShapes = true; + rt->gcRegenShapesScopeFlag ^= JSScope::SHAPE_REGEN; rt->shapeGen = 0; rt->protoHazardShape = 0; } @@ -3555,6 +3560,10 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind) rt->gcMarkingTracer = NULL; +#ifdef JS_THREADSAFE + cx->createDeallocatorTask(); +#endif + /* * Sweep phase. 
* @@ -3733,6 +3742,10 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind) */ DestroyGCArenas(rt, emptyArenas); +#ifdef JS_THREADSAFE + cx->submitDeallocatorTask(); +#endif + if (rt->gcCallback) (void) rt->gcCallback(cx, JSGC_FINALIZE_END); #ifdef DEBUG_srcnotesize diff --git a/js/src/jsgc.h b/js/src/jsgc.h index 417c6405274..52ec7982fd6 100644 --- a/js/src/jsgc.h +++ b/js/src/jsgc.h @@ -47,6 +47,7 @@ #include "jsdhash.h" #include "jsbit.h" #include "jsutil.h" +#include "jstask.h" JS_BEGIN_EXTERN_C @@ -341,6 +342,28 @@ js_AddAsGCBytes(JSContext *cx, size_t sz); extern void js_RemoveAsGCBytes(JSRuntime* rt, size_t sz); +#ifdef JS_THREADSAFE +class JSFreePointerListTask : public JSBackgroundTask { + void *head; + public: + JSFreePointerListTask() : head(NULL) {} + + void add(void* ptr) { + *(void**)ptr = head; + head = ptr; + } + + void run() { + void *ptr = head; + while (ptr) { + void *next = *(void **)ptr; + js_free(ptr); + ptr = next; + } + } +}; +#endif + /* * Free the chars held by str when it is finalized by the GC. When type is * less then zero, it denotes an internal string. Otherwise it denotes the diff --git a/js/src/jshash.cpp b/js/src/jshash.cpp index 9e9466e4b6e..e347744e731 100644 --- a/js/src/jshash.cpp +++ b/js/src/jshash.cpp @@ -73,7 +73,7 @@ DefaultAllocTable(void *pool, size_t size) static void DefaultFreeTable(void *pool, void *item, size_t size) { - free(item); + js_free(item); } static JSHashEntry * @@ -86,7 +86,7 @@ static void DefaultFreeEntry(void *pool, JSHashEntry *he, uintN flag) { if (flag == HT_FREE_ENTRY) - free(he); + js_free(he); } static JSHashAllocOps defaultHashAllocOps = { diff --git a/js/src/jsinterp.cpp b/js/src/jsinterp.cpp index 6f60b4b0821..0d9a50ba5ad 100644 --- a/js/src/jsinterp.cpp +++ b/js/src/jsinterp.cpp @@ -1534,7 +1534,7 @@ js_Execute(JSContext *cx, JSObject *chain, JSScript *script, js_LeaveTrace(cx); #ifdef JS_TRACER - /* + /* * The JIT requires that the scope chain here is equal to its global * object. Disable the JIT for this call if this condition is not true. */ @@ -2095,7 +2095,7 @@ js_GetUpvar(JSContext *cx, uintN level, uintN cookie) } else if (slot == CALLEE_UPVAR_SLOT) { vp = &fp->argv[-2]; slot = 0; - } else { + } else { slot -= fp->fun->nargs; JS_ASSERT(slot < fp->script->nslots); vp = fp->slots; @@ -2132,7 +2132,7 @@ js_TraceOpcode(JSContext *cx) fp->script, cx->tracePrevPc); /* - * If there aren't that many elements on the stack, then + * If there aren't that many elements on the stack, then * we have probably entered a new frame, and printing output * would just be misleading. */ @@ -2145,7 +2145,7 @@ js_TraceOpcode(JSContext *cx) fprintf(tracefp, "%s %s", (n == -ndefs) ? " output:" : ",", bytes); - JS_free(cx, bytes); + cx->free(bytes); } } fprintf(tracefp, " @ %u\n", (uintN) (regs->sp - StackBase(fp))); @@ -2177,7 +2177,7 @@ js_TraceOpcode(JSContext *cx) fprintf(tracefp, "%s %s", (n == -nuses) ? " inputs:" : ",", bytes); - JS_free(cx, bytes); + cx->free(bytes); } } fprintf(tracefp, " @ %u\n", (uintN) (regs->sp - StackBase(fp))); @@ -2264,7 +2264,7 @@ js_DumpOpMeters() # define SIGNIFICANT(count,total) (200. 
* (count) >= (total)) - graph = (Edge *) calloc(nedges, sizeof graph[0]); + graph = (Edge *) js_calloc(nedges * sizeof graph[0]); for (i = nedges = 0; i < JSOP_LIMIT; i++) { from = js_CodeName[i]; for (j = 0; j < JSOP_LIMIT; j++) { @@ -2293,7 +2293,7 @@ js_DumpOpMeters() graph[i].from, graph[i].to, (unsigned long)graph[i].count, style); } - free(graph); + js_free(graph); fputs("}\n", fp); fclose(fp); @@ -2717,7 +2717,7 @@ js_Interpret(JSContext *cx) * 'op=x; DO_OP()' to let another opcode's implementation finish * their work, and many opcodes share entry points with a run of * consecutive BEGIN_CASEs. - * + * Take care to trace OP only when it is the opcode fetched from * the instruction stream, so the trace matches what one would * expect from looking at the code. (We do omit POPs after SETs; @@ -4802,7 +4802,6 @@ js_Interpret(JSContext *cx) } JS_UNLOCK_SCOPE(cx, scope); PCMETER(cache->setpcmisses++); - atom = NULL; } } diff --git a/js/src/jsinterp.h b/js/src/jsinterp.h index 3848d5498b1..5190a06114a 100644 --- a/js/src/jsinterp.h +++ b/js/src/jsinterp.h @@ -90,7 +90,7 @@ struct JSStackFrame { * variables on the stack initially, note when they are closed * over, and copy those that are out to the heap when we leave * their dynamic scope. - * + * * The bytecode compiler produces a tree of block objects * accompanying each JSScript representing those lexical blocks in * the script that have let-bound variables associated with them. @@ -102,7 +102,7 @@ struct JSStackFrame { * When we are in the static scope of such a block, blockChain * points to its compiler-allocated block object; otherwise, it is * NULL. - * + * * scopeChain is the current scope chain, including 'call' and * 'block' objects for those function calls and lexical blocks * whose static scope we are currently executing in, and 'with' @@ -158,7 +158,7 @@ static JS_INLINE uintN GlobalVarCount(JSStackFrame *fp) { uintN n; - + JS_ASSERT(!fp->fun); n = fp->script->nfixed; if (fp->script->regexpsOffset != 0) @@ -243,6 +243,10 @@ struct JSPropCacheEntry { jsuword kshape; /* key shape if pc, else obj for atom */ jsuword vcap; /* value capability, see above */ jsuword vword; /* value word, see PCVAL_* below */ + + bool adding() const { + return PCVCAP_TAG(vcap) == 0 && kshape != PCVCAP_SHAPE(vcap); + } }; /* diff --git a/js/src/jsinttypes.h b/js/src/jsinttypes.h index e239551f90c..7e06b3edf0d 100644 --- a/js/src/jsinttypes.h +++ b/js/src/jsinttypes.h @@ -43,7 +43,7 @@ * Types: * JSInt<N>, JSUint<N> (for <N> = 8, 16, 32, and 64) * JSIntPtr, JSUIntPtr - + * JSInt<N> and JSUint<N> are signed and unsigned types known to be * <N> bits long. Note that neither JSInt8 nor JSUInt8 is necessarily * equivalent to a plain "char". diff --git a/js/src/jsiter.cpp b/js/src/jsiter.cpp index f941a6e3bd7..a9c3473f977 100644 --- a/js/src/jsiter.cpp +++ b/js/src/jsiter.cpp @@ -649,7 +649,7 @@ generator_finalize(JSContext *cx, JSObject *obj) */ JS_ASSERT(gen->state == JSGEN_NEWBORN || gen->state == JSGEN_CLOSED || gen->state == JSGEN_OPEN); - JS_free(cx, gen); + cx->free(gen); } } @@ -716,7 +716,7 @@ js_NewGenerator(JSContext *cx, JSStackFrame *fp) /* Allocate obj's private data struct.
*/ gen = (JSGenerator *) - JS_malloc(cx, sizeof(JSGenerator) + (nslots - 1) * sizeof(jsval)); + cx->malloc(sizeof(JSGenerator) + (nslots - 1) * sizeof(jsval)); if (!gen) goto bad; @@ -783,7 +783,7 @@ js_NewGenerator(JSContext *cx, JSStackFrame *fp) gen->state = JSGEN_NEWBORN; if (!JS_SetPrivate(cx, obj, gen)) { - JS_free(cx, gen); + cx->free(gen); goto bad; } return obj; diff --git a/js/src/jslock.cpp b/js/src/jslock.cpp index ace352f9f87..18f7f17b317 100644 --- a/js/src/jslock.cpp +++ b/js/src/jslock.cpp @@ -909,7 +909,7 @@ DestroyFatlock(JSFatLock *fl) { PR_DestroyLock(fl->slock); PR_DestroyCondVar(fl->svar); - free(fl); + js_free(fl); } static JSFatLock * @@ -1003,7 +1003,7 @@ js_SetupLocks(int listc, int globc) global_locks_log2 = JS_CeilingLog2(globc); global_locks_mask = JS_BITMASK(global_locks_log2); global_lock_count = JS_BIT(global_locks_log2); - global_locks = (PRLock **) malloc(global_lock_count * sizeof(PRLock*)); + global_locks = (PRLock **) js_malloc(global_lock_count * sizeof(PRLock*)); if (!global_locks) return JS_FALSE; for (i = 0; i < global_lock_count; i++) { @@ -1014,7 +1014,7 @@ js_SetupLocks(int listc, int globc) return JS_FALSE; } } - fl_list_table = (JSFatLockTable *) malloc(i * sizeof(JSFatLockTable)); + fl_list_table = (JSFatLockTable *) js_malloc(i * sizeof(JSFatLockTable)); if (!fl_list_table) { js_CleanupLocks(); return JS_FALSE; @@ -1036,7 +1036,7 @@ js_CleanupLocks() if (global_locks) { for (i = 0; i < global_lock_count; i++) PR_DestroyLock(global_locks[i]); - free(global_locks); + js_free(global_locks); global_locks = NULL; global_lock_count = 1; global_locks_log2 = 0; @@ -1049,7 +1049,7 @@ js_CleanupLocks() DeleteListOfFatlocks(fl_list_table[i].taken); fl_list_table[i].taken = NULL; } - free(fl_list_table); + js_free(fl_list_table); fl_list_table = NULL; fl_list_table_len = 0; } diff --git a/js/src/jsmath.cpp b/js/src/jsmath.cpp index daed88393b9..dd6fd51665c 100644 --- a/js/src/jsmath.cpp +++ b/js/src/jsmath.cpp @@ -231,7 +231,7 @@ static inline jsdouble JS_FASTCALL math_ceil_kernel(jsdouble x) { #ifdef __APPLE__ - if (x < 0 && x > -1.0) + if (x < 0 && x > -1.0) return js_copysign(0, -1); #endif return ceil(x); diff --git a/js/src/jsnum.cpp b/js/src/jsnum.cpp index 893f74aaaa2..5c8993aa080 100644 --- a/js/src/jsnum.cpp +++ b/js/src/jsnum.cpp @@ -384,7 +384,7 @@ num_toString(JSContext *cx, uintN argc, jsval *vp) return JS_FALSE; } str = JS_NewStringCopyZ(cx, dStr); - free(dStr); + js_free(dStr); } if (!str) return JS_FALSE; @@ -460,7 +460,7 @@ num_toLocaleString(JSContext *cx, uintN argc, jsval *vp) } tmpGroup--; - buf = (char *)JS_malloc(cx, size + 1); + buf = (char *)cx->malloc(size + 1); if (!buf) return JS_FALSE; @@ -492,7 +492,7 @@ num_toLocaleString(JSContext *cx, uintN argc, jsval *vp) str = JS_NewString(cx, buf, size); if (!str) { - JS_free(cx, buf); + cx->free(buf); return JS_FALSE; } @@ -739,9 +739,9 @@ js_FinishRuntimeNumberState(JSContext *cx) rt->jsNegativeInfinity = NULL; rt->jsPositiveInfinity = NULL; - JS_free(cx, (void *)rt->thousandsSeparator); - JS_free(cx, (void *)rt->decimalSeparator); - JS_free(cx, (void *)rt->numGrouping); + cx->free((void *)rt->thousandsSeparator); + cx->free((void *)rt->decimalSeparator); + cx->free((void *)rt->numGrouping); rt->thousandsSeparator = rt->decimalSeparator = rt->numGrouping = NULL; } @@ -852,7 +852,7 @@ NumberToStringWithBase(JSContext *cx, jsdouble d, jsint base) return NULL; s = JS_NewStringCopyZ(cx, numStr); if (!(numStr >= buf && numStr < buf + sizeof buf)) - free(numStr); + js_free(numStr); return 
s; } @@ -1251,7 +1251,7 @@ js_strtod(JSContext *cx, const jschar *s, const jschar *send, /* Use cbuf to avoid malloc */ if (length >= sizeof cbuf) { - cstr = (char *) JS_malloc(cx, length + 1); + cstr = (char *) cx->malloc(length + 1); if (!cstr) return JS_FALSE; } else { @@ -1292,7 +1292,7 @@ js_strtod(JSContext *cx, const jschar *s, const jschar *send, i = estr - cstr; if (cstr != cbuf) - JS_free(cx, cstr); + cx->free(cstr); *ep = i ? s1 + i : s; *dp = d; return JS_TRUE; @@ -1405,7 +1405,7 @@ js_strtointeger(JSContext *cx, const jschar *s, const jschar *send, */ size_t i; size_t length = s1 - start; - char *cstr = (char *) JS_malloc(cx, length + 1); + char *cstr = (char *) cx->malloc(length + 1); char *estr; int err=0; @@ -1418,12 +1418,12 @@ js_strtointeger(JSContext *cx, const jschar *s, const jschar *send, value = JS_strtod(cstr, &estr, &err); if (err == JS_DTOA_ENOMEM) { JS_ReportOutOfMemory(cx); - JS_free(cx, cstr); + cx->free(cstr); return JS_FALSE; } if (err == JS_DTOA_ERANGE && value == HUGE_VAL) value = *cx->runtime->jsPositiveInfinity; - JS_free(cx, cstr); + cx->free(cstr); } else if ((base & (base - 1)) == 0) { /* * The number may also be inaccurate for power-of-two bases. This diff --git a/js/src/jsobj.cpp b/js/src/jsobj.cpp index c14b66e837a..bf5f72b4e80 100644 --- a/js/src/jsobj.cpp +++ b/js/src/jsobj.cpp @@ -560,7 +560,7 @@ out: ida = JS_Enumerate(cx, obj); if (!ida) { if (*sp) { - JS_free(cx, *sp); + cx->free(*sp); *sp = NULL; } goto bad; @@ -704,7 +704,7 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) if (!chars) { /* If outermost, allocate 4 + 1 for "({})" and the terminator. */ - chars = (jschar *) malloc(((outermost ? 4 : 2) + 1) * sizeof(jschar)); + chars = (jschar *) js_malloc(((outermost ? 4 : 2) + 1) * sizeof(jschar)); nchars = 0; if (!chars) goto error; @@ -715,9 +715,9 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) MAKE_SHARP(he); nchars = js_strlen(chars); chars = (jschar *) - realloc((ochars = chars), (nchars + 2 + 1) * sizeof(jschar)); + js_realloc((ochars = chars), (nchars + 2 + 1) * sizeof(jschar)); if (!chars) { - free(ochars); + js_free(ochars); goto error; } if (outermost) { @@ -958,11 +958,11 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) /* Allocate 1 + 1 at end for closing brace and terminating 0. */ chars = (jschar *) - realloc((ochars = chars), curlen * sizeof(jschar)); + js_realloc((ochars = chars), curlen * sizeof(jschar)); if (!chars) { /* Save code space on error: let JS_free ignore null vsharp. 
*/ - JS_free(cx, vsharp); - free(ochars); + cx->free(vsharp); + js_free(ochars); goto error; } @@ -1005,7 +1005,7 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) nchars += vlength; if (vsharp) - JS_free(cx, vsharp); + cx->free(vsharp); } } @@ -1019,7 +1019,7 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) if (!ok) { if (chars) - free(chars); + js_free(chars); goto out; } @@ -1031,7 +1031,7 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) make_string: str = js_NewString(cx, chars, nchars); if (!str) { - free(chars); + js_free(chars); ok = JS_FALSE; goto out; } @@ -1042,8 +1042,8 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) return ok; overflow: - JS_free(cx, vsharp); - free(chars); + cx->free(vsharp); + js_free(chars); chars = NULL; goto error; } @@ -1064,7 +1064,7 @@ obj_toString(JSContext *cx, uintN argc, jsval *vp) obj = js_GetWrappedObject(cx, obj); clazz = OBJ_GET_CLASS(cx, obj)->name; nchars = 9 + strlen(clazz); /* 9 for "[object ]" */ - chars = (jschar *) JS_malloc(cx, (nchars + 1) * sizeof(jschar)); + chars = (jschar *) cx->malloc((nchars + 1) * sizeof(jschar)); if (!chars) return JS_FALSE; @@ -1079,7 +1079,7 @@ obj_toString(JSContext *cx, uintN argc, jsval *vp) str = js_NewString(cx, chars, nchars); if (!str) { - JS_free(cx, chars); + cx->free(chars); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); @@ -2986,7 +2986,7 @@ AllocSlots(JSContext *cx, JSObject *obj, size_t nslots) JS_ASSERT(nslots > JS_INITIAL_NSLOTS); jsval* slots; - slots = (jsval*) JS_malloc(cx, SLOTS_TO_DYNAMIC_WORDS(nslots) * sizeof(jsval)); + slots = (jsval*) cx->malloc(SLOTS_TO_DYNAMIC_WORDS(nslots) * sizeof(jsval)); if (!slots) return true; @@ -3044,7 +3044,7 @@ js_GrowSlots(JSContext *cx, JSObject *obj, size_t nslots) size_t oslots = size_t(slots[-1]); - slots = (jsval*) JS_realloc(cx, slots - 1, nwords * sizeof(jsval)); + slots = (jsval*) cx->realloc(slots - 1, nwords * sizeof(jsval)); *slots++ = nslots; obj->dslots = slots; @@ -3069,11 +3069,11 @@ js_ShrinkSlots(JSContext *cx, JSObject *obj, size_t nslots) JS_ASSERT(nslots <= size_t(slots[-1])); if (nslots <= JS_INITIAL_NSLOTS) { - JS_free(cx, slots - 1); + cx->free(slots - 1); obj->dslots = NULL; } else { size_t nwords = SLOTS_TO_DYNAMIC_WORDS(nslots); - slots = (jsval*) JS_realloc(cx, slots - 1, nwords * sizeof(jsval)); + slots = (jsval*) cx->realloc(slots - 1, nwords * sizeof(jsval)); *slots++ = nslots; obj->dslots = slots; } @@ -4965,7 +4965,7 @@ js_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op, } allocated = NativeEnumeratorSize(length); - ne = (JSNativeEnumerator *) JS_malloc(cx, allocated); + ne = (JSNativeEnumerator *) cx->malloc(allocated); if (!ne) { JS_UNLOCK_SCOPE(cx, scope); return JS_FALSE; @@ -4997,7 +4997,7 @@ js_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op, JS_LOCK_GC(cx->runtime); if (!js_AddAsGCBytes(cx, allocated)) { /* js_AddAsGCBytes releases the GC lock on failures. 
*/ - JS_free(cx, ne); + cx->free(ne); return JS_FALSE; } ne->next = cx->runtime->nativeEnumerators; @@ -5085,12 +5085,12 @@ js_TraceNativeEnumerators(JSTracer *trc) cursor = ne->ids; end = cursor + ne->length; do { - TRACE_ID(trc, *cursor); + js_TraceId(trc, *cursor); } while (++cursor != end); } else if (doGC) { js_RemoveAsGCBytes(rt, NativeEnumeratorSize(ne->length)); *nep = ne->next; - JS_free(trc->context, ne); + trc->context->free(ne); continue; } nep = &ne->next; @@ -5748,7 +5748,6 @@ js_TraceObject(JSTracer *trc, JSObject *obj) { JSContext *cx; JSScope *scope; - JSScopeProperty *sprop; JSClass *clasp; size_t nslots, i; jsval v; @@ -5772,32 +5771,7 @@ js_TraceObject(JSTracer *trc, JSObject *obj) MeterEntryCount(scope->entryCount); #endif - sprop = scope->lastProp; - if (sprop) { - JS_ASSERT(scope->has(sprop)); - - /* Regenerate property cache shape ids if GC'ing. */ - if (IS_GC_MARKING_TRACER(trc) && cx->runtime->gcRegenShapes) { - if (!(sprop->flags & SPROP_FLAG_SHAPE_REGEN)) { - sprop->shape = js_RegenerateShapeForGC(cx); - sprop->flags |= SPROP_FLAG_SHAPE_REGEN; - } - - uint32 shape = sprop->shape; - if (scope->hasOwnShape()) { - shape = js_RegenerateShapeForGC(cx); - JS_ASSERT(shape != sprop->shape); - } - scope->shape = shape; - } - - /* Trace scope's property tree ancestor line. */ - do { - if (scope->hadMiddleDelete() && !scope->has(sprop)) - continue; - TRACE_SCOPE_PROPERTY(trc, sprop); - } while ((sprop = sprop->parent) != NULL); - } + scope->trace(trc); if (!JS_CLIST_IS_EMPTY(&cx->runtime->watchPointList)) js_TraceWatchPoints(trc, obj); diff --git a/js/src/jsobj.h b/js/src/jsobj.h index 57058c9f1d6..4bc58c64e90 100644 --- a/js/src/jsobj.h +++ b/js/src/jsobj.h @@ -914,6 +914,10 @@ JS_FRIEND_API(void) js_DumpStackFrame(JSStackFrame *fp); extern uintN js_InferFlags(JSContext *cx, uintN defaultFlags); +/* Object constructor native. Exposed only so the JIT can know its address. */ +JSBool +js_Object(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval); + JS_END_EXTERN_C #endif /* jsobj_h___ */ diff --git a/js/src/json.cpp b/js/src/json.cpp index 7f7c2f2dfe4..6f790f0e984 100644 --- a/js/src/json.cpp +++ b/js/src/json.cpp @@ -38,7 +38,7 @@ * * ***** END LICENSE BLOCK ***** */ -#include <string.h> /* memset */ +#include <string.h> #include "jsapi.h" #include "jsarena.h" #include "jsarray.h" @@ -78,7 +78,7 @@ js_json_parse(JSContext *cx, uintN argc, jsval *vp) jsval *argv = vp + 2; jsval reviver = JSVAL_NULL; JSAutoTempValueRooter(cx, 1, &reviver); - + if (!JS_ConvertArguments(cx, argc, argv, "S / v", &s, &reviver)) return JS_FALSE; @@ -523,7 +523,7 @@ Str(JSContext *cx, jsid id, JSObject *holder, StringifyContext *scx, jsval *vp, char numBuf[DTOSTR_STANDARD_BUFFER_SIZE], *numStr; jsdouble d = JSVAL_IS_INT(*vp) ?
jsdouble(JSVAL_TO_INT(*vp)) : *JSVAL_TO_DOUBLE(*vp); - numStr = JS_dtostr(numBuf, sizeof numBuf, DTOSTR_STANDARD, 0, d); + numStr = JS_dtostr(numBuf, sizeof numBuf, DTOSTR_STANDARD, 0, d); if (!numStr) { JS_ReportOutOfMemory(cx); return JS_FALSE; @@ -546,7 +546,7 @@ Str(JSContext *cx, jsid id, JSObject *holder, StringifyContext *scx, jsval *vp, return ok; } - + *vp = JSVAL_VOID; return JS_TRUE; } @@ -640,7 +640,7 @@ static JSBool Walk(JSContext *cx, jsid id, JSObject *holder, jsval reviver, jsval *vp) { JS_CHECK_RECURSION(cx, return JS_FALSE); - + if (!OBJ_GET_PROPERTY(cx, holder, id, vp)) return JS_FALSE; @@ -649,7 +649,7 @@ Walk(JSContext *cx, jsid id, JSObject *holder, jsval reviver, jsval *vp) if (!JSVAL_IS_PRIMITIVE(*vp) && !js_IsCallable(obj = JSVAL_TO_OBJECT(*vp), cx)) { jsval propValue = JSVAL_NULL; JSAutoTempValueRooter tvr(cx, 1, &propValue); - + if(OBJ_IS_ARRAY(cx, obj)) { jsuint length = 0; if (!js_GetLengthProperty(cx, obj, &length)) @@ -713,7 +713,7 @@ Walk(JSContext *cx, jsid id, JSObject *holder, jsval reviver, jsval *vp) static JSBool Revive(JSContext *cx, jsval reviver, jsval *vp) { - + JSObject *obj = js_NewObject(cx, &js_ObjectClass, NULL, NULL); if (!obj) return JS_FALSE; @@ -740,10 +740,9 @@ js_BeginJSONParse(JSContext *cx, jsval *rootVal) if (!arr) return NULL; - JSONParser *jp = (JSONParser*) JS_malloc(cx, sizeof(JSONParser)); + JSONParser *jp = (JSONParser*) cx->calloc(sizeof(JSONParser)); if (!jp) return NULL; - memset(jp, 0, sizeof *jp); jp->objectStack = arr; if (!js_AddRoot(cx, &jp->objectStack, "JSON parse stack")) @@ -798,7 +797,7 @@ js_FinishJSONParse(JSContext *cx, JSONParser *jp, jsval reviver) JSBool ok = *jp->statep == JSON_PARSE_STATE_FINISHED; jsval *vp = jp->rootVal; - JS_free(cx, jp); + cx->free(jp); if (!early_ok) return JS_FALSE; @@ -820,7 +819,7 @@ PushState(JSContext *cx, JSONParser *jp, JSONParserState state) if (*jp->statep == JSON_PARSE_STATE_FINISHED) { // extra input JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_JSON_BAD_PARSE); - return JS_FALSE; + return JS_FALSE; } jp->statep++; @@ -993,10 +992,10 @@ HandleNumber(JSContext *cx, JSONParser *jp, const jschar *buf, uint32 len) return JS_FALSE; } - jsval numVal; + jsval numVal; if (!JS_NewNumberValue(cx, val, &numVal)) return JS_FALSE; - + return PushPrimitive(cx, jp, numVal); } @@ -1248,7 +1247,7 @@ js_ConsumeJSONText(JSContext *cx, JSONParser *jp, const jschar *data, uint32 len JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_JSON_BAD_PARSE); return JS_FALSE; } - + if (++(jp->numHex) == 4) { js_FastAppendChar(&jp->buffer, jp->hexChar); jp->hexChar = 0; @@ -1265,7 +1264,7 @@ js_ConsumeJSONText(JSContext *cx, JSONParser *jp, const jschar *data, uint32 len i--; if (!PopState(cx, jp)) return JS_FALSE; - + if (!HandleData(cx, jp, JSON_DATA_KEYWORD)) return JS_FALSE; } diff --git a/js/src/jsopcode.cpp b/js/src/jsopcode.cpp index 98e0970cedd..28376ee0076 100644 --- a/js/src/jsopcode.cpp +++ b/js/src/jsopcode.cpp @@ -604,7 +604,7 @@ Sprint(Sprinter *sp, const char *format, ...) 
return -1; } offset = SprintCString(sp, bp); - free(bp); + js_free(bp); return offset; } @@ -737,7 +737,7 @@ JS_NEW_PRINTER(JSContext *cx, const char *name, JSFunction *fun, { JSPrinter *jp; - jp = (JSPrinter *) JS_malloc(cx, sizeof(JSPrinter)); + jp = (JSPrinter *) cx->malloc(sizeof(JSPrinter)); if (!jp) return NULL; INIT_SPRINTER(cx, &jp->sprinter, &jp->pool, 0); @@ -764,7 +764,7 @@ void js_DestroyPrinter(JSPrinter *jp) { JS_FinishArenaPool(&jp->pool); - JS_free(jp->sprinter.context, jp); + jp->sprinter.context->free(jp); } JSString * @@ -832,7 +832,7 @@ js_printf(JSPrinter *jp, const char *format, ...) /* Allocate temp space, convert format, and put. */ bp = JS_vsmprintf(format, ap); /* XXX vsaprintf */ if (fp) { - JS_free(jp->sprinter.context, fp); + jp->sprinter.context->free(fp); format = NULL; } if (!bp) { @@ -843,7 +843,7 @@ js_printf(JSPrinter *jp, const char *format, ...) cc = strlen(bp); if (SprintPut(&jp->sprinter, bp, (size_t)cc) < 0) cc = -1; - free(bp); + js_free(bp); va_end(ap); return cc; @@ -929,7 +929,7 @@ GetOff(SprintStack *ss, uintN i) if (off < 0) off = 0; ss->offsets[i] = off; - JS_free(ss->sprinter.context, bytes); + ss->sprinter.context->free(bytes); return off; } if (!ss->sprinter.base && SprintPut(&ss->sprinter, "", 0) >= 0) { @@ -2508,14 +2508,14 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) len = 0; if (!Decompile(ss, done, pc - done, JSOP_POP)) { - JS_free(cx, (char *)lval); + cx->free((char *)lval); return NULL; } /* Pop Decompile result and print comma expression. */ rval = POP_STR(); todo = Sprint(&ss->sprinter, "%s, %s", lval, rval); - JS_free(cx, (char *)lval); + cx->free((char *)lval); break; case SRC_HIDDEN: @@ -2547,7 +2547,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) /* Set saveop to reflect what we will push. */ saveop = JSOP_LEAVEBLOCKEXPR; if (!Decompile(ss, pc, len, saveop)) { - JS_free(cx, (char *)lval); + cx->free((char *)lval); return NULL; } rval = PopStr(ss, JSOP_SETNAME); @@ -2556,7 +2556,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) ? "let (%s) (%s)" : "let (%s) %s", lval, rval); - JS_free(cx, (char *)lval); + cx->free((char *)lval); } break; @@ -2620,7 +2620,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) if ((size_t)argc <= JS_ARRAY_LENGTH(smallv)) { atomv = smallv; } else { - atomv = (JSAtom **) JS_malloc(cx, argc * sizeof(JSAtom *)); + atomv = (JSAtom **) cx->malloc(argc * sizeof(JSAtom *)); if (!atomv) return NULL; } @@ -2755,7 +2755,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) #undef LOCAL_ASSERT_OUT enterblock_out: if (atomv != smallv) - JS_free(cx, atomv); + cx->free(atomv); if (!ok) return NULL; } @@ -3280,7 +3280,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) DECOMPILE_CODE(pc + oplen, len - oplen); lval = JS_strdup(cx, POP_STR()); if (!lval) { - JS_free(cx, (void *)xval); + cx->free((void *)xval); return NULL; } pc += len; @@ -3291,8 +3291,8 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) rval = POP_STR(); todo = Sprint(&ss->sprinter, "%s ? 
%s : %s", xval, lval, rval); - JS_free(cx, (void *)xval); - JS_free(cx, (void *)lval); + cx->free((void *)xval); + cx->free((void *)lval); break; default: @@ -3319,7 +3319,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) pc += len; len = done - pc; if (!Decompile(ss, pc, len, op)) { - JS_free(cx, (char *)lval); + cx->free((char *)lval); return NULL; } rval = POP_STR(); @@ -3332,14 +3332,14 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) todo = Sprint(&ss->sprinter, "%s %s\n", lval, xval); tail = Sprint(&ss->sprinter, "%*s%s", jp->indent + 4, "", rval); - JS_free(cx, (char *)rval); + cx->free((char *)rval); } if (tail < 0) todo = -1; } else { todo = Sprint(&ss->sprinter, "%s %s %s", lval, xval, rval); } - JS_free(cx, (char *)lval); + cx->free((char *)lval); break; case JSOP_AND: @@ -3532,7 +3532,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) #endif argc = GET_ARGC(pc); argv = (char **) - JS_malloc(cx, (size_t)(argc + 1) * sizeof *argv); + cx->malloc((size_t)(argc + 1) * sizeof *argv); if (!argv) return NULL; @@ -3590,8 +3590,8 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) ok = JS_FALSE; for (i = 0; i <= argc; i++) - JS_free(cx, argv[i]); - JS_free(cx, argv); + cx->free(argv[i]); + cx->free(argv); if (!ok) return NULL; #if JS_HAS_LVALUE_RETURN @@ -4095,7 +4095,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) if (!rval) return NULL; todo = SprintCString(&ss->sprinter, rval); - JS_free(cx, (void *)rval); + cx->free((void *)rval); break; } #endif /* JS_HAS_GENERATOR_EXPRS */ @@ -4166,7 +4166,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) ok = JS_TRUE; } else { table = (TableEntry *) - JS_malloc(cx, (size_t)n * sizeof *table); + cx->malloc((size_t)n * sizeof *table); if (!table) return NULL; for (i = j = 0; i < n; i++) { @@ -4186,12 +4186,12 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) pc2 += jmplen; } tmp = (TableEntry *) - JS_malloc(cx, (size_t)j * sizeof *table); + cx->malloc((size_t)j * sizeof *table); if (tmp) { VOUCH_DOES_NOT_REQUIRE_STACK(); ok = js_MergeSort(table, (size_t)j, sizeof(TableEntry), CompareOffsets, NULL, tmp); - JS_free(cx, tmp); + cx->free(tmp); } else { ok = JS_FALSE; } @@ -4201,7 +4201,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) ok = DecompileSwitch(ss, table, (uintN)j, pc, len, off, JS_FALSE); } - JS_free(cx, table); + cx->free(table); if (!ok) return NULL; todo = -2; @@ -4227,7 +4227,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) pc2 += UINT16_LEN; table = (TableEntry *) - JS_malloc(cx, (size_t)npairs * sizeof *table); + cx->malloc((size_t)npairs * sizeof *table); if (!table) return NULL; for (k = 0; k < npairs; k++) { @@ -4248,7 +4248,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) ok = DecompileSwitch(ss, table, (uintN)npairs, pc, len, off, JS_FALSE); - JS_free(cx, table); + cx->free(table); if (!ok) return NULL; todo = -2; @@ -4292,7 +4292,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) * and the distance to its statements in table[i].offset. 
*/ table = (TableEntry *) - JS_malloc(cx, (size_t)ncases * sizeof *table); + cx->malloc((size_t)ncases * sizeof *table); if (!table) return NULL; pc2 = pc; @@ -4322,7 +4322,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) ok = DecompileSwitch(ss, table, (uintN)ncases, pc, len, off, JS_TRUE); - JS_free(cx, table); + cx->free(table); if (!ok) return NULL; todo = -2; @@ -4370,7 +4370,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) break; } - argv = (char **) JS_malloc(cx, size_t(argc) * sizeof *argv); + argv = (char **) cx->malloc(size_t(argc) * sizeof *argv); if (!argv) return NULL; @@ -4394,8 +4394,8 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) } for (i = 0; i < argc; i++) - JS_free(cx, argv[i]); - JS_free(cx, argv); + cx->free(argv[i]); + cx->free(argv); if (!ok) return NULL; @@ -4728,7 +4728,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) (*rval == '\0' || (SprintPut(&ss->sprinter, " ", 1) >= 0 && SprintCString(&ss->sprinter, rval))); - JS_free(cx, (char *)rval); + cx->free((char *)rval); if (!ok) return NULL; SprintPut(&ss->sprinter, "?>", 2); @@ -4836,7 +4836,7 @@ DecompileCode(JSPrinter *jp, JSScript *script, jsbytecode *pc, uintN len, ok = Decompile(&ss, pc, len, JSOP_NOP) != NULL; if (code != oldcode) { - JS_free(cx, jp->script->code); + cx->free(jp->script->code); jp->script->code = oldcode; jp->script->main = oldmain; } @@ -5055,7 +5055,7 @@ js_DecompileValueGenerator(JSContext *cx, intN spindex, jsval v, * populated interpreter's stack with its current content. */ pcstack = (jsbytecode **) - JS_malloc(cx, StackDepth(script) * sizeof *pcstack); + cx->malloc(StackDepth(script) * sizeof *pcstack); if (!pcstack) return NULL; pcdepth = ReconstructPCStack(cx, script, pc, pcstack); @@ -5096,7 +5096,7 @@ js_DecompileValueGenerator(JSContext *cx, intN spindex, jsval v, } release_pcstack: - JS_free(cx, pcstack); + cx->free(pcstack); if (pcdepth < 0) goto do_fallback; } @@ -5232,7 +5232,7 @@ DecompileExpression(JSContext *cx, JSScript *script, JSFunction *fun, } pcstack = (jsbytecode **) - JS_malloc(cx, StackDepth(script) * sizeof *pcstack); + cx->malloc(StackDepth(script) * sizeof *pcstack); if (!pcstack) { name = NULL; goto out; @@ -5259,12 +5259,12 @@ DecompileExpression(JSContext *cx, JSScript *script, JSFunction *fun, out: if (code != oldcode) { - JS_free(cx, script->code); + cx->free(script->code); script->code = oldcode; script->main = oldmain; } - JS_free(cx, pcstack); + cx->free(pcstack); return name; } @@ -5339,7 +5339,7 @@ SimulateImacroCFG(JSContext *cx, JSScript *script, jsbytecode **pcstack) { size_t nbytes = StackDepth(script) * sizeof *pcstack; - jsbytecode** tmp_pcstack = (jsbytecode **) JS_malloc(cx, nbytes); + jsbytecode** tmp_pcstack = (jsbytecode **) cx->malloc(nbytes); if (!tmp_pcstack) return -1; memcpy(tmp_pcstack, pcstack, nbytes); @@ -5379,11 +5379,11 @@ SimulateImacroCFG(JSContext *cx, JSScript *script, success: memcpy(pcstack, tmp_pcstack, nbytes); - JS_free(cx, tmp_pcstack); + cx->free(tmp_pcstack); return pcdepth; failure: - JS_free(cx, tmp_pcstack); + cx->free(tmp_pcstack); return -1; } diff --git a/js/src/jsparse.cpp b/js/src/jsparse.cpp index 8d87d9df77e..35cd45dd9d6 100644 --- a/js/src/jsparse.cpp +++ b/js/src/jsparse.cpp @@ -3802,7 +3802,7 @@ CheckDestructuring(JSContext *cx, BindData *data, /* * This is a greatly pared down version of CheckDestructuring that extends the * pn_pos.end source coordinate of each name in a destructuring binding such as - * + * * var [x, y] = 
[function () y, 42]; * * to cover its corresponding initializer, so that the initialized binding does @@ -9036,12 +9036,12 @@ js_FoldConstants(JSContext *cx, JSParseNode *pn, JSTreeContext *tc, bool inCond) } /* Allocate a new buffer and string descriptor for the result. */ - chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar)); + chars = (jschar *) cx->malloc((length + 1) * sizeof(jschar)); if (!chars) return JS_FALSE; str = js_NewString(cx, chars, length); if (!str) { - JS_free(cx, chars); + cx->free(chars); return JS_FALSE; } diff --git a/js/src/jsprf.cpp b/js/src/jsprf.cpp index 5686c1eabcd..6e02bc2d185 100644 --- a/js/src/jsprf.cpp +++ b/js/src/jsprf.cpp @@ -412,7 +412,7 @@ static int cvt_ws(SprintfState *ss, const jschar *ws, int width, int prec, if (!s) return -1; /* JSStuffFunc error indicator. */ result = cvt_s(ss, s, width, prec, flags); - free(s); + js_free(s); } else { result = cvt_s(ss, NULL, width, prec, flags); } @@ -630,7 +630,7 @@ static struct NumArgState* BuildArgArray( const char *fmt, va_list ap, int* rv, if( *rv < 0 ){ if( nas != nasArray ) - free( nas ); + js_free( nas ); return NULL; } @@ -667,7 +667,7 @@ static struct NumArgState* BuildArgArray( const char *fmt, va_list ap, int* rv, default: if( nas != nasArray ) - free( nas ); + js_free( nas ); *rv = -1; return NULL; } @@ -756,7 +756,7 @@ static int dosprintf(SprintfState *ss, const char *fmt, va_list ap) if( nas[i-1].type == TYPE_UNKNOWN ){ if( nas && ( nas != nasArray ) ) - free( nas ); + js_free( nas ); return -1; } @@ -1037,7 +1037,7 @@ static int dosprintf(SprintfState *ss, const char *fmt, va_list ap) rv = (*ss->stuff)(ss, "\0", 1); if( nas && ( nas != nasArray ) ){ - free( nas ); + js_free( nas ); } return rv; @@ -1098,9 +1098,9 @@ static int GrowStuff(SprintfState *ss, const char *sp, JSUint32 len) /* Grow the buffer */ newlen = ss->maxlen + ((len > 32) ? len : 32); if (ss->base) { - newbase = (char*) realloc(ss->base, newlen); + newbase = (char*) js_realloc(ss->base, newlen); } else { - newbase = (char*) malloc(newlen); + newbase = (char*) js_malloc(newlen); } if (!newbase) { /* Ran out of memory */ @@ -1139,7 +1139,7 @@ JS_PUBLIC_API(char *) JS_smprintf(const char *fmt, ...) */ JS_PUBLIC_API(void) JS_smprintf_free(char *mem) { - free(mem); + js_free(mem); } JS_PUBLIC_API(char *) JS_vsmprintf(const char *fmt, va_list ap) @@ -1154,7 +1154,7 @@ JS_PUBLIC_API(char *) JS_vsmprintf(const char *fmt, va_list ap) rv = dosprintf(&ss, fmt, ap); if (rv < 0) { if (ss.base) { - free(ss.base); + js_free(ss.base); } return 0; } @@ -1253,7 +1253,7 @@ JS_PUBLIC_API(char *) JS_vsprintf_append(char *last, const char *fmt, va_list ap rv = dosprintf(&ss, fmt, ap); if (rv < 0) { if (ss.base) { - free(ss.base); + js_free(ss.base); } return 0; } diff --git a/js/src/jspubtd.h b/js/src/jspubtd.h index 6c28b136a79..7efcbfbcc64 100644 --- a/js/src/jspubtd.h +++ b/js/src/jspubtd.h @@ -106,8 +106,8 @@ typedef enum JSAccessMode { JSACC_PROTO = 0, /* XXXbe redundant w.r.t. id */ JSACC_PARENT = 1, /* XXXbe redundant w.r.t. id */ - /* - * enum value #2 formerly called JSACC_IMPORT, + /* + * enum value #2 formerly called JSACC_IMPORT, * gap preserved for ABI compatibility. 
*/ @@ -145,7 +145,6 @@ typedef struct JSObject JSObject; typedef struct JSObjectMap JSObjectMap; typedef struct JSObjectOps JSObjectOps; typedef struct JSRuntime JSRuntime; -typedef struct JSRuntime JSTaskState; /* XXX deprecated name */ typedef struct JSScript JSScript; typedef struct JSStackFrame JSStackFrame; typedef struct JSString JSString; diff --git a/js/src/jsregexp.cpp b/js/src/jsregexp.cpp index 91a41e49da7..8a098ee4265 100644 --- a/js/src/jsregexp.cpp +++ b/js/src/jsregexp.cpp @@ -585,12 +585,12 @@ ParseRegExp(CompilerState *state) } operatorStack = (REOpData *) - JS_malloc(state->context, sizeof(REOpData) * operatorStackSize); + state->context->malloc(sizeof(REOpData) * operatorStackSize); if (!operatorStack) return JS_FALSE; operandStack = (RENode **) - JS_malloc(state->context, sizeof(RENode *) * operandStackSize); + state->context->malloc(sizeof(RENode *) * operandStackSize); if (!operandStack) goto out; @@ -682,8 +682,8 @@ pushOperand: RENode **tmp; operandStackSize += operandStackSize; tmp = (RENode **) - JS_realloc(state->context, operandStack, - sizeof(RENode *) * operandStackSize); + state->context->realloc(operandStack, + sizeof(RENode *) * operandStackSize); if (!tmp) goto out; operandStack = tmp; @@ -817,8 +817,8 @@ pushOperator: REOpData *tmp; operatorStackSize += operatorStackSize; tmp = (REOpData *) - JS_realloc(state->context, operatorStack, - sizeof(REOpData) * operatorStackSize); + state->context->realloc(operatorStack, + sizeof(REOpData) * operatorStackSize); if (!tmp) goto out; operatorStack = tmp; @@ -831,9 +831,9 @@ pushOperator: } out: if (operatorStack) - JS_free(state->context, operatorStack); + state->context->free(operatorStack); if (operandStack) - JS_free(state->context, operandStack); + state->context->free(operandStack); return result; } @@ -1647,9 +1647,8 @@ EmitREBytecode(CompilerState *state, JSRegExp *re, size_t treeDepth, emitStateStack = NULL; } else { emitStateStack = - (EmitStateStackEntry *)JS_malloc(state->context, - sizeof(EmitStateStackEntry) * - treeDepth); + (EmitStateStackEntry *) + state->context->malloc(sizeof(EmitStateStackEntry) * treeDepth); if (!emitStateStack) return NULL; } @@ -1951,7 +1950,7 @@ EmitREBytecode(CompilerState *state, JSRegExp *re, size_t treeDepth, cleanup: if (emitStateStack) - JS_free(state->context, emitStateStack); + state->context->free(emitStateStack); return pc; jump_too_big: @@ -1997,7 +1996,7 @@ CompileRegExpToAST(JSContext* cx, JSTokenStream* ts, + GetCompactIndexWidth(len); return JS_TRUE; } - + return ParseRegExp(&state); } @@ -2410,7 +2409,7 @@ class RegExpNativeCompiler { LIns *branch = lir->insBranch(LIR_jt, test, 0); extras[i].match = branch; } - + fails.pushBack(lir->insBranch(LIR_jf, lir->ins2(LIR_eq, text_ch, lir->insImm(ch)), 0)); for (int i = 0; i < nextras; ++i) @@ -2418,7 +2417,7 @@ class RegExpNativeCompiler { return lir->ins2(LIR_piadd, pos, lir->insImm(2)); } - JS_INLINE bool hasCases(jschar ch) + JS_INLINE bool hasCases(jschar ch) { return JS_TOLOWER(ch) != JS_TOUPPER(ch); } @@ -3166,10 +3165,6 @@ CompileRegExpToNative(JSContext* cx, JSRegExp* re, Fragment* fragment) goto out; } rv = rc.compile(cx); - static int fail = 0; // TODO: remove - if (!rv) - ++fail; - debug_only_printf(LC_TMRegexp, "## Fail? 
%d, Total %d\n", (int)!rv, fail); out: JS_ARENA_RELEASE(&cx->tempPool, mark); return rv; @@ -3232,7 +3227,7 @@ js_NewRegExp(JSContext *cx, JSTokenStream *ts, goto out; resize = offsetof(JSRegExp, program) + state.progLength + 1; - re = (JSRegExp *) JS_malloc(cx, resize); + re = (JSRegExp *) cx->malloc(resize); if (!re) goto out; @@ -3241,7 +3236,7 @@ js_NewRegExp(JSContext *cx, JSTokenStream *ts, re->classCount = state.classCount; if (re->classCount) { re->classList = (RECharSet *) - JS_malloc(cx, re->classCount * sizeof(RECharSet)); + cx->malloc(re->classCount * sizeof(RECharSet)); if (!re->classList) { js_DestroyRegExp(cx, re); re = NULL; @@ -3270,7 +3265,7 @@ js_NewRegExp(JSContext *cx, JSTokenStream *ts, JSRegExp *tmp; JS_ASSERT((size_t)(endPC - re->program) < state.progLength + 1); resize = offsetof(JSRegExp, program) + (endPC - re->program); - tmp = (JSRegExp *) JS_realloc(cx, re, resize); + tmp = (JSRegExp *) cx->realloc(re, resize); if (tmp) re = tmp; } @@ -3610,7 +3605,7 @@ ProcessCharSet(JSContext *cx, JSRegExp *re, RECharSet *charSet) JS_ASSERT(end[0] == ']'); byteLength = (charSet->length >> 3) + 1; - charSet->u.bits = (uint8 *)JS_malloc(cx, byteLength); + charSet->u.bits = (uint8 *)cx->malloc(byteLength); if (!charSet->u.bits) { JS_ReportOutOfMemory(cx); return JS_FALSE; @@ -3804,12 +3799,12 @@ js_DestroyRegExp(JSContext *cx, JSRegExp *re) uintN i; for (i = 0; i < re->classCount; i++) { if (re->classList[i].converted) - JS_free(cx, re->classList[i].u.bits); + cx->free(re->classList[i].u.bits); re->classList[i].u.bits = NULL; } - JS_free(cx, re->classList); + cx->free(re->classList); } - JS_free(cx, re); + cx->free(re); } } @@ -4874,12 +4869,12 @@ js_ExecuteRegExp(JSContext *cx, JSRegExp *re, JSString *str, size_t *indexp, if (!morepar) { res->moreLength = 10; morepar = (JSSubString*) - JS_malloc(cx, 10 * sizeof(JSSubString)); + cx->malloc(10 * sizeof(JSSubString)); } else if (morenum >= res->moreLength) { res->moreLength += 10; morepar = (JSSubString*) - JS_realloc(cx, morepar, - res->moreLength * sizeof(JSSubString)); + cx->realloc(morepar, + res->moreLength * sizeof(JSSubString)); } if (!morepar) { cx->weakRoots.newborn[GCX_OBJECT] = NULL; @@ -5118,7 +5113,7 @@ js_FreeRegExpStatics(JSContext *cx) JSRegExpStatics *res = &cx->regExpStatics; if (res->moreParens) { - JS_free(cx, res->moreParens); + cx->free(res->moreParens); res->moreParens = NULL; } JS_FinishArenaPool(&cx->regexpPool); @@ -5369,7 +5364,7 @@ js_regexp_toString(JSContext *cx, JSObject *obj, jsval *vp) nflags = 0; for (flags = re->flags; flags != 0; flags &= flags - 1) nflags++; - chars = (jschar*) JS_malloc(cx, (length + nflags + 1) * sizeof(jschar)); + chars = (jschar*) cx->malloc((length + nflags + 1) * sizeof(jschar)); if (!chars) { JS_UNLOCK_OBJ(cx, obj); return JS_FALSE; @@ -5393,7 +5388,7 @@ js_regexp_toString(JSContext *cx, JSObject *obj, jsval *vp) str = js_NewString(cx, chars, length); if (!str) { - JS_free(cx, chars); + cx->free(chars); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); @@ -5476,15 +5471,15 @@ regexp_compile_sub(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, if (*cp == '/' && (cp == start || cp[-1] != '\\')) { nbytes = (++length + 1) * sizeof(jschar); if (!nstart) { - nstart = (jschar *) JS_malloc(cx, nbytes); + nstart = (jschar *) cx->malloc(nbytes); if (!nstart) return JS_FALSE; ncp = nstart + (cp - start); js_strncpy(nstart, start, cp - start); } else { - tmp = (jschar *) JS_realloc(cx, nstart, nbytes); + tmp = (jschar *) cx->realloc(nstart, nbytes); if (!tmp) { - JS_free(cx, 
nstart); + cx->free(nstart); return JS_FALSE; } ncp = tmp + (ncp - nstart); @@ -5502,7 +5497,7 @@ regexp_compile_sub(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, *ncp = 0; str = js_NewString(cx, nstart, length); if (!str) { - JS_free(cx, nstart); + cx->free(nstart); return JS_FALSE; } argv[0] = STRING_TO_JSVAL(str); diff --git a/js/src/jsscan.cpp b/js/src/jsscan.cpp index 4cab27e9268..90f3547671d 100644 --- a/js/src/jsscan.cpp +++ b/js/src/jsscan.cpp @@ -258,7 +258,7 @@ void js_CloseTokenStream(JSContext *cx, JSTokenStream *ts) { if (ts->flags & TSF_OWNFILENAME) - JS_free(cx, (void *) ts->filename); + cx->free((void *) ts->filename); } JS_FRIEND_API(int) @@ -308,7 +308,7 @@ GetChar(JSTokenStream *ts) ts->flags |= TSF_EOF; return EOF; } - + /* Fill ts->userbuf so that \r and \r\n convert to \n. */ crflag = (ts->flags & TSF_CRFLAG) != 0; len = js_fgets(cbuf, JS_LINE_LIMIT - crflag, ts->file); @@ -336,7 +336,7 @@ GetChar(JSTokenStream *ts) ts->listener(ts->filename, ts->lineno, ts->userbuf.ptr, len, &ts->listenerTSData, ts->listenerData); } - + nl = ts->saveEOL; if (!nl) { /* @@ -362,7 +362,7 @@ GetChar(JSTokenStream *ts) } } } - + /* * If there was a line terminator, copy thru it into linebuf. * Else copy JS_LINE_LIMIT-1 bytes into linebuf. @@ -378,7 +378,7 @@ GetChar(JSTokenStream *ts) js_strncpy(ts->linebuf.base, ts->userbuf.ptr, len); ts->userbuf.ptr += len; olen = len; - + /* * Make sure linebuf contains \n for EOL (don't do this in * userbuf because the user's string might be readonly). @@ -420,11 +420,11 @@ GetChar(JSTokenStream *ts) ts->linebuf.base[len-1] = '\n'; } } - + /* Reset linebuf based on adjusted segment length. */ ts->linebuf.limit = ts->linebuf.base + len; ts->linebuf.ptr = ts->linebuf.base; - + /* Update position of linebuf within physical userbuf line. */ if (!(ts->flags & TSF_NLFLAG)) ts->linepos += ts->linelen; @@ -434,7 +434,7 @@ GetChar(JSTokenStream *ts) ts->flags |= TSF_NLFLAG; else ts->flags &= ~TSF_NLFLAG; - + /* Update linelen from original segment length. */ ts->linelen = olen; } @@ -562,7 +562,7 @@ js_ReportCompileErrorNumber(JSContext *cx, JSTokenStream *ts, JSParseNode *pn, } report.lineno = ts->lineno; linelength = ts->linebuf.limit - ts->linebuf.base; - linechars = (jschar *)JS_malloc(cx, (linelength + 1) * sizeof(jschar)); + linechars = (jschar *)cx->malloc((linelength + 1) * sizeof(jschar)); if (!linechars) { warning = JS_FALSE; goto out; @@ -651,21 +651,21 @@ js_ReportCompileErrorNumber(JSContext *cx, JSTokenStream *ts, JSParseNode *pn, out: if (linebytes) - JS_free(cx, linebytes); + cx->free(linebytes); if (linechars) - JS_free(cx, linechars); + cx->free(linechars); if (message) - JS_free(cx, message); + cx->free(message); if (report.ucmessage) - JS_free(cx, (void *)report.ucmessage); + cx->free((void *)report.ucmessage); if (report.messageArgs) { if (!(flags & JSREPORT_UC)) { i = 0; while (report.messageArgs[i]) - JS_free(cx, (void *)report.messageArgs[i++]); + cx->free((void *)report.messageArgs[i++]); } - JS_free(cx, (void *)report.messageArgs); + cx->free((void *)report.messageArgs); } if (!JSREPORT_IS_WARNING(flags)) { @@ -698,7 +698,7 @@ GrowStringBuffer(JSStringBuffer *sb, size_t amount) /* Now do the full overflow check. 
*/ if (size_t(offset) < newlength && newlength < ~size_t(0) / sizeof(jschar)) { - jschar *bp = (jschar *) realloc(sb->base, newlength * sizeof(jschar)); + jschar *bp = (jschar *) js_realloc(sb->base, newlength * sizeof(jschar)); if (bp) { sb->base = bp; sb->ptr = bp + offset; @@ -709,7 +709,7 @@ GrowStringBuffer(JSStringBuffer *sb, size_t amount) } /* Either newlength overflow or realloc failure: poison the well. */ - free(sb->base); + js_free(sb->base); sb->base = STRING_BUFFER_ERROR_BASE; return false; } @@ -719,7 +719,7 @@ FreeStringBuffer(JSStringBuffer *sb) { JS_ASSERT(STRING_BUFFER_OK(sb)); if (sb->base) - free(sb->base); + js_free(sb->base); } void @@ -924,7 +924,7 @@ bad: if (bytes) { js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, msg, bytes); - JS_free(cx, bytes); + cx->free(bytes); } return JS_FALSE; } @@ -1788,7 +1788,7 @@ retry: if (c == '\n') { if (i > 0) { if (ts->flags & TSF_OWNFILENAME) - JS_free(cx, (void *) ts->filename); + cx->free((void *) ts->filename); ts->filename = JS_strdup(cx, filename); if (!ts->filename) goto error; diff --git a/js/src/jsscope.cpp b/js/src/jsscope.cpp index c343533dde3..02791e40cff 100644 --- a/js/src/jsscope.cpp +++ b/js/src/jsscope.cpp @@ -166,7 +166,7 @@ JSScope::createTable(JSContext *cx, bool report) sizeLog2 = MIN_SCOPE_SIZE_LOG2; } - table = (JSScopeProperty **) calloc(JS_BIT(sizeLog2), sizeof(JSScopeProperty *)); + table = (JSScopeProperty **) js_calloc(JS_BIT(sizeLog2) * sizeof(JSScopeProperty *)); if (!table) { if (report) JS_ReportOutOfMemory(cx); @@ -188,7 +188,7 @@ JSScope::create(JSContext *cx, JSObjectOps *ops, JSClass *clasp, JSObject *obj) JS_ASSERT(OPS_IS_NATIVE(ops)); JS_ASSERT(obj); - JSScope *scope = (JSScope *) JS_malloc(cx, sizeof(JSScope)); + JSScope *scope = (JSScope *) cx->malloc(sizeof(JSScope)); if (!scope) return NULL; @@ -196,7 +196,7 @@ JSScope::create(JSContext *cx, JSObjectOps *ops, JSClass *clasp, JSObject *obj) scope->object = obj; scope->nrefs = 1; scope->freeslot = JSSLOT_FREE(clasp); - scope->flags = 0; + scope->flags = cx->runtime->gcRegenShapesScopeFlag; js_LeaveTraceIfGlobalObject(cx, obj); scope->initMinimal(cx); @@ -213,7 +213,7 @@ JSScope::createEmptyScope(JSContext *cx, JSClass *clasp) { JS_ASSERT(!emptyScope); - JSScope *scope = (JSScope *) JS_malloc(cx, sizeof(JSScope)); + JSScope *scope = (JSScope *) cx->malloc(sizeof(JSScope)); if (!scope) return NULL; @@ -226,7 +226,7 @@ JSScope::createEmptyScope(JSContext *cx, JSClass *clasp) */ scope->nrefs = 2; scope->freeslot = JSSLOT_FREE(clasp); - scope->flags = 0; + scope->flags = OWN_SHAPE | cx->runtime->gcRegenShapesScopeFlag; scope->initMinimal(cx); #ifdef JS_THREADSAFE @@ -252,13 +252,13 @@ JSScope::destroy(JSContext *cx, JSScope *scope) js_FinishTitle(cx, &scope->title); #endif if (scope->table) - JS_free(cx, scope->table); + cx->free(scope->table); if (scope->emptyScope) scope->emptyScope->drop(cx, NULL); LIVE_SCOPE_METER(cx, cx->runtime->liveScopeProps -= scope->entryCount); JS_RUNTIME_UNMETER(cx->runtime, liveScopes); - JS_free(cx, scope); + cx->free(scope); } #ifdef JS_DUMP_PROPTREE_STATS @@ -401,11 +401,9 @@ JSScope::changeTable(JSContext *cx, int change) oldsize = JS_BIT(oldlog2); newsize = JS_BIT(newlog2); nbytes = SCOPE_TABLE_NBYTES(newsize); - newtable = (JSScopeProperty **) calloc(nbytes, 1); - if (!newtable) { - JS_ReportOutOfMemory(cx); + newtable = (JSScopeProperty **) cx->calloc(nbytes); + if (!newtable) return false; - } /* Now that we have newtable allocated, update members. 
*/ hashShift = JS_DHASH_BITS - newlog2; @@ -428,7 +426,7 @@ JSScope::changeTable(JSContext *cx, int change) } /* Finally, free the old table storage. */ - JS_free(cx, oldtable); + cx->free(oldtable); return true; } @@ -578,7 +576,7 @@ NewPropTreeKidsChunk(JSRuntime *rt) { PropTreeKidsChunk *chunk; - chunk = (PropTreeKidsChunk *) calloc(1, sizeof *chunk); + chunk = (PropTreeKidsChunk *) js_calloc(sizeof *chunk); if (!chunk) return NULL; JS_ASSERT(((jsuword)chunk & CHUNKY_KIDS_TAG) == 0); @@ -592,7 +590,7 @@ DestroyPropTreeKidsChunk(JSRuntime *rt, PropTreeKidsChunk *chunk) JS_RUNTIME_UNMETER(rt, propTreeKidsChunks); if (chunk->table) JS_DHashTableDestroy(chunk->table); - free(chunk); + js_free(chunk); } /* NB: Called with rt->gcLock held. */ @@ -1215,7 +1213,7 @@ JSScope::add(JSContext *cx, jsid id, splen = entryCount; JS_ASSERT(splen != 0); spvec = (JSScopeProperty **) - JS_malloc(cx, SCOPE_TABLE_NBYTES(splen)); + cx->malloc(SCOPE_TABLE_NBYTES(splen)); if (!spvec) goto fail_overwrite; i = splen; @@ -1248,7 +1246,7 @@ JSScope::add(JSContext *cx, jsid id, } else { sprop = GetPropertyTreeChild(cx, sprop, spvec[i]); if (!sprop) { - JS_free(cx, spvec); + cx->free(spvec); goto fail_overwrite; } @@ -1257,7 +1255,7 @@ JSScope::add(JSContext *cx, jsid id, SPROP_STORE_PRESERVING_COLLISION(spp2, sprop); } } while (++i < splen); - JS_free(cx, spvec); + cx->free(spvec); /* * Now sprop points to the last property in this scope, where @@ -1558,7 +1556,7 @@ JSScope::clear(JSContext *cx) LIVE_SCOPE_METER(cx, cx->runtime->liveScopeProps -= entryCount); if (table) - free(table); + js_free(table); clearMiddleDelete(); js_LeaveTraceIfGlobalObject(cx, object); initMinimal(cx); @@ -1594,7 +1592,7 @@ JSScope::replacingShapeChange(JSContext *cx, JSScopeProperty *sprop, JSScopeProp { if (shape == sprop->shape) shape = newsprop->shape; - else + else generateOwnShape(cx); } @@ -1604,7 +1602,7 @@ JSScope::sealingShapeChange(JSContext *cx) generateOwnShape(cx); } -void +void JSScope::shadowingShapeChange(JSContext *cx, JSScopeProperty *sprop) { generateOwnShape(cx); @@ -1651,7 +1649,7 @@ JSScopeProperty::trace(JSTracer *trc) { if (IS_GC_MARKING_TRACER(trc)) flags |= SPROP_MARK; - TRACE_ID(trc, id); + js_TraceId(trc, id); #if JS_HAS_GETTER_SETTER if (attrs & (JSPROP_GETTER | JSPROP_SETTER)) { diff --git a/js/src/jsscope.h b/js/src/jsscope.h index 481da6a4a5b..211b88c9014 100644 --- a/js/src/jsscope.h +++ b/js/src/jsscope.h @@ -265,6 +265,8 @@ struct JSScope { void extend(JSContext *cx, JSScopeProperty *sprop); + void trace(JSTracer *trc); + void brandingShapeChange(JSContext *cx, uint32 slot, jsval v); void deletingShapeChange(JSContext *cx, JSScopeProperty *sprop); void methodShapeChange(JSContext *cx, uint32 slot, jsval toval); @@ -283,7 +285,13 @@ struct JSScope { SEALED = 0x0002, BRANDED = 0x0004, INDEXED_PROPERTIES = 0x0008, - OWN_SHAPE = 0x0010 + OWN_SHAPE = 0x0010, + + /* + * This flag toggles with each shape-regenerating GC cycle. + * See JSRuntime::gcRegenShapesScopeFlag. 
+ */ + SHAPE_REGEN = 0x0020 }; bool hadMiddleDelete() { return flags & MIDDLE_DELETE; } @@ -312,6 +320,8 @@ struct JSScope { bool hasOwnShape() { return flags & OWN_SHAPE; } void setOwnShape() { flags |= OWN_SHAPE; } + bool hasRegenFlag(uint8 regenFlag) { return (flags & SHAPE_REGEN) == regenFlag; } + bool owned() { return object != NULL; } }; @@ -493,6 +503,54 @@ JSScope::extend(JSContext *cx, JSScopeProperty *sprop) lastProp = sprop; } +inline void +JSScope::trace(JSTracer *trc) +{ + JSContext *cx = trc->context; + JSScopeProperty *sprop = lastProp; + uint8 regenFlag = cx->runtime->gcRegenShapesScopeFlag; + if (IS_GC_MARKING_TRACER(trc) && cx->runtime->gcRegenShapes && hasRegenFlag(regenFlag)) { + /* + * Either this scope has its own shape, which must be regenerated, or + * it must have the same shape as lastProp. + */ + uint32 newShape; + + if (sprop) { + if (!(sprop->flags & SPROP_FLAG_SHAPE_REGEN)) { + sprop->shape = js_RegenerateShapeForGC(cx); + sprop->flags |= SPROP_FLAG_SHAPE_REGEN; + } + newShape = sprop->shape; + } + if (!sprop || hasOwnShape()) { + newShape = js_RegenerateShapeForGC(cx); + JS_ASSERT_IF(sprop, newShape != sprop->shape); + } + shape = newShape; + flags ^= JSScope::SHAPE_REGEN; + + /* Also regenerate the shapes of empty scopes, in case they are not shared. */ + for (JSScope *empty = emptyScope; + empty && empty->hasRegenFlag(regenFlag); + empty = empty->emptyScope) { + empty->shape = js_RegenerateShapeForGC(cx); + empty->flags ^= JSScope::SHAPE_REGEN; + } + } + if (sprop) { + JS_ASSERT(has(sprop)); + + /* Trace scope's property tree ancestor line. */ + do { + if (hadMiddleDelete() && !has(sprop)) + continue; + sprop->trace(trc); + } while ((sprop = sprop->parent) != NULL); + } +} + + static JS_INLINE bool js_GetSprop(JSContext* cx, JSScopeProperty* sprop, JSObject* obj, jsval* vp) { @@ -545,14 +603,6 @@ js_SetSprop(JSContext* cx, JSScopeProperty* sprop, JSObject* obj, jsval* vp) extern JSScope * js_GetMutableScope(JSContext *cx, JSObject *obj); -/* - * These macros used to inline short code sequences, but they grew over time. - * We retain them for internal backward compatibility, and in case one or both - * ever shrink to inline-able size. - */ -#define TRACE_ID(trc, id) js_TraceId(trc, id) -#define TRACE_SCOPE_PROPERTY(trc, sprop) sprop->trace(trc) - extern void js_TraceId(JSTracer *trc, jsid id); diff --git a/js/src/jsscript.cpp b/js/src/jsscript.cpp index 35d203d5e59..54e87a75697 100644 --- a/js/src/jsscript.cpp +++ b/js/src/jsscript.cpp @@ -140,7 +140,7 @@ script_toSource(JSContext *cx, uintN argc, jsval *vp) } /* Allocate the source string and copy into it. */ - t = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar)); + t = (jschar *) cx->malloc((n + 1) * sizeof(jschar)); if (!t) return JS_FALSE; for (i = 0; i < j; i++) @@ -154,7 +154,7 @@ script_toSource(JSContext *cx, uintN argc, jsval *vp) /* Create and return a JS string for t. 
*/ str = JS_NewUCString(cx, t, n); if (!str) { - JS_free(cx, t); + cx->free(t); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); @@ -533,7 +533,7 @@ js_XDRScript(JSXDRState *xdr, JSScript **scriptp, JSBool *hasMagic) ok = JS_XDRBytes(xdr, (char *) code, length * sizeof(jsbytecode)); if (code != script->code) - JS_free(cx, code); + cx->free(code); if (!ok) goto error; @@ -576,7 +576,7 @@ js_XDRScript(JSXDRState *xdr, JSScript **scriptp, JSBool *hasMagic) filename = js_SaveScriptFilename(cx, filename); if (!filename) goto error; - JS_free(cx, (void *) script->filename); + cx->free((void *) script->filename); script->filename = filename; filenameWasSaved = JS_TRUE; } @@ -665,7 +665,7 @@ js_XDRScript(JSXDRState *xdr, JSScript **scriptp, JSBool *hasMagic) if (xdr->mode == JSXDR_DECODE) { JS_POP_TEMP_ROOT(cx, &tvr); if (script->filename && !filenameWasSaved) { - JS_free(cx, (void *) script->filename); + cx->free((void *) script->filename); script->filename = NULL; } js_DestroyScript(cx, script); @@ -783,7 +783,7 @@ script_thaw(JSContext *cx, uintN argc, jsval *vp) /* Swap bytes in Unichars to keep frozen strings machine-independent. */ from = (jschar *)buf; - to = (jschar *) JS_malloc(cx, len * sizeof(jschar)); + to = (jschar *) cx->malloc(len * sizeof(jschar)); if (!to) { JS_XDRDestroy(xdr); return JS_FALSE; @@ -839,7 +839,7 @@ out: JS_XDRMemSetData(xdr, NULL, 0); JS_XDRDestroy(xdr); #if IS_BIG_ENDIAN - JS_free(cx, buf); + cx->free(buf); #endif *vp = JSVAL_TRUE; return ok; @@ -995,13 +995,13 @@ typedef struct ScriptFilenameEntry { static void * js_alloc_table_space(void *priv, size_t size) { - return malloc(size); + return js_malloc(size); } static void js_free_table_space(void *priv, void *item, size_t size) { - free(item); + js_free(item); } static JSHashEntry * @@ -1010,7 +1010,7 @@ js_alloc_sftbl_entry(void *priv, const void *key) size_t nbytes = offsetof(ScriptFilenameEntry, filename) + strlen((const char *) key) + 1; - return (JSHashEntry *) malloc(JS_MAX(nbytes, sizeof(JSHashEntry))); + return (JSHashEntry *) js_malloc(JS_MAX(nbytes, sizeof(JSHashEntry))); } static void @@ -1018,7 +1018,7 @@ js_free_sftbl_entry(void *priv, JSHashEntry *he, uintN flag) { if (flag != HT_FREE_ENTRY) return; - free(he); + js_free(he); } static JSHashAllocOps sftbl_alloc_ops = { @@ -1080,7 +1080,7 @@ js_FreeRuntimeScriptState(JSRuntime *rt) while (!JS_CLIST_IS_EMPTY(&rt->scriptFilenamePrefixes)) { sfp = (ScriptFilenamePrefix *) rt->scriptFilenamePrefixes.next; JS_REMOVE_LINK(&sfp->links); - free(sfp); + js_free(sfp); } js_FinishRuntimeScriptState(rt); } @@ -1143,7 +1143,7 @@ SaveScriptFilename(JSRuntime *rt, const char *filename, uint32 flags) if (!sfp) { /* No such prefix: add one now. 
*/ - sfp = (ScriptFilenamePrefix *) malloc(sizeof(ScriptFilenamePrefix)); + sfp = (ScriptFilenamePrefix *) js_malloc(sizeof(ScriptFilenamePrefix)); if (!sfp) return NULL; JS_INSERT_AFTER(&sfp->links, link); @@ -1384,7 +1384,7 @@ js_NewScript(JSContext *cx, uint32 length, uint32 nsrcnotes, uint32 natoms, if (ntrynotes != 0) size += sizeof(JSTryNoteArray) + ntrynotes * sizeof(JSTryNote); - script = (JSScript *) JS_malloc(cx, size); + script = (JSScript *) cx->malloc(size); if (!script) return NULL; memset(script, 0, sizeof(JSScript)); @@ -1536,7 +1536,7 @@ js_NewScriptFromCG(JSContext *cx, JSCodeGenerator *cg) memcpy(JS_SCRIPT_UPVARS(script)->vector, cg->upvarMap.vector, cg->upvarList.count * sizeof(uint32)); cg->upvarList.clear(); - JS_free(cx, cg->upvarMap.vector); + cx->free(cg->upvarMap.vector); cg->upvarMap.vector = NULL; } @@ -1648,7 +1648,7 @@ js_DestroyScript(JSContext *cx, JSScript *script) } } - JS_free(cx, script); + cx->free(script); } void diff --git a/js/src/jsstdint.h b/js/src/jsstdint.h index 1ce569aea1b..247149bc592 100644 --- a/js/src/jsstdint.h +++ b/js/src/jsstdint.h @@ -40,7 +40,7 @@ /* * This header provides definitions for the <stdint.h> types we use, * even on systems that lack <stdint.h>. - * + * * NOTE: This header should only be included in private SpiderMonkey * code; public headers should use only the JS{Int,Uint}N types; see * the comment for them in "jsinttypes.h". diff --git a/js/src/jsstr.cpp b/js/src/jsstr.cpp index e9440dd82c2..c59efa86d02 100644 --- a/js/src/jsstr.cpp +++ b/js/src/jsstr.cpp @@ -145,7 +145,7 @@ js_ConcatStrings(JSContext *cx, JSString *left, JSString *right) if (!left->isMutable()) { /* We must copy if left does not own a buffer to realloc. */ - s = (jschar *) JS_malloc(cx, (ln + rn + 1) * sizeof(jschar)); + s = (jschar *) cx->malloc((ln + rn + 1) * sizeof(jschar)); if (!s) return NULL; js_strncpy(s, ls, ln); @@ -153,7 +153,7 @@ js_ConcatStrings(JSContext *cx, JSString *left, JSString *right) } else { /* We can realloc left's space and make it depend on our result. */ JS_ASSERT(left->isFlat()); - s = (jschar *) JS_realloc(cx, ls, (ln + rn + 1) * sizeof(jschar)); + s = (jschar *) cx->realloc(ls, (ln + rn + 1) * sizeof(jschar)); if (!s) return NULL; @@ -173,9 +173,9 @@ js_ConcatStrings(JSContext *cx, JSString *left, JSString *right) if (!str) { /* Out of memory: clean up any space we (re-)allocated. 
*/ if (!ldep) { - JS_free(cx, s); + cx->free(s); } else { - s = (jschar *) JS_realloc(cx, ls, (ln + 1) * sizeof(jschar)); + s = (jschar *) cx->realloc(ls, (ln + 1) * sizeof(jschar)); if (s) left->mChars = s; } @@ -210,7 +210,7 @@ js_UndependString(JSContext *cx, JSString *str) if (str->isDependent()) { n = str->dependentLength(); size = (n + 1) * sizeof(jschar); - s = (jschar *) JS_malloc(cx, size); + s = (jschar *) cx->malloc(size); if (!s) return NULL; @@ -402,7 +402,7 @@ js_str_escape(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval return JS_FALSE; } - newchars = (jschar *) JS_malloc(cx, (newlength + 1) * sizeof(jschar)); + newchars = (jschar *) cx->malloc((newlength + 1) * sizeof(jschar)); if (!newchars) return JS_FALSE; for (i = 0, ni = 0; i < length; i++) { @@ -430,7 +430,7 @@ js_str_escape(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval str = js_NewString(cx, newchars, newlength); if (!str) { - JS_free(cx, newchars); + cx->free(newchars); return JS_FALSE; } *rval = STRING_TO_JSVAL(str); @@ -464,7 +464,7 @@ str_unescape(JSContext *cx, uintN argc, jsval *vp) str->getCharsAndLength(chars, length); /* Don't bother allocating less space for the new string. */ - newchars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar)); + newchars = (jschar *) cx->malloc((length + 1) * sizeof(jschar)); if (!newchars) return JS_FALSE; ni = i = 0; @@ -493,7 +493,7 @@ str_unescape(JSContext *cx, uintN argc, jsval *vp) str = js_NewString(cx, newchars, ni); if (!str) { - JS_free(cx, newchars); + cx->free(newchars); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); @@ -695,7 +695,7 @@ str_toSource(JSContext *cx, uintN argc, jsval *vp) j = JS_snprintf(buf, sizeof buf, "(new %s(", js_StringClass.name); str->getCharsAndLength(s, k); n = j + k + 2; - t = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar)); + t = (jschar *) cx->malloc((n + 1) * sizeof(jschar)); if (!t) return JS_FALSE; for (i = 0; i < j; i++) @@ -707,7 +707,7 @@ str_toSource(JSContext *cx, uintN argc, jsval *vp) t[i] = 0; str = js_NewString(cx, t, n); if (!str) { - JS_free(cx, t); + cx->free(t); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); @@ -799,7 +799,7 @@ js_toLowerCase(JSContext *cx, JSString *str) jschar *news; str->getCharsAndLength(s, n); - news = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar)); + news = (jschar *) cx->malloc((n + 1) * sizeof(jschar)); if (!news) return NULL; for (i = 0; i < n; i++) @@ -807,7 +807,7 @@ js_toLowerCase(JSContext *cx, JSString *str) news[n] = 0; str = js_NewString(cx, news, n); if (!str) { - JS_free(cx, news); + cx->free(news); return NULL; } return str; @@ -850,7 +850,7 @@ js_toUpperCase(JSContext *cx, JSString *str) jschar *news; str->getCharsAndLength(s, n); - news = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar)); + news = (jschar *) cx->malloc((n + 1) * sizeof(jschar)); if (!news) return NULL; for (i = 0; i < n; i++) @@ -858,7 +858,7 @@ js_toUpperCase(JSContext *cx, JSString *str) news[n] = 0; str = js_NewString(cx, news, n); if (!str) { - JS_free(cx, news); + cx->free(news); return NULL; } return str; @@ -1659,7 +1659,7 @@ find_replen(JSContext *cx, ReplaceData *rdata, size_t *sizep) lambda_out: js_FreeStack(cx, mark); if (freeMoreParens) - JS_free(cx, cx->regExpStatics.moreParens); + cx->free(cx->regExpStatics.moreParens); cx->regExpStatics = save; return ok; } @@ -1716,7 +1716,7 @@ replace_destroy(JSContext *cx, GlobData *data) ReplaceData *rdata; rdata = (ReplaceData *)data; - JS_free(cx, rdata->chars); + cx->free(rdata->chars); rdata->chars = NULL; } 
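The jsstr.cpp hunks above and below repeat the same mechanical conversion applied throughout this patch: call sites that went through the JS_malloc/JS_realloc/JS_free(cx, ...) API now invoke the corresponding JSContext methods (cx->malloc, cx->realloc, cx->free), and sites with no context in scope use the js_malloc/js_calloc/js_realloc/js_free wrappers instead of raw malloc/calloc/realloc/free. The sketch below only illustrates that call-site pattern and is not code from the patch; the helper function and its arguments are hypothetical, and it assumes the in-tree declarations of JSContext, jschar, and js_strncpy.

    /* Hypothetical helper, written the old way: allocation through the JSAPI macros. */
    static jschar *
    CopyCharsOld(JSContext *cx, const jschar *src, size_t n)
    {
        /* JS_malloc reports out-of-memory on the context when it fails. */
        jschar *buf = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar));
        if (!buf)
            return NULL;
        js_strncpy(buf, src, n);
        buf[n] = 0;
        return buf;
    }

    /* The same helper after the conversion: allocation through the JSContext methods. */
    static jschar *
    CopyCharsNew(JSContext *cx, const jschar *src, size_t n)
    {
        jschar *buf = (jschar *) cx->malloc((n + 1) * sizeof(jschar));
        if (!buf)
            return NULL;
        js_strncpy(buf, src, n);
        buf[n] = 0;
        return buf;
    }

Either version is released with the matching call: JS_free(cx, buf) before the patch, cx->free(buf) after it.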
@@ -1741,9 +1741,9 @@ replace_glob(JSContext *cx, jsint count, GlobData *data) growth = leftlen + replen; chars = (jschar *) (rdata->chars - ? JS_realloc(cx, rdata->chars, (rdata->length + growth + 1) + ? cx->realloc(rdata->chars, (rdata->length + growth + 1) * sizeof(jschar)) - : JS_malloc(cx, (growth + 1) * sizeof(jschar))); + : cx->malloc((growth + 1) * sizeof(jschar))); if (!chars) return JS_FALSE; rdata->chars = chars; @@ -1826,7 +1826,7 @@ js_StringReplaceHelper(JSContext *cx, uintN argc, JSObject *lambda, if (!ok) goto out; length += leftlen; - chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar)); + chars = (jschar *) cx->malloc((length + 1) * sizeof(jschar)); if (!chars) { ok = JS_FALSE; goto out; @@ -1840,9 +1840,9 @@ js_StringReplaceHelper(JSContext *cx, uintN argc, JSObject *lambda, rightlen = cx->regExpStatics.rightContext.length; length = rdata.length + rightlen; chars = (jschar *) - JS_realloc(cx, rdata.chars, (length + 1) * sizeof(jschar)); + cx->realloc(rdata.chars, (length + 1) * sizeof(jschar)); if (!chars) { - JS_free(cx, rdata.chars); + cx->free(rdata.chars); ok = JS_FALSE; goto out; } @@ -1852,7 +1852,7 @@ js_StringReplaceHelper(JSContext *cx, uintN argc, JSObject *lambda, str = js_NewString(cx, chars, length); if (!str) { - JS_free(cx, chars); + cx->free(chars); ok = JS_FALSE; goto out; } @@ -2266,7 +2266,7 @@ tagify(JSContext *cx, const char *begin, JSString *param, const char *end, return JS_FALSE; } - tagbuf = (jschar *) JS_malloc(cx, (taglen + 1) * sizeof(jschar)); + tagbuf = (jschar *) cx->malloc((taglen + 1) * sizeof(jschar)); if (!tagbuf) return JS_FALSE; @@ -2294,7 +2294,7 @@ tagify(JSContext *cx, const char *begin, JSString *param, const char *end, str = js_NewString(cx, tagbuf, taglen); if (!str) { - free((char *)tagbuf); + js_free((char *)tagbuf); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); @@ -2531,13 +2531,13 @@ str_fromCharCode(JSContext *cx, uintN argc, jsval *vp) *vp = STRING_TO_JSVAL(str); return JS_TRUE; } - chars = (jschar *) JS_malloc(cx, (argc + 1) * sizeof(jschar)); + chars = (jschar *) cx->malloc((argc + 1) * sizeof(jschar)); if (!chars) return JS_FALSE; for (i = 0; i < argc; i++) { code = js_ValueToUint16(cx, &argv[i]); if (JSVAL_IS_NULL(argv[i])) { - JS_free(cx, chars); + cx->free(chars); return JS_FALSE; } chars[i] = (jschar)code; @@ -2545,7 +2545,7 @@ str_fromCharCode(JSContext *cx, uintN argc, jsval *vp) chars[i] = 0; str = js_NewString(cx, chars, argc); if (!str) { - JS_free(cx, chars); + cx->free(chars); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); @@ -2621,9 +2621,8 @@ js_GetUnitStringForChar(JSContext *cx, jschar c) JS_ASSERT(c < UNIT_STRING_LIMIT); rt = cx->runtime; if (!rt->unitStrings) { - sp = (JSString **) calloc(UNIT_STRING_LIMIT * sizeof(JSString *) + - UNIT_STRING_LIMIT * 2 * sizeof(jschar), - 1); + sp = (JSString **) js_calloc(UNIT_STRING_LIMIT * sizeof(JSString *) + + UNIT_STRING_LIMIT * 2 * sizeof(jschar)); if (!sp) { JS_ReportOutOfMemory(cx); return NULL; @@ -2639,7 +2638,7 @@ js_GetUnitStringForChar(JSContext *cx, jschar c) JS_UNLOCK_GC(rt); } else { JS_UNLOCK_GC(rt); - free(sp); + js_free(sp); } } if (!rt->unitStrings[c]) { @@ -2676,7 +2675,7 @@ js_GetUnitString(JSContext *cx, JSString *str, size_t index) void js_FinishUnitStrings(JSRuntime *rt) { - free(rt->unitStrings); + js_free(rt->unitStrings); rt->unitStrings = NULL; } @@ -2832,14 +2831,14 @@ js_NewStringCopyN(JSContext *cx, const jschar *s, size_t n) jschar *news; JSString *str; - news = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar)); + news = 
(jschar *) cx->malloc((n + 1) * sizeof(jschar)); if (!news) return NULL; js_strncpy(news, s, n); news[n] = 0; str = js_NewString(cx, news, n); if (!str) - JS_free(cx, news); + cx->free(news); return str; } @@ -2852,13 +2851,13 @@ js_NewStringCopyZ(JSContext *cx, const jschar *s) n = js_strlen(s); m = (n + 1) * sizeof(jschar); - news = (jschar *) JS_malloc(cx, m); + news = (jschar *) cx->malloc(m); if (!news) return NULL; memcpy(news, s, m); str = js_NewString(cx, news, n); if (!str) - JS_free(cx, news); + cx->free(news); return str; } @@ -2876,7 +2875,7 @@ js_PurgeDeflatedStringCache(JSRuntime *rt, JSString *str) #ifdef DEBUG rt->deflatedStringCacheBytes -= str->length(); #endif - free(he->value); + js_free(he->value); JS_HashTableRawRemove(rt->deflatedStringCache, hep, he); } JS_RELEASE_LOCK(rt->deflatedStringCacheLock); @@ -3121,7 +3120,7 @@ js_InflateString(JSContext *cx, const char *bytes, size_t *lengthp) if (js_CStringsAreUTF8) { if (!js_InflateStringToBuffer(cx, bytes, nbytes, NULL, &nchars)) goto bad; - chars = (jschar *) JS_malloc(cx, (nchars + 1) * sizeof (jschar)); + chars = (jschar *) cx->malloc((nchars + 1) * sizeof (jschar)); if (!chars) goto bad; #ifdef DEBUG @@ -3131,7 +3130,7 @@ js_InflateString(JSContext *cx, const char *bytes, size_t *lengthp) JS_ASSERT(ok); } else { nchars = nbytes; - chars = (jschar *) JS_malloc(cx, (nchars + 1) * sizeof(jschar)); + chars = (jschar *) cx->malloc((nchars + 1) * sizeof(jschar)); if (!chars) goto bad; for (i = 0; i < nchars; i++) @@ -3166,7 +3165,7 @@ js_DeflateString(JSContext *cx, const jschar *chars, size_t nchars) nbytes = js_GetDeflatedStringLength(cx, chars, nchars); if (nbytes == (size_t) -1) return NULL; - bytes = (char *) (cx ? JS_malloc(cx, nbytes + 1) : malloc(nbytes + 1)); + bytes = (char *) (cx ? cx->malloc(nbytes + 1) : js_malloc(nbytes + 1)); if (!bytes) return NULL; #ifdef DEBUG @@ -3176,7 +3175,7 @@ js_DeflateString(JSContext *cx, const jschar *chars, size_t nchars) JS_ASSERT(ok); } else { nbytes = nchars; - bytes = (char *) (cx ? JS_malloc(cx, nbytes + 1) : malloc(nbytes + 1)); + bytes = (char *) (cx ? cx->malloc(nbytes + 1) : js_malloc(nbytes + 1)); if (!bytes) return NULL; for (i = 0; i < nbytes; i++) @@ -3491,9 +3490,9 @@ js_GetStringBytes(JSContext *cx, JSString *str) str->setDeflated(); } else { if (cx) - JS_free(cx, bytes); + cx->free(bytes); else - free(bytes); + js_free(bytes); bytes = NULL; } } @@ -4836,8 +4835,8 @@ AddCharsToURI(JSContext *cx, JSCharBuffer *buf, if (!buf->chars || JS_HOWMANY(total, URI_CHUNK) > JS_HOWMANY(buf->length + 1, URI_CHUNK)) { total = JS_ROUNDUP(total, URI_CHUNK); - newchars = (jschar *) JS_realloc(cx, buf->chars, - total * sizeof(jschar)); + newchars = (jschar *) cx->realloc(buf->chars, + total * sizeof(jschar)); if (!newchars) return JS_FALSE; buf->chars = newchars; @@ -4860,7 +4859,7 @@ TransferBufferToString(JSContext *cx, JSCharBuffer *cb, jsval *rval) * don't worry about that case here. 
*/ n = cb->length; - chars = (jschar *) JS_realloc(cx, cb->chars, (n + 1) * sizeof(jschar)); + chars = (jschar *) cx->realloc(cb->chars, (n + 1) * sizeof(jschar)); if (!chars) chars = cb->chars; str = js_NewString(cx, chars, n); @@ -4953,7 +4952,7 @@ Encode(JSContext *cx, JSString *str, const jschar *unescapedSet, return JS_TRUE; bad: - JS_free(cx, cb.chars); + cx->free(cb.chars); return JS_FALSE; } @@ -5048,7 +5047,7 @@ Decode(JSContext *cx, JSString *str, const jschar *reservedSet, jsval *rval) /* FALL THROUGH */ bad: - JS_free(cx, cb.chars); + cx->free(cb.chars); return JS_FALSE; } diff --git a/js/src/jstask.cpp b/js/src/jstask.cpp new file mode 100644 index 00000000000..9071e632efd --- /dev/null +++ b/js/src/jstask.cpp @@ -0,0 +1,126 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=4 sw=4 et tw=99 ft=cpp: + * + * ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is Mozilla SpiderMonkey JavaScript 1.9.1 code, released + * June 30, 2009. + * + * The Initial Developer of the Original Code is + * Andreas Gal + * + * Contributor(s): + * + * Alternatively, the contents of this file may be used under the terms of + * either of the GNU General Public License Version 2 or later (the "GPL"), + * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. + * + * ***** END LICENSE BLOCK ***** */ + +#include "jstask.h" + +#ifdef JS_THREADSAFE +static void start(void* arg) { + ((JSBackgroundThread*)arg)->work(); +} + +JSBackgroundThread::JSBackgroundThread() + : thread(NULL), stack(NULL), lock(NULL), wakeup(NULL), shutdown(false) +{ +} + +JSBackgroundThread::~JSBackgroundThread() +{ + if (wakeup) + PR_DestroyCondVar(wakeup); + if (lock) + PR_DestroyLock(lock); + /* PR_DestroyThread is not necessary. 
*/ +} + +bool +JSBackgroundThread::init() +{ + if (!(lock = PR_NewLock())) + return false; + if (!(wakeup = PR_NewCondVar(lock))) + return false; + thread = PR_CreateThread(PR_USER_THREAD, start, this, PR_PRIORITY_LOW, + PR_LOCAL_THREAD, PR_JOINABLE_THREAD, 0); + return !!thread; +} + +void +JSBackgroundThread::cancel() +{ + PR_Lock(lock); + if (shutdown) { + PR_Unlock(lock); + return; + } + shutdown = true; + PR_NotifyCondVar(wakeup); + PR_Unlock(lock); + PR_JoinThread(thread); +} + +void +JSBackgroundThread::work() +{ + PR_Lock(lock); + while (!shutdown) { + PR_WaitCondVar(wakeup, PR_INTERVAL_NO_TIMEOUT); + JSBackgroundTask* t; + while ((t = stack) != NULL) { + stack = t->next; + PR_Unlock(lock); + t->run(); + delete t; + PR_Lock(lock); + } + } + PR_Unlock(lock); +} + +bool +JSBackgroundThread::busy() +{ + return !!stack; // we tolerate some racing here +} + +void +JSBackgroundThread::schedule(JSBackgroundTask* task) +{ + PR_Lock(lock); + if (shutdown) { + PR_Unlock(lock); + task->run(); + delete task; + return; + } + task->next = stack; + stack = task; + PR_NotifyCondVar(wakeup); + PR_Unlock(lock); +} + +#endif diff --git a/js/src/jstask.h b/js/src/jstask.h new file mode 100644 index 00000000000..30bc009a2c2 --- /dev/null +++ b/js/src/jstask.h @@ -0,0 +1,84 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=4 sw=4 et tw=99 ft=cpp: + * + * ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released + * June 30, 2009. + * + * The Initial Developer of the Original Code is + * Andreas Gal + * + * Contributor(s): + * + * Alternatively, the contents of this file may be used under the terms of + * either of the GNU General Public License Version 2 or later (the "GPL"), + * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. 
+ * + * ***** END LICENSE BLOCK ***** */ + +#ifndef jstask_h___ +#define jstask_h___ + +class JSBackgroundTask { + friend class JSBackgroundThread; + JSBackgroundTask* next; + public: + virtual void run() = 0; +}; + +#ifdef JS_THREADSAFE + +#include "prthread.h" +#include "prlock.h" +#include "prcvar.h" + +class JSBackgroundThread { + PRThread* thread; + JSBackgroundTask* stack; + PRLock* lock; + PRCondVar* wakeup; + bool shutdown; + + public: + JSBackgroundThread(); + ~JSBackgroundThread(); + + bool init(); + void cancel(); + void work(); + bool busy(); + void schedule(JSBackgroundTask* task); +}; + +#else + +class JSBackgroundThread { + public: + void schedule(JSBackgroundTask* task) { + task->run(); + } +}; + +#endif + +#endif /* jstask_h___ */ diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp index 5d4da83d874..7a899089cf9 100644 --- a/js/src/jstracer.cpp +++ b/js/src/jstracer.cpp @@ -80,6 +80,9 @@ #include "jsautooplen.h" // generated headers last #include "imacros.c.out" +using namespace avmplus; +using namespace nanojit; + #if JS_HAS_XML_SUPPORT #define ABORT_IF_XML(v) \ JS_BEGIN_MACRO \ @@ -87,12 +90,14 @@ ABORT_TRACE("xml detected"); \ JS_END_MACRO #else -#define ABORT_IF_XML(cx, v) ((void) 0) +#define ABORT_IF_XML(v) ((void) 0) #endif -/* Never use JSVAL_IS_BOOLEAN because it restricts the value (true, false) and - the type. What you want to use is JSVAL_TAG(x) == JSVAL_BOOLEAN and then - handle the undefined case properly (bug 457363). */ +/* + * Never use JSVAL_IS_BOOLEAN because it restricts the value (true, false) and + * the type. What you want to use is JSVAL_TAG(x) == JSVAL_BOOLEAN and then + * handle the undefined case properly (bug 457363). + */ #undef JSVAL_IS_BOOLEAN #define JSVAL_IS_BOOLEAN(x) JS_STATIC_ASSERT(0) @@ -104,8 +109,10 @@ static const char tagChar[] = "OIDISIBI"; /* Blacklist parameters. */ -/* Number of iterations of a loop where we start tracing. That is, we don't - start tracing until the beginning of the HOTLOOP-th iteration. */ +/* + * Number of iterations of a loop where we start tracing. That is, we don't + * start tracing until the beginning of the HOTLOOP-th iteration. + */ #define HOTLOOP 2 /* Attempt recording this many times before blacklisting permanently. */ @@ -147,12 +154,12 @@ static const char tagChar[] = "OIDISIBI"; #define CHECK_STATUS(expr) \ JS_BEGIN_MACRO \ JSRecordingStatus _status = (expr); \ - if (_status != JSRS_CONTINUE) \ + if (_status != JSRS_CONTINUE) \ return _status; \ JS_END_MACRO #ifdef JS_JIT_SPEW -#define ABORT_TRACE_RV(msg, value) \ +#define ABORT_TRACE_RV(msg, value) \ JS_BEGIN_MACRO \ debug_only_printf(LC_TMAbort, "abort: %d: %s\n", __LINE__, (msg)); \ return (value); \ @@ -250,38 +257,42 @@ js_InitJITStatsClass(JSContext *cx, JSObject *glob) #define INS_CONSTWORD(v) addName(lir->insImmPtr((void *) v), #v) #define INS_VOID() INS_CONST(JSVAL_TO_PSEUDO_BOOLEAN(JSVAL_VOID)) -using namespace avmplus; -using namespace nanojit; - static GC gc = GC(); static avmplus::AvmCore s_core = avmplus::AvmCore(); static avmplus::AvmCore* core = &s_core; #ifdef JS_JIT_SPEW -void -js_DumpPeerStability(JSTraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape, uint32 argc); +static void +DumpPeerStability(JSTraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape, uint32 argc); #endif -/* We really need a better way to configure the JIT. Shaver, where is - my fancy JIT object? */ -/* NB: this is raced on, if jstracer.cpp should ever be running MT. - I think it's harmless tho. 
*/ +/* + * We really need a better way to configure the JIT. Shaver, where is + * my fancy JIT object? + * + * NB: this is raced on, if jstracer.cpp should ever be running MT. + * I think it's harmless tho. + */ static bool did_we_check_processor_features = false; /* ------ Debug logging control ------ */ -/* All the logging control stuff lives in here. It is shared between - all threads, but I think that's OK. */ +/* + * All the logging control stuff lives in here. It is shared between + * all threads, but I think that's OK. + */ LogControl js_LogController; #ifdef JS_JIT_SPEW -/* NB: this is raced on too, if jstracer.cpp should ever be running MT. - Also harmless. */ +/* + * NB: this is raced on too, if jstracer.cpp should ever be running MT. + * Also harmless. + */ static bool did_we_set_up_debug_logging = false; static void -js_InitJITLogController ( void ) +InitJITLogController() { char *tm, *tmf; uint32_t bits; @@ -298,6 +309,7 @@ js_InitJITLogController ( void ) if (strstr(tmf, "help")) goto help; bits = 0; + /* flags for jstracer.cpp */ if (strstr(tmf, "minimal")) bits |= LC_TMMinimal; if (strstr(tmf, "tracer")) bits |= LC_TMTracer; @@ -306,6 +318,7 @@ js_InitJITLogController ( void ) if (strstr(tmf, "abort")) bits |= LC_TMAbort; if (strstr(tmf, "stats")) bits |= LC_TMStats; if (strstr(tmf, "regexp")) bits |= LC_TMRegexp; + /* flags for nanojit */ if (strstr(tmf, "liveness")) bits |= LC_Liveness; if (strstr(tmf, "readlir")) bits |= LC_ReadLIR; @@ -377,8 +390,10 @@ getExitName(ExitType type) } #endif -/* The entire VM shares one oracle. Collisions and concurrent updates are tolerated and worst - case cause performance regressions. */ +/* + * The entire VM shares one oracle. Collisions and concurrent updates are + * tolerated and worst case cause performance regressions. + */ static Oracle oracle; Tracker::Tracker() @@ -462,17 +477,20 @@ Tracker::set(const void* v, LIns* i) p->map[(jsuword(v) & PAGEMASK) >> 2] = i; } -static inline jsuint argSlots(JSStackFrame* fp) +static inline jsuint +argSlots(JSStackFrame* fp) { return JS_MAX(fp->argc, fp->fun->nargs); } -static inline bool isNumber(jsval v) +static inline bool +isNumber(jsval v) { return JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v); } -static inline jsdouble asNumber(jsval v) +static inline jsdouble +asNumber(jsval v) { JS_ASSERT(isNumber(v)); if (JSVAL_IS_DOUBLE(v)) @@ -480,7 +498,8 @@ static inline jsdouble asNumber(jsval v) return (jsdouble)JSVAL_TO_INT(v); } -static inline bool isInt32(jsval v) +static inline bool +isInt32(jsval v) { if (!isNumber(v)) return false; @@ -489,7 +508,8 @@ static inline bool isInt32(jsval v) return JSDOUBLE_IS_INT(d, i); } -static inline jsint asInt32(jsval v) +static inline jsint +asInt32(jsval v) { JS_ASSERT(isNumber(v)); if (JSVAL_IS_INT(v)) @@ -502,7 +522,8 @@ static inline jsint asInt32(jsval v) } /* Return TT_DOUBLE for all numbers (int and double) and the tag otherwise. 
*/ -static inline JSTraceType getPromotedType(jsval v) +static inline JSTraceType +GetPromotedType(jsval v) { if (JSVAL_IS_INT(v)) return TT_DOUBLE; @@ -515,14 +536,15 @@ static inline JSTraceType getPromotedType(jsval v) } uint8_t tag = JSVAL_TAG(v); JS_ASSERT(tag == JSVAL_DOUBLE || tag == JSVAL_STRING || tag == JSVAL_BOOLEAN); - JS_STATIC_ASSERT(TT_DOUBLE == JSVAL_DOUBLE); - JS_STATIC_ASSERT(TT_STRING == JSVAL_STRING); - JS_STATIC_ASSERT(TT_PSEUDOBOOLEAN == JSVAL_BOOLEAN); + JS_STATIC_ASSERT(static_cast(TT_DOUBLE) == JSVAL_DOUBLE); + JS_STATIC_ASSERT(static_cast(TT_STRING) == JSVAL_STRING); + JS_STATIC_ASSERT(static_cast(TT_PSEUDOBOOLEAN) == JSVAL_BOOLEAN); return JSTraceType(tag); } /* Return TT_INT32 for all whole numbers that fit into signed 32-bit and the tag otherwise. */ -static inline JSTraceType getCoercedType(jsval v) +static inline JSTraceType +getCoercedType(jsval v) { if (isInt32(v)) return TT_INT32; @@ -535,38 +557,40 @@ static inline JSTraceType getCoercedType(jsval v) } uint8_t tag = JSVAL_TAG(v); JS_ASSERT(tag == JSVAL_DOUBLE || tag == JSVAL_STRING || tag == JSVAL_BOOLEAN); - JS_STATIC_ASSERT(TT_DOUBLE == JSVAL_DOUBLE); - JS_STATIC_ASSERT(TT_STRING == JSVAL_STRING); - JS_STATIC_ASSERT(TT_PSEUDOBOOLEAN == JSVAL_BOOLEAN); + JS_STATIC_ASSERT(static_cast(TT_DOUBLE) == JSVAL_DOUBLE); + JS_STATIC_ASSERT(static_cast(TT_STRING) == JSVAL_STRING); + JS_STATIC_ASSERT(static_cast(TT_PSEUDOBOOLEAN) == JSVAL_BOOLEAN); return JSTraceType(tag); } -/* - * Constant seed and accumulate step borrowed from the DJB hash. - */ +/* Constant seed and accumulate step borrowed from the DJB hash. */ -#define ORACLE_MASK (ORACLE_SIZE - 1) -#define FRAGMENT_TABLE_MASK (FRAGMENT_TABLE_SIZE - 1) -#define HASH_SEED 5381 +const uintptr_t ORACLE_MASK = ORACLE_SIZE - 1; +JS_STATIC_ASSERT((ORACLE_MASK & ORACLE_SIZE) == 0); + +const uintptr_t FRAGMENT_TABLE_MASK = FRAGMENT_TABLE_SIZE - 1; +JS_STATIC_ASSERT((FRAGMENT_TABLE_MASK & FRAGMENT_TABLE_SIZE) == 0); + +const uintptr_t HASH_SEED = 5381; static inline void -hash_accum(uintptr_t& h, uintptr_t i, uintptr_t mask) +HashAccum(uintptr_t& h, uintptr_t i, uintptr_t mask) { h = ((h << 5) + h + (mask & i)) & mask; } -JS_REQUIRES_STACK static inline int -stackSlotHash(JSContext* cx, unsigned slot) +static JS_REQUIRES_STACK inline int +StackSlotHash(JSContext* cx, unsigned slot) { uintptr_t h = HASH_SEED; - hash_accum(h, uintptr_t(cx->fp->script), ORACLE_MASK); - hash_accum(h, uintptr_t(cx->fp->regs->pc), ORACLE_MASK); - hash_accum(h, uintptr_t(slot), ORACLE_MASK); + HashAccum(h, uintptr_t(cx->fp->script), ORACLE_MASK); + HashAccum(h, uintptr_t(cx->fp->regs->pc), ORACLE_MASK); + HashAccum(h, uintptr_t(slot), ORACLE_MASK); return int(h); } -JS_REQUIRES_STACK static inline int -globalSlotHash(JSContext* cx, unsigned slot) +static JS_REQUIRES_STACK inline int +GlobalSlotHash(JSContext* cx, unsigned slot) { uintptr_t h = HASH_SEED; JSStackFrame* fp = cx->fp; @@ -574,15 +598,14 @@ globalSlotHash(JSContext* cx, unsigned slot) while (fp->down) fp = fp->down; - hash_accum(h, uintptr_t(fp->script), ORACLE_MASK); - hash_accum(h, uintptr_t(OBJ_SHAPE(JS_GetGlobalForObject(cx, fp->scopeChain))), - ORACLE_MASK); - hash_accum(h, uintptr_t(slot), ORACLE_MASK); + HashAccum(h, uintptr_t(fp->script), ORACLE_MASK); + HashAccum(h, uintptr_t(OBJ_SHAPE(JS_GetGlobalForObject(cx, fp->scopeChain))), ORACLE_MASK); + HashAccum(h, uintptr_t(slot), ORACLE_MASK); return int(h); } static inline int -pcHash(jsbytecode* pc) +PCHash(jsbytecode* pc) { return int(uintptr_t(pc) & ORACLE_MASK); } @@ 
-599,42 +622,42 @@ Oracle::Oracle() JS_REQUIRES_STACK void Oracle::markGlobalSlotUndemotable(JSContext* cx, unsigned slot) { - _globalDontDemote.set(&gc, globalSlotHash(cx, slot)); + _globalDontDemote.set(&gc, GlobalSlotHash(cx, slot)); } /* Consult with the oracle whether we shouldn't demote a certain global variable. */ JS_REQUIRES_STACK bool Oracle::isGlobalSlotUndemotable(JSContext* cx, unsigned slot) const { - return _globalDontDemote.get(globalSlotHash(cx, slot)); + return _globalDontDemote.get(GlobalSlotHash(cx, slot)); } /* Tell the oracle that a certain slot at a certain stack slot should not be demoted. */ JS_REQUIRES_STACK void Oracle::markStackSlotUndemotable(JSContext* cx, unsigned slot) { - _stackDontDemote.set(&gc, stackSlotHash(cx, slot)); + _stackDontDemote.set(&gc, StackSlotHash(cx, slot)); } /* Consult with the oracle whether we shouldn't demote a certain slot. */ JS_REQUIRES_STACK bool Oracle::isStackSlotUndemotable(JSContext* cx, unsigned slot) const { - return _stackDontDemote.get(stackSlotHash(cx, slot)); + return _stackDontDemote.get(StackSlotHash(cx, slot)); } /* Tell the oracle that a certain slot at a certain bytecode location should not be demoted. */ void Oracle::markInstructionUndemotable(jsbytecode* pc) { - _pcDontDemote.set(&gc, pcHash(pc)); + _pcDontDemote.set(&gc, PCHash(pc)); } /* Consult with the oracle whether we shouldn't demote a certain bytecode location. */ bool Oracle::isInstructionUndemotable(jsbytecode* pc) const { - return _pcDontDemote.get(pcHash(pc)); + return _pcDontDemote.get(PCHash(pc)); } void @@ -645,7 +668,6 @@ Oracle::clearDemotability() _pcDontDemote.reset(); } - struct PCHashEntry : public JSDHashEntryStub { size_t count; }; @@ -653,7 +675,7 @@ struct PCHashEntry : public JSDHashEntryStub { #define PC_HASH_COUNT 1024 static void -js_Blacklist(jsbytecode* pc) +Blacklist(jsbytecode* pc) { AUDIT(blacklisted); JS_ASSERT(*pc == JSOP_LOOP || *pc == JSOP_NOP); @@ -661,7 +683,7 @@ js_Blacklist(jsbytecode* pc) } static void -js_Backoff(JSContext *cx, jsbytecode* pc, Fragment* tree=NULL) +Backoff(JSContext *cx, jsbytecode* pc, Fragment* tree = NULL) { JSDHashTable *table = &JS_TRACE_MONITOR(cx).recordAttempts; @@ -677,7 +699,7 @@ js_Backoff(JSContext *cx, jsbytecode* pc, Fragment* tree=NULL) JS_ASSERT(JS_DHASH_ENTRY_IS_LIVE(&(entry->hdr))); if (entry->count++ > (BL_ATTEMPTS * MAXPEERS)) { entry->count = 0; - js_Blacklist(pc); + Blacklist(pc); return; } } @@ -693,12 +715,12 @@ js_Backoff(JSContext *cx, jsbytecode* pc, Fragment* tree=NULL) * well. 
*/ if (++tree->recordAttempts > BL_ATTEMPTS) - js_Blacklist(pc); + Blacklist(pc); } } static void -js_resetRecordingAttempts(JSContext *cx, jsbytecode* pc) +ResetRecordingAttempts(JSContext *cx, jsbytecode* pc) { JSDHashTable *table = &JS_TRACE_MONITOR(cx).recordAttempts; if (table->ops) { @@ -713,13 +735,13 @@ js_resetRecordingAttempts(JSContext *cx, jsbytecode* pc) } static inline size_t -fragmentHash(const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc) +FragmentHash(const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc) { uintptr_t h = HASH_SEED; - hash_accum(h, uintptr_t(ip), FRAGMENT_TABLE_MASK); - hash_accum(h, uintptr_t(globalObj), FRAGMENT_TABLE_MASK); - hash_accum(h, uintptr_t(globalShape), FRAGMENT_TABLE_MASK); - hash_accum(h, uintptr_t(argc), FRAGMENT_TABLE_MASK); + HashAccum(h, uintptr_t(ip), FRAGMENT_TABLE_MASK); + HashAccum(h, uintptr_t(globalObj), FRAGMENT_TABLE_MASK); + HashAccum(h, uintptr_t(globalShape), FRAGMENT_TABLE_MASK); + HashAccum(h, uintptr_t(argc), FRAGMENT_TABLE_MASK); return size_t(h); } @@ -727,10 +749,10 @@ fragmentHash(const void *ip, JSObject* globalObj, uint32 globalShape, uint32 arg * argc is cx->fp->argc at the trace loop header, i.e., the number of arguments * pushed for the innermost JS frame. This is required as part of the fragment * key because the fragment will write those arguments back to the interpreter - * stack when it exits, using its typemap, which implicitly incorporates a given - * value of argc. Without this feature, a fragment could be called as an inner - * tree with two different values of argc, and entry type checking or exit - * frame synthesis could crash. + * stack when it exits, using its typemap, which implicitly incorporates a + * given value of argc. Without this feature, a fragment could be called as an + * inner tree with two different values of argc, and entry type checking or + * exit frame synthesis could crash. */ struct VMFragment : public Fragment { @@ -751,7 +773,7 @@ static VMFragment* getVMFragment(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc) { - size_t h = fragmentHash(ip, globalObj, globalShape, argc); + size_t h = FragmentHash(ip, globalObj, globalShape, argc); VMFragment* vf = tm->vmfragments[h]; while (vf && ! 
(vf->globalObj == globalObj && @@ -764,15 +786,13 @@ getVMFragment(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 gl } static VMFragment* -getLoop(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, - uint32 argc) +getLoop(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc) { return getVMFragment(tm, ip, globalObj, globalShape, argc); } static Fragment* -getAnchor(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, - uint32 argc) +getAnchor(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc) { VMFragment *f = new (&gc) VMFragment(ip, globalObj, globalShape, argc); JS_ASSERT(f); @@ -789,7 +809,7 @@ getAnchor(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 global } else { /* this is the first fragment */ f->first = f; - size_t h = fragmentHash(ip, globalObj, globalShape, argc); + size_t h = FragmentHash(ip, globalObj, globalShape, argc); f->next = tm->vmfragments[h]; tm->vmfragments[h] = f; } @@ -801,9 +821,10 @@ getAnchor(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 global #ifdef DEBUG static void -ensureTreeIsUnique(JSTraceMonitor* tm, VMFragment* f, TreeInfo* ti) +AssertTreeIsUnique(JSTraceMonitor* tm, VMFragment* f, TreeInfo* ti) { JS_ASSERT(f->root == f); + /* * Check for duplicate entry type maps. This is always wrong and hints at * trace explosion since we are trying to stabilize something without @@ -823,21 +844,16 @@ ensureTreeIsUnique(JSTraceMonitor* tm, VMFragment* f, TreeInfo* ti) #endif static void -js_AttemptCompilation(JSContext *cx, JSTraceMonitor* tm, JSObject* globalObj, jsbytecode* pc, - uint32 argc) +AttemptCompilation(JSContext *cx, JSTraceMonitor* tm, JSObject* globalObj, jsbytecode* pc, + uint32 argc) { - /* - * If we already permanently blacklisted the location, undo that. - */ + /* If we already permanently blacklisted the location, undo that. */ JS_ASSERT(*(jsbytecode*)pc == JSOP_NOP || *(jsbytecode*)pc == JSOP_LOOP); *(jsbytecode*)pc = JSOP_LOOP; - js_resetRecordingAttempts(cx, pc); + ResetRecordingAttempts(cx, pc); - /* - * Breath new live into all peer fragments at the designated loop header. - */ - Fragment* f = (VMFragment*)getLoop(tm, pc, globalObj, OBJ_SHAPE(globalObj), - argc); + /* Breathe new life into all peer fragments at the designated loop header. 
*/ + Fragment* f = (VMFragment*)getLoop(tm, pc, globalObj, OBJ_SHAPE(globalObj), argc); if (!f) { /* * If the global object's shape changed, we can't easily find the @@ -861,7 +877,8 @@ js_AttemptCompilation(JSContext *cx, JSTraceMonitor* tm, JSObject* globalObj, js JS_DEFINE_CALLINFO_1(static, DOUBLE, i2f, INT32, 1, 1) JS_DEFINE_CALLINFO_1(static, DOUBLE, u2f, UINT32, 1, 1) -static bool isi2f(LInsp i) +static bool +isi2f(LIns* i) { if (i->isop(LIR_i2f)) return true; @@ -869,8 +886,7 @@ static bool isi2f(LInsp i) if (nanojit::AvmCore::config.soft_float && i->isop(LIR_qjoin) && i->oprnd1()->isop(LIR_call) && - i->oprnd2()->isop(LIR_callh)) - { + i->oprnd2()->isop(LIR_callh)) { if (i->oprnd1()->callInfo() == &i2f_ci) return true; } @@ -878,7 +894,8 @@ static bool isi2f(LInsp i) return false; } -static bool isu2f(LInsp i) +static bool +isu2f(LIns* i) { if (i->isop(LIR_u2f)) return true; @@ -886,8 +903,7 @@ static bool isu2f(LInsp i) if (nanojit::AvmCore::config.soft_float && i->isop(LIR_qjoin) && i->oprnd1()->isop(LIR_call) && - i->oprnd2()->isop(LIR_callh)) - { + i->oprnd2()->isop(LIR_callh)) { if (i->oprnd1()->callInfo() == &u2f_ci) return true; } @@ -895,19 +911,19 @@ static bool isu2f(LInsp i) return false; } -static LInsp iu2fArg(LInsp i) +static LIns* +iu2fArg(LIns* i) { if (nanojit::AvmCore::config.soft_float && - i->isop(LIR_qjoin)) - { + i->isop(LIR_qjoin)) { return i->oprnd1()->arg(0); } return i->oprnd1(); } - -static LIns* demote(LirWriter *out, LInsp i) +static LIns* +demote(LirWriter *out, LIns* i) { if (i->isCall()) return callArgN(i, 0); @@ -921,7 +937,8 @@ static LIns* demote(LirWriter *out, LInsp i) return out->insImm(ci); } -static bool isPromoteInt(LIns* i) +static bool +isPromoteInt(LIns* i) { if (isi2f(i) || i->isconst()) return true; @@ -931,7 +948,8 @@ static bool isPromoteInt(LIns* i) return d == jsdouble(jsint(d)) && !JSDOUBLE_IS_NEGZERO(d); } -static bool isPromoteUint(LIns* i) +static bool +isPromoteUint(LIns* i) { if (isu2f(i) || i->isconst()) return true; @@ -941,12 +959,14 @@ static bool isPromoteUint(LIns* i) return d == jsdouble(jsuint(d)) && !JSDOUBLE_IS_NEGZERO(d); } -static bool isPromote(LIns* i) +static bool +isPromote(LIns* i) { return isPromoteInt(i) || isPromoteUint(i); } -static bool isconst(LIns* i, int32_t c) +static bool +IsConst(LIns* i, int32_t c) { return i->isconst() && i->imm32() == c; } @@ -955,7 +975,8 @@ static bool isconst(LIns* i, int32_t c) * Determine whether this operand is guaranteed to not overflow the specified * integer operation. 
*/ -static bool overflowSafe(LOpcode op, LIns* i) +static bool +IsOverflowSafe(LOpcode op, LIns* i) { LIns* c; switch (op) { @@ -1066,7 +1087,7 @@ public: { } - LInsp quadCall(const CallInfo *ci, LInsp args[]) { + LIns* quadCall(const CallInfo *ci, LIns* args[]) { LInsp qlo, qhi; qlo = out->insCall(ci, args); @@ -1074,7 +1095,7 @@ public: return out->qjoin(qlo, qhi); } - LInsp ins1(LOpcode v, LInsp s0) + LIns* ins1(LOpcode v, LIns* s0) { if (v == LIR_fneg) return quadCall(&fneg_ci, &s0); @@ -1088,10 +1109,10 @@ public: return out->ins1(v, s0); } - LInsp ins2(LOpcode v, LInsp s0, LInsp s1) + LIns* ins2(LOpcode v, LIns* s0, LIns* s1) { - LInsp args[2]; - LInsp bv; + LIns* args[2]; + LIns* bv; // change the numeric value and order of these LIR opcodes and die if (LIR_fadd <= v && v <= LIR_fdiv) { @@ -1116,10 +1137,10 @@ public: return out->ins2(v, s0, s1); } - LInsp insCall(const CallInfo *ci, LInsp args[]) + LIns* insCall(const CallInfo *ci, LIns* args[]) { // if the return type is ARGSIZE_F, we have - // to do a quadCall ( qjoin(call,callh) ) + // to do a quadCall(qjoin(call,callh)) if ((ci->_argtypes & 3) == ARGSIZE_F) return quadCall(ci, args); @@ -1135,7 +1156,7 @@ public: { } - LInsp ins2(LOpcode v, LInsp s0, LInsp s1) + LIns* ins2(LOpcode v, LIns* s0, LIns* s1) { if (s0 == s1 && v == LIR_feq) { if (isPromote(s0)) { @@ -1143,8 +1164,8 @@ public: return insImm(1); } if (s0->isop(LIR_fmul) || s0->isop(LIR_fsub) || s0->isop(LIR_fadd)) { - LInsp lhs = s0->oprnd1(); - LInsp rhs = s0->oprnd2(); + LIns* lhs = s0->oprnd1(); + LIns* rhs = s0->oprnd2(); if (isPromote(lhs) && isPromote(rhs)) { // add/sub/mul promoted ints can't be nan return insImm(1); @@ -1163,8 +1184,8 @@ public: return out->ins2(v, demote(out, s0), demote(out, s1)); } } else if (v == LIR_or && - s0->isop(LIR_lsh) && isconst(s0->oprnd2(), 16) && - s1->isop(LIR_and) && isconst(s1->oprnd2(), 0xffff)) { + s0->isop(LIR_lsh) && IsConst(s0->oprnd2(), 16) && + s1->isop(LIR_and) && IsConst(s1->oprnd2(), 0xffff)) { LIns* msw = s0->oprnd1(); LIns* lsw = s1->oprnd1(); LIns* x; @@ -1172,16 +1193,16 @@ public: if (lsw->isop(LIR_add) && lsw->oprnd1()->isop(LIR_and) && lsw->oprnd2()->isop(LIR_and) && - isconst(lsw->oprnd1()->oprnd2(), 0xffff) && - isconst(lsw->oprnd2()->oprnd2(), 0xffff) && + IsConst(lsw->oprnd1()->oprnd2(), 0xffff) && + IsConst(lsw->oprnd2()->oprnd2(), 0xffff) && msw->isop(LIR_add) && msw->oprnd1()->isop(LIR_add) && msw->oprnd2()->isop(LIR_rsh) && msw->oprnd1()->oprnd1()->isop(LIR_rsh) && msw->oprnd1()->oprnd2()->isop(LIR_rsh) && - isconst(msw->oprnd2()->oprnd2(), 16) && - isconst(msw->oprnd1()->oprnd1()->oprnd2(), 16) && - isconst(msw->oprnd1()->oprnd2()->oprnd2(), 16) && + IsConst(msw->oprnd2()->oprnd2(), 16) && + IsConst(msw->oprnd1()->oprnd1()->oprnd2(), 16) && + IsConst(msw->oprnd1()->oprnd2()->oprnd2(), 16) && (x = lsw->oprnd1()->oprnd1()) == msw->oprnd1()->oprnd1()->oprnd1() && (y = lsw->oprnd2()->oprnd1()) == msw->oprnd1()->oprnd2()->oprnd1() && lsw == msw->oprnd2()->oprnd1()) { @@ -1192,21 +1213,21 @@ public: return out->ins2(v, s0, s1); } - LInsp insCall(const CallInfo *ci, LInsp args[]) + LIns* insCall(const CallInfo *ci, LIns* args[]) { if (ci == &js_DoubleToUint32_ci) { - LInsp s0 = args[0]; + LIns* s0 = args[0]; if (s0->isconstq()) return out->insImm(js_DoubleToECMAUint32(s0->imm64f())); if (isi2f(s0) || isu2f(s0)) return iu2fArg(s0); } else if (ci == &js_DoubleToInt32_ci) { - LInsp s0 = args[0]; + LIns* s0 = args[0]; if (s0->isconstq()) return out->insImm(js_DoubleToECMAInt32(s0->imm64f())); if 
(s0->isop(LIR_fadd) || s0->isop(LIR_fsub)) { - LInsp lhs = s0->oprnd1(); - LInsp rhs = s0->oprnd2(); + LIns* lhs = s0->oprnd1(); + LIns* rhs = s0->oprnd2(); if (isPromote(lhs) && isPromote(rhs)) { LOpcode op = LOpcode(s0->opcode() & ~LIR64); return out->ins2(op, demote(out, lhs), demote(out, rhs)); @@ -1214,6 +1235,7 @@ public: } if (isi2f(s0) || isu2f(s0)) return iu2fArg(s0); + // XXX ARM -- check for qjoin(call(UnboxDouble),call(UnboxDouble)) if (s0->isCall()) { const CallInfo* ci2 = s0->callInfo(); @@ -1240,7 +1262,7 @@ public: } } } else if (ci == &js_BoxDouble_ci) { - LInsp s0 = args[0]; + LIns* s0 = args[0]; JS_ASSERT(s0->isQuad()); if (isPromoteInt(s0)) { LIns* args2[] = { demote(out, s0), args[1] }; @@ -1254,15 +1276,15 @@ public: }; /* - * Visit the values in the given JSStackFrame that the tracer cares about. This visitor - * function is (implicitly) the primary definition of the native stack area layout. There - * are a few other independent pieces of code that must be maintained to assume the same - * layout. They are marked like this: + * Visit the values in the given JSStackFrame that the tracer cares about. This + * visitor function is (implicitly) the primary definition of the native stack + * area layout. There are a few other independent pieces of code that must be + * maintained to assume the same layout. They are marked like this: * * Duplicate native stack layout computation: see VisitFrameSlots header comment. */ template -JS_REQUIRES_STACK static bool +static JS_REQUIRES_STACK bool VisitFrameSlots(Visitor &visitor, unsigned depth, JSStackFrame *fp, JSStackFrame *up) { @@ -1301,14 +1323,14 @@ VisitFrameSlots(Visitor &visitor, unsigned depth, JSStackFrame *fp, } template -JS_REQUIRES_STACK static JS_ALWAYS_INLINE bool +static JS_REQUIRES_STACK JS_ALWAYS_INLINE bool VisitStackSlots(Visitor &visitor, JSContext *cx, unsigned callDepth) { return VisitFrameSlots(visitor, callDepth, cx->fp, NULL); } template -JS_REQUIRES_STACK static JS_ALWAYS_INLINE void +static JS_REQUIRES_STACK JS_ALWAYS_INLINE void VisitGlobalSlots(Visitor &visitor, JSContext *cx, JSObject *globalObj, unsigned ngslots, uint16 *gslots) { @@ -1321,7 +1343,7 @@ VisitGlobalSlots(Visitor &visitor, JSContext *cx, JSObject *globalObj, class AdjustCallerTypeVisitor; template -JS_REQUIRES_STACK static JS_ALWAYS_INLINE void +static JS_REQUIRES_STACK JS_ALWAYS_INLINE void VisitGlobalSlots(Visitor &visitor, JSContext *cx, SlotList &gslots) { VisitGlobalSlots(visitor, cx, JS_GetGlobalForObject(cx, cx->fp->scopeChain), @@ -1330,7 +1352,7 @@ VisitGlobalSlots(Visitor &visitor, JSContext *cx, SlotList &gslots) template -JS_REQUIRES_STACK static JS_ALWAYS_INLINE void +static JS_REQUIRES_STACK JS_ALWAYS_INLINE void VisitSlots(Visitor& visitor, JSContext* cx, JSObject* globalObj, unsigned callDepth, unsigned ngslots, uint16* gslots) { @@ -1339,7 +1361,7 @@ VisitSlots(Visitor& visitor, JSContext* cx, JSObject* globalObj, } template -JS_REQUIRES_STACK static JS_ALWAYS_INLINE void +static JS_REQUIRES_STACK JS_ALWAYS_INLINE void VisitSlots(Visitor& visitor, JSContext* cx, unsigned callDepth, unsigned ngslots, uint16* gslots) { @@ -1348,7 +1370,7 @@ VisitSlots(Visitor& visitor, JSContext* cx, unsigned callDepth, } template -JS_REQUIRES_STACK static JS_ALWAYS_INLINE void +static JS_REQUIRES_STACK JS_ALWAYS_INLINE void VisitSlots(Visitor &visitor, JSContext *cx, JSObject *globalObj, unsigned callDepth, const SlotList& slots) { @@ -1357,7 +1379,7 @@ VisitSlots(Visitor &visitor, JSContext *cx, JSObject *globalObj, } template 
-JS_REQUIRES_STACK static JS_ALWAYS_INLINE void +static JS_REQUIRES_STACK JS_ALWAYS_INLINE void VisitSlots(Visitor &visitor, JSContext *cx, unsigned callDepth, const SlotList& slots) { @@ -1417,16 +1439,21 @@ public: } }; -/* Calculate the total number of native frame slots we need from this frame - all the way back to the entry frame, including the current stack usage. */ +/* + * Calculate the total number of native frame slots we need from this frame all + * the way back to the entry frame, including the current stack usage. + */ JS_REQUIRES_STACK unsigned -js_NativeStackSlots(JSContext *cx, unsigned callDepth) +NativeStackSlots(JSContext *cx, unsigned callDepth) { JSStackFrame* fp = cx->fp; unsigned slots = 0; unsigned depth = callDepth; for (;;) { - /* Duplicate native stack layout computation: see VisitFrameSlots header comment. */ + /* + * Duplicate native stack layout computation: see VisitFrameSlots + * header comment. + */ unsigned operands = fp->regs->sp - StackBase(fp); slots += operands; if (fp->callee) @@ -1447,7 +1474,7 @@ js_NativeStackSlots(JSContext *cx, unsigned callDepth) if (missing > 0) slots += missing; } - JS_NOT_REACHED("js_NativeStackSlots"); + JS_NOT_REACHED("NativeStackSlots"); } class CaptureTypesVisitor : public SlotVisitorBase @@ -1504,7 +1531,7 @@ public: JS_REQUIRES_STACK void TypeMap::captureTypes(JSContext* cx, JSObject* globalObj, SlotList& slots, unsigned callDepth) { - setLength(js_NativeStackSlots(cx, callDepth) + slots.length()); + setLength(NativeStackSlots(cx, callDepth) + slots.length()); CaptureTypesVisitor visitor(cx, data()); VisitSlots(visitor, cx, globalObj, callDepth, slots); JS_ASSERT(visitor.length() == length()); @@ -1530,10 +1557,13 @@ TypeMap::matches(TypeMap& other) const return !memcmp(data(), other.data(), length()); } -/* Use the provided storage area to create a new type map that contains the partial type map - with the rest of it filled up from the complete type map. */ +/* + * Use the provided storage area to create a new type map that contains the + * partial type map with the rest of it filled up from the complete type + * map. + */ static void -mergeTypeMaps(JSTraceType** partial, unsigned* plength, JSTraceType* complete, unsigned clength, JSTraceType* mem) +MergeTypeMaps(JSTraceType** partial, unsigned* plength, JSTraceType* complete, unsigned clength, JSTraceType* mem) { unsigned l = *plength; JS_ASSERT(l < clength); @@ -1545,7 +1575,7 @@ mergeTypeMaps(JSTraceType** partial, unsigned* plength, JSTraceType* complete, u /* Specializes a tree to any missing globals, including any dependent trees. */ static JS_REQUIRES_STACK void -specializeTreesToMissingGlobals(JSContext* cx, JSObject* globalObj, TreeInfo* root) +SpecializeTreesToMissingGlobals(JSContext* cx, JSObject* globalObj, TreeInfo* root) { TreeInfo* ti = root; @@ -1554,19 +1584,38 @@ specializeTreesToMissingGlobals(JSContext* cx, JSObject* globalObj, TreeInfo* ro for (unsigned i = 0; i < root->dependentTrees.length(); i++) { ti = (TreeInfo*)root->dependentTrees[i]->vmprivate; + /* ti can be NULL if we hit the recording tree in emitTreeCall; this is harmless. 
*/ if (ti && ti->nGlobalTypes() < ti->globalSlots->length()) - specializeTreesToMissingGlobals(cx, globalObj, ti); + SpecializeTreesToMissingGlobals(cx, globalObj, ti); } for (unsigned i = 0; i < root->linkedTrees.length(); i++) { ti = (TreeInfo*)root->linkedTrees[i]->vmprivate; if (ti && ti->nGlobalTypes() < ti->globalSlots->length()) - specializeTreesToMissingGlobals(cx, globalObj, ti); + SpecializeTreesToMissingGlobals(cx, globalObj, ti); } } +static inline JSTraceType* +GetStackTypeMap(nanojit::SideExit* exit) +{ + return (JSTraceType*)(((VMSideExit*)exit) + 1); +} + +static inline JSTraceType* +GetGlobalTypeMap(nanojit::SideExit* exit) +{ + return GetStackTypeMap(exit) + ((VMSideExit*)exit)->numStackSlots; +} + +static inline JSTraceType* +GetFullTypeMap(nanojit::SideExit* exit) +{ + return GetStackTypeMap(exit); +} + static void -js_TrashTree(JSContext* cx, Fragment* f); +TrashTree(JSContext* cx, Fragment* f); JS_REQUIRES_STACK TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _fragment, @@ -1636,7 +1685,7 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _frag /* If we came from exit, we might not have enough global types. */ if (ti->globalSlots->length() > ti->nGlobalTypes()) - specializeTreesToMissingGlobals(cx, globalObj, ti); + SpecializeTreesToMissingGlobals(cx, globalObj, ti); /* read into registers all values on the stack and all globals we know so far */ import(treeInfo, lirbuf->sp, stackSlots, ngslots, callDepth, typeMap); @@ -1650,8 +1699,10 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _frag guard(true, lir->ins_eq0(x), snapshot(TIMEOUT_EXIT)); } - /* If we are attached to a tree call guard, make sure the guard the inner tree exited from - is what we expect it to be. */ + /* + * If we are attached to a tree call guard, make sure the guard the inner + * tree exited from is what we expect it to be. + */ if (_anchor && _anchor->exitType == NESTED_EXIT) { LIns* nested_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, lastTreeExitGuard)), @@ -1690,10 +1741,10 @@ TraceRecorder::~TraceRecorder() } if (trashSelf) - js_TrashTree(cx, fragment->root); + TrashTree(cx, fragment->root); for (unsigned int i = 0; i < whichTreesToTrash.length(); i++) - js_TrashTree(cx, whichTreesToTrash[i]); + TrashTree(cx, whichTreesToTrash[i]); } else if (wasRootFragment) { delete treeInfo; } @@ -1708,12 +1759,14 @@ TraceRecorder::~TraceRecorder() delete generatedTraceableNative; } -void TraceRecorder::removeFragmentoReferences() +void +TraceRecorder::removeFragmentoReferences() { fragment = NULL; } -void TraceRecorder::deepAbort() +void +TraceRecorder::deepAbort() { debug_only_print0(LC_TMTracer|LC_TMAbort, "deep abort"); deepAborted = true; @@ -1724,8 +1777,10 @@ inline LIns* TraceRecorder::addName(LIns* ins, const char* name) { #ifdef JS_JIT_SPEW - /* We'll only ask for verbose Nanojit when .lcbits > 0, so - there's no point in adding names otherwise. */ + /* + * We'll only ask for verbose Nanojit when .lcbits > 0, so there's no point + * in adding names otherwise. + */ if (js_LogController.lcbits > 0) lirbuf->names->addName(ins, name); #endif @@ -1739,7 +1794,7 @@ TraceRecorder::getCallDepth() const return callDepth; } -/* Determine the offset in the native global frame for a jsval we track */ +/* Determine the offset in the native global frame for a jsval we track. 
*/ ptrdiff_t TraceRecorder::nativeGlobalOffset(jsval* p) const { @@ -1749,7 +1804,7 @@ TraceRecorder::nativeGlobalOffset(jsval* p) const return sizeof(InterpState) + ((p - globalObj->dslots) + JS_INITIAL_NSLOTS) * sizeof(double); } -/* Determine whether a value is a global stack slot */ +/* Determine whether a value is a global stack slot. */ bool TraceRecorder::isGlobal(jsval* p) const { @@ -1757,12 +1812,12 @@ TraceRecorder::isGlobal(jsval* p) const (size_t(p - globalObj->dslots) < (STOBJ_NSLOTS(globalObj) - JS_INITIAL_NSLOTS))); } -/* +/* * Return the offset in the native stack for the given jsval. More formally, * |p| must be the address of a jsval that is represented in the native stack * area. The return value is the offset, from InterpState::stackBase, in bytes, - * where the native representation of |*p| is stored. To get the offset relative - * to InterpState::sp, subtract TreeInfo::nativeStackBase. + * where the native representation of |*p| is stored. To get the offset + * relative to InterpState::sp, subtract TreeInfo::nativeStackBase. */ JS_REQUIRES_STACK ptrdiff_t TraceRecorder::nativeStackOffset(jsval* p) const @@ -1770,9 +1825,10 @@ TraceRecorder::nativeStackOffset(jsval* p) const CountSlotsVisitor visitor(p); VisitStackSlots(visitor, cx, callDepth); size_t offset = visitor.count() * sizeof(double); + /* - * If it's not in a pending frame, it must be on the stack of the current frame above - * sp but below fp->slots + script->nslots. + * If it's not in a pending frame, it must be on the stack of the current + * frame above sp but below fp->slots + script->nslots. */ if (!visitor.stopped()) { JS_ASSERT(size_t(p - cx->fp->slots) < cx->fp->script->nslots); @@ -1781,8 +1837,7 @@ TraceRecorder::nativeStackOffset(jsval* p) const return offset; } -/* Track the maximum number of native frame slots we need during - execution. */ +/* Track the maximum number of native frame slots we need during execution. */ void TraceRecorder::trackNativeStackUse(unsigned slots) { @@ -1790,9 +1845,11 @@ TraceRecorder::trackNativeStackUse(unsigned slots) treeInfo->maxNativeStackSlots = slots; } -/* Unbox a jsval into a slot. Slots are wide enough to hold double values directly (instead of - storing a pointer to them). We now assert instead of type checking, the caller must ensure the - types are compatible. */ +/* + * Unbox a jsval into a slot. Slots are wide enough to hold double values + * directly (instead of storing a pointer to them). We assert instead of + * type checking. The caller must ensure the types are compatible. + */ static void ValueToNative(JSContext* cx, jsval v, JSTraceType type, double* slot) { @@ -1808,6 +1865,7 @@ ValueToNative(JSContext* cx, jsval v, JSTraceType type, double* slot) ? 
"null" : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name); return; + case TT_INT32: jsint i; if (JSVAL_IS_INT(v)) @@ -1818,6 +1876,7 @@ ValueToNative(JSContext* cx, jsval v, JSTraceType type, double* slot) JS_ASSERT(JSVAL_IS_INT(v)); debug_only_printf(LC_TMTracer, "int<%d> ", *(jsint*)slot); return; + case TT_DOUBLE: jsdouble d; if (JSVAL_IS_INT(v)) @@ -1828,25 +1887,30 @@ ValueToNative(JSContext* cx, jsval v, JSTraceType type, double* slot) *(jsdouble*)slot = d; debug_only_printf(LC_TMTracer, "double<%g> ", d); return; + case TT_JSVAL: JS_NOT_REACHED("found jsval type in an entry type map"); return; + case TT_STRING: JS_ASSERT(tag == JSVAL_STRING); *(JSString**)slot = JSVAL_TO_STRING(v); debug_only_printf(LC_TMTracer, "string<%p> ", (void*)(*(JSString**)slot)); return; + case TT_NULL: JS_ASSERT(tag == JSVAL_OBJECT); *(JSObject**)slot = NULL; debug_only_print0(LC_TMTracer, "null "); return; + case TT_PSEUDOBOOLEAN: /* Watch out for pseudo-booleans. */ JS_ASSERT(tag == JSVAL_BOOLEAN); *(JSBool*)slot = JSVAL_TO_PSEUDO_BOOLEAN(v); debug_only_printf(LC_TMTracer, "pseudoboolean<%d> ", *(JSBool*)slot); return; + case TT_FUNCTION: { JS_ASSERT(tag == JSVAL_OBJECT); JSObject* obj = JSVAL_TO_OBJECT(v); @@ -1866,8 +1930,10 @@ ValueToNative(JSContext* cx, jsval v, JSTraceType type, double* slot) JS_NOT_REACHED("unexpected type"); } -/* We maintain an emergency pool of doubles so we can recover safely if a trace runs - out of memory (doubles or objects). */ +/* + * We maintain an emergency pool of doubles so we can recover safely if a trace + * runs out of memory (doubles or objects). + */ static jsval AllocateDoubleFromReservedPool(JSContext* cx) { @@ -1877,7 +1943,7 @@ AllocateDoubleFromReservedPool(JSContext* cx) } static bool -js_ReplenishReservedPool(JSContext* cx, JSTraceMonitor* tm) +ReplenishReservedPool(JSContext* cx, JSTraceMonitor* tm) { /* We should not be called with a full pool. */ JS_ASSERT((size_t) (tm->reservedDoublePoolPtr - tm->reservedDoublePool) < @@ -1923,9 +1989,11 @@ oom: return false; } -/* Box a value from the native stack back into the jsval format. Integers - that are too large to fit into a jsval are automatically boxed into - heap-allocated doubles. */ +/* + * Box a value from the native stack back into the jsval format. Integers that + * are too large to fit into a jsval are automatically boxed into + * heap-allocated doubles. + */ static void NativeToValue(JSContext* cx, jsval& v, JSTraceType type, double* slot) { @@ -1941,6 +2009,7 @@ NativeToValue(JSContext* cx, jsval& v, JSTraceType type, double* slot) ? "null" : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name); break; + case TT_INT32: i = *(jsint*)slot; debug_only_printf(LC_TMTracer, "int<%d> ", i); @@ -1957,8 +2026,10 @@ NativeToValue(JSContext* cx, jsval& v, JSTraceType type, double* slot) if (JSDOUBLE_IS_INT(d, i)) goto store_int; store_double: { - /* Its not safe to trigger the GC here, so use an emergency heap if we are out of - double boxes. */ + /* + * It's not safe to trigger the GC here, so use an emergency heap if we + * are out of double boxes. 
+ */ if (cx->doubleFreeList) { #ifdef DEBUG JSBool ok = @@ -1972,25 +2043,30 @@ NativeToValue(JSContext* cx, jsval& v, JSTraceType type, double* slot) *JSVAL_TO_DOUBLE(v) = d; return; } + case TT_JSVAL: v = *(jsval*)slot; JS_ASSERT(v != JSVAL_ERROR_COOKIE); /* don't leak JSVAL_ERROR_COOKIE */ debug_only_printf(LC_TMTracer, "box<%p> ", (void*)v); break; + case TT_STRING: v = STRING_TO_JSVAL(*(JSString**)slot); debug_only_printf(LC_TMTracer, "string<%p> ", (void*)(*(JSString**)slot)); break; + case TT_NULL: JS_ASSERT(*(JSObject**)slot == NULL); v = JSVAL_NULL; debug_only_printf(LC_TMTracer, "null<%p> ", (void*)(*(JSObject**)slot)); break; + case TT_PSEUDOBOOLEAN: /* Watch out for pseudo-booleans. */ v = PSEUDO_BOOLEAN_TO_JSVAL(*(JSBool*)slot); debug_only_printf(LC_TMTracer, "boolean<%d> ", *(JSBool*)slot); break; + case TT_FUNCTION: { JS_ASSERT(HAS_FUNCTION_CLASS(*(JSObject**)slot)); v = OBJECT_TO_JSVAL(*(JSObject**)slot); @@ -2128,8 +2204,8 @@ FlushNativeGlobalFrame(JSContext *cx, double *global, unsigned ngslots, * callDepth Call depth of current point relative to trace entry */ template -JSTraceType JS_INLINE -js_GetUpvarOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) +inline JSTraceType +GetUpvarOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) { InterpState* state = cx->interpState; FrameInfo** fip = state->rp + callDepth; @@ -2191,9 +2267,9 @@ struct UpvarArgTraits { }; uint32 JS_FASTCALL -js_GetUpvarArgOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) +GetUpvarArgOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) { - return js_GetUpvarOnTrace(cx, upvarLevel, slot, callDepth, result); + return GetUpvarOnTrace(cx, upvarLevel, slot, callDepth, result); } // For this traits type, 'slot' is an index into the local slots array. @@ -2208,15 +2284,15 @@ struct UpvarVarTraits { }; uint32 JS_FASTCALL -js_GetUpvarVarOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) +GetUpvarVarOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) { - return js_GetUpvarOnTrace(cx, upvarLevel, slot, callDepth, result); + return GetUpvarOnTrace(cx, upvarLevel, slot, callDepth, result); } /* - * For this traits type, 'slot' is an index into the stack area (within slots, after nfixed) - * of a frame with no function. (On trace, the top-level frame is the only one that can have - * no function.) + * For this traits type, 'slot' is an index into the stack area (within slots, + * after nfixed) of a frame with no function. (On trace, the top-level frame is + * the only one that can have no function.) */ struct UpvarStackTraits { static jsval interp_get(JSStackFrame* fp, int32 slot) { @@ -2225,8 +2301,8 @@ struct UpvarStackTraits { static uint32 native_slot(uint32 argc, int32 slot) { /* - * Locals are not imported by the tracer when the frame has no function, so - * we do not add fp->script->nfixed. + * Locals are not imported by the tracer when the frame has no + * function, so we do not add fp->script->nfixed. 
*/ JS_ASSERT(argc == 0); return slot; @@ -2234,13 +2310,13 @@ struct UpvarStackTraits { }; uint32 JS_FASTCALL -js_GetUpvarStackOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) +GetUpvarStackOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) { - return js_GetUpvarOnTrace(cx, upvarLevel, slot, callDepth, result); + return GetUpvarOnTrace(cx, upvarLevel, slot, callDepth, result); } /* - * Generic function to read upvars from Call objects of active heavyweight functions. + * Generic function to read upvars from Call objects of active heavyweight functions. * callee Callee Function object in which the upvar is accessed. * scopeIndex Number of parent steps to make from |callee| to find upvar definition. * This must be at least 1 because |callee| is a Function and we must reach a Call. @@ -2248,9 +2324,9 @@ js_GetUpvarStackOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 cal * callDepth callDepth of current point relative to trace entry. */ template -uint32 JS_INLINE -js_GetFromClosure(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, - double* result) +inline uint32 +GetFromClosure(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, + double* result) { JS_ASSERT(scopeIndex >= 1); JS_ASSERT(OBJ_GET_CLASS(cx, callee) == &js_FunctionClass); @@ -2298,32 +2374,32 @@ js_GetFromClosure(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slo struct ArgClosureTraits { - static JS_INLINE uint32 adj_slot(JSStackFrame* fp, uint32 slot) { return fp->argc + slot; } - static JS_INLINE jsval* slots(JSStackFrame* fp) { return fp->argv; } + static inline uint32 adj_slot(JSStackFrame* fp, uint32 slot) { return fp->argc + slot; } + static inline jsval* slots(JSStackFrame* fp) { return fp->argv; } private: ArgClosureTraits(); }; uint32 JS_FASTCALL -js_GetClosureArg(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, - double* result) +GetClosureArg(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, + double* result) { - return js_GetFromClosure(cx, callee, scopeIndex, slot, callDepth, result); + return GetFromClosure(cx, callee, scopeIndex, slot, callDepth, result); } struct VarClosureTraits { - static JS_INLINE uint32 adj_slot(JSStackFrame* fp, uint32 slot) { return slot; } - static JS_INLINE jsval* slots(JSStackFrame* fp) { return fp->slots; } + static inline uint32 adj_slot(JSStackFrame* fp, uint32 slot) { return slot; } + static inline jsval* slots(JSStackFrame* fp) { return fp->slots; } private: VarClosureTraits(); }; uint32 JS_FASTCALL -js_GetClosureVar(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, - double* result) +GetClosureVar(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, + double* result) { - return js_GetFromClosure(cx, callee, scopeIndex, slot, callDepth, result); + return GetFromClosure(cx, callee, scopeIndex, slot, callDepth, result); } /** @@ -2362,6 +2438,7 @@ FlushNativeStackFrame(JSContext* cx, unsigned callDepth, JSTraceType* mp, double JS_ASSERT(n != 0); --n; } + // Skip over stopFrame itself. JS_ASSERT(n != 0); --n; @@ -2397,7 +2474,7 @@ FlushNativeStackFrame(JSContext* cx, unsigned callDepth, JSTraceType* mp, double * involves it calling the debugger hook. * * Allocating the Call object must not fail, so use an object - * previously reserved by js_ExecuteTree if needed. 
+ * previously reserved by ExecuteTree if needed. */ void* hookData = ((JSInlineFrame*)fp)->hookData; ((JSInlineFrame*)fp)->hookData = NULL; @@ -2430,10 +2507,13 @@ TraceRecorder::import(LIns* base, ptrdiff_t offset, jsval* p, JSTraceType t, LIns* ins; if (t == TT_INT32) { /* demoted */ JS_ASSERT(isInt32(*p)); - /* Ok, we have a valid demotion attempt pending, so insert an integer - read and promote it to double since all arithmetic operations expect - to see doubles on entry. The first op to use this slot will emit a - f2i cast which will cancel out the i2f we insert here. */ + + /* + * Ok, we have a valid demotion attempt pending, so insert an integer + * read and promote it to double since all arithmetic operations expect + * to see doubles on entry. The first op to use this slot will emit a + * f2i cast which will cancel out the i2f we insert here. + */ ins = lir->insLoad(LIR_ld, base, offset); ins = lir->ins1(LIR_i2f, ins); } else { @@ -2582,26 +2662,30 @@ JS_REQUIRES_STACK void TraceRecorder::import(TreeInfo* treeInfo, LIns* sp, unsigned stackSlots, unsigned ngslots, unsigned callDepth, JSTraceType* typeMap) { - /* If we get a partial list that doesn't have all the types (i.e. recording from a side - exit that was recorded but we added more global slots later), merge the missing types - from the entry type map. This is safe because at the loop edge we verify that we - have compatible types for all globals (entry type and loop edge type match). While - a different trace of the tree might have had a guard with a different type map for - these slots we just filled in here (the guard we continue from didn't know about them), - since we didn't take that particular guard the only way we could have ended up here - is if that other trace had at its end a compatible type distribution with the entry - map. Since thats exactly what we used to fill in the types our current side exit - didn't provide, this is always safe to do. */ + /* + * If we get a partial list that doesn't have all the types (i.e. recording + * from a side exit that was recorded but we added more global slots + * later), merge the missing types from the entry type map. This is safe + * because at the loop edge we verify that we have compatible types for all + * globals (entry type and loop edge type match). While a different trace + * of the tree might have had a guard with a different type map for these + * slots we just filled in here (the guard we continue from didn't know + * about them), since we didn't take that particular guard the only way we + * could have ended up here is if that other trace had at its end a + * compatible type distribution with the entry map. Since that's exactly + * what we used to fill in the types our current side exit didn't provide, + * this is always safe to do. + */ JSTraceType* globalTypeMap = typeMap + stackSlots; unsigned length = treeInfo->nGlobalTypes(); /* - * This is potentially the typemap of the side exit and thus shorter than the tree's - * global type map. + * This is potentially the typemap of the side exit and thus shorter than + * the tree's global type map. 
*/ if (ngslots < length) { - mergeTypeMaps(&globalTypeMap/*out param*/, &ngslots/*out param*/, + MergeTypeMaps(&globalTypeMap /* out param */, &ngslots /* out param */, treeInfo->globalTypeMap(), length, (JSTraceType*)alloca(sizeof(JSTraceType) * length)); } @@ -2609,9 +2693,8 @@ TraceRecorder::import(TreeInfo* treeInfo, LIns* sp, unsigned stackSlots, unsigne ptrdiff_t offset = -treeInfo->nativeStackBase; /* - * Check whether there are any values on the stack we have to unbox and - * do that first before we waste any time fetching the state from the - * stack. + * Check whether there are any values on the stack we have to unbox and do + * that first before we waste any time fetching the state from the stack. */ ImportBoxedStackSlotVisitor boxedStackVisitor(*this, sp, offset, typeMap); VisitStackSlots(boxedStackVisitor, cx, callDepth); @@ -2636,6 +2719,7 @@ TraceRecorder::isValidSlot(JSScope* scope, JSScopeProperty* sprop) if (sprop->attrs & JSPROP_READONLY) ABORT_TRACE_RV("writing to a read-only property", false); } + /* This check applies even when setflags == 0. */ if (setflags != JOF_SET && !SPROP_HAS_STUB_GETTER(sprop)) ABORT_TRACE_RV("non-stub getter", false); @@ -2652,9 +2736,10 @@ TraceRecorder::lazilyImportGlobalSlot(unsigned slot) { if (slot != uint16(slot)) /* we use a table of 16-bit ints, bail out if that's not enough */ return false; + /* - * If the global object grows too large, alloca in js_ExecuteTree might fail, so - * abort tracing on global objects with unreasonably many slots. + * If the global object grows too large, alloca in ExecuteTree might fail, + * so abort tracing on global objects with unreasonably many slots. */ if (STOBJ_NSLOTS(globalObj) > MAX_GLOBAL_SLOTS) return false; @@ -2662,6 +2747,7 @@ TraceRecorder::lazilyImportGlobalSlot(unsigned slot) if (known(vp)) return true; /* we already have it */ unsigned index = treeInfo->globalSlots->length(); + /* Add the slot to the list of interned global slots. */ JS_ASSERT(treeInfo->nGlobalTypes() == treeInfo->globalSlots->length()); treeInfo->globalSlots->add(slot); @@ -2671,7 +2757,7 @@ TraceRecorder::lazilyImportGlobalSlot(unsigned slot) treeInfo->typeMap.add(type); import(lirbuf->state, sizeof(struct InterpState) + slot*sizeof(double), vp, type, "global", index, NULL); - specializeTreesToMissingGlobals(cx, globalObj, treeInfo); + SpecializeTreesToMissingGlobals(cx, globalObj, treeInfo); return true; } @@ -2679,9 +2765,11 @@ TraceRecorder::lazilyImportGlobalSlot(unsigned slot) LIns* TraceRecorder::writeBack(LIns* i, LIns* base, ptrdiff_t offset) { - /* Sink all type casts targeting the stack into the side exit by simply storing the original - (uncasted) value. Each guard generates the side exit map based on the types of the - last stores to every stack location, so its safe to not perform them on-trace. */ + /* + * Sink all type casts targeting the stack into the side exit by simply storing the original + * (uncasted) value. Each guard generates the side exit map based on the types of the + * last stores to every stack location, so it's safe to not perform them on-trace. 
+ */ if (isPromoteInt(i)) i = ::demote(lir, i); return lir->insStorei(i, base, offset); @@ -2695,9 +2783,13 @@ TraceRecorder::set(jsval* p, LIns* i, bool initializing) JS_ASSERT(initializing || known(p)); checkForGlobalObjectReallocation(); tracker.set(p, i); - /* If we are writing to this location for the first time, calculate the offset into the - native frame manually, otherwise just look up the last load or store associated with - the same source address (p) and use the same offset/base. */ + + /* + * If we are writing to this location for the first time, calculate the + * offset into the native frame manually. Otherwise just look up the last + * load or store associated with the same source address (p) and use the + * same offset/base. + */ LIns* x = nativeFrameTracker.get(p); if (!x) { if (isGlobal(p)) @@ -2760,7 +2852,7 @@ TraceRecorder::checkForGlobalObjectReallocation() /* Determine whether the current branch is a loop edge (taken or not taken). */ static JS_REQUIRES_STACK bool -js_IsLoopEdge(jsbytecode* pc, jsbytecode* header) +IsLoopEdge(jsbytecode* pc, jsbytecode* header) { switch (*pc) { case JSOP_IFEQ: @@ -2805,8 +2897,11 @@ public: if (isPromote && *mTypeMap == TT_DOUBLE) { mLir->insStorei(mRecorder.get(vp), mLirbuf->state, mRecorder.nativeGlobalOffset(vp)); - /* Aggressively undo speculation so the inner tree will compile - if this fails. */ + + /* + * Aggressively undo speculation so the inner tree will compile + * if this fails. + */ oracle.markGlobalSlotUndemotable(mCx, slot); } JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32)); @@ -2847,8 +2942,11 @@ public: mLir->insStorei(mRecorder.get(vp), mLirbuf->sp, -mRecorder.treeInfo->nativeStackBase + mRecorder.nativeStackOffset(vp)); - /* Aggressively undo speculation so the inner tree will - compile if this fails. */ + + /* + * Aggressively undo speculation so the inner tree will compile + * if this fails. + */ oracle.markStackSlotUndemotable(mCx, mSlotnum); } JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32)); @@ -2861,9 +2959,9 @@ public: }; /* - * Promote slots if necessary to match the called tree's type map. This function is - * infallible and must only be called if we are certain that it is possible to - * reconcile the types for each slot in the inner and outer trees. + * Promote slots if necessary to match the called tree's type map. This + * function is infallible and must only be called if we are certain that it is + * possible to reconcile the types for each slot in the inner and outer trees. */ JS_REQUIRES_STACK void TraceRecorder::adjustCallerTypes(Fragment* f) @@ -2895,8 +2993,8 @@ TraceRecorder::determineSlotType(jsval* vp) m = TT_OBJECT; } else { JS_ASSERT(JSVAL_TAG(*vp) == JSVAL_STRING || JSVAL_TAG(*vp) == JSVAL_BOOLEAN); - JS_STATIC_ASSERT(TT_STRING == JSVAL_STRING); - JS_STATIC_ASSERT(TT_PSEUDOBOOLEAN == JSVAL_BOOLEAN); + JS_STATIC_ASSERT(static_cast(TT_STRING) == JSVAL_STRING); + JS_STATIC_ASSERT(static_cast(TT_PSEUDOBOOLEAN) == JSVAL_BOOLEAN); m = JSTraceType(JSVAL_TAG(*vp)); } JS_ASSERT(m != TT_INT32 || isInt32(*vp)); @@ -2939,28 +3037,37 @@ TraceRecorder::snapshot(ExitType exitType) JSFrameRegs* regs = fp->regs; jsbytecode* pc = regs->pc; - /* Check for a return-value opcode that needs to restart at the next instruction. */ + /* + * Check for a return-value opcode that needs to restart at the next + * instruction. + */ const JSCodeSpec& cs = js_CodeSpec[*pc]; /* * When calling a _FAIL native, make the snapshot's pc point to the next - * instruction after the CALL or APPLY. 
Even on failure, a _FAIL native must not - * be called again from the interpreter. + * instruction after the CALL or APPLY. Even on failure, a _FAIL native + * must not be called again from the interpreter. */ bool resumeAfter = (pendingTraceableNative && JSTN_ERRTYPE(pendingTraceableNative) == FAIL_STATUS); if (resumeAfter) { - JS_ASSERT(*pc == JSOP_CALL || *pc == JSOP_APPLY || *pc == JSOP_NEW); + JS_ASSERT(*pc == JSOP_CALL || *pc == JSOP_APPLY || *pc == JSOP_NEW || + *pc == JSOP_SETPROP || *pc == JSOP_SETNAME); pc += cs.length; regs->pc = pc; MUST_FLOW_THROUGH("restore_pc"); } - /* Generate the entry map for the (possibly advanced) pc and stash it in the trace. */ - unsigned stackSlots = js_NativeStackSlots(cx, callDepth); + /* + * Generate the entry map for the (possibly advanced) pc and stash it in + * the trace. + */ + unsigned stackSlots = NativeStackSlots(cx, callDepth); - /* It's sufficient to track the native stack use here since all stores above the - stack watermark defined by guards are killed. */ + /* + * It's sufficient to track the native stack use here since all stores + * above the stack watermark defined by guards are killed. + */ trackNativeStackUse(stackSlots + 1); /* Capture the type map into a temporary location. */ @@ -2972,9 +3079,9 @@ TraceRecorder::snapshot(ExitType exitType) /* * Determine the type of a store by looking at the current type of the - * actual value the interpreter is using. For numbers we have to check - * what kind of store we used last (integer or double) to figure out - * what the side exit show reflect in its typemap. + * actual value the interpreter is using. For numbers we have to check what + * kind of store we used last (integer or double) to figure out what the + * side exit show reflect in its typemap. */ DetermineTypesVisitor detVisitor(*this, typemap); VisitSlots(detVisitor, cx, callDepth, ngslots, @@ -2983,8 +3090,9 @@ TraceRecorder::snapshot(ExitType exitType) ngslots + stackSlots); /* - * If we are currently executing a traceable native or we are attaching a second trace - * to it, the value on top of the stack is a jsval. Make a note of this in the typemap. + * If we are currently executing a traceable native or we are attaching a + * second trace to it, the value on top of the stack is a jsval. Make a + * note of this in the typemap. */ if (pendingTraceableNative && (pendingTraceableNative->flags & JSTN_UNBOX_AFTER)) typemap[stackSlots - 1] = TT_JSVAL; @@ -2994,9 +3102,11 @@ TraceRecorder::snapshot(ExitType exitType) MUST_FLOW_LABEL(restore_pc); regs->pc = pc - cs.length; } else { - /* If we take a snapshot on a goto, advance to the target address. This avoids inner - trees returning on a break goto, which the outer recorder then would confuse with - a break in the outer tree. */ + /* + * If we take a snapshot on a goto, advance to the target address. This + * avoids inner trees returning on a break goto, which the outer + * recorder then would confuse with a break in the outer tree. 
+ */ if (*pc == JSOP_GOTO) pc += GET_JUMP_OFFSET(pc); else if (*pc == JSOP_GOTOX) @@ -3014,7 +3124,7 @@ TraceRecorder::snapshot(ExitType exitType) VMSideExit* e = exits[n]; if (e->pc == pc && e->imacpc == fp->imacpc && ngslots == e->numGlobalSlots && - !memcmp(getFullTypeMap(exits[n]), typemap, typemap_size)) { + !memcmp(GetFullTypeMap(exits[n]), typemap, typemap_size)) { AUDIT(mergedLoopExits); JS_ARENA_RELEASE(&cx->tempPool, mark); return e; @@ -3058,7 +3168,7 @@ TraceRecorder::snapshot(ExitType exitType) exit->rp_adj = exit->calldepth * sizeof(FrameInfo*); exit->nativeCalleeWord = 0; exit->lookupFlags = js_InferFlags(cx, 0); - memcpy(getFullTypeMap(exit), typemap, typemap_size); + memcpy(GetFullTypeMap(exit), typemap, typemap_size); JS_ARENA_RELEASE(&cx->tempPool, mark); return exit; @@ -3137,16 +3247,21 @@ TraceRecorder::copy(VMSideExit* copy) return exit; } -/* Emit a guard for condition (cond), expecting to evaluate to boolean result (expected) - and generate a side exit with type exitType to jump to if the condition does not hold. */ +/* + * Emit a guard for condition (cond), expecting to evaluate to boolean result + * (expected) and generate a side exit with type exitType to jump to if the + * condition does not hold. + */ JS_REQUIRES_STACK void TraceRecorder::guard(bool expected, LIns* cond, ExitType exitType) { guard(expected, cond, snapshot(exitType)); } -/* Try to match the type of a slot to type t. checkType is used to verify that the type of - * values flowing into the loop edge is compatible with the type we expect in the loop header. +/* + * Try to match the type of a slot to type t. checkType is used to verify that + * the type of each value flowing into the loop edge is compatible with the + * type we expect in the loop header. * * @param v Value. * @param t Typemap entry for value. @@ -3168,11 +3283,14 @@ TraceRecorder::checkType(jsval& v, JSTraceType t, jsval*& stage_val, LIns*& stag if (!isNumber(v)) return false; /* not a number? type mismatch */ LIns* i = get(&v); + /* This is always a type mismatch, we can't close a double to an int. */ if (!isPromoteInt(i)) return false; + /* Looks good, slot is an int32, the last instruction should be promotable. */ JS_ASSERT(isInt32(v) && isPromoteInt(i)); + /* Overwrite the value in this slot with the argument promoted back to an integer. */ stage_val = &v; stage_ins = f2i(i); @@ -3189,8 +3307,12 @@ TraceRecorder::checkType(jsval& v, JSTraceType t, jsval*& stage_val, LIns*& stag if (!isNumber(v)) return false; /* not a number? type mismatch */ LIns* i = get(&v); - /* We sink i2f conversions into the side exit, but at the loop edge we have to make - sure we promote back to double if at loop entry we want a double. */ + + /* + * We sink i2f conversions into the side exit, but at the loop edge we + * have to make sure we promote back to double if at loop entry we want + * a double. + */ if (isPromoteInt(i)) { stage_val = &v; stage_ins = lir->ins1(LIR_i2f, i); @@ -3416,8 +3538,9 @@ public: }; /** - * Make sure that the current values in the given stack frame and all stack frames - * up and including entryFrame are type-compatible with the entry map. + * Make sure that the current values in the given stack frame and all stack + * frames up to and including entryFrame are type-compatible with the entry + * map. * * @param root_peer First fragment in peer list. * @param stable_peer Outparam for first type stable peer. 
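Illustration, not part of the patch: the GetStackTypeMap/GetGlobalTypeMap helpers added above treat a side exit's type map as one flat array with the stack types first and the global types after them, and MergeTypeMaps fills the tail of a partial global map from the tree's complete entry map. Below is a minimal standalone C++ sketch of that layout and merge step; ExitSketch, TraceType, and the constants used are simplified stand-ins for the real VMSideExit/JSTraceType definitions, not the actual engine types.

/*
 * Illustration only; not part of the patch. ExitSketch/TraceType are
 * simplified stand-ins for VMSideExit/JSTraceType.
 */
#include <cassert>
#include <vector>

typedef unsigned char TraceType;              /* stand-in for JSTraceType */

struct ExitSketch {                           /* stand-in for VMSideExit */
    unsigned numStackSlots;
    unsigned numGlobalSlots;
    std::vector<TraceType> typeMap;           /* stack types, then global types */
};

static const TraceType* StackTypes(const ExitSketch& e)  { return e.typeMap.data(); }
static const TraceType* GlobalTypes(const ExitSketch& e) { return e.typeMap.data() + e.numStackSlots; }

/* Complete a partial global map from the tree's full entry map. */
static std::vector<TraceType>
MergeGlobalTypes(const ExitSketch& e, const std::vector<TraceType>& entryGlobals)
{
    assert(e.numGlobalSlots <= entryGlobals.size());
    std::vector<TraceType> merged(GlobalTypes(e), GlobalTypes(e) + e.numGlobalSlots);
    merged.insert(merged.end(), entryGlobals.begin() + e.numGlobalSlots, entryGlobals.end());
    return merged;
}

int main()
{
    ExitSketch se = { 2, 1, { 3, 1, 2 } };           /* 2 stack slots, 1 global slot known at the exit */
    std::vector<TraceType> entryGlobals = { 2, 4 };  /* the tree later learned a second global slot */
    assert(StackTypes(se)[0] == 3 && GlobalTypes(se)[0] == 2);

    std::vector<TraceType> full = MergeGlobalTypes(se, entryGlobals);
    assert(full.size() == 2 && full[0] == 2 && full[1] == 4);
    return 0;
}

This mirrors why the merge is safe only when the exit's global map is a prefix of the entry map, as the comments around TraceRecorder::import and AttemptToStabilizeTree in this patch explain.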
@@ -3427,15 +3550,15 @@ public: JS_REQUIRES_STACK bool TraceRecorder::deduceTypeStability(Fragment* root_peer, Fragment** stable_peer, bool& demote) { - JS_ASSERT(treeInfo->globalSlots->length() == - treeInfo->nGlobalTypes()); + JS_ASSERT(treeInfo->globalSlots->length() == treeInfo->nGlobalTypes()); if (stable_peer) *stable_peer = NULL; /* - * Rather than calculate all of this stuff twice, it gets cached locally. The "stage" buffers - * are for calls to set() that will change the exit types. + * Rather than calculate all of this stuff twice, it gets cached locally. + * The "stage" buffers are for calls to set() that will change the exit + * types. */ bool success; unsigned stage_count; @@ -3464,8 +3587,9 @@ TraceRecorder::deduceTypeStability(Fragment* root_peer, Fragment** stable_peer, demote = false; - /* At this point the tree is about to be incomplete, so let's see if we can connect to any - * peer fragment that is type stable. + /* + * At this point the tree is about to be incomplete, so let's see if we can + * connect to any peer fragment that is type stable. */ Fragment* f; TreeInfo* ti; @@ -3476,6 +3600,7 @@ TraceRecorder::deduceTypeStability(Fragment* root_peer, Fragment** stable_peer, if (!f->code()) continue; ti = (TreeInfo*)f->vmprivate; + /* Don't allow varying stack depths */ if ((ti->nStackTypes != treeInfo->nStackTypes) || (ti->typeMap.length() != treeInfo->typeMap.length()) || @@ -3491,8 +3616,9 @@ TraceRecorder::deduceTypeStability(Fragment* root_peer, Fragment** stable_peer, if (success) { /* - * There was a successful match. We don't care about restoring the saved staging, but - * we do need to clear the original undemote list. + * There was a successful match. We don't care about restoring the + * saved staging, but we do need to clear the original undemote + * list. */ for (unsigned i = 0; i < stage_count; i++) set(stage_vals[i], stage_ins[i]); @@ -3504,18 +3630,16 @@ TraceRecorder::deduceTypeStability(Fragment* root_peer, Fragment** stable_peer, } /* - * If this is a loop trace and it would be stable with demotions, build an undemote list - * and return true. Our caller should sniff this and trash the tree, recording a new one - * that will assumedly stabilize. + * If this is a loop trace and it would be stable with demotions, build an + * undemote list and return true. Our caller should sniff this and trash + * the tree, recording a new one that will assumedly stabilize. 
*/ if (demote && fragment->kind == LoopTrace) { UndemoteVisitor visitor(*this, treeInfo->stackTypeMap()); VisitSlots(visitor, cx, 0, *treeInfo->globalSlots); return true; - } else { - demote = false; } - + demote = false; return false; } @@ -3537,8 +3661,7 @@ FlushJITCache(JSContext* cx) Fragmento* fragmento = tm->fragmento; if (fragmento) { if (tm->prohibitFlush) { - debug_only_print0(LC_TMTracer, - "Deferring fragmento flush due to deep bail.\n"); + debug_only_print0(LC_TMTracer, "Deferring fragmento flush due to deep bail.\n"); tm->needFlush = JS_TRUE; return; } @@ -3581,7 +3704,7 @@ TraceRecorder::compile(JSTraceMonitor* tm) Fragmento* fragmento = tm->fragmento; if (treeInfo->maxNativeStackSlots >= MAX_NATIVE_STACK_SLOTS) { debug_only_print0(LC_TMTracer, "Blacklist: excessive stack use.\n"); - js_Blacklist((jsbytecode*) fragment->root->ip); + Blacklist((jsbytecode*) fragment->root->ip); return; } if (anchor && anchor->exitType != CASE_EXIT) @@ -3595,11 +3718,11 @@ TraceRecorder::compile(JSTraceMonitor* tm) return; if (fragmento->assm()->error() != nanojit::None) { debug_only_print0(LC_TMTracer, "Blacklisted: error during compilation\n"); - js_Blacklist((jsbytecode*) fragment->root->ip); + Blacklist((jsbytecode*) fragment->root->ip); return; } - js_resetRecordingAttempts(cx, (jsbytecode*) fragment->ip); - js_resetRecordingAttempts(cx, (jsbytecode*) fragment->root->ip); + ResetRecordingAttempts(cx, (jsbytecode*) fragment->ip); + ResetRecordingAttempts(cx, (jsbytecode*) fragment->root->ip); if (anchor) { #ifdef NANOJIT_IA32 if (anchor->exitType == CASE_EXIT) @@ -3612,27 +3735,28 @@ TraceRecorder::compile(JSTraceMonitor* tm) JS_ASSERT(!fragment->vmprivate); if (fragment == fragment->root) fragment->vmprivate = treeInfo; + /* :TODO: windows support */ #if defined DEBUG && !defined WIN32 const char* filename = cx->fp->script->filename; - char* label = (char*)malloc((filename ? strlen(filename) : 7) + 16); + char* label = (char*)js_malloc((filename ? strlen(filename) : 7) + 16); sprintf(label, "%s:%u", filename ? filename : "", js_FramePCToLineNumber(cx, cx->fp)); fragmento->labels->add(fragment, sizeof(Fragment), 0, label); - free(label); + js_free(label); #endif AUDIT(traceCompleted); } static bool -js_JoinPeersIfCompatible(Fragmento* frago, Fragment* stableFrag, TreeInfo* stableTree, - VMSideExit* exit) +JoinPeersIfCompatible(Fragmento* frago, Fragment* stableFrag, TreeInfo* stableTree, + VMSideExit* exit) { JS_ASSERT(exit->numStackSlots == stableTree->nStackTypes); /* Must have a matching type unstable exit. */ if ((exit->numGlobalSlots + exit->numStackSlots != stableTree->typeMap.length()) || - memcmp(getFullTypeMap(exit), stableTree->typeMap.data(), stableTree->typeMap.length())) { + memcmp(GetFullTypeMap(exit), stableTree->typeMap.data(), stableTree->typeMap.length())) { return false; } @@ -3650,9 +3774,9 @@ JS_REQUIRES_STACK void TraceRecorder::closeLoop(JSTraceMonitor* tm, bool& demote) { /* - * We should have arrived back at the loop header, and hence we don't want to be in an imacro - * here and the opcode should be either JSOP_LOOP, or in case this loop was blacklisted in the - * meantime JSOP_NOP. + * We should have arrived back at the loop header, and hence we don't want + * to be in an imacro here and the opcode should be either JSOP_LOOP or, in + * case this loop was blacklisted in the meantime, JSOP_NOP. 
*/ JS_ASSERT((*cx->fp->regs->pc == JSOP_LOOP || *cx->fp->regs->pc == JSOP_NOP) && !cx->fp->imacpc); @@ -3664,7 +3788,7 @@ TraceRecorder::closeLoop(JSTraceMonitor* tm, bool& demote) if (callDepth != 0) { debug_only_print0(LC_TMTracer, "Blacklisted: stack depth mismatch, possible recursion.\n"); - js_Blacklist((jsbytecode*) fragment->root->ip); + Blacklist((jsbytecode*) fragment->root->ip); trashSelf = true; return; } @@ -3738,14 +3862,14 @@ TraceRecorder::closeLoop(JSTraceMonitor* tm, bool& demote) debug_only_print0(LC_TMTracer, "updating specializations on dependent and linked trees\n"); if (fragment->root->vmprivate) - specializeTreesToMissingGlobals(cx, globalObj, (TreeInfo*)fragment->root->vmprivate); + SpecializeTreesToMissingGlobals(cx, globalObj, (TreeInfo*)fragment->root->vmprivate); /* * If this is a newly formed tree, and the outer tree has not been compiled yet, we * should try to compile the outer tree again. */ if (outer) - js_AttemptCompilation(cx, tm, globalObj, outer, outerArgc); + AttemptCompilation(cx, tm, globalObj, outer, outerArgc); #ifdef JS_JIT_SPEW debug_only_printf(LC_TMMinimal, "recording completed at %s:%u@%u via closeLoop\n", @@ -3775,7 +3899,7 @@ TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, VMFragment* peer_root) uexit = ti->unstableExits; unext = &ti->unstableExits; while (uexit != NULL) { - bool remove = js_JoinPeersIfCompatible(fragmento, fragment, treeInfo, uexit->exit); + bool remove = JoinPeersIfCompatible(fragmento, fragment, treeInfo, uexit->exit); JS_ASSERT(!remove || fragment != peer); debug_only_stmt( if (remove) { @@ -3785,14 +3909,17 @@ TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, VMFragment* peer_root) } ) if (!remove) { - /* See if this exit contains mismatch demotions, which imply trashing a tree. - This is actually faster than trashing the original tree as soon as the - instability is detected, since we could have compiled a fairly stable - tree that ran faster with integers. */ + /* + * See if this exit contains mismatch demotions, which + * imply trashing a tree. This is actually faster than + * trashing the original tree as soon as the instability is + * detected, since we could have compiled a fairly stable + * tree that ran faster with integers. + */ unsigned stackCount = 0; unsigned globalCount = 0; t1 = treeInfo->stackTypeMap(); - t2 = getStackTypeMap(uexit->exit); + t2 = GetStackTypeMap(uexit->exit); for (unsigned i = 0; i < uexit->exit->numStackSlots; i++) { if (t2[i] == TT_INT32 && t1[i] == TT_DOUBLE) { stackDemotes[stackCount++] = i; @@ -3802,7 +3929,7 @@ TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, VMFragment* peer_root) } } t1 = treeInfo->globalTypeMap(); - t2 = getGlobalTypeMap(uexit->exit); + t2 = GetGlobalTypeMap(uexit->exit); for (unsigned i = 0; i < uexit->exit->numGlobalSlots; i++) { if (t2[i] == TT_INT32 && t1[i] == TT_DOUBLE) { globalDemotes[globalCount++] = i; @@ -3837,8 +3964,8 @@ TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, VMFragment* peer_root) } } - debug_only_stmt(js_DumpPeerStability(traceMonitor, peer_root->ip, peer_root->globalObj, - peer_root->globalShape, peer_root->argc);) + debug_only_stmt(DumpPeerStability(traceMonitor, peer_root->ip, peer_root->globalObj, + peer_root->globalShape, peer_root->argc);) } /* Emit an always-exit guard and compile the tree (used for break statements. 
*/ @@ -3847,7 +3974,7 @@ TraceRecorder::endLoop(JSTraceMonitor* tm) { if (callDepth != 0) { debug_only_print0(LC_TMTracer, "Blacklisted: stack depth mismatch, possible recursion.\n"); - js_Blacklist((jsbytecode*) fragment->root->ip); + Blacklist((jsbytecode*) fragment->root->ip); trashSelf = true; return; } @@ -3862,19 +3989,21 @@ TraceRecorder::endLoop(JSTraceMonitor* tm) VMFragment* root = (VMFragment*)fragment->root; joinEdgesToEntry(tm->fragmento, getLoop(tm, root->ip, root->globalObj, root->globalShape, root->argc)); - /* Note: this must always be done, in case we added new globals on trace and haven't yet - propagated those to linked and dependent trees. */ + /* + * Note: this must always be done, in case we added new globals on trace + * and haven't yet propagated those to linked and dependent trees. + */ debug_only_print0(LC_TMTracer, "updating specializations on dependent and linked trees\n"); if (fragment->root->vmprivate) - specializeTreesToMissingGlobals(cx, globalObj, (TreeInfo*)fragment->root->vmprivate); + SpecializeTreesToMissingGlobals(cx, globalObj, (TreeInfo*)fragment->root->vmprivate); /* - * If this is a newly formed tree, and the outer tree has not been compiled yet, we - * should try to compile the outer tree again. + * If this is a newly formed tree, and the outer tree has not been compiled + * yet, we should try to compile the outer tree again. */ if (outer) - js_AttemptCompilation(cx, tm, globalObj, outer, outerArgc); + AttemptCompilation(cx, tm, globalObj, outer, outerArgc); #ifdef JS_JIT_SPEW debug_only_printf(LC_TMMinimal, "Recording completed at %s:%u@%u via endLoop\n", @@ -3891,18 +4020,29 @@ TraceRecorder::prepareTreeCall(Fragment* inner) { TreeInfo* ti = (TreeInfo*)inner->vmprivate; inner_sp_ins = lirbuf->sp; - /* The inner tree expects to be called from the current frame. If the outer tree (this - trace) is currently inside a function inlining code (calldepth > 0), we have to advance - the native stack pointer such that we match what the inner trace expects to see. We - move it back when we come out of the inner tree call. */ + + /* + * The inner tree expects to be called from the current frame. If the outer + * tree (this trace) is currently inside a function inlining code + * (calldepth > 0), we have to advance the native stack pointer such that + * we match what the inner trace expects to see. We move it back when we + * come out of the inner tree call. + */ if (callDepth > 0) { - /* Calculate the amount we have to lift the native stack pointer by to compensate for - any outer frames that the inner tree doesn't expect but the outer tree has. */ + /* + * Calculate the amount we have to lift the native stack pointer by to + * compensate for any outer frames that the inner tree doesn't expect + * but the outer tree has. + */ ptrdiff_t sp_adj = nativeStackOffset(&cx->fp->argv[-2]); - /* Calculate the amount we have to lift the call stack by */ + + /* Calculate the amount we have to lift the call stack by. */ ptrdiff_t rp_adj = callDepth * sizeof(FrameInfo*); - /* Guard that we have enough stack space for the tree we are trying to call on top - of the new value for sp. */ + + /* + * Guard that we have enough stack space for the tree we are trying to + * call on top of the new value for sp. 
+ */ debug_only_printf(LC_TMTracer, "sp_adj=%d outer=%d inner=%d\n", sp_adj, treeInfo->nativeStackBase, ti->nativeStackBase); @@ -3911,10 +4051,12 @@ TraceRecorder::prepareTreeCall(Fragment* inner) + sp_adj /* adjust for stack in outer frame inner tree can't see */ + ti->maxNativeStackSlots * sizeof(double)); /* plus the inner tree's stack */ guard(true, lir->ins2(LIR_lt, sp_top, eos_ins), OOM_EXIT); + /* Guard that we have enough call stack space. */ LIns* rp_top = lir->ins2i(LIR_piadd, lirbuf->rp, rp_adj + ti->maxCallDepth * sizeof(FrameInfo*)); guard(true, lir->ins2(LIR_lt, rp_top, eor_ins), OOM_EXIT); + /* We have enough space, so adjust sp and rp to their new level. */ lir->insStorei(inner_sp_ins = lir->ins2i(LIR_piadd, lirbuf->sp, - treeInfo->nativeStackBase /* rebase sp to beginning of outer tree's stack */ @@ -3940,19 +4082,20 @@ TraceRecorder::emitTreeCall(Fragment* inner, VMSideExit* exit) #ifdef DEBUG JSTraceType* map; size_t i; - map = getGlobalTypeMap(exit); + map = GetGlobalTypeMap(exit); for (i = 0; i < exit->numGlobalSlots; i++) JS_ASSERT(map[i] != TT_JSVAL); - map = getStackTypeMap(exit); + map = GetStackTypeMap(exit); for (i = 0; i < exit->numStackSlots; i++) JS_ASSERT(map[i] != TT_JSVAL); #endif - /* bug 502604 - It is illegal to extend from the outer typemap without first extending from the - * inner. Make a new typemap here. + /* + * Bug 502604 - It is illegal to extend from the outer typemap without + * first extending from the inner. Make a new typemap here. */ TypeMap fullMap; - fullMap.add(getStackTypeMap(exit), exit->numStackSlots); - fullMap.add(getGlobalTypeMap(exit), exit->numGlobalSlots); + fullMap.add(GetStackTypeMap(exit), exit->numStackSlots); + fullMap.add(GetGlobalTypeMap(exit), exit->numGlobalSlots); TreeInfo* innerTree = (TreeInfo*)exit->from->root->vmprivate; if (exit->numGlobalSlots < innerTree->nGlobalTypes()) { fullMap.add(innerTree->globalTypeMap() + exit->numGlobalSlots, @@ -3972,6 +4115,7 @@ TraceRecorder::emitTreeCall(Fragment* inner, VMSideExit* exit) * we called the inner tree at recording time. */ guard(true, lir->ins2(LIR_eq, ret, INS_CONSTPTR(exit)), NESTED_EXIT); + /* Register us as a dependent tree of the inner tree. */ ((TreeInfo*)inner->vmprivate)->dependentTrees.addUnique(fragment->root); treeInfo->linkedTrees.addUnique(inner); @@ -3994,18 +4138,21 @@ TraceRecorder::trackCfgMerges(jsbytecode* pc) } } -/* Invert the direction of the guard if this is a loop edge that is not - taken (thin loop). */ +/* + * Invert the direction of the guard if this is a loop edge that is not + * taken (thin loop). + */ JS_REQUIRES_STACK void TraceRecorder::emitIf(jsbytecode* pc, bool cond, LIns* x) { ExitType exitType; - if (js_IsLoopEdge(pc, (jsbytecode*)fragment->root->ip)) { + if (IsLoopEdge(pc, (jsbytecode*)fragment->root->ip)) { exitType = LOOP_EXIT; /* - * If we are about to walk out of the loop, generate code for the inverse loop - * condition, pretending we recorded the case that stays on trace. + * If we are about to walk out of the loop, generate code for the + * inverse loop condition, pretending we recorded the case that stays + * on trace. */ if ((*pc == JSOP_IFEQ || *pc == JSOP_IFEQX) == cond) { JS_ASSERT(*pc == JSOP_IFNE || *pc == JSOP_IFNEX || *pc == JSOP_IFEQ || *pc == JSOP_IFEQX); @@ -4015,9 +4162,9 @@ TraceRecorder::emitIf(jsbytecode* pc, bool cond, LIns* x) } /* - * Conditional guards do not have to be emitted if the condition is constant. 
We - * make a note whether the loop condition is true or false here, so we later know - * whether to emit a loop edge or a loop end. + * Conditional guards do not have to be emitted if the condition is + * constant. We make a note whether the loop condition is true or false + * here, so we later know whether to emit a loop edge or a loop end. */ if (x->isconst()) { loop = (x->imm32() == cond); @@ -4045,12 +4192,12 @@ TraceRecorder::fuseIf(jsbytecode* pc, bool cond, LIns* x) JS_REQUIRES_STACK JSRecordingStatus TraceRecorder::checkTraceEnd(jsbytecode *pc) { - if (js_IsLoopEdge(pc, (jsbytecode*)fragment->root->ip)) { + if (IsLoopEdge(pc, (jsbytecode*)fragment->root->ip)) { /* - * If we compile a loop, the trace should have a zero stack balance at the loop - * edge. Currently we are parked on a comparison op or IFNE/IFEQ, so advance - * pc to the loop header and adjust the stack pointer and pretend we have - * reached the loop header. + * If we compile a loop, the trace should have a zero stack balance at + * the loop edge. Currently we are parked on a comparison op or + * IFNE/IFEQ, so advance pc to the loop header and adjust the stack + * pointer and pretend we have reached the loop header. */ if (loop) { JS_ASSERT(!cx->fp->imacpc && (pc == cx->fp->regs->pc || pc == cx->fp->regs->pc + 1)); @@ -4066,14 +4213,14 @@ TraceRecorder::checkTraceEnd(jsbytecode *pc) *cx->fp->regs = orig; /* - * If compiling this loop generated new oracle information which will likely - * lead to a different compilation result, immediately trigger another - * compiler run. This is guaranteed to converge since the oracle only - * accumulates adverse information but never drops it (except when we - * flush it during garbage collection.) + * If compiling this loop generated new oracle information which + * will likely lead to a different compilation result, immediately + * trigger another compiler run. This is guaranteed to converge + * since the oracle only accumulates adverse information but never + * drops it (except when we flush it during garbage collection.) */ if (demote) - js_AttemptCompilation(cx, traceMonitor, globalObj, outer, outerArgc); + AttemptCompilation(cx, traceMonitor, globalObj, outer, outerArgc); } else { endLoop(traceMonitor); } @@ -4125,7 +4272,7 @@ TraceRecorder::hasIteratorMethod(JSObject* obj) } int -nanojit::StackFilter::getTop(LInsp guard) +nanojit::StackFilter::getTop(LIns* guard) { VMSideExit* e = (VMSideExit*)guard->record()->exit; if (sp == lirbuf->sp) @@ -4160,7 +4307,7 @@ nanojit::Fragment::onDestroy() } static JS_REQUIRES_STACK bool -js_DeleteRecorder(JSContext* cx) +DeleteRecorder(JSContext* cx) { JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx); @@ -4168,9 +4315,7 @@ js_DeleteRecorder(JSContext* cx) delete tm->recorder; tm->recorder = NULL; - /* - * If we ran out of memory, flush the code cache. - */ + /* If we ran out of memory, flush the code cache. */ if (JS_TRACE_MONITOR(cx).fragmento->assm()->error() == OutOMem || js_OverfullFragmento(tm, tm->fragmento)) { FlushJITCache(cx); @@ -4180,12 +4325,10 @@ js_DeleteRecorder(JSContext* cx) return true; } -/** - * Checks whether the shape of the global object has changed. - */ +/* Check whether the shape of the global object has changed. 
*/ static JS_REQUIRES_STACK bool CheckGlobalObjectShape(JSContext* cx, JSTraceMonitor* tm, JSObject* globalObj, - uint32 *shape=NULL, SlotList** slots=NULL) + uint32 *shape = NULL, SlotList** slots = NULL) { if (tm->needFlush) { FlushJITCache(cx); @@ -4200,6 +4343,7 @@ CheckGlobalObjectShape(JSContext* cx, JSTraceMonitor* tm, JSObject* globalObj, if (tm->recorder) { VMFragment* root = (VMFragment*)tm->recorder->getFragment()->root; TreeInfo* ti = tm->recorder->getTreeInfo(); + /* Check the global shape matches the recorder's treeinfo's shape. */ if (globalObj != root->globalObj || globalShape != root->globalShape) { AUDIT(globalShapeMismatchAtEntry); @@ -4207,7 +4351,7 @@ CheckGlobalObjectShape(JSContext* cx, JSTraceMonitor* tm, JSObject* globalObj, "Global object/shape mismatch (%p/%u vs. %p/%u), flushing cache.\n", (void*)globalObj, globalShape, (void*)root->globalObj, root->globalShape); - js_Backoff(cx, (jsbytecode*) root->ip); + Backoff(cx, (jsbytecode*) root->ip); FlushJITCache(cx); return false; } @@ -4248,9 +4392,9 @@ CheckGlobalObjectShape(JSContext* cx, JSTraceMonitor* tm, JSObject* globalObj, } static JS_REQUIRES_STACK bool -js_StartRecorder(JSContext* cx, VMSideExit* anchor, Fragment* f, TreeInfo* ti, - unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap, - VMSideExit* expectedInnerExit, jsbytecode* outer, uint32 outerArgc) +StartRecorder(JSContext* cx, VMSideExit* anchor, Fragment* f, TreeInfo* ti, + unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap, + VMSideExit* expectedInnerExit, jsbytecode* outer, uint32 outerArgc) { JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx); if (JS_TRACE_MONITOR(cx).needFlush) { @@ -4260,7 +4404,7 @@ js_StartRecorder(JSContext* cx, VMSideExit* anchor, Fragment* f, TreeInfo* ti, JS_ASSERT(f->root != f || !cx->fp->imacpc); - /* start recording if no exception during construction */ + /* Start recording if no exception during construction. */ tm->recorder = new (&gc) TraceRecorder(cx, anchor, f, ti, stackSlots, ngslots, typeMap, expectedInnerExit, outer, outerArgc); @@ -4269,13 +4413,14 @@ js_StartRecorder(JSContext* cx, VMSideExit* anchor, Fragment* f, TreeInfo* ti, js_AbortRecording(cx, "setting up recorder failed"); return false; } - /* clear any leftover error state */ + + /* Clear any leftover error state. */ tm->fragmento->assm()->setError(None); return true; } static void -js_TrashTree(JSContext* cx, Fragment* f) +TrashTree(JSContext* cx, Fragment* f) { JS_ASSERT((!f->code()) == (!f->vmprivate)); JS_ASSERT(f == f->root); @@ -4290,17 +4435,17 @@ js_TrashTree(JSContext* cx, Fragment* f) Fragment** data = ti->dependentTrees.data(); unsigned length = ti->dependentTrees.length(); for (unsigned n = 0; n < length; ++n) - js_TrashTree(cx, data[n]); + TrashTree(cx, data[n]); data = ti->linkedTrees.data(); length = ti->linkedTrees.length(); for (unsigned n = 0; n < length; ++n) - js_TrashTree(cx, data[n]); + TrashTree(cx, data[n]); delete ti; JS_ASSERT(!f->code() && !f->vmprivate); } static int -js_SynthesizeFrame(JSContext* cx, const FrameInfo& fi) +SynthesizeFrame(JSContext* cx, const FrameInfo& fi) { VOUCH_DOES_NOT_REQUIRE_STACK(); @@ -4312,8 +4457,8 @@ js_SynthesizeFrame(JSContext* cx, const FrameInfo& fi) /* Assert that we have a correct sp distance from cx->fp->slots in fi. 
*/ JSStackFrame* fp = cx->fp; JS_ASSERT_IF(!fi.imacpc, - js_ReconstructStackDepth(cx, fp->script, fi.pc) - == uintN(fi.spdist - fp->script->nfixed)); + js_ReconstructStackDepth(cx, fp->script, fi.pc) == + uintN(fi.spdist - fp->script->nfixed)); uintN nframeslots = JS_HOWMANY(sizeof(JSInlineFrame), sizeof(jsval)); JSScript* script = fun->u.i.script; @@ -4352,7 +4497,10 @@ js_SynthesizeFrame(JSContext* cx, const FrameInfo& fi) a->avail += nbytes; JS_ASSERT(missing == 0); } else { - /* This allocation is infallible: js_ExecuteTree reserved enough stack. */ + /* + * This allocation is infallible: ExecuteTree reserved enough stack. + * (But see bug 491023.) + */ JS_ARENA_ALLOCATE_CAST(newsp, jsval *, &cx->stackPool, nbytes); JS_ASSERT(newsp); @@ -4449,10 +4597,14 @@ js_SynthesizeFrame(JSContext* cx, const FrameInfo& fi) newifp->hookData = NULL; } - /* Duplicate native stack layout computation: see VisitFrameSlots header comment. */ - // FIXME? we must count stack slots from caller's operand stack up to (but not including) - // callee's, including missing arguments. Could we shift everything down to the caller's - // fp->slots (where vars start) and avoid some of the complexity? + /* + * Duplicate native stack layout computation: see VisitFrameSlots header comment. + * + * FIXME - We must count stack slots from caller's operand stack up to (but + * not including) callee's, including missing arguments. Could we shift + * everything down to the caller's fp->slots (where vars start) and avoid + * some of the complexity? + */ return (fi.spdist - fp->down->script->nfixed) + ((fun->nargs > fp->argc) ? fun->nargs - fp->argc : 0) + script->nfixed + 1/*argsobj*/; @@ -4466,7 +4618,7 @@ SynthesizeSlowNativeFrame(JSContext *cx, VMSideExit *exit) void *mark; JSInlineFrame *ifp; - /* This allocation is infallible: js_ExecuteTree reserved enough stack. */ + /* This allocation is infallible: ExecuteTree reserved enough stack. */ mark = JS_ARENA_MARK(&cx->stackPool); JS_ARENA_ALLOCATE_CAST(ifp, JSInlineFrame *, &cx->stackPool, sizeof(JSInlineFrame)); JS_ASSERT(ifp); @@ -4502,16 +4654,16 @@ SynthesizeSlowNativeFrame(JSContext *cx, VMSideExit *exit) cx->fp = fp; } -JS_REQUIRES_STACK bool -js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, jsbytecode* outer, - uint32 outerArgc, JSObject* globalObj, uint32 globalShape, - SlotList* globalSlots, uint32 argc) +static JS_REQUIRES_STACK bool +RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, jsbytecode* outer, + uint32 outerArgc, JSObject* globalObj, uint32 globalShape, + SlotList* globalSlots, uint32 argc) { JS_ASSERT(f->root == f); /* Make sure the global type map didn't change on us. */ if (!CheckGlobalObjectShape(cx, tm, globalObj)) { - js_Backoff(cx, (jsbytecode*) f->root->ip); + Backoff(cx, (jsbytecode*) f->root->ip); return false; } @@ -4532,7 +4684,7 @@ js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, jsbytecode* outer, f->lirbuf = tm->lirbuf; if (f->lirbuf->outOMem() || js_OverfullFragmento(tm, tm->fragmento)) { - js_Backoff(cx, (jsbytecode*) f->root->ip); + Backoff(cx, (jsbytecode*) f->root->ip); FlushJITCache(cx); debug_only_print0(LC_TMTracer, "Out of memory recording new tree, flushing cache.\n"); @@ -4541,42 +4693,42 @@ js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, jsbytecode* outer, JS_ASSERT(!f->code() && !f->vmprivate); - /* setup the VM-private treeInfo structure for this fragment */ + /* Set up the VM-private treeInfo structure for this fragment. 
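A minimal worked example, not part of the patch, of the slot count computed at the end of the frame-synthesis hunk above: the caller's operand-stack depth, any missing (undefined-filled) arguments, the callee's fixed slots, and one slot for the arguments object. The variable names mirror the expression in the patch, but the struct-free setup and the numbers are hypothetical.

#include <cstdio>

int main()
{
    unsigned spdist       = 9;  // fi.spdist: operand stack depth at the call
    unsigned callerNfixed = 4;  // fp->down->script->nfixed
    unsigned nargs        = 3;  // callee's formal parameter count
    unsigned argc         = 1;  // actual arguments pushed by the caller
    unsigned calleeNfixed = 2;  // script->nfixed

    unsigned missing = nargs > argc ? nargs - argc : 0;
    unsigned slots = (spdist - callerNfixed) + missing + calleeNfixed + 1 /*argsobj*/;
    std::printf("native stack slots for this frame: %u\n", slots);  // 5+2+2+1 = 10
    return 0;
}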
*/ TreeInfo* ti = new (&gc) TreeInfo(f, globalSlots); - /* capture the coerced type of each active slot in the type map */ - ti->typeMap.captureTypes(cx, globalObj, *globalSlots, 0/*callDepth*/); + /* Capture the coerced type of each active slot in the type map. */ + ti->typeMap.captureTypes(cx, globalObj, *globalSlots, 0 /* callDepth */); ti->nStackTypes = ti->typeMap.length() - globalSlots->length(); #ifdef DEBUG - ensureTreeIsUnique(tm, (VMFragment*)f, ti); + AssertTreeIsUnique(tm, (VMFragment*)f, ti); ti->treeFileName = cx->fp->script->filename; ti->treeLineNumber = js_FramePCToLineNumber(cx, cx->fp); ti->treePCOffset = FramePCOffset(cx->fp); #endif - /* determine the native frame layout at the entry point */ + /* Determine the native frame layout at the entry point. */ unsigned entryNativeStackSlots = ti->nStackTypes; - JS_ASSERT(entryNativeStackSlots == js_NativeStackSlots(cx, 0/*callDepth*/)); + JS_ASSERT(entryNativeStackSlots == NativeStackSlots(cx, 0 /* callDepth */)); ti->nativeStackBase = (entryNativeStackSlots - (cx->fp->regs->sp - StackBase(cx->fp))) * sizeof(double); ti->maxNativeStackSlots = entryNativeStackSlots; ti->maxCallDepth = 0; ti->script = cx->fp->script; - /* recording primary trace */ - if (!js_StartRecorder(cx, NULL, f, ti, - ti->nStackTypes, - ti->globalSlots->length(), - ti->typeMap.data(), NULL, outer, outerArgc)) { + /* Recording primary trace. */ + if (!StartRecorder(cx, NULL, f, ti, + ti->nStackTypes, + ti->globalSlots->length(), + ti->typeMap.data(), NULL, outer, outerArgc)) { return false; } return true; } -JS_REQUIRES_STACK static inline void -markSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot) +static JS_REQUIRES_STACK inline void +MarkSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot) { if (slot < ti->nStackTypes) { oracle.markStackSlotUndemotable(cx, slot); @@ -4587,8 +4739,8 @@ markSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot) oracle.markGlobalSlotUndemotable(cx, gslots[slot - ti->nStackTypes]); } -JS_REQUIRES_STACK static inline bool -isSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot) +static JS_REQUIRES_STACK inline bool +IsSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot) { if (slot < ti->nStackTypes) return oracle.isStackSlotUndemotable(cx, slot); @@ -4597,9 +4749,9 @@ isSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot) return oracle.isGlobalSlotUndemotable(cx, gslots[slot - ti->nStackTypes]); } -JS_REQUIRES_STACK static bool -js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, - jsbytecode* outer, uint32 outerArgc) +static JS_REQUIRES_STACK bool +AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, jsbytecode* outer, + uint32 outerArgc) { JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx); if (tm->needFlush) { @@ -4613,16 +4765,16 @@ js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, JS_ASSERT(exit->from->root->code()); /* - * The loop edge exit might not know about all types since the tree could have - * been further specialized since it was recorded. Fill in the missing types - * from the entry type map. + * The loop edge exit might not know about all types since the tree could + * have been further specialized since it was recorded. Fill in the missing + * types from the entry type map. 
*/ - JSTraceType* m = getFullTypeMap(exit); + JSTraceType* m = GetFullTypeMap(exit); unsigned ngslots = exit->numGlobalSlots; if (ngslots < from_ti->nGlobalTypes()) { uint32 partial = exit->numStackSlots + exit->numGlobalSlots; m = (JSTraceType*)alloca(from_ti->typeMap.length() * sizeof(JSTraceType)); - memcpy(m, getFullTypeMap(exit), partial); + memcpy(m, GetFullTypeMap(exit), partial); memcpy(m + partial, from_ti->globalTypeMap() + exit->numGlobalSlots, from_ti->nGlobalTypes() - exit->numGlobalSlots); ngslots = from_ti->nGlobalTypes(); @@ -4635,7 +4787,7 @@ js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, */ for (unsigned i = 0; i < from_ti->typeMap.length(); i++) { if (m[i] == TT_DOUBLE) - markSlotUndemotable(cx, from_ti, i); + MarkSlotUndemotable(cx, from_ti, i); } bool bound = false; @@ -4644,9 +4796,11 @@ js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, continue; TreeInfo* ti = (TreeInfo*)f->vmprivate; JS_ASSERT(exit->numStackSlots == ti->nStackTypes); + /* Check the minimum number of slots that need to be compared. */ unsigned checkSlots = JS_MIN(from_ti->typeMap.length(), ti->typeMap.length()); JSTraceType* m2 = ti->typeMap.data(); + /* Analyze the exit typemap against the peer typemap. * Two conditions are important: * 1) Typemaps are identical: these peers can be attached. @@ -4661,10 +4815,12 @@ js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, if (m[i] == m2[i]) continue; matched = false; - /* If there's an I->D that cannot be resolved, flag it. + + /* + * If there's an I->D that cannot be resolved, flag it. * Otherwise, break and go to the next peer. */ - if (m[i] == TT_INT32 && m2[i] == TT_DOUBLE && isSlotUndemotable(cx, ti, i)) { + if (m[i] == TT_INT32 && m2[i] == TT_DOUBLE && IsSlotUndemotable(cx, ti, i)) { undemote = true; } else { undemote = false; @@ -4674,15 +4830,17 @@ js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, if (matched) { JS_ASSERT(from_ti->globalSlots == ti->globalSlots); JS_ASSERT(from_ti->nStackTypes == ti->nStackTypes); + /* Capture missing globals on both trees and link the fragments together. */ if (from != f) { ti->dependentTrees.addUnique(from); from_ti->linkedTrees.addUnique(f); } if (ti->nGlobalTypes() < ti->globalSlots->length()) - specializeTreesToMissingGlobals(cx, globalObj, ti); + SpecializeTreesToMissingGlobals(cx, globalObj, ti); exit->target = f; tm->fragmento->assm()->patch(exit); + /* Now erase this exit from the unstable exit list. */ UnstableExit** tail = &from_ti->unstableExits; for (UnstableExit* uexit = from_ti->unstableExits; uexit != NULL; uexit = uexit->next) { @@ -4695,11 +4853,12 @@ js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, tail = &uexit->next; } JS_ASSERT(bound); - debug_only_stmt( js_DumpPeerStability(tm, f->ip, from->globalObj, from->globalShape, from->argc); ) + debug_only_stmt( DumpPeerStability(tm, f->ip, from->globalObj, from->globalShape, from->argc); ) break; } else if (undemote) { /* The original tree is unconnectable, so trash it. */ - js_TrashTree(cx, f); + TrashTree(cx, f); + /* We shouldn't attempt to record now, since we'll hit a duplicate. 
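A minimal sketch, not part of the patch, of the merge performed at the top of this hunk: the exit's partial type map is extended with the globals it does not know about, taken from the tree's entry map. std::vector stands in for the real TypeMap, and the contents are made up.

#include <cstdio>
#include <vector>

using TraceType = unsigned char;

int main()
{
    // Global portion of the tree's entry map (stack types come first there).
    std::vector<TraceType> treeGlobals = {2, 1, 1, 2};
    size_t stackSlots = 3;

    // The exit captured all stack slots but only the first two globals.
    std::vector<TraceType> exitMap = {1, 1, 2, /*globals*/ 2, 1};
    size_t exitGlobals = exitMap.size() - stackSlots;

    // Merge: keep the exit's partial map, append the missing globals.
    std::vector<TraceType> merged(exitMap);
    merged.insert(merged.end(),
                  treeGlobals.begin() + exitGlobals, treeGlobals.end());

    std::printf("merged %zu stack + %zu global types\n",
                stackSlots, merged.size() - stackSlots);
    return 0;
}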
*/ return false; } @@ -4708,12 +4867,12 @@ js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, return false; VMFragment* root = (VMFragment*)from->root; - return js_RecordTree(cx, tm, from->first, outer, outerArgc, root->globalObj, - root->globalShape, from_ti->globalSlots, cx->fp->argc); + return RecordTree(cx, tm, from->first, outer, outerArgc, root->globalObj, + root->globalShape, from_ti->globalSlots, cx->fp->argc); } static JS_REQUIRES_STACK bool -js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom, jsbytecode* outer +AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom, jsbytecode* outer #ifdef MOZ_TRACEVIS , TraceVisStateObj* tvso = NULL #endif @@ -4732,7 +4891,10 @@ js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom JS_ASSERT(f->vmprivate); TreeInfo* ti = (TreeInfo*)f->vmprivate; - /* Don't grow trees above a certain size to avoid code explosion due to tail duplication. */ + /* + * Don't grow trees above a certain size to avoid code explosion due to + * tail duplication. + */ if (ti->branchCount >= MAX_BRANCHES) { #ifdef MOZ_TRACEVIS if (tvso) tvso->r = R_FAIL_EXTEND_MAX_BRANCHES; @@ -4750,9 +4912,10 @@ js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom } /* - * If we are recycling a fragment, it might have a different ip so reset it here. This - * can happen when attaching a branch to a NESTED_EXIT, which might extend along separate paths - * (i.e. after the loop edge, and after a return statement). + * If we are recycling a fragment, it might have a different ip so reset it + * here. This can happen when attaching a branch to a NESTED_EXIT, which + * might extend along separate paths (i.e. after the loop edge, and after a + * return statement). */ c->ip = cx->fp->regs->pc; @@ -4768,33 +4931,38 @@ js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom JSTraceType* typeMap; TypeMap fullMap; if (exitedFrom == NULL) { - /* If we are coming straight from a simple side exit, just use that exit's type map - as starting point. */ + /* + * If we are coming straight from a simple side exit, just use that + * exit's type map as starting point. + */ ngslots = anchor->numGlobalSlots; stackSlots = anchor->numStackSlots; - typeMap = getFullTypeMap(anchor); + typeMap = GetFullTypeMap(anchor); } else { - /* If we side-exited on a loop exit and continue on a nesting guard, the nesting - guard (anchor) has the type information for everything below the current scope, - and the actual guard we exited from has the types for everything in the current - scope (and whatever it inlined). We have to merge those maps here. */ + /* + * If we side-exited on a loop exit and continue on a nesting + * guard, the nesting guard (anchor) has the type information for + * everything below the current scope, and the actual guard we + * exited from has the types for everything in the current scope + * (and whatever it inlined). We have to merge those maps here. 
+ */ VMSideExit* e1 = anchor; VMSideExit* e2 = exitedFrom; - fullMap.add(getStackTypeMap(e1), e1->numStackSlotsBelowCurrentFrame); - fullMap.add(getStackTypeMap(e2), e2->numStackSlots); + fullMap.add(GetStackTypeMap(e1), e1->numStackSlotsBelowCurrentFrame); + fullMap.add(GetStackTypeMap(e2), e2->numStackSlots); stackSlots = fullMap.length(); - fullMap.add(getGlobalTypeMap(e2), e2->numGlobalSlots); + fullMap.add(GetGlobalTypeMap(e2), e2->numGlobalSlots); if (e2->numGlobalSlots < e1->numGlobalSlots) { /* * Watch out for an extremely rare case (bug 502714). The sequence of events is: - * + * * 1) Inner tree compiles not knowing about global X (which has type A). * 2) Inner tree learns about global X and specializes it to a different type * (type B). * 3) Outer tree records inner tree with global X as type A, exiting as B. * 4) Outer tree now has a nesting guard with typeof(X)=B. * 5) Inner tree takes its original exit that does not know about X. - * + * * In this case, the nesting guard fails, and now it is illegal to use the nested * typemap entry for X. The correct entry is in the inner guard's TreeInfo, * analogous to the solution for bug 476653. @@ -4808,15 +4976,15 @@ js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom slots += addSlots; } if (slots < e1->numGlobalSlots) - fullMap.add(getGlobalTypeMap(e1) + slots, e1->numGlobalSlots - slots); + fullMap.add(GetGlobalTypeMap(e1) + slots, e1->numGlobalSlots - slots); JS_ASSERT(slots == e1->numGlobalSlots); } ngslots = e1->numGlobalSlots; typeMap = fullMap.data(); } JS_ASSERT(ngslots >= anchor->numGlobalSlots); - bool rv = js_StartRecorder(cx, anchor, c, (TreeInfo*)f->vmprivate, stackSlots, - ngslots, typeMap, exitedFrom, outer, cx->fp->argc); + bool rv = StartRecorder(cx, anchor, c, (TreeInfo*)f->vmprivate, stackSlots, + ngslots, typeMap, exitedFrom, outer, cx->fp->argc); #ifdef MOZ_TRACEVIS if (!rv && tvso) tvso->r = R_FAIL_EXTEND_START; @@ -4830,11 +4998,11 @@ js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom } static JS_REQUIRES_STACK VMSideExit* -js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, - VMSideExit** innermostNestedGuardp); +ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, + VMSideExit** innermostNestedGuardp); -JS_REQUIRES_STACK bool -js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount) +static JS_REQUIRES_STACK bool +RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount) { #ifdef JS_THREADSAFE if (OBJ_SCOPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain))->title.ownercx != cx) { @@ -4875,13 +5043,15 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount) /* Make sure inner tree call will not run into an out-of-memory condition. */ if (tm->reservedDoublePoolPtr < (tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS) && - !js_ReplenishReservedPool(cx, tm)) { + !ReplenishReservedPool(cx, tm)) { js_AbortRecording(cx, "Couldn't call inner tree (out of memory)"); return false; } - /* Make sure the shape of the global object still matches (this might flush - the JIT cache). */ + /* + * Make sure the shape of the global object still matches (this might flush + * the JIT cache). 
+ */ JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain); uint32 globalShape = -1; SlotList* globalSlots = NULL; @@ -4917,13 +5087,13 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount) return false; } } - return js_RecordTree(cx, tm, f, outer, outerArgc, globalObj, globalShape, globalSlots, argc); + return RecordTree(cx, tm, f, outer, outerArgc, globalObj, globalShape, globalSlots, argc); } r->adjustCallerTypes(f); r->prepareTreeCall(f); VMSideExit* innermostNestedGuard = NULL; - VMSideExit* lr = js_ExecuteTree(cx, f, inlineCallCount, &innermostNestedGuard); + VMSideExit* lr = ExecuteTree(cx, f, inlineCallCount, &innermostNestedGuard); if (!lr || r->wasDeepAborted()) { if (!lr) js_AbortRecording(cx, "Couldn't call inner tree"); @@ -4938,23 +5108,27 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount) if (innermostNestedGuard) { js_AbortRecording(cx, "Inner tree took different side exit, abort current " "recording and grow nesting tree"); - return js_AttemptToExtendTree(cx, innermostNestedGuard, lr, outer); + return AttemptToExtendTree(cx, innermostNestedGuard, lr, outer); } - /* emit a call to the inner tree and continue recording the outer tree trace */ + + /* Emit a call to the inner tree and continue recording the outer tree trace. */ r->emitTreeCall(f, lr); return true; + case UNSTABLE_LOOP_EXIT: - /* abort recording so the inner loop can become type stable. */ + /* Abort recording so the inner loop can become type stable. */ js_AbortRecording(cx, "Inner tree is trying to stabilize, abort outer recording"); - return js_AttemptToStabilizeTree(cx, globalObj, lr, outer, outerFragment->argc); + return AttemptToStabilizeTree(cx, globalObj, lr, outer, outerFragment->argc); + case OVERFLOW_EXIT: oracle.markInstructionUndemotable(cx->fp->regs->pc); - /* fall through */ + /* FALL THROUGH */ case BRANCH_EXIT: case CASE_EXIT: - /* abort recording the outer tree, extend the inner tree */ + /* Abort recording the outer tree, extend the inner tree. 
*/ js_AbortRecording(cx, "Inner tree is trying to grow, abort outer recording"); - return js_AttemptToExtendTree(cx, lr, NULL, outer); + return AttemptToExtendTree(cx, lr, NULL, outer); + default: debug_only_printf(LC_TMTracer, "exit_type=%s\n", getExitName(lr->exitType)); js_AbortRecording(cx, "Inner tree not suitable for calling"); @@ -4963,7 +5137,7 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount) } static bool -js_IsEntryTypeCompatible(jsval* vp, JSTraceType* m) +IsEntryTypeCompatible(jsval* vp, JSTraceType* m) { unsigned tag = JSVAL_TAG(*vp); @@ -5039,7 +5213,7 @@ public: JS_REQUIRES_STACK JS_ALWAYS_INLINE void visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) { debug_only_printf(LC_TMTracer, "global%d=", n); - if (!js_IsEntryTypeCompatible(vp, mTypeMap)) { + if (!IsEntryTypeCompatible(vp, mTypeMap)) { mOk = false; } else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) { oracle.markGlobalSlotUndemotable(mCx, slot); @@ -5054,7 +5228,7 @@ public: visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) { for (size_t i = 0; i < count; ++i) { debug_only_printf(LC_TMTracer, "%s%u=", stackSlotKind(), unsigned(i)); - if (!js_IsEntryTypeCompatible(vp, mTypeMap)) { + if (!IsEntryTypeCompatible(vp, mTypeMap)) { mOk = false; } else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) { oracle.markStackSlotUndemotable(mCx, mStackSlotNum); @@ -5092,15 +5266,16 @@ TraceRecorder::findNestedCompatiblePeer(Fragment* f) debug_only_printf(LC_TMTracer, "checking nested types %p: ", (void*)f); if (ngslots > ti->nGlobalTypes()) - specializeTreesToMissingGlobals(cx, globalObj, ti); + SpecializeTreesToMissingGlobals(cx, globalObj, ti); /* - * Determine whether the typemap of the inner tree matches the outer tree's - * current state. If the inner tree expects an integer, but the outer tree - * doesn't guarantee an integer for that slot, we mark the slot undemotable - * and mismatch here. This will force a new tree to be compiled that accepts - * a double for the slot. If the inner tree expects a double, but the outer - * tree has an integer, we can proceed, but we mark the location undemotable. + * Determine whether the typemap of the inner tree matches the outer + * tree's current state. If the inner tree expects an integer, but the + * outer tree doesn't guarantee an integer for that slot, we mark the + * slot undemotable and mismatch here. This will force a new tree to be + * compiled that accepts a double for the slot. If the inner tree + * expects a double, but the outer tree has an integer, we can proceed, + * but we mark the location undemotable. */ TypeCompatibilityVisitor visitor(*this, ti->typeMap.data()); VisitSlots(visitor, cx, 0, *treeInfo->globalSlots); @@ -5126,7 +5301,7 @@ public: JS_ALWAYS_INLINE void checkSlot(jsval *vp, char const *name, int i) { debug_only_printf(LC_TMTracer, "%s%d=", name, i); JS_ASSERT(*(uint8_t*)mTypeMap != 0xCD); - mOk = js_IsEntryTypeCompatible(vp, mTypeMap++); + mOk = IsEntryTypeCompatible(vp, mTypeMap++); } JS_REQUIRES_STACK JS_ALWAYS_INLINE void @@ -5158,16 +5333,16 @@ public: * @return True if compatible (with or without demotions), false otherwise. 
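A minimal decision-table sketch, not part of the patch, of the per-slot policy the TypeCompatibilityVisitor comment above describes. The enum and helper below are invented; the real code emits oracle calls and flags a mismatch rather than returning values.

#include <cstdio>

enum SlotType { TypeInt32, TypeDouble };

struct Decision { bool compatible; bool markUndemotable; };

// innerExpects: the type recorded in the inner tree's map.
// outerIsPromotableInt: the outer tree can guarantee an integer here.
static Decision CheckSlot(SlotType innerExpects, bool outerIsPromotableInt)
{
    if (innerExpects == TypeInt32 && !outerIsPromotableInt)
        return {false, true};   // mismatch; force a double-accepting recompile
    if (innerExpects == TypeDouble && outerIsPromotableInt)
        return {true, true};    // proceed, but remember the demotion failed
    return {true, false};       // types already agree
}

int main()
{
    Decision d = CheckSlot(TypeInt32, /*outerIsPromotableInt=*/false);
    std::printf("compatible=%d markUndemotable=%d\n",
                d.compatible, d.markUndemotable);
    return 0;
}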
*/ static JS_REQUIRES_STACK bool -js_CheckEntryTypes(JSContext* cx, JSObject* globalObj, TreeInfo* ti) +CheckEntryTypes(JSContext* cx, JSObject* globalObj, TreeInfo* ti) { unsigned int ngslots = ti->globalSlots->length(); - JS_ASSERT(ti->nStackTypes == js_NativeStackSlots(cx, 0)); + JS_ASSERT(ti->nStackTypes == NativeStackSlots(cx, 0)); if (ngslots > ti->nGlobalTypes()) - specializeTreesToMissingGlobals(cx, globalObj, ti); + SpecializeTreesToMissingGlobals(cx, globalObj, ti); - JS_ASSERT(ti->typeMap.length() == js_NativeStackSlots(cx, 0) + ngslots); + JS_ASSERT(ti->typeMap.length() == NativeStackSlots(cx, 0) + ngslots); JS_ASSERT(ti->typeMap.length() == ti->nStackTypes + ngslots); JS_ASSERT(ti->nGlobalTypes() == ngslots); @@ -5188,7 +5363,7 @@ js_CheckEntryTypes(JSContext* cx, JSObject* globalObj, TreeInfo* ti) * @out count Number of fragments consulted. */ static JS_REQUIRES_STACK Fragment* -js_FindVMCompatiblePeer(JSContext* cx, JSObject* globalObj, Fragment* f, uintN& count) +FindVMCompatiblePeer(JSContext* cx, JSObject* globalObj, Fragment* f, uintN& count) { count = 0; for (; f != NULL; f = f->peer) { @@ -5196,7 +5371,7 @@ js_FindVMCompatiblePeer(JSContext* cx, JSObject* globalObj, Fragment* f, uintN& continue; debug_only_printf(LC_TMTracer, "checking vm types %p (ip: %p): ", (void*)f, f->ip); - if (js_CheckEntryTypes(cx, globalObj, (TreeInfo*)f->vmprivate)) + if (CheckEntryTypes(cx, globalObj, (TreeInfo*)f->vmprivate)) return f; ++count; } @@ -5206,12 +5381,9 @@ js_FindVMCompatiblePeer(JSContext* cx, JSObject* globalObj, Fragment* f, uintN& static void LeaveTree(InterpState&, VMSideExit* lr); -/** - * Executes a tree. - */ static JS_REQUIRES_STACK VMSideExit* -js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, - VMSideExit** innermostNestedGuardp) +ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, + VMSideExit** innermostNestedGuardp) { #ifdef MOZ_TRACEVIS TraceVisStateObj tvso(S_EXECUTE); @@ -5227,8 +5399,10 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, unsigned globalFrameSize = STOBJ_NSLOTS(globalObj); /* Make sure the global object is sane. */ - JS_ASSERT(!ngslots || (OBJ_SHAPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain)) == - ((VMFragment*)f)->globalShape)); + JS_ASSERT_IF(ngslots != 0, + OBJ_SHAPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain)) == + ((VMFragment*)f)->globalShape); + /* Make sure our caller replenished the double pool. */ JS_ASSERT(tm->reservedDoublePoolPtr >= tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS); @@ -5240,7 +5414,7 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, uintN savedProhibitFlush = JS_TRACE_MONITOR(cx).prohibitFlush; #endif - /* Setup the interpreter state block, which is followed by the native global frame. */ + /* Set up the interpreter state block, which is followed by the native global frame. */ InterpState* state = (InterpState*)alloca(sizeof(InterpState) + (globalFrameSize+1)*sizeof(double)); state->cx = cx; state->inlineCallCountp = &inlineCallCount; @@ -5251,16 +5425,16 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, state->rpAtLastTreeCall = NULL; state->builtinStatus = 0; - /* Setup the native global frame. */ + /* Set up the native global frame. */ double* global = (double*)(state+1); - /* Setup the native stack frame. */ + /* Set up the native stack frame. 
*/ double stack_buffer[MAX_NATIVE_STACK_SLOTS]; state->stackBase = stack_buffer; state->sp = stack_buffer + (ti->nativeStackBase/sizeof(double)); state->eos = stack_buffer + MAX_NATIVE_STACK_SLOTS; - /* Setup the native call stack frame. */ + /* Set up the native call stack frame. */ FrameInfo* callstack_buffer[MAX_CALL_STACK_ENTRIES]; state->callstackBase = callstack_buffer; state->rp = callstack_buffer; @@ -5288,7 +5462,7 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, f->code()); JS_ASSERT(ti->nGlobalTypes() == ngslots); - BuildNativeFrame(cx, globalObj, 0/*callDepth*/, ngslots, gslots, + BuildNativeFrame(cx, globalObj, 0 /* callDepth */, ngslots, gslots, ti->typeMap.data(), global, stack_buffer); union { NIns *code; GuardRecord* (FASTCALL *func)(InterpState*, Fragment*); } u; @@ -5305,6 +5479,7 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, debug_only_stmt(fflush(NULL)); GuardRecord* rec; + // Note that the block scoping is crucial here for TraceVis; the // TraceVisStateObj constructors and destructors must run at the right times. { @@ -5339,34 +5514,46 @@ LeaveTree(InterpState& state, VMSideExit* lr) FrameInfo** callstack = state.callstackBase; double* stack = state.stackBase; - /* Except if we find that this is a nested bailout, the guard the call returned is the - one we have to use to adjust pc and sp. */ + /* + * Except if we find that this is a nested bailout, the guard the call + * returned is the one we have to use to adjust pc and sp. + */ VMSideExit* innermost = lr; - /* While executing a tree we do not update state.sp and state.rp even if they grow. Instead, - guards tell us by how much sp and rp should be incremented in case of a side exit. When - calling a nested tree, however, we actively adjust sp and rp. If we have such frames - from outer trees on the stack, then rp will have been adjusted. Before we can process - the stack of the frames of the tree we directly exited from, we have to first work our - way through the outer frames and generate interpreter frames for them. Once the call - stack (rp) is empty, we can process the final frames (which again are not directly - visible and only the guard we exited on will tells us about). */ + /* + * While executing a tree we do not update state.sp and state.rp even if + * they grow. Instead, guards tell us by how much sp and rp should be + * incremented in case of a side exit. When calling a nested tree, however, + * we actively adjust sp and rp. If we have such frames from outer trees on + * the stack, then rp will have been adjusted. Before we can process the + * stack of the frames of the tree we directly exited from, we have to + * first work our way through the outer frames and generate interpreter + * frames for them. Once the call stack (rp) is empty, we can process the + * final frames (which again are not directly visible and only the guard we + * exited on will tells us about). + */ FrameInfo** rp = (FrameInfo**)state.rp; if (lr->exitType == NESTED_EXIT) { VMSideExit* nested = state.lastTreeCallGuard; if (!nested) { - /* If lastTreeCallGuard is not set in state, we only have a single level of - nesting in this exit, so lr itself is the innermost and outermost nested - guard, and hence we set nested to lr. The calldepth of the innermost guard - is not added to state.rp, so we do it here manually. For a nesting depth - greater than 1 the CallTree builtin already added the innermost guard's - calldepth to state.rpAtLastTreeCall. 
*/ + /* + * If lastTreeCallGuard is not set in state, we only have a single + * level of nesting in this exit, so lr itself is the innermost and + * outermost nested guard, and hence we set nested to lr. The + * calldepth of the innermost guard is not added to state.rp, so we + * do it here manually. For a nesting depth greater than 1 the + * CallTree builtin already added the innermost guard's calldepth + * to state.rpAtLastTreeCall. + */ nested = lr; rp += lr->calldepth; } else { - /* During unwinding state.rp gets overwritten at every step and we restore - it here to its state at the innermost nested guard. The builtin already - added the calldepth of that innermost guard to rpAtLastTreeCall. */ + /* + * During unwinding state.rp gets overwritten at every step and we + * restore it here to its state at the innermost nested guard. The + * builtin already added the calldepth of that innermost guard to + * rpAtLastTreeCall. + */ rp = (FrameInfo**)state.rpAtLastTreeCall; } innermost = state.lastTreeExitGuard; @@ -5407,14 +5594,14 @@ LeaveTree(InterpState& state, VMSideExit* lr) * then immediately flunked the guard on state->builtinStatus. * * Now LeaveTree has been called again from the tail of - * js_ExecuteTree. We are about to return to the interpreter. Adjust + * ExecuteTree. We are about to return to the interpreter. Adjust * the top stack frame to resume on the next op. */ - JS_ASSERT(*cx->fp->regs->pc == JSOP_CALL || - *cx->fp->regs->pc == JSOP_APPLY || - *cx->fp->regs->pc == JSOP_NEW); - uintN argc = GET_ARGC(cx->fp->regs->pc); - cx->fp->regs->pc += JSOP_CALL_LENGTH; + jsbytecode *pc = cx->fp->regs->pc; + JS_ASSERT(*pc == JSOP_CALL || *pc == JSOP_APPLY || *pc == JSOP_NEW || + *pc == JSOP_SETPROP || *pc == JSOP_SETNAME); + uintN argc = (js_CodeSpec[*pc].format & JOF_INVOKE) ? GET_ARGC(pc) : 0; + cx->fp->regs->pc += js_CodeSpec[*pc].length; cx->fp->regs->sp -= argc + 1; JS_ASSERT_IF(!cx->fp->imacpc, cx->fp->slots + cx->fp->script->nfixed + @@ -5425,7 +5612,7 @@ LeaveTree(InterpState& state, VMSideExit* lr) * The return value was not available when we reconstructed the stack, * but we have it now. Box it. */ - JSTraceType* typeMap = getStackTypeMap(innermost); + JSTraceType* typeMap = GetStackTypeMap(innermost); /* * If there's a tree call around the point that we deep exited at, @@ -5447,9 +5634,11 @@ LeaveTree(InterpState& state, VMSideExit* lr) JS_ARENA_RELEASE(&cx->stackPool, state.stackMark); while (callstack < rp) { - /* Synthesize a stack frame and write out the values in it using the type map pointer - on the native call stack. */ - js_SynthesizeFrame(cx, **callstack); + /* + * Synthesize a stack frame and write out the values in it using the + * type map pointer on the native call stack. + */ + SynthesizeFrame(cx, **callstack); int slots = FlushNativeStackFrame(cx, 1 /* callDepth */, (JSTraceType*)(*callstack + 1), stack, cx->fp); #ifdef DEBUG @@ -5461,20 +5650,25 @@ LeaveTree(InterpState& state, VMSideExit* lr) FramePCOffset(fp), slots); #endif - /* Keep track of the additional frames we put on the interpreter stack and the native - stack slots we consumed. */ + /* + * Keep track of the additional frames we put on the interpreter stack + * and the native stack slots we consumed. + */ ++*state.inlineCallCountp; ++callstack; stack += slots; } - /* We already synthesized the frames around the innermost guard. Here we just deal - with additional frames inside the tree we are bailing out from. */ + /* + * We already synthesized the frames around the innermost guard. 
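A minimal sketch, not part of the patch, of the resume arithmetic in the deep-bail hunk above: the pc advances past the op we bailed from, and argc + 1 stack slots are popped only for invoke-style ops. The tiny opcode table is invented; only the arithmetic mirrors the patch.

#include <cstdio>

struct OpSpec { unsigned length; bool invoke; };

// Hypothetical specs: a call-like op with an argc immediate, and a setprop.
static const OpSpec kCall    = {3, true};
static const OpSpec kSetProp = {3, false};

static void ResumeAfter(const OpSpec& op, unsigned argcImmediate,
                        unsigned& pc, unsigned& sp)
{
    unsigned argc = op.invoke ? argcImmediate : 0;
    pc += op.length;     // resume on the next op
    sp -= argc + 1;      // pop the callee/object plus any arguments
}

int main()
{
    unsigned pc = 10, sp = 8;
    ResumeAfter(kCall, 2, pc, sp);
    std::printf("pc=%u sp=%u\n", pc, sp);     // pc=13 sp=5

    unsigned pc2 = 20, sp2 = 4;
    ResumeAfter(kSetProp, 0, pc2, sp2);       // non-invoke op: argc treated as 0
    std::printf("pc=%u sp=%u\n", pc2, sp2);   // pc=23 sp=3
    return 0;
}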
Here we + * just deal with additional frames inside the tree we are bailing out + * from. + */ JS_ASSERT(rp == callstack); unsigned calldepth = innermost->calldepth; unsigned calldepth_slots = 0; for (unsigned n = 0; n < calldepth; ++n) { - calldepth_slots += js_SynthesizeFrame(cx, *callstack[n]); + calldepth_slots += SynthesizeFrame(cx, *callstack[n]); ++*state.inlineCallCountp; #ifdef DEBUG JSStackFrame* fp = cx->fp; @@ -5485,16 +5679,21 @@ LeaveTree(InterpState& state, VMSideExit* lr) #endif } - /* Adjust sp and pc relative to the tree we exited from (not the tree we entered into). - These are our final values for sp and pc since js_SynthesizeFrame has already taken - care of all frames in between. But first we recover fp->blockChain, which comes from - the side exit struct. */ + /* + * Adjust sp and pc relative to the tree we exited from (not the tree we + * entered into). These are our final values for sp and pc since + * SynthesizeFrame has already taken care of all frames in between. But + * first we recover fp->blockChain, which comes from the side exit + * struct. + */ JSStackFrame* fp = cx->fp; fp->blockChain = innermost->block; - /* If we are not exiting from an inlined frame the state->sp is spbase, otherwise spbase - is whatever slots frames around us consume. */ + /* + * If we are not exiting from an inlined frame, the state->sp is spbase. + * Otherwise spbase is whatever slots frames around us consume. + */ fp->regs->pc = innermost->pc; fp->imacpc = innermost->imacpc; fp->regs->sp = StackBase(fp) + (innermost->sp_adj / sizeof(double)) - calldepth_slots; @@ -5521,47 +5720,54 @@ LeaveTree(InterpState& state, VMSideExit* lr) calldepth, cycles); - /* If this trace is part of a tree, later branches might have added additional globals for - which we don't have any type information available in the side exit. We merge in this - information from the entry type-map. See also comment in the constructor of TraceRecorder - why this is always safe to do. */ + /* + * If this trace is part of a tree, later branches might have added + * additional globals for which we don't have any type information + * available in the side exit. We merge in this information from the entry + * type-map. See also the comment in the constructor of TraceRecorder + * regarding why this is always safe to do. + */ TreeInfo* outermostTree = state.outermostTree; uint16* gslots = outermostTree->globalSlots->data(); unsigned ngslots = outermostTree->globalSlots->length(); JS_ASSERT(ngslots == outermostTree->nGlobalTypes()); JSTraceType* globalTypeMap; - /* Are there enough globals? This is the ideal fast path. */ + /* Are there enough globals? */ if (innermost->numGlobalSlots == ngslots) { - globalTypeMap = getGlobalTypeMap(innermost); - /* Otherwise, merge the typemap of the innermost entry and exit together. This should always - work because it is invalid for nested trees or linked trees to have incompatible types. - Thus, whenever a new global type is lazily added into a tree, all dependent and linked - trees are immediately specialized (see bug 476653). */ + /* Yes. This is the ideal fast path. */ + globalTypeMap = GetGlobalTypeMap(innermost); } else { + /* + * No. Merge the typemap of the innermost entry and exit together. This + * should always work because it is invalid for nested trees or linked + * trees to have incompatible types. Thus, whenever a new global type + * is lazily added into a tree, all dependent and linked trees are + * immediately specialized (see bug 476653). 
+ */ TreeInfo* ti = (TreeInfo*)innermost->from->root->vmprivate; JS_ASSERT(ti->nGlobalTypes() == ngslots); JS_ASSERT(ti->nGlobalTypes() > innermost->numGlobalSlots); globalTypeMap = (JSTraceType*)alloca(ngslots * sizeof(JSTraceType)); - memcpy(globalTypeMap, getGlobalTypeMap(innermost), innermost->numGlobalSlots); + memcpy(globalTypeMap, GetGlobalTypeMap(innermost), innermost->numGlobalSlots); memcpy(globalTypeMap + innermost->numGlobalSlots, ti->globalTypeMap() + innermost->numGlobalSlots, ti->nGlobalTypes() - innermost->numGlobalSlots); } - /* write back native stack frame */ + /* Write back the topmost native stack frame. */ #ifdef DEBUG int slots = #endif FlushNativeStackFrame(cx, innermost->calldepth, - getStackTypeMap(innermost), + GetStackTypeMap(innermost), stack, NULL); JS_ASSERT(unsigned(slots) == innermost->numStackSlots); if (innermost->nativeCalleeWord) SynthesizeSlowNativeFrame(cx, innermost); - /* write back interned globals */ + /* Write back interned globals. */ double* global = (double*)(&state + 1); FlushNativeGlobalFrame(cx, global, ngslots, gslots, globalTypeMap); @@ -5571,7 +5777,7 @@ LeaveTree(InterpState& state, VMSideExit* lr) cx->nativeVp = NULL; #ifdef DEBUG - // Verify that our state restoration worked. + /* Verify that our state restoration worked. */ for (JSStackFrame* fp = cx->fp; fp; fp = fp->down) { JS_ASSERT_IF(fp->callee, JSVAL_IS_OBJECT(fp->argv[-1])); } @@ -5599,18 +5805,19 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) if (tm->recorder) { jsbytecode* innerLoopHeaderPC = cx->fp->regs->pc; - if (js_RecordLoopEdge(cx, tm->recorder, inlineCallCount)) + if (RecordLoopEdge(cx, tm->recorder, inlineCallCount)) return true; /* - * js_RecordLoopEdge will invoke an inner tree if we have a matching one. If we - * arrive here, that tree didn't run to completion and instead we mis-matched - * or the inner tree took a side exit other than the loop exit. We are thus - * no longer guaranteed to be parked on the same loop header js_MonitorLoopEdge - * was called for. In fact, this might not even be a loop header at all. Hence - * if the program counter no longer hovers over the inner loop header, return to - * the interpreter and do not attempt to trigger or record a new tree at this - * location. + * RecordLoopEdge will invoke an inner tree if we have a matching + * one. If we arrive here, that tree didn't run to completion and + * instead we mis-matched or the inner tree took a side exit other than + * the loop exit. We are thus no longer guaranteed to be parked on the + * same loop header js_MonitorLoopEdge was called for. In fact, this + * might not even be a loop header at all. Hence if the program counter + * no longer hovers over the inner loop header, return to the + * interpreter and do not attempt to trigger or record a new tree at + * this location. */ if (innerLoopHeaderPC != cx->fp->regs->pc) { #ifdef MOZ_TRACEVIS @@ -5623,20 +5830,23 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) /* Check the pool of reserved doubles (this might trigger a GC). */ if (tm->reservedDoublePoolPtr < (tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS) && - !js_ReplenishReservedPool(cx, tm)) { + !ReplenishReservedPool(cx, tm)) { #ifdef MOZ_TRACEVIS tvso.r = R_DOUBLES; #endif return false; /* Out of memory, don't try to record now. */ } - /* Make sure the shape of the global object still matches (this might flush the JIT cache). */ + /* + * Make sure the shape of the global object still matches (this might flush + * the JIT cache). 
+ */ JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain); uint32 globalShape = -1; SlotList* globalSlots = NULL; if (!CheckGlobalObjectShape(cx, tm, globalObj, &globalShape, &globalSlots)) { - js_Backoff(cx, cx->fp->regs->pc); + Backoff(cx, cx->fp->regs->pc); return false; } @@ -5663,8 +5873,10 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) return false; } - /* If we have no code in the anchor and no peers, we definitively won't be able to - activate any trees so, start compiling. */ + /* + * If we have no code in the anchor and no peers, we definitively won't be + * able to activate any trees, so start compiling. + */ if (!f->code() && !f->peer) { record: if (++f->hits() < HOTLOOP) { @@ -5673,10 +5885,14 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) #endif return false; } - /* We can give RecordTree the root peer. If that peer is already taken, it will - walk the peer list and find us a free slot or allocate a new tree if needed. */ - bool rv = js_RecordTree(cx, tm, f->first, NULL, 0, globalObj, globalShape, - globalSlots, argc); + + /* + * We can give RecordTree the root peer. If that peer is already taken, + * it will walk the peer list and find us a free slot or allocate a new + * tree if needed. + */ + bool rv = RecordTree(cx, tm, f->first, NULL, 0, globalObj, globalShape, + globalSlots, argc); #ifdef MOZ_TRACEVIS if (!rv) tvso.r = R_FAIL_RECORD_TREE; @@ -5690,14 +5906,17 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) FramePCOffset(cx->fp), (void*)f, f->ip); uintN count; - Fragment* match = js_FindVMCompatiblePeer(cx, globalObj, f, count); + Fragment* match = FindVMCompatiblePeer(cx, globalObj, f, count); if (!match) { if (count < MAXPEERS) goto record; - /* If we hit the max peers ceiling, don't try to lookup fragments all the time. Thats - expensive. This must be a rather type-unstable loop. */ + + /* + * If we hit the max peers ceiling, don't try to lookup fragments all + * the time. That's expensive. This must be a rather type-unstable loop. + */ debug_only_print0(LC_TMTracer, "Blacklisted: too many peer trees.\n"); - js_Blacklist((jsbytecode*) f->root->ip); + Blacklist((jsbytecode*) f->root->ip); #ifdef MOZ_TRACEVIS tvso.r = R_MAX_PEERS; #endif @@ -5707,7 +5926,7 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) VMSideExit* lr = NULL; VMSideExit* innermostNestedGuard = NULL; - lr = js_ExecuteTree(cx, match, inlineCallCount, &innermostNestedGuard); + lr = ExecuteTree(cx, match, inlineCallCount, &innermostNestedGuard); if (!lr) { #ifdef MOZ_TRACEVIS tvso.r = R_FAIL_EXECUTE_TREE; @@ -5715,31 +5934,35 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) return false; } - /* If we exit on a branch, or on a tree call guard, try to grow the inner tree (in case - of a branch exit), or the tree nested around the tree we exited from (in case of the - tree call guard). */ + /* + * If we exit on a branch, or on a tree call guard, try to grow the inner + * tree (in case of a branch exit), or the tree nested around the tree we + * exited from (in case of the tree call guard). 
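A minimal sketch, not part of the patch, of the gating logic in this hunk: a loop edge only starts a recording once its hit counter crosses a hotness threshold, and a header with too many incompatible peers is blacklisted instead of retried. The constants and struct below are stand-ins, not the real HOTLOOP/MAXPEERS machinery.

#include <cstdio>

struct LoopHeader {
    unsigned hits = 0;
    bool blacklisted = false;
};

static const unsigned kHotLoop = 2;     // stand-in for HOTLOOP
static const unsigned kMaxPeers = 9;    // stand-in for MAXPEERS

// Returns true when this loop edge should start a new recording.
static bool ShouldRecord(LoopHeader& h, unsigned incompatiblePeers)
{
    if (h.blacklisted)
        return false;
    if (incompatiblePeers >= kMaxPeers) {   // too type-unstable: stop trying
        h.blacklisted = true;
        return false;
    }
    return ++h.hits >= kHotLoop;            // only record loops that are hot
}

int main()
{
    LoopHeader h;
    for (int i = 0; i < 4; ++i)
        std::printf("iteration %d: record=%d\n", i, ShouldRecord(h, 0));
    return 0;
}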
+ */ bool rv; switch (lr->exitType) { case UNSTABLE_LOOP_EXIT: - rv = js_AttemptToStabilizeTree(cx, globalObj, lr, NULL, NULL); + rv = AttemptToStabilizeTree(cx, globalObj, lr, NULL, NULL); #ifdef MOZ_TRACEVIS if (!rv) tvso.r = R_FAIL_STABILIZE; #endif return rv; + case OVERFLOW_EXIT: oracle.markInstructionUndemotable(cx->fp->regs->pc); - /* fall through */ + /* FALL THROUGH */ case BRANCH_EXIT: case CASE_EXIT: - return js_AttemptToExtendTree(cx, lr, NULL, NULL + return AttemptToExtendTree(cx, lr, NULL, NULL #ifdef MOZ_TRACEVIS , &tvso #endif ); + case LOOP_EXIT: if (innermostNestedGuard) - return js_AttemptToExtendTree(cx, innermostNestedGuard, lr, NULL + return AttemptToExtendTree(cx, innermostNestedGuard, lr, NULL #ifdef MOZ_TRACEVIS , &tvso #endif @@ -5748,6 +5971,7 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) tvso.r = R_NO_EXTEND_OUTER; #endif return false; + #ifdef MOZ_TRACEVIS case MISMATCH_EXIT: tvso.r = R_MISMATCH_EXIT; return false; case OOM_EXIT: tvso.r = R_OOM_EXIT; return false; @@ -5755,8 +5979,12 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) case DEEP_BAIL_EXIT: tvso.r = R_DEEP_BAIL_EXIT; return false; case STATUS_EXIT: tvso.r = R_STATUS_EXIT; return false; #endif + default: - /* No, this was an unusual exit (i.e. out of memory/GC), so just resume interpretation. */ + /* + * No, this was an unusual exit (i.e. out of memory/GC), so just resume + * interpretation. + */ #ifdef MOZ_TRACEVIS tvso.r = R_OTHER_EXIT; #endif @@ -5794,9 +6022,12 @@ TraceRecorder::monitorRecording(JSContext* cx, TraceRecorder* tr, JSOp op) } ) - /* If op is not a break or a return from a loop, continue recording and follow the - trace. We check for imacro-calling bytecodes inside each switch case to resolve - the if (JSOP_IS_IMACOP(x)) conditions at compile time. */ + /* + * If op is not a break or a return from a loop, continue recording and + * follow the trace. We check for imacro-calling bytecodes inside each + * switch case to resolve the if (JSOP_IS_IMACOP(x)) conditions at compile + * time. + */ JSRecordingStatus status; #ifdef DEBUG @@ -5844,7 +6075,7 @@ TraceRecorder::monitorRecording(JSContext* cx, TraceRecorder* tr, JSOp op) stop_recording: /* If we recorded the end of the trace, destroy the recorder now. */ if (tr->fragment->lastIns) { - js_DeleteRecorder(cx); + DeleteRecorder(cx); return status; } @@ -5864,13 +6095,13 @@ js_AbortRecording(JSContext* cx, const char* reason) Fragment* f = tm->recorder->getFragment(); /* - * If the recorder already had its fragment disposed, or we actually finished - * recording and this recorder merely is passing through the deep abort state - * to the next recorder on the stack, just destroy the recorder. There is - * nothing to abort. + * If the recorder already had its fragment disposed, or we actually + * finished recording and this recorder merely is passing through the deep + * abort state to the next recorder on the stack, just destroy the + * recorder. There is nothing to abort. */ if (!f || f->lastIns) { - js_DeleteRecorder(cx); + DeleteRecorder(cx); return; } @@ -5888,12 +6119,10 @@ js_AbortRecording(JSContext* cx, const char* reason) reason); #endif - js_Backoff(cx, (jsbytecode*) f->root->ip, f->root); + Backoff(cx, (jsbytecode*) f->root->ip, f->root); - /* - * If js_DeleteRecorder flushed the code cache, we can't rely on f any more. - */ - if (!js_DeleteRecorder(cx)) + /* If DeleteRecorder flushed the code cache, we can't rely on f any more. 
*/ + if (!DeleteRecorder(cx)) return; /* @@ -5901,12 +6130,12 @@ js_AbortRecording(JSContext* cx, const char* reason) * TreeInfo object. */ if (!f->code() && (f->root == f)) - js_TrashTree(cx, f); + TrashTree(cx, f); } #if defined NANOJIT_IA32 static bool -js_CheckForSSE2() +CheckForSSE2() { int features = 0; #if defined _MSC_VER @@ -6146,9 +6375,9 @@ void js_InitJIT(JSTraceMonitor *tm) { #if defined JS_JIT_SPEW - /* Set up debug logging */ + /* Set up debug logging. */ if (!did_we_set_up_debug_logging) { - js_InitJITLogController(); + InitJITLogController(); did_we_set_up_debug_logging = true; } #else @@ -6158,7 +6387,7 @@ js_InitJIT(JSTraceMonitor *tm) if (!did_we_check_processor_features) { #if defined NANOJIT_IA32 avmplus::AvmCore::config.use_cmov = - avmplus::AvmCore::config.sse2 = js_CheckForSSE2(); + avmplus::AvmCore::config.sse2 = CheckForSSE2(); #endif #if defined NANOJIT_ARM bool arm_vfp = js_arm_check_vfp(); @@ -6185,9 +6414,7 @@ js_InitJIT(JSTraceMonitor *tm) did_we_check_processor_features = true; } - /* - * Set the default size for the code cache to 16MB. - */ + /* Set the default size for the code cache to 16MB. */ tm->maxCodeCacheBytes = 16 M; if (!tm->recordAttempts.ops) { @@ -6261,7 +6488,7 @@ js_FinishJIT(JSTraceMonitor *tm) for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) { VMFragment* f = tm->vmfragments[i]; - while(f) { + while (f) { VMFragment* next = f->next; tm->fragmento->clearFragment(f); f = next; @@ -6313,9 +6540,7 @@ js_PurgeJITOracle() } static JSDHashOperator -js_PurgeScriptRecordingAttempts(JSDHashTable *table, - JSDHashEntryHdr *hdr, - uint32 number, void *arg) +PurgeScriptRecordingAttempts(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number, void *arg) { PCHashEntry *e = (PCHashEntry *)hdr; JSScript *script = (JSScript *)arg; @@ -6326,19 +6551,17 @@ js_PurgeScriptRecordingAttempts(JSDHashTable *table, return JS_DHASH_NEXT; } -/* - * Call 'action' for each root fragment created for 'script'. - */ +/* Call 'action' for each root fragment created for 'script'. */ template static void -js_IterateScriptFragments(JSContext* cx, JSScript* script, FragmentAction action) +IterateScriptFragments(JSContext* cx, JSScript* script, FragmentAction action) { JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx); for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) { for (VMFragment **f = &(tm->vmfragments[i]); *f; ) { VMFragment* frag = *f; - /* Disable future use of any script-associated VMFragment.*/ if (JS_UPTRDIFF(frag->ip, script->code) < script->length) { + /* This fragment is associated with the script. 
*/ JS_ASSERT(frag->root == frag); VMFragment* next = frag->next; if (action(cx, tm, frag)) { @@ -6359,15 +6582,15 @@ js_IterateScriptFragments(JSContext* cx, JSScript* script, FragmentAction action } static bool -trashTreeAction(JSContext* cx, JSTraceMonitor* tm, Fragment* frag) +TrashTreeAction(JSContext* cx, JSTraceMonitor* tm, Fragment* frag) { for (Fragment *p = frag; p; p = p->peer) - js_TrashTree(cx, p); + TrashTree(cx, p); return false; } static bool -clearFragmentAction(JSContext* cx, JSTraceMonitor* tm, Fragment* frag) +ClearFragmentAction(JSContext* cx, JSTraceMonitor* tm, Fragment* frag) { tm->fragmento->clearFragment(frag); return true; @@ -6380,16 +6603,15 @@ js_PurgeScriptFragments(JSContext* cx, JSScript* script) return; debug_only_printf(LC_TMTracer, "Purging fragments for JSScript %p.\n", (void*)script); - /* - * js_TrashTree trashes dependent trees recursively, so we must do all the trashing - * before clearing in order to avoid calling js_TrashTree with a deleted fragment. - */ - js_IterateScriptFragments(cx, script, trashTreeAction); - js_IterateScriptFragments(cx, script, clearFragmentAction); - JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx); - JS_DHashTableEnumerate(&(tm->recordAttempts), - js_PurgeScriptRecordingAttempts, script); + /* + * TrashTree trashes dependent trees recursively, so we must do all the trashing + * before clearing in order to avoid calling TrashTree with a deleted fragment. + */ + IterateScriptFragments(cx, script, TrashTreeAction); + IterateScriptFragments(cx, script, ClearFragmentAction); + JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx); + JS_DHashTableEnumerate(&(tm->recordAttempts), PurgeScriptRecordingAttempts, script); } bool @@ -6501,9 +6723,9 @@ TraceRecorder::scopeChain() const } /* - * Return the frame of a call object if that frame is part of the current trace. |depthp| is an - * optional outparam: if it is non-null, it will be filled in with the depth of the call object's - * frame relevant to cx->fp. + * Return the frame of a call object if that frame is part of the current + * trace. |depthp| is an optional outparam: if it is non-null, it will be + * filled in with the depth of the call object's frame relevant to cx->fp. */ JS_REQUIRES_STACK JSStackFrame* TraceRecorder::frameIfInRange(JSObject* obj, unsigned* depthp) const @@ -6522,22 +6744,22 @@ TraceRecorder::frameIfInRange(JSObject* obj, unsigned* depthp) const return NULL; } -JS_DEFINE_CALLINFO_6(extern, UINT32, js_GetClosureVar, CONTEXT, OBJECT, UINT32, +JS_DEFINE_CALLINFO_6(extern, UINT32, GetClosureVar, CONTEXT, OBJECT, UINT32, UINT32, UINT32, DOUBLEPTR, 0, 0) -JS_DEFINE_CALLINFO_6(extern, UINT32, js_GetClosureArg, CONTEXT, OBJECT, UINT32, +JS_DEFINE_CALLINFO_6(extern, UINT32, GetClosureArg, CONTEXT, OBJECT, UINT32, UINT32, UINT32, DOUBLEPTR, 0, 0) /* - * Search the scope chain for a property lookup operation at the current PC and generate LIR - * to access the given property. Return JSRS_CONTINUE on success, otherwise abort and return - * JSRS_STOP. There are 3 outparams: + * Search the scope chain for a property lookup operation at the current PC and + * generate LIR to access the given property. Return JSRS_CONTINUE on success, + * otherwise abort and return JSRS_STOP. There are 3 outparams: * * vp the address of the current property value * ins LIR instruction representing the property value on trace - * tracked true iff the property value is tracked on this trace. If true, then the - * tracked value can be modified using the tracker set functions. 
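A minimal sketch, not part of the patch, of the single-comparison range test used just above (JS_UPTRDIFF(frag->ip, script->code) < script->length): computing the difference as an unsigned value lets one comparison reject addresses on either side of the script's bytecode. Names below are hypothetical.

#include <cstdint>
#include <cstdio>

static bool IpInScript(const unsigned char* ip,
                       const unsigned char* code, size_t length)
{
    // Same effect as (ip >= code && ip < code + length) for a valid region:
    // an ip below 'code' wraps to a huge unsigned value and fails the test.
    return uintptr_t(ip) - uintptr_t(code) < length;
}

int main()
{
    unsigned char bytecode[16] = {};
    std::printf("%d %d\n",
                IpInScript(bytecode + 3, bytecode, sizeof bytecode),    // 1
                IpInScript(bytecode + 16, bytecode, sizeof bytecode));  // 0
    return 0;
}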
If false, - * then the value comes from a call to a builtin to access an upvar, and - * can not be modified directly. + * tracked true iff the property value is tracked on this trace. If true, + * then the tracked value can be modified using the tracker set + * functions. If false, then the value comes from a call to a + * builtin to access an upvar, and can't be modified directly. */ JS_REQUIRES_STACK JSRecordingStatus TraceRecorder::scopeChainProp(JSObject* obj, jsval*& vp, LIns*& ins, bool& tracked) @@ -6621,19 +6843,19 @@ TraceRecorder::scopeChainProp(JSObject* obj, jsval*& vp, LIns*& ins, bool& track LIns* callee_ins = get(&cx->fp->argv[-2]); LIns* outp = lir->insAlloc(sizeof(double)); - LIns* args[] = { + LIns* args[] = { outp, INS_CONST(callDepth), INS_CONST(slot), - INS_CONST(scopeIndex), + INS_CONST(scopeIndex), callee_ins, - cx_ins + cx_ins }; const CallInfo* ci; if (sprop->getter == js_GetCallArg) - ci = &js_GetClosureArg_ci; + ci = &GetClosureArg_ci; else - ci = &js_GetClosureVar_ci; + ci = &GetClosureVar_ci; LIns* call_ins = lir->insCall(ci, args); JSTraceType type = getCoercedType(*vp); @@ -6688,8 +6910,6 @@ TraceRecorder::stack(int n, LIns* i) set(&stackval(n), i, n >= 0); } -extern jsdouble FASTCALL js_dmod(jsdouble a, jsdouble b); - JS_REQUIRES_STACK LIns* TraceRecorder::alu(LOpcode v, jsdouble v0, jsdouble v1, LIns* s0, LIns* s1) { @@ -6762,36 +6982,30 @@ TraceRecorder::alu(LOpcode v, jsdouble v0, jsdouble v1, LIns* s0, LIns* s1) exit = snapshot(OVERFLOW_EXIT); - /* - * Make sure we don't trigger division by zero at runtime. - */ + /* Make sure we don't trigger division by zero at runtime. */ if (!d1->isconst()) guard(false, lir->ins_eq0(d1), exit); result = lir->ins2(v = LIR_div, d0, d1); - /* - * As long the modulus is zero, the result is an integer. - */ + /* As long the modulus is zero, the result is an integer. */ guard(true, lir->ins_eq0(lir->ins1(LIR_mod, result)), exit); - /* Don't lose a -0 */ + + /* Don't lose a -0. */ guard(false, lir->ins_eq0(result), exit); break; + case LIR_fmod: { if (d0->isconst() && d1->isconst()) return lir->ins1(LIR_i2f, lir->insImm(jsint(r))); exit = snapshot(OVERFLOW_EXIT); - /* - * Make sure we don't trigger division by zero at runtime. - */ + /* Make sure we don't trigger division by zero at runtime. */ if (!d1->isconst()) guard(false, lir->ins_eq0(d1), exit); result = lir->ins1(v = LIR_mod, lir->ins2(LIR_div, d0, d1)); - /* - * If the result is not 0, it is always within the integer domain. - */ + /* If the result is not 0, it is always within the integer domain. */ LIns* branch = lir->insBranch(LIR_jf, lir->ins_eq0(result), NULL); /* @@ -6803,6 +7017,7 @@ TraceRecorder::alu(LOpcode v, jsdouble v0, jsdouble v1, LIns* s0, LIns* s1) break; } #endif + default: v = (LOpcode)((int)v & ~LIR64); result = lir->ins2(v, d0, d1); @@ -6814,7 +7029,7 @@ TraceRecorder::alu(LOpcode v, jsdouble v0, jsdouble v1, LIns* s0, LIns* s1) * that will inform the oracle and cause a non-demoted trace to be * attached that uses floating-point math for this operation. */ - if (!result->isconst() && (!overflowSafe(v, d0) || !overflowSafe(v, d1))) { + if (!result->isconst() && (!IsOverflowSafe(v, d0) || !IsOverflowSafe(v, d1))) { exit = snapshot(OVERFLOW_EXIT); guard(false, lir->ins1(LIR_ov, result), exit); if (v == LIR_mul) // make sure we don't lose a -0 @@ -6933,9 +7148,11 @@ TraceRecorder::ifop() } #ifdef NANOJIT_IA32 -/* Record LIR for a tableswitch or tableswitchx op. We record LIR only the - "first" time we hit the op. 
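A minimal sketch, not part of the patch, of the three conditions the LIR_div guards above speculate on before an integer division may stay in the integer domain; if any fails at run time the trace side-exits and the operation is redone with doubles. Plain ints stand in for LIR operands.

#include <cstdio>

static bool DivisionStaysInteger(int dividend, int divisor)
{
    if (divisor == 0)                 // guard: no division by zero on trace
        return false;
    if (dividend % divisor != 0)      // guard: result must be integral
        return false;
    if (dividend / divisor == 0)      // guard: a 0 result could really be -0
        return false;
    return true;
}

int main()
{
    std::printf("%d %d %d\n",
                DivisionStaysInteger(6, 3),    // 1
                DivisionStaysInteger(7, 2),    // 0: 3.5 is not an integer
                DivisionStaysInteger(0, -5));  // 0: 0 / -5 is -0 in JS
    return 0;
}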
Later, when we start traces after exiting that - trace, we just patch. */ +/* + * Record LIR for a tableswitch or tableswitchx op. We record LIR only the + * "first" time we hit the op. Later, when we start traces after exiting that + * trace, we just patch. + */ JS_REQUIRES_STACK LIns* TraceRecorder::tableswitch() { @@ -6943,7 +7160,7 @@ TraceRecorder::tableswitch() if (!isNumber(v)) return NULL; - /* no need to guard if condition is constant */ + /* No need to guard if the condition is constant. */ LIns* v_ins = f2i(get(&v)); if (v_ins->isconst() || v_ins->isconstq()) return NULL; @@ -6970,11 +7187,15 @@ TraceRecorder::tableswitch() high = GET_JUMPX_OFFSET(pc); } - /* Really large tables won't fit in a page. This is a conservative check. - If it matters in practice we need to go off-page. */ + /* + * Really large tables won't fit in a page. This is a conservative check. + * If it matters in practice we need to go off-page. + */ if ((high + 1 - low) * sizeof(intptr_t*) + 128 > (unsigned) LARGEST_UNDERRUN_PROT) { - // This throws away the return value of switchop but it seems - // ok because switchop always returns true. + /* + * This throws away the return value of switchop but it seems ok + * because switchop always returns true. + */ (void) switchop(); return NULL; } @@ -7000,7 +7221,8 @@ TraceRecorder::switchop() { jsval& v = stackval(-1); LIns* v_ins = get(&v); - /* no need to guard if condition is constant */ + + /* No need to guard if the condition is constant. */ if (v_ins->isconst() || v_ins->isconstq()) return JSRS_CONTINUE; if (isNumber(v)) { @@ -7036,8 +7258,8 @@ TraceRecorder::inc(jsval& v, jsint incr, bool pre) } /* - * On exit, v_ins is the incremented unboxed value, and the appropriate - * value (pre- or post-increment as described by pre) is stacked. + * On exit, v_ins is the incremented unboxed value, and the appropriate value + * (pre- or post-increment as described by pre) is stacked. */ JS_REQUIRES_STACK JSRecordingStatus TraceRecorder::inc(jsval& v, LIns*& v_ins, jsint incr, bool pre) @@ -7105,24 +7327,24 @@ TraceRecorder::incElem(jsint incr, bool pre) } static bool -evalCmp(LOpcode op, double result) +EvalCmp(LOpcode op, double l, double r) { bool cond; switch (op) { case LIR_feq: - cond = (result == 0); + cond = (l == r); break; case LIR_flt: - cond = result < 0; + cond = l < r; break; case LIR_fgt: - cond = result > 0; + cond = l > r; break; case LIR_fle: - cond = result <= 0; + cond = l <= r; break; case LIR_fge: - cond = result >= 0; + cond = l >= r; break; default: JS_NOT_REACHED("unexpected comparison op"); @@ -7132,17 +7354,11 @@ evalCmp(LOpcode op, double result) } static bool -evalCmp(LOpcode op, double l, double r) -{ - return evalCmp(op, l - r); -} - -static bool -evalCmp(LOpcode op, JSString* l, JSString* r) +EvalCmp(LOpcode op, JSString* l, JSString* r) { if (op == LIR_feq) return js_EqualStrings(l, r); - return evalCmp(op, js_CompareStrings(l, r)); + return EvalCmp(op, js_CompareStrings(l, r), 0); } JS_REQUIRES_STACK void @@ -7155,8 +7371,8 @@ TraceRecorder::strictEquality(bool equal, bool cmpCase) LIns* x; bool cond; - JSTraceType ltag = getPromotedType(l); - if (ltag != getPromotedType(r)) { + JSTraceType ltag = GetPromotedType(l); + if (ltag != GetPromotedType(r)) { cond = !equal; x = lir->insImm(cond); } else if (ltag == TT_STRING) { @@ -7215,7 +7431,7 @@ TraceRecorder::equalityHelper(jsval l, jsval r, LIns* l_ins, LIns* r_ins, * a primitive value (which would terminate recursion). 
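A minimal sketch, not part of the patch, of the folding helpers consolidated in the EvalCmp hunk above: the string overload answers equality directly and otherwise feeds the three-way comparison result into the numeric helper, compared against zero. The enum and strcmp are simplified stand-ins for the real opcodes and js_CompareStrings.

#include <cstdio>
#include <cstring>

enum CmpOp { CmpEq, CmpLt, CmpGt, CmpLe, CmpGe };

static bool EvalCmp(CmpOp op, double l, double r)
{
    switch (op) {
      case CmpEq: return l == r;
      case CmpLt: return l < r;
      case CmpGt: return l > r;
      case CmpLe: return l <= r;
      case CmpGe: return l >= r;
    }
    return false;
}

static bool EvalCmp(CmpOp op, const char* l, const char* r)
{
    if (op == CmpEq)
        return std::strcmp(l, r) == 0;
    return EvalCmp(op, std::strcmp(l, r), 0);   // three-way result vs. zero
}

int main()
{
    std::printf("%d %d\n", EvalCmp(CmpLt, 1.0, 2.0), EvalCmp(CmpGe, "b", "a"));
    return 0;
}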
*/ - if (getPromotedType(l) == getPromotedType(r)) { + if (GetPromotedType(l) == GetPromotedType(r)) { if (JSVAL_TAG(l) == JSVAL_OBJECT || JSVAL_TAG(l) == JSVAL_BOOLEAN) { cond = (l == r); } else if (JSVAL_IS_STRING(l)) { @@ -7360,7 +7576,7 @@ TraceRecorder::relational(LOpcode op, bool tryBranchAfterCond) LIns* args[] = { r_ins, l_ins }; l_ins = lir->insCall(&js_CompareStrings_ci, args); r_ins = lir->insImm(0); - cond = evalCmp(op, JSVAL_TO_STRING(l), JSVAL_TO_STRING(r)); + cond = EvalCmp(op, JSVAL_TO_STRING(l), JSVAL_TO_STRING(r)); goto do_comparison; } @@ -7420,12 +7636,15 @@ TraceRecorder::relational(LOpcode op, bool tryBranchAfterCond) tmp = r; rnum = js_ValueToNumber(cx, &tmp); } - cond = evalCmp(op, lnum, rnum); + cond = EvalCmp(op, lnum, rnum); fp = true; /* 11.8.5 steps 6-15. */ do_comparison: - /* If the result is not a number or it's not a quad, we must use an integer compare. */ + /* + * If the result is not a number or it's not a quad, we must use an integer + * compare. + */ if (!fp) { JS_ASSERT(op >= LIR_feq && op <= LIR_fge); op = LOpcode(op + (LIR_eq - LIR_feq)); @@ -7507,7 +7726,7 @@ TraceRecorder::binary(LOpcode op) bool rightIsNumber = isNumber(r); jsdouble rnum = rightIsNumber ? asNumber(r) : 0; - if ((op >= LIR_sub && op <= LIR_ush) || // sub, mul, (callh), or, xor, (not,) lsh, rsh, ush + if ((op >= LIR_sub && op <= LIR_ush) || // sub, mul, (callh), or, xor, (not,) lsh, rsh, ush (op >= LIR_fsub && op <= LIR_fmod)) { // fsub, fmul, fdiv, fmod LIns* args[2]; if (JSVAL_IS_STRING(l)) { @@ -7581,6 +7800,41 @@ TraceRecorder::map_is_native(JSObjectMap* map, LIns* map_ins, LIns*& ops_ins, si return true; } +JS_REQUIRES_STACK JSRecordingStatus +TraceRecorder::guardNativePropertyOp(JSObject* aobj, LIns* map_ins) +{ + /* + * Interpreter calls to PROPERTY_CACHE_TEST guard on native object ops + * which is required to use native objects (those whose maps are scopes), + * or even more narrow conditions required because the cache miss case + * will call a particular object-op (js_GetProperty, js_SetProperty). + * + * We parameterize using offsetof and guard on match against the hook at + * the given offset in js_ObjectOps. TraceRecorder::record_JSOP_SETPROP + * guards the js_SetProperty case. + */ + uint32 format = js_CodeSpec[*cx->fp->regs->pc].format; + uint32 mode = JOF_MODE(format); + + // No need to guard native-ness of global object. + JS_ASSERT(OBJ_IS_NATIVE(globalObj)); + if (aobj != globalObj) { + size_t op_offset = offsetof(JSObjectOps, objectMap); + if (mode == JOF_PROP || mode == JOF_VARPROP) { + op_offset = (format & JOF_SET) + ? offsetof(JSObjectOps, setProperty) + : offsetof(JSObjectOps, getProperty); + } else { + JS_ASSERT(mode == JOF_NAME); + } + + LIns* ops_ins; + if (!map_is_native(aobj->map, map_ins, ops_ins, op_offset)) + ABORT_TRACE("non-native map"); + } + return JSRS_CONTINUE; +} + JS_REQUIRES_STACK JSRecordingStatus TraceRecorder::test_property_cache(JSObject* obj, LIns* obj_ins, JSObject*& obj2, jsuword& pcval) { @@ -7598,33 +7852,8 @@ TraceRecorder::test_property_cache(JSObject* obj, LIns* obj_ins, JSObject*& obj2 } LIns* map_ins = map(obj_ins); - LIns* ops_ins; - // Interpreter calls to PROPERTY_CACHE_TEST guard on native object ops - // which is required to use native objects (those whose maps are scopes), - // or even more narrow conditions required because the cache miss case - // will call a particular object-op (js_GetProperty, js_SetProperty). 
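guardNativePropertyOp above parameterizes one guard over whichever js_ObjectOps hook the interpreter's cache-miss path would invoke, by passing the hook's byte offset. A toy model of that offsetof-based comparison, with a made-up ops struct standing in for JSObjectOps:

#include <cstddef>
#include <cstdio>

struct ToyObjectOps {                      // stand-in for JSObjectOps
    void* objectMap;
    void* getProperty;
    void* setProperty;
};

// Compare the hook stored at op_offset in 'ops' with the canonical native one.
static bool hookIsNative(const ToyObjectOps* ops, const ToyObjectOps* native, size_t op_offset) {
    void* a = *reinterpret_cast<void* const*>(reinterpret_cast<const char*>(ops) + op_offset);
    void* b = *reinterpret_cast<void* const*>(reinterpret_cast<const char*>(native) + op_offset);
    return a == b;
}

int main() {
    ToyObjectOps native = { (void*) 1, (void*) 2, (void*) 3 };
    ToyObjectOps exotic = { (void*) 1, (void*) 9, (void*) 3 };
    std::printf("%d %d\n",
                hookIsNative(&exotic, &native, offsetof(ToyObjectOps, objectMap)),    // 1
                hookIsNative(&exotic, &native, offsetof(ToyObjectOps, getProperty))); // 0
    return 0;
}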
- // - // We parameterize using offsetof and guard on match against the hook at - // the given offset in js_ObjectOps. TraceRecorder::record_JSOP_SETPROP - // guards the js_SetProperty case. - uint32 format = js_CodeSpec[*pc].format; - uint32 mode = JOF_MODE(format); - - // No need to guard native-ness of global object. - JS_ASSERT(OBJ_IS_NATIVE(globalObj)); - if (aobj != globalObj) { - size_t op_offset = offsetof(JSObjectOps, objectMap); - if (mode == JOF_PROP || mode == JOF_VARPROP) { - JS_ASSERT(!(format & JOF_SET)); - op_offset = offsetof(JSObjectOps, getProperty); - } else { - JS_ASSERT(mode == JOF_NAME); - } - - if (!map_is_native(aobj->map, map_ins, ops_ins, op_offset)) - ABORT_TRACE("non-native map"); - } + CHECK_STATUS(guardNativePropertyOp(aobj, map_ins)); JSAtom* atom; JSPropCacheEntry* entry; @@ -7692,31 +7921,60 @@ TraceRecorder::test_property_cache(JSObject* obj, LIns* obj_ins, JSObject*& obj2 JS_ASSERT(cx->requestDepth); #endif - // Emit guard(s), common code for both hit and miss cases. + return guardPropertyCacheHit(obj_ins, map_ins, aobj, obj2, entry, pcval); +} + +JS_REQUIRES_STACK JSRecordingStatus +TraceRecorder::guardPropertyCacheHit(LIns* obj_ins, + LIns* map_ins, + JSObject* aobj, + JSObject* obj2, + JSPropCacheEntry* entry, + jsuword& pcval) +{ + uint32 vshape = PCVCAP_SHAPE(entry->vcap); + // Check for first-level cache hit and guard on kshape if possible. // Otherwise guard on key object exact match. if (PCVCAP_TAG(entry->vcap) <= 1) { if (aobj != globalObj) { LIns* shape_ins = addName(lir->insLoad(LIR_ld, map_ins, offsetof(JSScope, shape)), "shape"); - guard(true, addName(lir->ins2i(LIR_eq, shape_ins, entry->kshape), "guard(kshape)(test_property_cache)"), + guard(true, + addName(lir->ins2i(LIR_eq, shape_ins, entry->kshape), "guard_kshape"), BRANCH_EXIT); } + + if (entry->adding()) { + if (aobj == globalObj) + ABORT_TRACE("adding a property to the global object"); + + LIns *vshape_ins = addName( + lir->insLoad(LIR_ld, + addName(lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, runtime)), + "runtime"), + offsetof(JSRuntime, protoHazardShape)), + "protoHazardShape"); + guard(true, + addName(lir->ins2i(LIR_eq, vshape_ins, vshape), "guard_protoHazardShape"), + MISMATCH_EXIT); + } } else { #ifdef DEBUG - JSOp op = js_GetOpcode(cx, cx->fp->script, pc); + JSOp op = js_GetOpcode(cx, cx->fp->script, cx->fp->regs->pc); JSAtom *pcatom; if (op == JSOP_LENGTH) { pcatom = cx->runtime->atomState.lengthAtom; } else { ptrdiff_t pcoff = (JOF_TYPE(js_CodeSpec[op].format) == JOF_SLOTATOM) ? SLOTNO_LEN : 0; - GET_ATOM_FROM_BYTECODE(cx->fp->script, pc, pcoff, pcatom); + GET_ATOM_FROM_BYTECODE(cx->fp->script, cx->fp->regs->pc, pcoff, pcatom); } JS_ASSERT(entry->kpc == (jsbytecode *) pcatom); JS_ASSERT(entry->kshape == jsuword(aobj)); #endif if (aobj != globalObj && !obj_ins->isconstp()) { - guard(true, addName(lir->ins2i(LIR_eq, obj_ins, entry->kshape), "guard(kobj)"), + guard(true, + addName(lir->ins2i(LIR_eq, obj_ins, entry->kshape), "guard_kobj"), BRANCH_EXIT); } } @@ -7724,26 +7982,25 @@ TraceRecorder::test_property_cache(JSObject* obj, LIns* obj_ins, JSObject*& obj2 // For any hit that goes up the scope and/or proto chains, we will need to // guard on the shape of the object containing the property. if (PCVCAP_TAG(entry->vcap) >= 1) { - jsuword vcap = entry->vcap; - uint32 vshape = PCVCAP_SHAPE(vcap); JS_ASSERT(OBJ_SHAPE(obj2) == vshape); LIns* obj2_ins; if (PCVCAP_TAG(entry->vcap) == 1) { // Duplicate the special case in PROPERTY_CACHE_TEST. 
- obj2_ins = stobj_get_fslot(obj_ins, JSSLOT_PROTO); + obj2_ins = addName(stobj_get_fslot(obj_ins, JSSLOT_PROTO), "proto"); guard(false, lir->ins_eq0(obj2_ins), BRANCH_EXIT); } else { obj2_ins = INS_CONSTPTR(obj2); } map_ins = map(obj2_ins); + LIns* ops_ins; if (!map_is_native(obj2->map, map_ins, ops_ins)) ABORT_TRACE("non-native map"); LIns* shape_ins = addName(lir->insLoad(LIR_ld, map_ins, offsetof(JSScope, shape)), - "shape"); + "obj2_shape"); guard(true, - addName(lir->ins2i(LIR_eq, shape_ins, vshape), "guard(vshape)(test_property_cache)"), + addName(lir->ins2i(LIR_eq, shape_ins, vshape), "guard_vshape"), BRANCH_EXIT); } @@ -7801,16 +8058,6 @@ TraceRecorder::stobj_get_slot(LIns* obj_ins, unsigned slot, LIns*& dslots_ins) return stobj_get_dslot(obj_ins, slot - JS_INITIAL_NSLOTS, dslots_ins); } -JSRecordingStatus -TraceRecorder::native_set(LIns* obj_ins, JSScopeProperty* sprop, LIns*& dslots_ins, LIns* v_ins) -{ - if (SPROP_HAS_STUB_SETTER(sprop) && sprop->slot != SPROP_INVALID_SLOT) { - stobj_set_slot(obj_ins, sprop->slot, dslots_ins, v_ins); - return JSRS_CONTINUE; - } - ABORT_TRACE("unallocated or non-stub sprop"); -} - JSRecordingStatus TraceRecorder::native_get(LIns* obj_ins, LIns* pobj_ins, JSScopeProperty* sprop, LIns*& dslots_ins, LIns*& v_ins) @@ -7925,15 +8172,14 @@ TraceRecorder::getThis(LIns*& this_ins) if (!thisObj) ABORT_TRACE_ERROR("js_ComputeThisForName failed"); - /* - * In global code, bake in the global object as 'this' object. - */ + /* In global code, bake in the global object as 'this' object. */ if (!cx->fp->callee) { JS_ASSERT(callDepth == 0); this_ins = INS_CONSTPTR(thisObj); /* - * We don't have argv[-1] in global code, so we don't update the tracker here. + * We don't have argv[-1] in global code, so we don't update the + * tracker here. */ return JSRS_CONTINUE; } @@ -7942,11 +8188,12 @@ TraceRecorder::getThis(LIns*& this_ins) JS_ASSERT(JSVAL_IS_OBJECT(thisv)); /* - * Traces type-specialize between null and objects, so if we currently see a null - * value in argv[-1], this trace will only match if we see null at runtime as well. - * Bake in the global object as 'this' object, updating the tracker as well. We - * can only detect this condition prior to calling js_ComputeThisForFrame, since it - * updates the interpreter's copy of argv[-1]. + * Traces type-specialize between null and objects, so if we currently see + * a null value in argv[-1], this trace will only match if we see null at + * runtime as well. Bake in the global object as 'this' object, updating + * the tracker as well. We can only detect this condition prior to calling + * js_ComputeThisForFrame, since it updates the interpreter's copy of + * argv[-1]. */ JSClass* clasp = NULL;; if (JSVAL_IS_NULL(original) || @@ -7964,8 +8211,8 @@ TraceRecorder::getThis(LIns*& this_ins) this_ins = get(&thisv); /* - * The only unwrapped object that needs to be wrapped that we can get here is the - * global object obtained throught the scope chain. + * The only unwrapped object that needs to be wrapped that we can get here + * is the global object obtained throught the scope chain. */ JSObject* obj = js_GetWrappedObject(cx, JSVAL_TO_OBJECT(thisv)); JSObject* inner = obj; @@ -7977,8 +8224,10 @@ TraceRecorder::getThis(LIns*& this_ins) original == OBJECT_TO_JSVAL(inner) || original == OBJECT_TO_JSVAL(obj)); - // If the returned this object is the unwrapped inner or outer object, - // then we need to use the wrapped outer object. 
+ /* + * If the returned this object is the unwrapped inner or outer object, + * then we need to use the wrapped outer object. + */ LIns* is_inner = lir->ins2(LIR_eq, this_ins, INS_CONSTPTR(inner)); LIns* is_outer = lir->ins2(LIR_eq, this_ins, INS_CONSTPTR(obj)); LIns* wrapper = INS_CONSTPTR(JSVAL_TO_OBJECT(thisv)); @@ -8053,8 +8302,8 @@ JS_REQUIRES_STACK JSRecordingStatus TraceRecorder::guardPrototypeHasNoIndexedProperties(JSObject* obj, LIns* obj_ins, ExitType exitType) { /* - * Guard that no object along the prototype chain has any indexed properties which - * might become visible through holes in the array. + * Guard that no object along the prototype chain has any indexed + * properties which might become visible through holes in the array. */ VMSideExit* exit = snapshot(exitType); @@ -8088,16 +8337,22 @@ TraceRecorder::guardNotGlobalObject(JSObject* obj, LIns* obj_ins) JS_REQUIRES_STACK void TraceRecorder::clearFrameSlotsFromCache() { - /* Clear out all slots of this frame in the nativeFrameTracker. Different locations on the - VM stack might map to different locations on the native stack depending on the - number of arguments (i.e.) of the next call, so we have to make sure we map - those in to the cache with the right offsets. */ + /* + * Clear out all slots of this frame in the nativeFrameTracker. Different + * locations on the VM stack might map to different locations on the native + * stack depending on the number of arguments (i.e.) of the next call, so + * we have to make sure we map those in to the cache with the right + * offsets. + */ JSStackFrame* fp = cx->fp; jsval* vp; jsval* vpstop; - // Duplicate native stack layout computation: see VisitFrameSlots header comment. - // This doesn't do layout arithmetic, but it must clear out all the slots defined as - // imported by VisitFrameSlots. + + /* + * Duplicate native stack layout computation: see VisitFrameSlots header comment. + * This doesn't do layout arithmetic, but it must clear out all the slots defined as + * imported by VisitFrameSlots. + */ if (fp->callee) { vp = &fp->argv[-2]; vpstop = &fp->argv[argSlots(fp)]; @@ -8118,6 +8373,7 @@ TraceRecorder::record_EnterFrame() if (++callDepth >= MAX_CALLDEPTH) ABORT_TRACE("exceeded maximum call depth"); + // FIXME: Allow and attempt to inline a single level of recursion until we compile // recursive calls as independent trees (459301). if (fp->script == fp->down->script && fp->down->down && fp->down->down->script == fp->script) @@ -8217,8 +8473,11 @@ TraceRecorder::record_JSOP_RETURN() return JSRS_STOP; } - // If we have created an |arguments| object for the frame, we must copy the argument - // values into the object as properties in case it is used after this frame returns. + /* + * If we have created an |arguments| object for the frame, we must copy the + * argument values into the object as properties in case it is used after + * this frame returns. + */ if (cx->fp->argsobj) { LIns* argsobj_ins = get(&cx->fp->argsobj); LIns* length_ins = INS_CONST(cx->fp->argc); @@ -8511,9 +8770,10 @@ TraceRecorder::record_JSOP_NEG() if (isNumber(v)) { LIns* a = get(&v); - /* If we're a promoted integer, we have to watch out for 0s since -0 is a double. - Only follow this path if we're not an integer that's 0 and we're not a double - that's zero. + /* + * If we're a promoted integer, we have to watch out for 0s since -0 is + * a double. Only follow this path if we're not an integer that's 0 and + * we're not a double that's zero. 
*/ if (!oracle.isInstructionUndemotable(cx->fp->regs->pc) && isPromoteInt(a) && @@ -8595,15 +8855,6 @@ TraceRecorder::record_JSOP_OBJTOP() return JSRS_CONTINUE; } -JSBool -js_Array(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval); - -JSBool -js_Object(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval); - -JSBool -js_Date(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval); - JSRecordingStatus TraceRecorder::getClassPrototype(JSObject* ctor, LIns*& proto_ins) { @@ -8703,6 +8954,83 @@ TraceRecorder::newArray(JSObject* ctor, uint32 argc, jsval* argv, jsval* rval) return JSRS_CONTINUE; } +JS_REQUIRES_STACK void +TraceRecorder::propagateFailureToBuiltinStatus(LIns* ok_ins, LIns*& status_ins) +{ + /* + * Check the boolean return value (ok_ins) of a native JSNative, + * JSFastNative, or JSPropertyOp hook for failure. On failure, set the + * JSBUILTIN_ERROR bit of cx->builtinStatus. + * + * If the return value (ok_ins) is true, status' == status. Otherwise + * status' = status | JSBUILTIN_ERROR. We calculate (rval&1)^1, which is 1 + * if rval is JS_FALSE (error), and then shift that by 1, which is the log2 + * of JSBUILTIN_ERROR. + */ + JS_STATIC_ASSERT(((JS_TRUE & 1) ^ 1) << 1 == 0); + JS_STATIC_ASSERT(((JS_FALSE & 1) ^ 1) << 1 == JSBUILTIN_ERROR); + status_ins = lir->ins2(LIR_or, + status_ins, + lir->ins2i(LIR_lsh, + lir->ins2i(LIR_xor, + lir->ins2i(LIR_and, ok_ins, 1), + 1), + 1)); + lir->insStorei(status_ins, lirbuf->state, (int) offsetof(InterpState, builtinStatus)); +} + +JS_REQUIRES_STACK void +TraceRecorder::emitNativePropertyOp(JSScope* scope, JSScopeProperty* sprop, LIns* obj_ins, + bool setflag, LIns* boxed_ins) +{ + JS_ASSERT(!(sprop->attrs & (setflag ? JSPROP_SETTER : JSPROP_GETTER))); + JS_ASSERT(setflag ? !SPROP_HAS_STUB_SETTER(sprop) : !SPROP_HAS_STUB_GETTER(sprop)); + + // Take snapshot for js_DeepBail and store it in cx->bailExit. + VMSideExit* exit = snapshot(DEEP_BAIL_EXIT); + lir->insStorei(INS_CONSTPTR(exit), cx_ins, offsetof(JSContext, bailExit)); + + // Tell nanojit not to discard or defer stack writes before this call. + LIns* guardRec = createGuardRecord(exit); + lir->insGuard(LIR_xbarrier, guardRec, guardRec); + + // It is unsafe to pass the address of an object slot as the out parameter, + // because the getter or setter could end up resizing the object's dslots. + // Instead, use a word of stack and root it in nativeVp. + LIns* vp_ins = lir->insAlloc(sizeof(jsval)); + lir->insStorei(vp_ins, cx_ins, offsetof(JSContext, nativeVp)); + lir->insStorei(INS_CONST(1), cx_ins, offsetof(JSContext, nativeVpLen)); + if (setflag) + lir->insStorei(boxed_ins, vp_ins, 0); + + CallInfo* ci = (CallInfo*) lir->insSkip(sizeof(struct CallInfo))->payload(); + ci->_address = uintptr_t(setflag ? sprop->setter : sprop->getter); + ci->_argtypes = ARGSIZE_LO | ARGSIZE_LO << 2 | ARGSIZE_LO << 4 | ARGSIZE_LO << 6 | ARGSIZE_LO << 8; + ci->_cse = ci->_fold = 0; + ci->_abi = ABI_CDECL; +#ifdef DEBUG + ci->_name = "JSPropertyOp"; +#endif + LIns* args[] = { vp_ins, INS_CONSTWORD(SPROP_USERID(sprop)), obj_ins, cx_ins }; + LIns* ok_ins = lir->insCall(ci, args); + + // Unroot the vp. + lir->insStorei(INS_CONSTPTR(NULL), cx_ins, offsetof(JSContext, nativeVp)); + + // Guard that the call succeeded and builtinStatus is still 0. + // If the native op succeeds but we deep-bail here, the result value is + // lost! Therefore this can only be used for setters of shared properties. + // In that case we ignore the result value anyway. 
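propagateFailureToBuiltinStatus above records the native's JSBool return branch-free: ((ok & 1) ^ 1) << 1 evaluates to 0 for a true result and to JSBUILTIN_ERROR for a false one, which is exactly what the two static asserts pin down. Checking the arithmetic off-trace (the enum values below are the ones implied by those asserts, not quoted from the headers):

#include <cstdio>

enum { TOY_JS_FALSE = 0, TOY_JS_TRUE = 1, TOY_BUILTIN_ERROR = 2 };

static unsigned mergeStatus(unsigned status, unsigned ok) {
    return status | (((ok & 1) ^ 1) << 1);   // same and/xor/lsh/or sequence the recorder emits as LIR
}

int main() {
    std::printf("%u %u\n", mergeStatus(0, TOY_JS_TRUE), mergeStatus(0, TOY_JS_FALSE));  // 0 2
    return 0;
}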
+ LIns* status_ins = lir->insLoad(LIR_ld, + lirbuf->state, + (int) offsetof(InterpState, builtinStatus)); + propagateFailureToBuiltinStatus(ok_ins, status_ins); + guard(true, lir->ins_eq0(status_ins), STATUS_EXIT); + + // Re-load the value--but this is currently unused, so commented out. + //boxed_ins = lir->insLoad(LIR_ldp, vp_ins, 0); +} + JS_REQUIRES_STACK JSRecordingStatus TraceRecorder::emitNativeCall(JSTraceableNative* known, uintN argc, LIns* args[]) { @@ -8759,7 +9087,8 @@ TraceRecorder::emitNativeCall(JSTraceableNative* known, uintN argc, LIns* args[] } /* - * Check whether we have a specialized implementation for this native invocation. + * Check whether we have a specialized implementation for this native + * invocation. */ JS_REQUIRES_STACK JSRecordingStatus TraceRecorder::callTraceableNative(JSFunction* fun, uintN argc, bool constructing) @@ -8798,11 +9127,11 @@ TraceRecorder::callTraceableNative(JSFunction* fun, uintN argc, bool constructin argtype = known->prefix[i]; if (argtype == 'C') { *argp = cx_ins; - } else if (argtype == 'T') { /* this, as an object */ + } else if (argtype == 'T') { /* this, as an object */ if (JSVAL_IS_PRIMITIVE(tval)) goto next_specialization; *argp = this_ins; - } else if (argtype == 'S') { /* this, as a string */ + } else if (argtype == 'S') { /* this, as a string */ if (!JSVAL_IS_STRING(tval)) goto next_specialization; *argp = this_ins; @@ -8819,7 +9148,7 @@ TraceRecorder::callTraceableNative(JSFunction* fun, uintN argc, bool constructin *argp = INS_CONSTPTR(fp->imacpc); else *argp = INS_CONSTPTR(pc); - } else if (argtype == 'D') { /* this, as a number */ + } else if (argtype == 'D') { /* this, as a number */ if (!isNumber(tval)) goto next_specialization; *argp = this_ins; @@ -8893,7 +9222,7 @@ TraceRecorder::callNative(uintN argc, JSOp mode) // Allocate the vp vector and emit code to root it. uintN vplen = 2 + JS_MAX(argc, FUN_MINARGS(fun)) + fun->u.n.extra; if (!(fun->flags & JSFUN_FAST_NATIVE)) - vplen++; // slow native return value slot + vplen++; // slow native return value slot lir->insStorei(INS_CONST(vplen), cx_ins, offsetof(JSContext, nativeVpLen)); LIns* invokevp_ins = lir->insAlloc(vplen * sizeof(jsval)); lir->insStorei(invokevp_ins, cx_ins, offsetof(JSContext, nativeVp)); @@ -8924,11 +9253,12 @@ TraceRecorder::callNative(uintN argc, JSOp mode) args[2] = cx_ins; newobj_ins = lir->insCall(&js_NewInstance_ci, args); guard(false, lir->ins_eq0(newobj_ins), OOM_EXIT); - this_ins = newobj_ins; // boxing an object is a no-op + this_ins = newobj_ins; /* boxing an object is a no-op */ } else if (JSFUN_BOUND_METHOD_TEST(fun->flags)) { this_ins = INS_CONSTWORD(OBJECT_TO_JSVAL(OBJ_GET_PARENT(cx, funobj))); } else { this_ins = get(&vp[1]); + /* * For fast natives, 'null' or primitives are fine as as 'this' value. * For slow natives we have to ensure the object is substituted for the @@ -9261,30 +9591,113 @@ TraceRecorder::record_JSOP_SETPROP() return JSRS_CONTINUE; } +/* Emit a specialized, inlined copy of js_NativeSet. */ JS_REQUIRES_STACK JSRecordingStatus -TraceRecorder::record_SetPropHit(JSPropCacheEntry* entry, JSScopeProperty* sprop) +TraceRecorder::nativeSet(JSObject* obj, LIns* obj_ins, JSScopeProperty* sprop, + jsval v, LIns* v_ins) +{ + JSScope* scope = OBJ_SCOPE(obj); + uint32 slot = sprop->slot; + + /* + * We do not trace assignment to properties that have both a nonstub setter + * and a slot, for several reasons. 
+ * + * First, that would require sampling rt->propertyRemovals before and after + * (see js_NativeSet), and even more code to handle the case where the two + * samples differ. A mere guard is not enough, because you can't just bail + * off trace in the middle of a property assignment without storing the + * value and making the stack right. + * + * If obj is the global object, there are two additional problems. We would + * have to emit still more code to store the result in the object (not the + * native global frame) if the setter returned successfully after + * deep-bailing. And we would have to cope if the run-time type of the + * setter's return value differed from the record-time type of v, in which + * case unboxing would fail and, having called a native setter, we could + * not just retry the instruction in the interpreter. + */ + JS_ASSERT(SPROP_HAS_STUB_SETTER(sprop) || slot == SPROP_INVALID_SLOT); + + // Box the value to be stored, if necessary. + LIns* boxed_ins = NULL; + if (!SPROP_HAS_STUB_SETTER(sprop) || (slot != SPROP_INVALID_SLOT && obj != globalObj)) { + boxed_ins = v_ins; + box_jsval(v, boxed_ins); + } + + // Call the setter, if any. + if (!SPROP_HAS_STUB_SETTER(sprop)) + emitNativePropertyOp(scope, sprop, obj_ins, true, boxed_ins); + + // Store the value, if this property has a slot. + if (slot != SPROP_INVALID_SLOT) { + JS_ASSERT(SPROP_HAS_VALID_SLOT(sprop, scope)); + JS_ASSERT(!(sprop->attrs & JSPROP_SHARED)); + if (obj == globalObj) { + if (!lazilyImportGlobalSlot(slot)) + ABORT_TRACE("lazy import of global slot failed"); + + // If we called a native setter, unbox the result. + if (!SPROP_HAS_STUB_SETTER(sprop)) { + v_ins = boxed_ins; + unbox_jsval(STOBJ_GET_SLOT(obj, slot), v_ins, snapshot(BRANCH_EXIT)); + } + set(&STOBJ_GET_SLOT(obj, slot), v_ins); + } else { + LIns* dslots_ins = NULL; + stobj_set_slot(obj_ins, slot, dslots_ins, boxed_ins); + } + } + + return JSRS_CONTINUE; +} + +JS_REQUIRES_STACK JSRecordingStatus +TraceRecorder::setProp(jsval &l, JSPropCacheEntry* entry, JSScopeProperty* sprop, + jsval &v, LIns*& v_ins) { if (entry == JS_NO_PROP_CACHE_FILL) ABORT_TRACE("can't trace uncacheable property set"); - if (PCVCAP_TAG(entry->vcap) >= 1) - ABORT_TRACE("can't trace inherited property set"); + JS_ASSERT_IF(PCVCAP_TAG(entry->vcap) >= 1, sprop->attrs & JSPROP_SHARED); + if (!SPROP_HAS_STUB_SETTER(sprop) && sprop->slot != SPROP_INVALID_SLOT) + ABORT_TRACE("can't trace set of property with setter and slot"); + if (sprop->attrs & JSPROP_SETTER) + ABORT_TRACE("can't trace JavaScript function setter"); - jsbytecode* pc = cx->fp->regs->pc; - JS_ASSERT(entry->kpc == pc); - - jsval& r = stackval(-1); - jsval& l = stackval(-2); + // These two cases are actually errors and can't be cached. + JS_ASSERT(!(sprop->attrs & JSPROP_GETTER)); // getter without setter + JS_ASSERT(!(sprop->attrs & JSPROP_READONLY)); JS_ASSERT(!JSVAL_IS_PRIMITIVE(l)); JSObject* obj = JSVAL_TO_OBJECT(l); LIns* obj_ins = get(&l); JSScope* scope = OBJ_SCOPE(obj); - JS_ASSERT(scope->owned()); - JS_ASSERT(scope->has(sprop)); + JS_ASSERT_IF(entry->vcap == PCVCAP_MAKE(entry->kshape, 0, 0), scope->has(sprop)); - if (!isValidSlot(scope, sprop)) - return JSRS_STOP; + // Fast path for CallClass. This is about 20% faster than the general case. 
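The body of nativeSet above boils down to three decisions driven by the property: whether the value must be boxed, whether a setter call is emitted, and whether a slot store follows. A compressed reading of those conditions, with plain bools standing in for the SPROP_* predicates (a sketch of the logic, not a transcription):

#include <cstdio>

struct SetPlan { bool boxValue; bool callSetter; bool storeSlot; };

// stubSetter: SPROP_HAS_STUB_SETTER(sprop); hasSlot: slot != SPROP_INVALID_SLOT;
// isGlobal: obj == globalObj (globals live unboxed in the native global frame).
static SetPlan planNativeSet(bool stubSetter, bool hasSlot, bool isGlobal) {
    SetPlan p;
    p.boxValue = !stubSetter || (hasSlot && !isGlobal);
    p.callSetter = !stubSetter;
    p.storeSlot = hasSlot;
    return p;
}

int main() {
    SetPlan p = planNativeSet(true, true, false);   // ordinary slotful property with the stub setter
    std::printf("box=%d call=%d store=%d\n", p.boxValue, p.callSetter, p.storeSlot);  // 1 0 1
    return 0;
}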
+ if (OBJ_GET_CLASS(cx, obj) == &js_CallClass) { + const CallInfo* ci = NULL; + if (sprop->setter == SetCallArg) + ci = &js_SetCallArg_ci; + else if (sprop->setter == SetCallVar) + ci = &js_SetCallVar_ci; + else + ABORT_TRACE("can't trace special CallClass setter"); + + LIns* v_ins = get(&v); + box_jsval(v, v_ins); + LIns* args[] = { + v_ins, + INS_CONST(SPROP_USERID(sprop)), + obj_ins, + cx_ins + }; + LIns* call_ins = lir->insCall(ci, args); + guard(false, addName(lir->ins_eq0(call_ins), "guard(set upvar)"), STATUS_EXIT); + return JSRS_CONTINUE; + } /* * Setting a function-valued property might need to rebrand the object; we @@ -9292,57 +9705,54 @@ TraceRecorder::record_SetPropHit(JSPropCacheEntry* entry, JSScopeProperty* sprop * separating functions into the trace-time type TT_FUNCTION will save the * day! */ - if (scope->branded() && VALUE_IS_FUNCTION(cx, r)) + if (scope->branded() && VALUE_IS_FUNCTION(cx, v)) ABORT_TRACE("can't trace function-valued property set in branded scope"); - if (obj == globalObj) { - JS_ASSERT(SPROP_HAS_VALID_SLOT(sprop, scope)); - uint32 slot = sprop->slot; - if (!lazilyImportGlobalSlot(slot)) - ABORT_TRACE("lazy import of global slot failed"); + // Find obj2. If entry->adding(), the TAG bits are all 0. + JSObject* obj2 = obj; + for (jsuword i = PCVCAP_TAG(entry->vcap) >> PCVCAP_PROTOBITS; i; i--) + obj2 = OBJ_GET_PARENT(cx, obj2); + for (jsuword j = PCVCAP_TAG(entry->vcap) & PCVCAP_PROTOMASK; j; j--) + obj2 = OBJ_GET_PROTO(cx, obj2); + scope = OBJ_SCOPE(obj2); + JS_ASSERT_IF(entry->adding(), obj2 == obj); - LIns* r_ins = get(&r); - set(&STOBJ_GET_SLOT(obj, slot), r_ins); - - JS_ASSERT(*pc != JSOP_INITPROP); - if (pc[JSOP_SETPROP_LENGTH] != JSOP_POP) - set(&l, r_ins); - return JSRS_CONTINUE; - } - - // The global object's shape is guarded at trace entry, all others need a guard here. + // Guard before anything else. LIns* map_ins = map(obj_ins); - LIns* ops_ins; - if (!map_is_native(obj->map, map_ins, ops_ins, offsetof(JSObjectOps, setProperty))) - ABORT_TRACE("non-native map"); + CHECK_STATUS(guardNativePropertyOp(obj, map_ins)); + jsuword pcval; + CHECK_STATUS(guardPropertyCacheHit(obj_ins, map_ins, obj, obj2, entry, pcval)); + JS_ASSERT(scope->object == obj2); + JS_ASSERT(scope->has(sprop)); + JS_ASSERT_IF(obj2 != obj, sprop->attrs & JSPROP_SHARED); - LIns* shape_ins = addName(lir->insLoad(LIR_ld, map_ins, offsetof(JSScope, shape)), "shape"); - guard(true, - addName(lir->ins2i(LIR_eq, shape_ins, entry->kshape), "guard(kshape)(record_SetPropHit)"), - BRANCH_EXIT); - - uint32 vshape = PCVCAP_SHAPE(entry->vcap); - if (entry->kshape != vshape) { - LIns *vshape_ins = lir->insLoad(LIR_ld, - lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, runtime)), - offsetof(JSRuntime, protoHazardShape)); - guard(true, - addName(lir->ins2i(LIR_eq, vshape_ins, vshape), "guard(vshape)(record_SetPropHit)"), - MISMATCH_EXIT); + // Add a property to the object if necessary. 
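The two loops above that locate obj2 decode hop counts packed into the low tag bits of entry->vcap: first scope-chain (parent) hops, then prototype hops. A toy model of that walk with assumed bit widths (the real PCVCAP_PROTOBITS/PCVCAP_PROTOMASK values are defined in the interpreter headers and are not reproduced here):

#include <cstdio>

enum { TOY_PROTO_BITS = 4, TOY_PROTO_MASK = (1 << TOY_PROTO_BITS) - 1 };  // widths assumed

struct ToyObj { ToyObj* parent; ToyObj* proto; };

static ToyObj* walkToHolder(ToyObj* obj, unsigned tag) {
    for (unsigned i = tag >> TOY_PROTO_BITS; i; i--)   // scope hops follow the parent chain
        obj = obj->parent;
    for (unsigned j = tag & TOY_PROTO_MASK; j; j--)    // proto hops follow the prototype chain
        obj = obj->proto;
    return obj;
}

int main() {
    ToyObj proto = { nullptr, nullptr };
    ToyObj scope = { nullptr, &proto };
    ToyObj leaf = { &scope, nullptr };
    unsigned tag = (1u << TOY_PROTO_BITS) | 1u;        // one scope hop, then one proto hop
    std::printf("%d\n", walkToHolder(&leaf, tag) == &proto);   // 1
    return 0;
}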
+ if (entry->adding()) { + JS_ASSERT(!(sprop->attrs & JSPROP_SHARED)); + if (obj == globalObj) + ABORT_TRACE("adding a property to the global object"); LIns* args[] = { INS_CONSTPTR(sprop), obj_ins, cx_ins }; LIns* ok_ins = lir->insCall(&js_AddProperty_ci, args); guard(false, lir->ins_eq0(ok_ins), OOM_EXIT); } - LIns* dslots_ins = NULL; - LIns* v_ins = get(&r); - LIns* boxed_ins = v_ins; - box_jsval(r, boxed_ins); - CHECK_STATUS(native_set(obj_ins, sprop, dslots_ins, boxed_ins)); + v_ins = get(&v); + return nativeSet(obj, obj_ins, sprop, v, v_ins); +} +JS_REQUIRES_STACK JSRecordingStatus +TraceRecorder::record_SetPropHit(JSPropCacheEntry* entry, JSScopeProperty* sprop) +{ + jsval& r = stackval(-1); + jsval& l = stackval(-2); + LIns* v_ins; + CHECK_STATUS(setProp(l, entry, sprop, r, v_ins)); + + jsbytecode* pc = cx->fp->regs->pc; if (*pc != JSOP_INITPROP && pc[JSOP_SETPROP_LENGTH] != JSOP_POP) set(&l, v_ins); + return JSRS_CONTINUE; } @@ -9453,7 +9863,8 @@ TraceRecorder::record_JSOP_GETELEM() /* Property access using a string name or something we have to stringify. */ if (!JSVAL_IS_INT(idx)) { if (!JSVAL_IS_PRIMITIVE(idx)) - ABORT_TRACE("non-primitive index"); + ABORT_TRACE("object used as index"); + // If index is not a string, turn it into a string. if (!js_InternNonIntElementId(cx, obj, idx, &id)) ABORT_TRACE_ERROR("failed to intern non-int element id"); @@ -9497,13 +9908,13 @@ TraceRecorder::record_JSOP_GETELEM() // In this case, we are in the same frame where the arguments object was created. // The entry type map is not necessarily up-to-date, so we capture a new type map // for this point in the code. - unsigned stackSlots = js_NativeStackSlots(cx, 0/*callDepth*/); + unsigned stackSlots = NativeStackSlots(cx, 0 /* callDepth */); if (stackSlots * sizeof(JSTraceType) > NJ_MAX_SKIP_PAYLOAD_SZB) ABORT_TRACE("|arguments| requires saving too much stack"); JSTraceType* typemap = (JSTraceType*) lir->insSkip(stackSlots * sizeof(JSTraceType))->payload(); DetermineTypesVisitor detVisitor(*this, typemap); VisitStackSlots(detVisitor, cx, 0); - typemap_ins = INS_CONSTPTR(typemap + 2 /*callee,this*/); + typemap_ins = INS_CONSTPTR(typemap + 2 /* callee, this */); } else { // In this case, we are in a deeper frame from where the arguments object was // created. The type map at the point of the call out from the creation frame @@ -9514,7 +9925,7 @@ TraceRecorder::record_JSOP_GETELEM() typemap_ins = lir->ins2(LIR_add, fip_ins, INS_CONST(sizeof(FrameInfo) + 2/*callee,this*/ * sizeof(JSTraceType))); } - LIns* typep_ins = lir->ins2(LIR_add, typemap_ins, + LIns* typep_ins = lir->ins2(LIR_add, typemap_ins, lir->ins2(LIR_mul, idx_ins, INS_CONST(sizeof(JSTraceType)))); LIns* type_ins = lir->insLoad(LIR_ldcb, typep_ins, 0); guard(true, @@ -9523,7 +9934,7 @@ TraceRecorder::record_JSOP_GETELEM() BRANCH_EXIT); // Read the value out of the native stack area. 
- guard(true, lir->ins2(LIR_ult, idx_ins, INS_CONST(afp->argc)), + guard(true, lir->ins2(LIR_ult, idx_ins, INS_CONST(afp->argc)), snapshot(BRANCH_EXIT)); size_t stackOffset = -treeInfo->nativeStackBase + nativeStackOffset(&afp->argv[0]); LIns* args_addr_ins = lir->ins2(LIR_add, lirbuf->sp, INS_CONST(stackOffset)); @@ -9531,7 +9942,7 @@ TraceRecorder::record_JSOP_GETELEM() lir->ins2(LIR_mul, idx_ins, INS_CONST(sizeof(double)))); v_ins = stackLoad(argi_addr_ins, type); } else { - guard(false, lir->ins2(LIR_ult, idx_ins, INS_CONST(afp->argc)), + guard(false, lir->ins2(LIR_ult, idx_ins, INS_CONST(afp->argc)), snapshot(BRANCH_EXIT)); v_ins = INS_VOID(); } @@ -9635,7 +10046,6 @@ TraceRecorder::record_JSOP_SETELEM() jsval& idx = stackval(-2); jsval& lval = stackval(-3); - /* no guards for type checks, trace specialized this already */ if (JSVAL_IS_PRIMITIVE(lval)) ABORT_TRACE("left JSOP_SETELEM operand is not an object"); ABORT_IF_XML(lval); @@ -9649,6 +10059,7 @@ TraceRecorder::record_JSOP_SETELEM() if (!JSVAL_IS_INT(idx)) { if (!JSVAL_IS_PRIMITIVE(idx)) ABORT_TRACE("non-primitive index"); + // If index is not a string, turn it into a string. if (!js_InternNonIntElementId(cx, obj, idx, &id)) ABORT_TRACE_ERROR("failed to intern non-int element id"); @@ -9734,17 +10145,17 @@ TraceRecorder::record_JSOP_CALLNAME() return JSRS_CONTINUE; } -JS_DEFINE_CALLINFO_5(extern, UINT32, js_GetUpvarArgOnTrace, CONTEXT, UINT32, INT32, UINT32, +JS_DEFINE_CALLINFO_5(extern, UINT32, GetUpvarArgOnTrace, CONTEXT, UINT32, INT32, UINT32, DOUBLEPTR, 0, 0) -JS_DEFINE_CALLINFO_5(extern, UINT32, js_GetUpvarVarOnTrace, CONTEXT, UINT32, INT32, UINT32, +JS_DEFINE_CALLINFO_5(extern, UINT32, GetUpvarVarOnTrace, CONTEXT, UINT32, INT32, UINT32, DOUBLEPTR, 0, 0) -JS_DEFINE_CALLINFO_5(extern, UINT32, js_GetUpvarStackOnTrace, CONTEXT, UINT32, INT32, UINT32, +JS_DEFINE_CALLINFO_5(extern, UINT32, GetUpvarStackOnTrace, CONTEXT, UINT32, INT32, UINT32, DOUBLEPTR, 0, 0) /* - * Record LIR to get the given upvar. Return the LIR instruction for - * the upvar value. NULL is returned only on a can't-happen condition - * with an invalid typemap. The value of the upvar is returned as v. + * Record LIR to get the given upvar. Return the LIR instruction for the upvar + * value. NULL is returned only on a can't-happen condition with an invalid + * typemap. The value of the upvar is returned as v. */ JS_REQUIRES_STACK LIns* TraceRecorder::upvar(JSScript* script, JSUpvarArray* uva, uintN index, jsval& v) @@ -9765,8 +10176,8 @@ TraceRecorder::upvar(JSScript* script, JSUpvarArray* uva, uintN index, jsval& v) } /* - * The upvar is not in the current trace, so get the upvar value - * exactly as the interpreter does and unbox. + * The upvar is not in the current trace, so get the upvar value exactly as + * the interpreter does and unbox. 
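Reading arguments[i] on trace, as recorded above, indexes two parallel structures with the same dynamic index: the captured typemap (one byte per slot) and the native stack area (one double per slot), with the type guarded before the slot is read. A plain-array sketch of that indexing scheme (arrays stand in for the insSkip'd typemap and the native stack):

#include <cstdio>

typedef signed char TraceType;             // stands in for JSTraceType

static bool readArgSketch(const TraceType* typemap, const double* stackArea,
                          unsigned idx, TraceType expected, double* out) {
    if (typemap[idx] != expected)          // the recorder emits this check as a guard
        return false;                      // on trace, a mismatch is a side exit
    *out = stackArea[idx];                 // each argument occupies one double-sized slot
    return true;
}

int main() {
    TraceType types[] = { 3, 3 };          // made-up type codes
    double args[] = { 1.5, 2.5 };
    double v = 0;
    std::printf("%d %g\n", readArgSketch(types, args, 1, 3, &v), v);   // 1 2.5
    return 0;
}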
*/ uint32 level = script->staticLevel - UPVAR_FRAME_SKIP(cookie); uint32 cookieSlot = UPVAR_FRAME_SLOT(cookie); @@ -9774,16 +10185,16 @@ TraceRecorder::upvar(JSScript* script, JSUpvarArray* uva, uintN index, jsval& v) const CallInfo* ci; int32 slot; if (!fp->fun) { - ci = &js_GetUpvarStackOnTrace_ci; + ci = &GetUpvarStackOnTrace_ci; slot = cookieSlot; } else if (cookieSlot < fp->fun->nargs) { - ci = &js_GetUpvarArgOnTrace_ci; + ci = &GetUpvarArgOnTrace_ci; slot = cookieSlot; } else if (cookieSlot == CALLEE_UPVAR_SLOT) { - ci = &js_GetUpvarArgOnTrace_ci; + ci = &GetUpvarArgOnTrace_ci; slot = -2; } else { - ci = &js_GetUpvarVarOnTrace_ci; + ci = &GetUpvarVarOnTrace_ci; slot = cookieSlot - fp->fun->nargs; } @@ -9805,8 +10216,8 @@ TraceRecorder::upvar(JSScript* script, JSUpvarArray* uva, uintN index, jsval& v) } /* - * Generate LIR to load a value from the native stack. This method ensures that the - * correct LIR load operator is used. + * Generate LIR to load a value from the native stack. This method ensures that + * the correct LIR load operator is used. */ LIns* TraceRecorder::stackLoad(LIns* base, uint8 type) { @@ -9921,7 +10332,7 @@ TraceRecorder::interpretedFunctionCall(jsval& fval, JSFunction* fun, uintN argc, } // Generate a type map for the outgoing frame and stash it in the LIR - unsigned stackSlots = js_NativeStackSlots(cx, 0/*callDepth*/); + unsigned stackSlots = NativeStackSlots(cx, 0 /* callDepth */); if (sizeof(FrameInfo) + stackSlots * sizeof(JSTraceType) > NJ_MAX_SKIP_PAYLOAD_SZB) ABORT_TRACE("interpreted function call requires saving too much stack"); LIns* data = lir->insSkip(sizeof(FrameInfo) + stackSlots * sizeof(JSTraceType)); @@ -10039,9 +10450,9 @@ TraceRecorder::record_JSOP_APPLY() aobj = JSVAL_TO_OBJECT(vp[3]); aobj_ins = get(&vp[3]); - /* - * We trace dense arrays and arguments objects. The code we generate for apply - * uses imacros to handle a specific number of arguments. + /* + * We trace dense arrays and arguments objects. The code we generate + * for apply uses imacros to handle a specific number of arguments. */ if (OBJ_IS_DENSE_ARRAY(cx, aobj)) { guardDenseArray(aobj, aobj_ins); @@ -10097,25 +10508,27 @@ TraceRecorder::record_NativeCallComplete() jsbytecode* pc = cx->fp->regs->pc; JS_ASSERT(pendingTraceableNative); - JS_ASSERT(*pc == JSOP_CALL || *pc == JSOP_APPLY || *pc == JSOP_NEW); + JS_ASSERT(*pc == JSOP_CALL || *pc == JSOP_APPLY || *pc == JSOP_NEW || *pc == JSOP_SETPROP); jsval& v = stackval(-1); LIns* v_ins = get(&v); - /* At this point the generated code has already called the native function - and we can no longer fail back to the original pc location (JSOP_CALL) - because that would cause the interpreter to re-execute the native - function, which might have side effects. - - Instead, the snapshot() call below sees that we are currently parked on - a traceable native's JSOP_CALL instruction, and it will advance the pc - to restore by the length of the current opcode. If the native's return - type is jsval, snapshot() will also indicate in the type map that the - element on top of the stack is a boxed value which doesn't need to be - boxed if the type guard generated by unbox_jsval() fails. */ + /* + * At this point the generated code has already called the native function + * and we can no longer fail back to the original pc location (JSOP_CALL) + * because that would cause the interpreter to re-execute the native + * function, which might have side effects. 
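The accessor selection above picks one of three upvar-reading builtins and rebases the slot index depending on the frame's shape. A condensed decision table for that choice (the -2 callee convention is taken from the code above; the CALLEE_UPVAR_SLOT value here is a placeholder):

#include <cstdio>

enum UpvarKind { UPVAR_STACK, UPVAR_ARG, UPVAR_VAR };
enum { TOY_CALLEE_UPVAR_SLOT = 0xffff };   // placeholder, not the real constant

struct UpvarAccess { UpvarKind kind; int slot; };

// hasFun: the frame has a function; nargs: that function's argument count.
static UpvarAccess chooseUpvarAccessor(bool hasFun, unsigned nargs, unsigned cookieSlot) {
    if (!hasFun)
        return { UPVAR_STACK, (int) cookieSlot };        // read an expression-stack slot
    if (cookieSlot < nargs)
        return { UPVAR_ARG, (int) cookieSlot };          // read an argument slot
    if (cookieSlot == TOY_CALLEE_UPVAR_SLOT)
        return { UPVAR_ARG, -2 };                        // the callee lives at argv[-2]
    return { UPVAR_VAR, (int) (cookieSlot - nargs) };    // read a local variable slot
}

int main() {
    UpvarAccess a = chooseUpvarAccessor(true, 2, 5);
    std::printf("kind=%d slot=%d\n", a.kind, a.slot);    // kind=2 slot=3
    return 0;
}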
+ * + * Instead, the snapshot() call below sees that we are currently parked on + * a traceable native's JSOP_CALL instruction, and it will advance the pc + * to restore by the length of the current opcode. If the native's return + * type is jsval, snapshot() will also indicate in the type map that the + * element on top of the stack is a boxed value which doesn't need to be + * boxed if the type guard generated by unbox_jsval() fails. + */ if (JSTN_ERRTYPE(pendingTraceableNative) == FAIL_STATUS) { - // Keep cx->bailExit null when it's invalid. + /* Keep cx->bailExit null when it's invalid. */ lir->insStorei(INS_CONSTPTR(NULL), cx_ins, (int) offsetof(JSContext, bailExit)); LIns* status = lir->insLoad(LIR_ld, lirbuf->state, (int) offsetof(InterpState, builtinStatus)); @@ -10145,27 +10558,9 @@ TraceRecorder::record_NativeCallComplete() } set(&v, v_ins); - /* - * If this is a generic traceable native invocation, propagate the boolean return - * value of the native into builtinStatus. If the return value (v_ins) - * is true, status' == status. Otherwise status' = status | JSBUILTIN_ERROR. - * We calculate (rval&1)^1, which is 1 if rval is JS_FALSE (error), and then - * shift that by 1 which is JSBUILTIN_ERROR. - */ - JS_STATIC_ASSERT((1 - JS_TRUE) << 1 == 0); - JS_STATIC_ASSERT((1 - JS_FALSE) << 1 == JSBUILTIN_ERROR); - status = lir->ins2(LIR_or, - status, - lir->ins2i(LIR_lsh, - lir->ins2i(LIR_xor, - lir->ins2i(LIR_and, ok_ins, 1), - 1), - 1)); - lir->insStorei(status, lirbuf->state, (int) offsetof(InterpState, builtinStatus)); + propagateFailureToBuiltinStatus(ok_ins, status); } - guard(true, - lir->ins_eq0(status), - STATUS_EXIT); + guard(true, lir->ins_eq0(status), STATUS_EXIT); } JSRecordingStatus ok = JSRS_CONTINUE; @@ -10214,15 +10609,11 @@ TraceRecorder::name(jsval*& vp, LIns*& ins, bool& tracked) */ CHECK_STATUS(test_property_cache(obj, obj_ins, obj2, pcval)); - /* - * Abort if property doesn't exist (interpreter will report an error.) - */ + /* Abort if property doesn't exist (interpreter will report an error.) */ if (PCVAL_IS_NULL(pcval)) ABORT_TRACE("named property not found"); - /* - * Insist on obj being the directly addressed object. - */ + /* Insist on obj being the directly addressed object. */ if (obj2 != obj) ABORT_TRACE("name() hit prototype chain"); @@ -10303,9 +10694,8 @@ TraceRecorder::prop(JSObject* obj, LIns* obj_ins, uint32& slot, LIns*& v_ins) return JSRS_CONTINUE; } - /* Insist if setting on obj being the directly addressed object. */ - uint32 setflags = (cs.format & (JOF_SET | JOF_INCDEC | JOF_FOR)); - LIns* dslots_ins = NULL; + uint32 setflags = (cs.format & (JOF_INCDEC | JOF_FOR)); + JS_ASSERT(!(cs.format & JOF_SET)); /* Don't trace getter or setter calls, our caller wants a direct slot. */ if (PCVAL_IS_SPROP(pcval)) { @@ -10325,6 +10715,7 @@ TraceRecorder::prop(JSObject* obj, LIns* obj_ins, uint32& slot, LIns*& v_ins) LIns* args[] = { INS_CONSTPTR(sprop), obj_ins, cx_ins }; v_ins = lir->insCall(&js_CallGetter_ci, args); guard(false, lir->ins2(LIR_eq, v_ins, INS_CONST(JSVAL_ERROR_COOKIE)), OOM_EXIT); + /* * BIG FAT WARNING: This snapshot cannot be a BRANCH_EXIT, since * the value to the top of the stack is not the value we unbox. @@ -10364,11 +10755,12 @@ TraceRecorder::prop(JSObject* obj, LIns* obj_ins, uint32& slot, LIns*& v_ins) * obj_ins the last proto-load. 
*/ while (obj != obj2) { - obj_ins = stobj_get_slot(obj_ins, JSSLOT_PROTO, dslots_ins); + obj_ins = stobj_get_fslot(obj_ins, JSSLOT_PROTO); obj = STOBJ_GET_PROTO(obj); } } + LIns* dslots_ins = NULL; v_ins = stobj_get_slot(obj_ins, slot, dslots_ins); unbox_jsval(STOBJ_GET_SLOT(obj, slot), v_ins, snapshot(BRANCH_EXIT)); @@ -10470,8 +10862,8 @@ TraceRecorder::denseArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_ if (JSVAL_TAG(*vp) == JSVAL_BOOLEAN) { /* - * If we read a hole from the array, convert it to undefined and guard that there - * are no indexed properties along the prototype chain. + * If we read a hole from the array, convert it to undefined and guard + * that there are no indexed properties along the prototype chain. */ LIns* br = lir->insBranch(LIR_jf, lir->ins2i(LIR_eq, v_ins, JSVAL_TO_PSEUDO_BOOLEAN(JSVAL_HOLE)), @@ -10479,9 +10871,7 @@ TraceRecorder::denseArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_ CHECK_STATUS(guardPrototypeHasNoIndexedProperties(obj, obj_ins, MISMATCH_EXIT)); br->setTarget(lir->ins0(LIR_label)); - /* - * Don't let the hole value escape. Turn it into an undefined. - */ + /* Don't let the hole value escape. Turn it into an undefined. */ v_ins = lir->ins2i(LIR_and, v_ins, ~(JSVAL_HOLE_FLAG >> JSVAL_TAGBITS)); } return JSRS_CONTINUE; @@ -10920,16 +11310,23 @@ TraceRecorder::record_JSOP_BINDNAME() } } - if (obj != globalObj) - ABORT_TRACE("JSOP_BINDNAME must return global object on trace"); + /* + * If obj is a js_CallClass object, then we are tracing a reference to an + * upvar in a heavyweight function. We cannot reach this point of the trace + * with a different call object because of the guard on the function call, + * so we can assume the result of the bindname is constant on this trace. + */ + if (obj != globalObj && OBJ_GET_CLASS(cx, obj) != &js_CallClass) + ABORT_TRACE("Can only trace JSOP_BINDNAME with global or call object"); - // The trace is specialized to this global object. Furthermore, - // we know it is the sole 'global' object on the scope chain: we - // set globalObj to the scope chain element with no parent, and we - // reached it starting from the function closure or the current - // scopeChain, so there is nothing inner to it. So this must be - // the right base object. - stack(0, INS_CONSTPTR(globalObj)); + /* + * The trace is specialized to this global object. Furthermore, we know it + * is the sole 'global' object on the scope chain: we set globalObj to the + * scope chain element with no parent, and we reached it starting from the + * function closure or the current scopeChain, so there is nothing inner to + * it. Therefore this must be the right base object. + */ + stack(0, INS_CONSTPTR(obj)); return JSRS_CONTINUE; } @@ -10940,10 +11337,12 @@ TraceRecorder::record_JSOP_SETNAME() JS_ASSERT(!JSVAL_IS_PRIMITIVE(l)); /* - * Trace cases that are global code or in lightweight functions scoped by - * the global object only. + * Trace only cases that are global code, in lightweight functions + * scoped by the global object only, or in call objects. */ JSObject* obj = JSVAL_TO_OBJECT(l); + if (OBJ_GET_CLASS(cx, obj) == &js_CallClass) + return JSRS_CONTINUE; if (obj != cx->fp->scopeChain || obj != globalObj) ABORT_TRACE("JSOP_SETNAME left operand is not the global object"); @@ -10996,15 +11395,18 @@ TraceRecorder::record_JSOP_IN() if (wasDeepAborted()) ABORT_TRACE("deep abort from property lookup"); - /* The interpreter fuses comparisons and the following branch, - so we have to do that here as well. 
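The hole elimination above is a single AND: the mask ~(JSVAL_HOLE_FLAG >> JSVAL_TAGBITS) implies that, on trace, the hole sentinel is the undefined pseudo-boolean with one extra flag bit set, so clearing that bit turns a hole into undefined without branching. A bit-level illustration with made-up encodings (only the HOLE == VOID | FLAG relationship matters):

#include <cstdio>

// Made-up pseudo-boolean encodings; chosen so that only the hole carries the flag bit.
enum { TOY_FALSE = 0, TOY_TRUE = 1, TOY_VOID = 2, TOY_HOLE_FLAG = 4, TOY_HOLE = TOY_VOID | TOY_HOLE_FLAG };

static unsigned clearHole(unsigned v) {
    return v & ~unsigned(TOY_HOLE_FLAG);   // hole becomes void; true/false/void are unchanged
}

int main() {
    std::printf("%u %u %u\n", clearHole(TOY_HOLE), clearHole(TOY_VOID), clearHole(TOY_TRUE));  // 2 2 1
    return 0;
}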
*/ + /* + * The interpreter fuses comparisons and the following branch, so we have + * to do that here as well. + */ fuseIf(cx->fp->regs->pc + 1, cond, x); - /* We update the stack after the guard. This is safe since - the guard bails out at the comparison and the interpreter - will therefore re-execute the comparison. This way the - value of the condition doesn't have to be calculated and - saved on the stack in most cases. */ + /* + * We update the stack after the guard. This is safe since the guard bails + * out at the comparison and the interpreter will therefore re-execute the + * comparison. This way the value of the condition doesn't have to be + * calculated and saved on the stack in most cases. + */ set(&lval, x); return JSRS_CONTINUE; } @@ -11765,9 +12167,11 @@ TraceRecorder::record_JSOP_STOP() JSStackFrame *fp = cx->fp; if (fp->imacpc) { - // End of imacro, so return true to the interpreter immediately. The - // interpreter's JSOP_STOP case will return from the imacro, back to - // the pc after the calling op, still in the same JSStackFrame. + /* + * End of imacro, so return true to the interpreter immediately. The + * interpreter's JSOP_STOP case will return from the imacro, back to + * the pc after the calling op, still in the same JSStackFrame. + */ atoms = fp->script->atomMap.vector; return JSRS_CONTINUE; } @@ -11836,7 +12240,7 @@ TraceRecorder::record_JSOP_ENTERBLOCK() JS_REQUIRES_STACK JSRecordingStatus TraceRecorder::record_JSOP_LEAVEBLOCK() { - /* We mustn't exit the lexical block we began recording in. */ + /* We mustn't exit the lexical block we began recording in. */ if (cx->fp->blockChain != lexicalBlock) return JSRS_CONTINUE; else @@ -11896,9 +12300,10 @@ TraceRecorder::record_JSOP_GETTHISPROP() LIns* this_ins; CHECK_STATUS(getThis(this_ins)); + /* - * It's safe to just use cx->fp->thisp here because getThis() returns JSRS_STOP if thisp - * is not available. + * It's safe to just use cx->fp->thisp here because getThis() returns + * JSRS_STOP if thisp is not available. */ CHECK_STATUS(getProp(cx->fp->thisp, this_ins)); return JSRS_CONTINUE; @@ -12058,7 +12463,7 @@ js_GetBuiltinFunction(JSContext *cx, uintN index) STOBJ_CLEAR_PARENT(funobj); JS_LOCK_GC(rt); - if (!rt->builtinFunctions[index]) /* retest now that the lock is held */ + if (!rt->builtinFunctions[index]) /* retest now that the lock is held */ rt->builtinFunctions[index] = funobj; else funobj = rt->builtinFunctions[index]; @@ -12197,10 +12602,13 @@ DBG_STUB(JSOP_DEFLOCALFUN_DBGFC) DBG_STUB(JSOP_LAMBDA_DBGFC) #ifdef JS_JIT_SPEW -/* Prints information about entry typemaps and unstable exits for all peers at a PC */ +/* + * Print information about entry typemaps and unstable exits for all peers + * at a PC. 
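The builtin-function cache in js_GetBuiltinFunction above uses the usual check/lock/retest publication pattern: the slot is probed without the lock, the function object is built, and the slot is retested under JS_LOCK_GC so that exactly one copy is published even if two threads race. A minimal stand-alone version of the locked half (std::mutex standing in for the GC lock):

#include <mutex>

static std::mutex cacheLock;               // stand-in for JS_LOCK_GC/JS_UNLOCK_GC
static void* builtinCache[32];

static void* getOrPublish(unsigned index, void* fresh) {
    std::lock_guard<std::mutex> guard(cacheLock);
    if (!builtinCache[index])              // retest now that the lock is held
        builtinCache[index] = fresh;
    return builtinCache[index];            // either our object or the racing winner's
}

int main() {
    int a = 1, b = 2;
    void* first = getOrPublish(0, &a);
    void* second = getOrPublish(0, &b);    // loses the race, gets the published &a back
    return first == second ? 0 : 1;
}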
+ */ void -js_DumpPeerStability(JSTraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape, - uint32 argc) +DumpPeerStability(JSTraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape, + uint32 argc) { Fragment* f; TreeInfo* ti; @@ -12222,7 +12630,7 @@ js_DumpPeerStability(JSTraceMonitor* tm, const void* ip, JSObject* globalObj, ui UnstableExit* uexit = ti->unstableExits; while (uexit != NULL) { debug_only_print0(LC_TMStats, "EXIT: "); - JSTraceType* m = getFullTypeMap(uexit->exit); + JSTraceType* m = GetFullTypeMap(uexit->exit); for (unsigned i = 0; i < uexit->exit->numStackSlots; i++) debug_only_printf(LC_TMStats, "S%d ", m[i]); for (unsigned i = 0; i < uexit->exit->numGlobalSlots; i++) @@ -12267,7 +12675,7 @@ js_StartTraceVis(JSContext *cx, JSObject *obj, if (!filename) goto error; ok = JS_StartTraceVis(filename); - JS_free(cx, filename); + cx->free(filename); } else { ok = JS_StartTraceVis(); } @@ -12288,7 +12696,7 @@ JS_StopTraceVis() if (!traceVisLogFile) return false; - fclose(traceVisLogFile); // not worth checking the result + fclose(traceVisLogFile); // not worth checking the result traceVisLogFile = NULL; return true; diff --git a/js/src/jstracer.h b/js/src/jstracer.h index 82a1b13cdb4..671ed840151 100644 --- a/js/src/jstracer.h +++ b/js/src/jstracer.h @@ -166,8 +166,10 @@ public: #if defined(JS_JIT_SPEW) || defined(MOZ_NO_VARADIC_MACROS) enum LC_TMBits { - /* Output control bits for all non-Nanojit code. Only use bits 16 - and above, since Nanojit uses 0 .. 15 itself. */ + /* + * Output control bits for all non-Nanojit code. Only use bits 16 and + * above, since Nanojit uses 0 .. 15 itself. + */ LC_TMMinimal = 1<<16, LC_TMTracer = 1<<17, LC_TMRecorder = 1<<18, @@ -192,14 +194,22 @@ extern nanojit::LogControl js_LogController; #define debug_only_stmt(stmt) \ stmt -#define debug_only_printf(mask, fmt, ...) \ - do { if ((js_LogController.lcbits & (mask)) > 0) { \ - js_LogController.printf(fmt, __VA_ARGS__); fflush(stdout); \ - }} while (0) -#define debug_only_print0(mask, str) \ - do { if ((js_LogController.lcbits & (mask)) > 0) { \ - js_LogController.printf(str); fflush(stdout); \ - }} while (0) + +#define debug_only_printf(mask, fmt, ...) \ + JS_BEGIN_MACRO \ + if ((js_LogController.lcbits & (mask)) > 0) { \ + js_LogController.printf(fmt, __VA_ARGS__); \ + fflush(stdout); \ + } \ + JS_END_MACRO + +#define debug_only_print0(mask, str) \ + JS_BEGIN_MACRO \ + if ((js_LogController.lcbits & (mask)) > 0) { \ + js_LogController.printf("%s", str); \ + fflush(stdout); \ + } \ + JS_END_MACRO #else @@ -281,7 +291,7 @@ typedef int8_t JSTraceType; /* * This indicates an invalid type or error. Note that it should not be used in typemaps, - * because it is the wrong size. It can only be used as a uint32, for example as the + * because it is the wrong size. It can only be used as a uint32, for example as the * return value from a function that returns a type as a uint32. 
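Note that the debug_only_print0 rewrite above also routes the message through a "%s" format instead of passing it as the format string itself, which matters whenever the logged text can contain a stray '%'. A two-line demonstration of the difference:

#include <cstdio>

int main() {
    const char* msg = "compiled 100%d of the loop";   // hypothetical log text containing '%'
    // std::printf(msg);         // would treat %d as a conversion and read a missing argument
    std::printf("%s\n", msg);    // prints the text verbatim
    return 0;
}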
*/ const uint32 TT_INVALID = uint32(-1); @@ -332,7 +342,6 @@ public: _(TIMEOUT) \ _(DEEP_BAIL) \ _(STATUS) - enum ExitType { #define MAKE_EXIT_CODE(x) x##_EXIT, @@ -374,21 +383,6 @@ struct VMSideExit : public nanojit::SideExit } }; -static inline JSTraceType* getStackTypeMap(nanojit::SideExit* exit) -{ - return (JSTraceType*)(((VMSideExit*)exit) + 1); -} - -static inline JSTraceType* getGlobalTypeMap(nanojit::SideExit* exit) -{ - return getStackTypeMap(exit) + ((VMSideExit*)exit)->numStackSlots; -} - -static inline JSTraceType* getFullTypeMap(nanojit::SideExit* exit) -{ - return getStackTypeMap(exit); -} - struct FrameInfo { JSObject* callee; // callee function object JSObject* block; // caller block chain head @@ -408,7 +402,7 @@ struct FrameInfo { * stack frame for the caller *before* the slots covered by spdist. * This may be negative if the caller is the top level script. * The key fact is that if we let 'cpos' be the start of the caller's - * native stack frame, then (cpos + spoffset) points to the first + * native stack frame, then (cpos + spoffset) points to the first * non-argument slot in the callee's native stack frame. */ int32 spoffset; @@ -548,7 +542,7 @@ struct JSRecordingStatus JSRS_ERROR = { JSRS_ERROR_code }; #define STATUS_ABORTS_RECORDING(s) ((s) == JSRS_STOP || (s) == JSRS_ERROR) #else enum JSRecordingStatus { - JSRS_ERROR, // Error; propagate to interpreter. + JSRS_ERROR, // Error; propagate to interpreter. JSRS_STOP, // Abort recording. JSRS_CONTINUE, // Continue recording. JSRS_IMACRO // Entered imacro; continue recording. @@ -687,6 +681,15 @@ class TraceRecorder : public avmplus::GCObject { nanojit::LIns*& ops_ins, size_t op_offset = 0); JS_REQUIRES_STACK JSRecordingStatus test_property_cache(JSObject* obj, nanojit::LIns* obj_ins, JSObject*& obj2, jsuword& pcval); + JS_REQUIRES_STACK JSRecordingStatus guardNativePropertyOp(JSObject* aobj, + nanojit::LIns* map_ins); + JS_REQUIRES_STACK JSRecordingStatus guardPropertyCacheHit(nanojit::LIns* obj_ins, + nanojit::LIns* map_ins, + JSObject* aobj, + JSObject* obj2, + JSPropCacheEntry* entry, + jsuword& pcval); + void stobj_set_fslot(nanojit::LIns *obj_ins, unsigned slot, nanojit::LIns* v_ins, const char *name); void stobj_set_dslot(nanojit::LIns *obj_ins, unsigned slot, nanojit::LIns*& dslots_ins, @@ -704,8 +707,6 @@ class TraceRecorder : public avmplus::GCObject { stobj_get_fslot(obj_ins, JSSLOT_PRIVATE), lir->insImmPtr((void*) ~mask)); } - JSRecordingStatus native_set(nanojit::LIns* obj_ins, JSScopeProperty* sprop, - nanojit::LIns*& dslots_ins, nanojit::LIns* v_ins); JSRecordingStatus native_get(nanojit::LIns* obj_ins, nanojit::LIns* pobj_ins, JSScopeProperty* sprop, nanojit::LIns*& dslots_ins, nanojit::LIns*& v_ins); @@ -722,6 +723,13 @@ class TraceRecorder : public avmplus::GCObject { JS_REQUIRES_STACK JSRecordingStatus getProp(jsval& v); JS_REQUIRES_STACK JSRecordingStatus getThis(nanojit::LIns*& this_ins); + JS_REQUIRES_STACK JSRecordingStatus nativeSet(JSObject* obj, nanojit::LIns* obj_ins, + JSScopeProperty* sprop, + jsval v, nanojit::LIns* v_ins); + JS_REQUIRES_STACK JSRecordingStatus setProp(jsval &l, JSPropCacheEntry* entry, + JSScopeProperty* sprop, + jsval &v, nanojit::LIns*& v_ins); + JS_REQUIRES_STACK void box_jsval(jsval v, nanojit::LIns*& v_ins); JS_REQUIRES_STACK void unbox_jsval(jsval v, nanojit::LIns*& v_ins, VMSideExit* exit); JS_REQUIRES_STACK bool guardClass(JSObject* obj, nanojit::LIns* obj_ins, JSClass* clasp, @@ -748,8 +756,15 @@ class TraceRecorder : public avmplus::GCObject { jsval* rval); 
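The removed getStackTypeMap/getGlobalTypeMap helpers above encoded the side-exit typemap layout: the per-stack-slot types begin immediately after the VMSideExit record, and the global-slot types follow them in the same allocation. A structural sketch of that layout with a simplified record (not the real VMSideExit):

#include <cstdlib>
#include <new>

typedef signed char TraceType;                        // stands in for JSTraceType

struct ExitRecord {                                   // simplified stand-in for VMSideExit
    unsigned numStackSlots;
    unsigned numGlobalSlots;
};

static TraceType* stackTypeMap(ExitRecord* exit) {
    return reinterpret_cast<TraceType*>(exit + 1);    // types are appended right after the record
}

static TraceType* globalTypeMap(ExitRecord* exit) {
    return stackTypeMap(exit) + exit->numStackSlots;  // global types follow the stack types
}

int main() {
    unsigned stackSlots = 3, globalSlots = 2;
    void* mem = std::malloc(sizeof(ExitRecord) + (stackSlots + globalSlots) * sizeof(TraceType));
    ExitRecord* exit = new (mem) ExitRecord();        // one allocation holds the record plus all types
    exit->numStackSlots = stackSlots;
    exit->numGlobalSlots = globalSlots;
    stackTypeMap(exit)[0] = 1;
    globalTypeMap(exit)[1] = 2;
    std::free(mem);
    return 0;
}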
JS_REQUIRES_STACK JSRecordingStatus interpretedFunctionCall(jsval& fval, JSFunction* fun, uintN argc, bool constructing); + JS_REQUIRES_STACK void propagateFailureToBuiltinStatus(nanojit::LIns *ok_ins, + nanojit::LIns *&status_ins); JS_REQUIRES_STACK JSRecordingStatus emitNativeCall(JSTraceableNative* known, uintN argc, nanojit::LIns* args[]); + JS_REQUIRES_STACK void emitNativePropertyOp(JSScope* scope, + JSScopeProperty* sprop, + nanojit::LIns* obj_ins, + bool setflag, + nanojit::LIns* boxed_ins); JS_REQUIRES_STACK JSRecordingStatus callTraceableNative(JSFunction* fun, uintN argc, bool constructing); JS_REQUIRES_STACK JSRecordingStatus callNative(uintN argc, JSOp mode); @@ -974,13 +989,13 @@ js_LogTraceVisState(TraceVisState s, TraceVisExitReason r) } } -static inline void +static inline void js_EnterTraceVisState(TraceVisState s, TraceVisExitReason r) { js_LogTraceVisState(s, r); } -static inline void +static inline void js_ExitTraceVisState(TraceVisExitReason r) { js_LogTraceVisState(S_EXITLAST, r); diff --git a/js/src/jsutil.cpp b/js/src/jsutil.cpp index e3a62878e3d..43d4eb37ff4 100644 --- a/js/src/jsutil.cpp +++ b/js/src/jsutil.cpp @@ -297,7 +297,7 @@ CallTree(void **bp) return NULL; /* Create a new callsite record. */ - site = (JSCallsite *) malloc(sizeof(JSCallsite)); + site = (JSCallsite *) js_malloc(sizeof(JSCallsite)); if (!site) return NULL; diff --git a/js/src/jsutil.h b/js/src/jsutil.h index 18264ccca45..6e0895b2fb9 100644 --- a/js/src/jsutil.h +++ b/js/src/jsutil.h @@ -44,6 +44,8 @@ #ifndef jsutil_h___ #define jsutil_h___ +#include + JS_BEGIN_EXTERN_C /* @@ -176,9 +178,30 @@ JS_Backtrace(int skip); extern JS_FRIEND_API(void) JS_DumpBacktrace(JSCallsite *trace); - #endif +static JS_INLINE void* js_malloc(size_t bytes) { + if (bytes < sizeof(void*)) /* for asyncFree */ + bytes = sizeof(void*); + return malloc(bytes); +} + +static JS_INLINE void* js_calloc(size_t bytes) { + if (bytes < sizeof(void*)) /* for asyncFree */ + bytes = sizeof(void*); + return calloc(bytes, 1); +} + +static JS_INLINE void* js_realloc(void* p, size_t bytes) { + if (bytes < sizeof(void*)) /* for asyncFree */ + bytes = sizeof(void*); + return realloc(p, bytes); +} + +static JS_INLINE void js_free(void* p) { + free(p); +} + JS_END_EXTERN_C #endif /* jsutil_h___ */ diff --git a/js/src/jsxdrapi.cpp b/js/src/jsxdrapi.cpp index b2d3efc1bf8..2eb9419d74e 100644 --- a/js/src/jsxdrapi.cpp +++ b/js/src/jsxdrapi.cpp @@ -90,7 +90,7 @@ typedef struct JSXDRMemState { if (MEM_LIMIT(xdr) && \ MEM_COUNT(xdr) + bytes > MEM_LIMIT(xdr)) { \ uint32 limit_ = JS_ROUNDUP(MEM_COUNT(xdr) + bytes, MEM_BLOCK);\ - void *data_ = JS_realloc((xdr)->cx, MEM_BASE(xdr), limit_); \ + void *data_ = (xdr)->cx->realloc(MEM_BASE(xdr), limit_); \ if (!data_) \ return 0; \ MEM_BASE(xdr) = (char *) data_; \ @@ -216,7 +216,7 @@ mem_tell(JSXDRState *xdr) static void mem_finalize(JSXDRState *xdr) { - JS_free(xdr->cx, MEM_BASE(xdr)); + xdr->cx->free(MEM_BASE(xdr)); } static JSXDROps xdrmem_ops = { @@ -239,13 +239,13 @@ JS_XDRInitBase(JSXDRState *xdr, JSXDRMode mode, JSContext *cx) JS_PUBLIC_API(JSXDRState *) JS_XDRNewMem(JSContext *cx, JSXDRMode mode) { - JSXDRState *xdr = (JSXDRState *) JS_malloc(cx, sizeof(JSXDRMemState)); + JSXDRState *xdr = (JSXDRState *) cx->malloc(sizeof(JSXDRMemState)); if (!xdr) return NULL; JS_XDRInitBase(xdr, mode, cx); if (mode == JSXDR_ENCODE) { - if (!(MEM_BASE(xdr) = (char *) JS_malloc(cx, MEM_BLOCK))) { - JS_free(cx, xdr); + if (!(MEM_BASE(xdr) = (char *) cx->malloc(MEM_BLOCK))) { + cx->free(xdr); return NULL; } } else 
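The js_malloc/js_calloc/js_realloc wrappers above round tiny requests up to sizeof(void*) "for asyncFree". The natural reading is that the background deallocator added by this patch chains to-be-freed blocks through their own storage, so every allocation must be able to hold one pointer; the actual mechanism lives in jstask.h/jstask.cpp, which this hunk does not show, so the intrusive free list below is only a hedged sketch of that idea:

#include <cstdlib>

struct FreedBlock { FreedBlock* next; };    // the link is stored inside the freed block itself

static FreedBlock* pendingFrees = nullptr;

static void asyncFreeSketch(void* p) {
    FreedBlock* node = static_cast<FreedBlock*>(p);   // requires the block to hold a pointer
    node->next = pendingFrees;                        // push onto the pending list
    pendingFrees = node;
}

static void drainFreesSketch() {                      // what a background thread would run
    while (FreedBlock* node = pendingFrees) {
        pendingFrees = node->next;
        std::free(node);
    }
}

int main() {
    asyncFreeSketch(std::malloc(sizeof(void*)));      // even a 1-byte request must fit the link
    drainFreesSketch();
    return 0;
}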
{ @@ -299,11 +299,11 @@ JS_XDRDestroy(JSXDRState *xdr) JSContext *cx = xdr->cx; xdr->ops->finalize(xdr); if (xdr->registry) { - JS_free(cx, xdr->registry); + cx->free(xdr->registry); if (xdr->reghash) JS_DHashTableDestroy((JSDHashTable *) xdr->reghash); } - JS_free(cx, xdr); + cx->free(xdr); } JS_PUBLIC_API(JSBool) @@ -381,18 +381,18 @@ JS_XDRCString(JSXDRState *xdr, char **sp) len = strlen(*sp); JS_XDRUint32(xdr, &len); if (xdr->mode == JSXDR_DECODE) { - if (!(*sp = (char *) JS_malloc(xdr->cx, len + 1))) + if (!(*sp = (char *) xdr->cx->malloc(len + 1))) return JS_FALSE; } if (!JS_XDRBytes(xdr, *sp, len)) { if (xdr->mode == JSXDR_DECODE) - JS_free(xdr->cx, *sp); + xdr->cx->free(*sp); return JS_FALSE; } if (xdr->mode == JSXDR_DECODE) { (*sp)[len] = '\0'; } else if (xdr->mode == JSXDR_FREE) { - JS_free(xdr->cx, *sp); + xdr->cx->free(*sp); *sp = NULL; } return JS_TRUE; @@ -452,7 +452,7 @@ JS_XDRString(JSXDRState *xdr, JSString **strp) return JS_FALSE; if (xdr->mode == JSXDR_DECODE) { - chars = (jschar *) JS_malloc(xdr->cx, (nchars + 1) * sizeof(jschar)); + chars = (jschar *) xdr->cx->malloc((nchars + 1) * sizeof(jschar)); if (!chars) return JS_FALSE; } else { @@ -471,7 +471,7 @@ JS_XDRString(JSXDRState *xdr, JSString **strp) bad: if (xdr->mode == JSXDR_DECODE) - JS_free(xdr->cx, chars); + xdr->cx->free(chars); return JS_FALSE; } @@ -662,7 +662,7 @@ js_XDRStringAtom(JSXDRState *xdr, JSAtom **atomp) * This is very uncommon. Don't use the tempPool arena for this as * most allocations here will be bigger than tempPool's arenasize. */ - chars = (jschar *) JS_malloc(cx, nchars * sizeof(jschar)); + chars = (jschar *) cx->malloc(nchars * sizeof(jschar)); if (!chars) return JS_FALSE; } @@ -670,7 +670,7 @@ js_XDRStringAtom(JSXDRState *xdr, JSAtom **atomp) if (XDRChars(xdr, chars, nchars)) atom = js_AtomizeChars(cx, chars, nchars, 0); if (chars != stackChars) - JS_free(cx, chars); + cx->free(chars); if (!atom) return JS_FALSE; @@ -709,7 +709,7 @@ JS_XDRRegisterClass(JSXDRState *xdr, JSClass *clasp, uint32 *idp) if (numclasses == maxclasses) { maxclasses = (maxclasses == 0) ? CLASS_REGISTRY_MIN : maxclasses << 1; registry = (JSClass **) - JS_realloc(xdr->cx, xdr->registry, maxclasses * sizeof(JSClass *)); + xdr->cx->realloc(xdr->registry, maxclasses * sizeof(JSClass *)); if (!registry) return JS_FALSE; xdr->registry = registry; diff --git a/js/src/jsxml.cpp b/js/src/jsxml.cpp index 46cdb5e66b6..ad662c10029 100644 --- a/js/src/jsxml.cpp +++ b/js/src/jsxml.cpp @@ -460,7 +460,7 @@ qname_toString(JSContext *cx, uintN argc, jsval *vp) if (str && clasp == &js_AttributeNameClass) { length = str->length(); - chars = (jschar *) JS_malloc(cx, (length + 2) * sizeof(jschar)); + chars = (jschar *) cx->malloc((length + 2) * sizeof(jschar)); if (!chars) return JS_FALSE; *chars = '@'; @@ -468,7 +468,7 @@ qname_toString(JSContext *cx, uintN argc, jsval *vp) chars[++length] = 0; str = js_NewString(cx, chars, length); if (!str) { - JS_free(cx, chars); + cx->free(chars); return JS_FALSE; } } @@ -932,8 +932,12 @@ XMLArraySetCapacity(JSContext *cx, JSXMLArray *array, uint32 capacity) if (capacity == 0) { /* We could let realloc(p, 0) free this, but purify gets confused. 
*/ - if (array->vector) - free(array->vector); + if (array->vector) { + if (cx) + cx->free(array->vector); + else + js_free(array->vector); + } vector = NULL; } else { if ( @@ -941,7 +945,7 @@ XMLArraySetCapacity(JSContext *cx, JSXMLArray *array, uint32 capacity) (size_t)capacity > ~(size_t)0 / sizeof(void *) || #endif !(vector = (void **) - realloc(array->vector, capacity * sizeof(void *)))) { + js_realloc(array->vector, capacity * sizeof(void *)))) { if (cx) JS_ReportOutOfMemory(cx); return JS_FALSE; @@ -975,7 +979,7 @@ XMLArrayFinish(JSContext *cx, JSXMLArray *array) { JSXMLArrayCursor *cursor; - JS_free(cx, array->vector); + cx->free(array->vector); while ((cursor = array->cursors) != NULL) XMLArrayCursorFinish(cursor); @@ -1039,7 +1043,7 @@ XMLArrayAddMember(JSContext *cx, JSXMLArray *array, uint32 index, void *elt) (size_t)capacity > ~(size_t)0 / sizeof(void *) || #endif !(vector = (void **) - realloc(array->vector, capacity * sizeof(void *)))) { + js_realloc(array->vector, capacity * sizeof(void *)))) { JS_ReportOutOfMemory(cx); return JS_FALSE; } @@ -1120,10 +1124,10 @@ XMLArrayTruncate(JSContext *cx, JSXMLArray *array, uint32 length) if (length == 0) { if (array->vector) - free(array->vector); + cx->free(array->vector); vector = NULL; } else { - vector = (void **) realloc(array->vector, length * sizeof(void *)); + vector = (void **) js_realloc(array->vector, length * sizeof(void *)); if (!vector) return; } @@ -1854,7 +1858,7 @@ ParseXMLSource(JSContext *cx, JSString *src) length = constrlen(prefix) + urilen + constrlen(middle) + srclen + constrlen(suffix); - chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar)); + chars = (jschar *) cx->malloc((length + 1) * sizeof(jschar)); if (!chars) return NULL; @@ -1905,7 +1909,7 @@ ParseXMLSource(JSContext *cx, JSString *src) } } - JS_free(cx, chars); + cx->free(chars); return xml; #undef constrlen @@ -2138,7 +2142,7 @@ MakeXMLSpecialString(JSContext *cx, JSStringBuffer *sb, prefixlength + length + ((length2 != 0) ? 
1 + length2 : 0) + suffixlength; bp = base = (jschar *) - JS_realloc(cx, sb->base, (newlength + 1) * sizeof(jschar)); + cx->realloc(sb->base, (newlength + 1) * sizeof(jschar)); if (!bp) { js_FinishStringBuffer(sb); return NULL; @@ -2159,7 +2163,7 @@ MakeXMLSpecialString(JSContext *cx, JSStringBuffer *sb, str = js_NewString(cx, base, newlength); if (!str) - free(base); + cx->free(base); return str; } @@ -2210,7 +2214,7 @@ AppendAttributeValue(JSContext *cx, JSStringBuffer *sb, JSString *valstr) valstr = js_EscapeAttributeValue(cx, valstr, JS_TRUE); if (!valstr) { if (STRING_BUFFER_OK(sb)) { - free(sb->base); + cx->free(sb->base); sb->base = STRING_BUFFER_ERROR_BASE; } return; @@ -2482,7 +2486,7 @@ GeneratePrefix(JSContext *cx, JSString *uri, JSXMLArray *decls) if (STARTS_WITH_XML(cp, length) || !IsXMLName(cp, length)) { newlength = length + 2 + (size_t) log10((double) decls->length); bp = (jschar *) - JS_malloc(cx, (newlength + 1) * sizeof(jschar)); + cx->malloc((newlength + 1) * sizeof(jschar)); if (!bp) return NULL; @@ -2507,7 +2511,7 @@ GeneratePrefix(JSContext *cx, JSString *uri, JSXMLArray *decls) if (bp == cp) { newlength = length + 2 + (size_t) log10((double) n); bp = (jschar *) - JS_malloc(cx, (newlength + 1) * sizeof(jschar)); + cx->malloc((newlength + 1) * sizeof(jschar)); if (!bp) return NULL; js_strncpy(bp, cp, length); @@ -2534,7 +2538,7 @@ GeneratePrefix(JSContext *cx, JSString *uri, JSXMLArray *decls) } else { prefix = js_NewString(cx, bp, newlength); if (!prefix) - JS_free(cx, bp); + cx->free(bp); } return prefix; } @@ -5132,7 +5136,7 @@ xml_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op, if (length == 0) { cursor = NULL; } else { - cursor = (JSXMLArrayCursor *) JS_malloc(cx, sizeof *cursor); + cursor = (JSXMLArrayCursor *) cx->malloc(sizeof *cursor); if (!cursor) return JS_FALSE; XMLArrayCursorInit(cursor, &xml->xml_kids); @@ -5155,7 +5159,7 @@ xml_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op, cursor = (JSXMLArrayCursor *) JSVAL_TO_PRIVATE(*statep); if (cursor) { XMLArrayCursorFinish(cursor); - JS_free(cx, cursor); + cx->free(cursor); } *statep = JSVAL_NULL; break; @@ -5266,7 +5270,7 @@ js_EnumerateXMLValues(JSContext *cx, JSObject *obj, JSIterateOp enum_op, if (length == 0) { cursor = NULL; } else { - cursor = (JSXMLArrayCursor *) JS_malloc(cx, sizeof *cursor); + cursor = (JSXMLArrayCursor *) cx->malloc(sizeof *cursor); if (!cursor) return JS_FALSE; XMLArrayCursorInit(cursor, &xml->xml_kids); @@ -5301,7 +5305,7 @@ js_EnumerateXMLValues(JSContext *cx, JSObject *obj, JSIterateOp enum_op, if (cursor) { destroy: XMLArrayCursorFinish(cursor); - JS_free(cx, cursor); + cx->free(cursor); } *statep = JSVAL_NULL; break; @@ -7801,7 +7805,7 @@ js_AddAttributePart(JSContext *cx, JSBool isName, JSString *str, JSString *str2) str2->getCharsAndLength(chars2, len2); newlen = (isName) ? 
len + 1 + len2 : len + 2 + len2 + 1; - chars = (jschar *) JS_realloc(cx, chars, (newlen+1) * sizeof(jschar)); + chars = (jschar *) cx->realloc(chars, (newlen+1) * sizeof(jschar)); if (!chars) return NULL; @@ -8114,7 +8118,7 @@ xmlfilter_finalize(JSContext *cx, JSObject *obj) return; XMLArrayCursorFinish(&filter->cursor); - JS_free(cx, filter); + cx->free(filter); } JSClass js_XMLFilterClass = { @@ -8169,7 +8173,7 @@ js_StepXMLListFilter(JSContext *cx, JSBool initialized) if (!filterobj) return JS_FALSE; - filter = (JSXMLFilter *) JS_malloc(cx, sizeof *filter); + filter = (JSXMLFilter *) cx->malloc(sizeof *filter); if (!filter) return JS_FALSE; diff --git a/js/src/lirasm/lirasm.cpp b/js/src/lirasm/lirasm.cpp index 060460898f6..84f73113f73 100644 --- a/js/src/lirasm/lirasm.cpp +++ b/js/src/lirasm/lirasm.cpp @@ -44,7 +44,6 @@ #include #include #include -#include #ifdef AVMPLUS_UNIX #include @@ -54,6 +53,8 @@ #include #include +#include +#include #include "nanojit/nanojit.h" #include "jstracer.h" @@ -106,6 +107,88 @@ const int PTRRET = #endif ; +enum LirTokenType { + NAME, NUMBER, PUNCT, NEWLINE +}; + +struct LirToken { + LirTokenType type; + string data; + int lineno; +}; + +inline bool +startsWith(const string &s, const string &prefix) +{ + return s.size() >= prefix.size() && s.compare(0, prefix.length(), prefix) == 0; +} + +// LIR files must be ASCII, for simplicity. +class LirTokenStream { +public: + LirTokenStream(istream &in) : mIn(in), mLineno(0) {} + + bool get(LirToken &token) { + if (mLine.empty()) { + if (!getline(mIn, mLine)) + return false; + mLine += '\n'; + mLineno++; + } + mLine.erase(0, mLine.find_first_not_of(" \t\v\r")); + char c = mLine[0]; + size_t e = mLine.find_first_not_of("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_$.+-"); + if (startsWith(mLine, "->")) { + mLine.erase(0, 2); + token.type = PUNCT; + token.data = "->"; + } else if (e > 0) { + string s = mLine.substr(0, e); + mLine.erase(0, e); + if (e > 1 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')) + token.type = NUMBER; + else if (isdigit(s[0]) || (e > 1 && s[0] == '.' && isdigit(s[1]))) + token.type = NUMBER; + else + token.type = NAME; + token.data = s; + } else if (strchr(":,=[]()", c)) { + token.type = PUNCT; + token.data = c; + mLine.erase(0, 1); + } else if (c == ';' || c == '\n') { + token.type = NEWLINE; + token.data.clear(); + mLine.clear(); + } else { + cerr << mLineno << ": error: Unrecognized character in file." 
<< endl; + return false; + } + + token.lineno = mLineno; + return true; + } + + bool eat(LirTokenType type, const char *exact = NULL) { + LirToken token; + return (get(token) && token.type == type && (exact == NULL || token.data == exact)); + } + + bool getName(string &name) { + LirToken t; + if (get(t) && t.type == NAME) { + name = t.data; + return true; + } + return false; + } + +private: + istream &mIn; + string mLine; + int mLineno; +}; + class LirasmFragment { public: union { @@ -122,23 +205,46 @@ typedef map Fragments; class Lirasm { public: + Lirasm(bool verbose); + ~Lirasm(); + + void assemble(istream &in); + void lookupFunction(const string &name, CallInfo *&ci); + Fragmento *mFragmento; LirBuffer *mLirbuf; LogControl mLogc; bool mVerbose; - avmplus::AvmCore s_core; Fragments mFragments; - vector mCallInfos; + map > mOpMap; - Lirasm(bool verbose); - ~Lirasm(); + void bad(const string &msg) { + cerr << "error: " << msg << endl; + exit(1); + } + +private: + void handlePatch(LirTokenStream &in); + + avmplus::AvmCore mCore; }; -class LirasmAssembler { +class FragmentAssembler { +public: + FragmentAssembler(Lirasm &parent, const string &fragmentName); + ~FragmentAssembler(); + + void assembleFragment(LirTokenStream &in, bool implicitBegin, const LirToken *firstToken); + private: + // Prohibit copying. + FragmentAssembler(const FragmentAssembler &); + FragmentAssembler & operator=(const FragmentAssembler &); + Lirasm *mParent; + const string mFragName; Fragment *mFragment; - string mFragName; + vector mCallInfos; map mLabels; LirWriter *mLir; LirBufWriter *mBufWriter; @@ -146,37 +252,26 @@ private: LirWriter *mExprFilter; LirWriter *mVerboseWriter; multimap mFwdJumps; - map > op_map; size_t mLineno; LOpcode mOpcode; size_t mOpcount; - bool mInFrag; - char mReturnTypeBits; vector mTokens; - void lookupFunction(const char*, CallInfo *&); + void tokenizeLine(LirTokenStream &in, LirToken &token); void need(size_t); - istream& read_and_tokenize_line(istream&); - void tokenize(string const &tok_sep); - LIns *ref(string const &); + LIns *ref(const string &); LIns *do_skip(size_t); - LIns *assemble_call(string &); + LIns *assemble_call(const string &); LIns *assemble_general(); LIns *assemble_guard(); LIns *assemble_jump(); LIns *assemble_load(); - void bad(string const &msg); - void beginFragment(); + void bad(const string &msg); + void extract_any_label(string &lab, char lab_delim); void endFragment(); - void extract_any_label(string &op, string &lab, char lab_delim); - void patch(); - -public: - LirasmAssembler(Lirasm &); - void assemble(istream &); }; Function functions[] = { @@ -186,66 +281,6 @@ Function functions[] = { FN(free, I32 | (PTRARG<<2)) }; -void -LirasmAssembler::lookupFunction(const char *name, CallInfo *&ci) -{ - const size_t nfuns = sizeof(functions) / sizeof(functions[0]); - for (size_t i = 0; i < nfuns; i++) - if (strcmp(name, functions[i].name) == 0) { - *ci = functions[i].callInfo; - return; - } - - Fragments::const_iterator func = mParent->mFragments.find(name); - if (func != mParent->mFragments.end()) { - if (func->second.mReturnType == RT_FLOAT) { - CallInfo target = {(uintptr_t) func->second.rfloat, ARGSIZE_F, 0, - 0, nanojit::ABI_FASTCALL, func->first.c_str()}; - *ci = target; - - } else { - CallInfo target = {(uintptr_t) func->second.rint, ARGSIZE_LO, 0, - 0, nanojit::ABI_FASTCALL, func->first.c_str()}; - *ci = target; - } - } else { - ci = NULL; - } -} - -istream & -LirasmAssembler::read_and_tokenize_line(istream &in) -{ - char buf[1024]; - string tok_sep(" \n\t"); 
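// Illustrative aside, not part of the patch: the LirTokenStream class added
// above supersedes this hand-rolled splitter.  Assuming the character sets
// hard-coded in get(), a source line such as
//
//     two = int 2    ; a trailing comment
//
// comes back as NAME("two"), PUNCT("="), NAME("int"), NUMBER("2"), NEWLINE.
// The ';' case clears the rest of the line, so comments never reach the
// assembler at all.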
- - mTokens.clear(); - - if (in.getline(buf,sizeof(buf))) { - ++mLineno; - string line(buf); - - size_t comment = line.find("//"); - if (comment != string::npos) - line.resize(comment); - - line += '\n'; - - size_t start = 0; - size_t end = 0; - while((start = line.find_first_not_of(tok_sep, end)) != string::npos && - (end = line.find_first_of(tok_sep, start)) != string::npos) { - string ss = line.substr(start, (end-start)); - if (ss == "=") { - mTokens[mTokens.size()-1] += ss; - continue; - } - mTokens.push_back(ss); - } - } - return in; -} - template out lexical_cast(in arg) { @@ -258,18 +293,19 @@ lexical_cast(in arg) } int32_t -imm(string const &s) +imm(const string &s) { stringstream tmp(s); int32_t ret; if ((s.find("0x") == 0 || s.find("0X") == 0) && - (tmp >> hex >> ret && tmp.eof())) + (tmp >> hex >> ret && tmp.eof())) { return ret; + } return lexical_cast(s); } uint64_t -quad(string const &s) +quad(const string &s) { stringstream tmp1(s), tmp2(s); union { @@ -292,223 +328,9 @@ pop_front(vector &vec) cerr << "pop_front of empty vector" << endl; exit(1); } - t tmp = vec[0]; - vec.erase(vec.begin()); - return tmp; -} - -void -LirasmAssembler::bad(string const &msg) -{ - cerr << "instruction " << mLineno << ": " << msg << endl; - exit(1); -} - -void -LirasmAssembler::need(size_t n) -{ - if (mTokens.size() != n) - bad("need " + lexical_cast(n) - + " tokens, have " + lexical_cast(mTokens.size())); -} - -LIns* -LirasmAssembler::ref(string const &lab) -{ - if (mLabels.find(lab) == mLabels.end()) - bad("unknown label '" + lab + "'"); - return mLabels.find(lab)->second; -} - -LIns* -LirasmAssembler::do_skip(size_t i) -{ - LIns *s = mLir->insSkip(i); - memset(s->payload(), 0xba, i); - return s; -} - -LIns* -LirasmAssembler::assemble_jump() -{ - LIns *target = NULL; - LIns *condition = NULL; - - if (mOpcode == LIR_j) { - need(1); - } else { - need(2); - string cond = pop_front(mTokens); - condition = ref(cond); - } - string name = pop_front(mTokens); - if (mLabels.find(name) != mLabels.end()) { - target = ref(name); - return mLir->insBranch(mOpcode, condition, target); - } else { - LIns *ins = mLir->insBranch(mOpcode, condition, target); - mFwdJumps.insert(make_pair(name, ins)); - return ins; - } -} - -LIns* -LirasmAssembler::assemble_load() -{ - // Support implicit immediate-as-second-operand modes - // since, unlike sti/stqi, no immediate-displacement - // load opcodes were defined in LIR. - need(2); - if (mTokens[1].find("0x") == 0 || - mTokens[1].find("0x") == 0 || - mTokens[1].find_first_of("0123456789") == 0) { - return mLir->insLoad(mOpcode, - ref(mTokens[0]), - imm(mTokens[1])); - } - bad("immediate offset required for load"); - return NULL; // not reached -} - -LIns* -LirasmAssembler::assemble_call(string &op) -{ - CallInfo *ci = new CallInfo(); - mParent->mCallInfos.push_back(ci); - LIns* args[MAXARGS]; - - // Assembler syntax for a call: - // - // call 0x1234 fastcall a b c - // - // requires at least 2 args, - // fn address immediate and ABI token. 
- - if (mTokens.size() < 2) - bad("need at least address and ABI code for " + op); - - string func = pop_front(mTokens); - string abi = pop_front(mTokens); - - AbiKind _abi; - if (abi == "fastcall") - _abi = ABI_FASTCALL; - else if (abi == "stdcall") - _abi = ABI_STDCALL; - else if (abi == "thiscall") - _abi = ABI_THISCALL; - else if (abi == "cdecl") - _abi = ABI_CDECL; - else - bad("call abi name '" + abi + "'"); - ci->_abi = _abi; - - if (mTokens.size() > MAXARGS) - bad("too many args to " + op); - - if (func.find("0x") == 0) { - ci->_address = imm(func); - - ci->_cse = 0; - ci->_fold = 0; - -#ifdef DEBUG - ci->_name = "fn"; -#endif - - } else { - lookupFunction(func.c_str(), ci); - if (ci == NULL) - bad("invalid function reference " + func); - if (_abi != ci->_abi) - bad("invalid calling convention for " + func); - } - - ci->_argtypes = 0; - - for (size_t i = 0; i < mTokens.size(); ++i) { - args[i] = ref(mTokens[mTokens.size() - (i+1)]); - ci->_argtypes |= args[i]->isQuad() ? ARGSIZE_F : ARGSIZE_LO; - ci->_argtypes <<= 2; - } - - // Select return type from opcode. - // FIXME: callh needs special treatment currently - // missing from here. - if (mOpcode == LIR_call) - ci->_argtypes |= ARGSIZE_LO; - else - ci->_argtypes |= ARGSIZE_F; - - return mLir->insCall(ci, args); -} - -LIns* -LirasmAssembler::assemble_guard() -{ - LIns *exitIns = do_skip(sizeof(LasmSideExit)); - LasmSideExit* exit = (LasmSideExit*) exitIns->payload(); - memset(exit, 0, sizeof(LasmSideExit)); - exit->from = mFragment; - exit->target = NULL; - exit->line = mLineno; - - LIns *guardRec = do_skip(sizeof(GuardRecord)); - GuardRecord *rec = (GuardRecord*) guardRec->payload(); - memset(rec, 0, sizeof(GuardRecord)); - rec->exit = exit; - exit->addGuard(rec); - - need(mOpcount); - - if (mOpcode != LIR_loop) - mReturnTypeBits |= RT_GUARD; - - LIns *ins_cond; - if (mOpcode == LIR_xt || mOpcode == LIR_xf) - ins_cond = ref(pop_front(mTokens)); - else - ins_cond = NULL; - - if (!mTokens.empty()) - bad("too many arguments"); - - return mLir->insGuard(mOpcode, ins_cond, guardRec); -} - -LIns* -LirasmAssembler::assemble_general() -{ - if (mOpcount == 0) { - // 0-ary ops may, or may not, have an immediate - // thing wedged in them; depends on the op. We - // are lax and set it if it's provided. - LIns *ins = mLir->ins0(mOpcode); - if (mTokens.size() > 0) { - assert(mTokens.size() == 1); - ins->initLInsI(mOpcode, imm(mTokens[0])); - } - return ins; - } else { - need(mOpcount); - if (mOpcount == 1) { - if (mOpcode == LIR_ret) - mReturnTypeBits |= RT_INT32; - if (mOpcode == LIR_fret) - mReturnTypeBits |= RT_FLOAT; - - return mLir->ins1(mOpcode, - ref(mTokens[0])); - } else if (mOpcount == 2) { - return mLir->ins2(mOpcode, - ref(mTokens[0]), - ref(mTokens[1])); - } else { - bad("too many operands"); - } - } - // Never get here. 
- return NULL; + t tmp = vec[0]; + vec.erase(vec.begin()); + return tmp; } void @@ -575,26 +397,8 @@ dump_srecords(ostream &out, Fragment *frag) } } -void -LirasmAssembler::extract_any_label(string &op, - string &lab, - char lab_delim) -{ - if (op.size() > 1 && - op[op.size()-1] == lab_delim && - !mTokens.empty()) { - - lab = op; - op = pop_front(mTokens); - lab.erase(lab.size()-1); - - if (mLabels.find(lab) != mLabels.end()) - bad("duplicate label"); - } -} - -void -LirasmAssembler::beginFragment() +FragmentAssembler::FragmentAssembler(Lirasm &parent, const string &fragmentName) + : mParent(&parent), mFragName(fragmentName) { mFragment = new (&gc) Fragment(NULL); mFragment->lirbuf = mParent->mLirbuf; @@ -615,24 +419,249 @@ LirasmAssembler::beginFragment() } #endif - mInFrag = true; mReturnTypeBits = 0; mLir->ins0(LIR_start); + + mLineno = 0; +} + +FragmentAssembler::~FragmentAssembler() +{ + delete mVerboseWriter; + delete mExprFilter; + delete mCseFilter; + delete mBufWriter; + + for (size_t i = 0; i < mCallInfos.size(); ++i) + delete mCallInfos[i]; } void -LirasmAssembler::endFragment() +FragmentAssembler::bad(const string &msg) { - mInFrag = false; + cerr << "instruction " << mLineno << ": " << msg << endl; + exit(1); +} - if (mReturnTypeBits == 0) +void +FragmentAssembler::need(size_t n) +{ + if (mTokens.size() != n) { + bad("need " + lexical_cast(n) + + " tokens, have " + lexical_cast(mTokens.size())); + } +} + +LIns * +FragmentAssembler::ref(const string &lab) +{ + if (mLabels.find(lab) == mLabels.end()) + bad("unknown label '" + lab + "'"); + return mLabels.find(lab)->second; +} + +LIns * +FragmentAssembler::do_skip(size_t i) +{ + LIns *s = mLir->insSkip(i); + memset(s->payload(), 0xba, i); + return s; +} + +LIns * +FragmentAssembler::assemble_jump() +{ + LIns *target = NULL; + LIns *condition = NULL; + + if (mOpcode == LIR_j) { + need(1); + } else { + need(2); + string cond = pop_front(mTokens); + condition = ref(cond); + } + string name = pop_front(mTokens); + if (mLabels.find(name) != mLabels.end()) { + target = ref(name); + return mLir->insBranch(mOpcode, condition, target); + } else { + LIns *ins = mLir->insBranch(mOpcode, condition, target); + mFwdJumps.insert(make_pair(name, ins)); + return ins; + } +} + +LIns * +FragmentAssembler::assemble_load() +{ + // Support implicit immediate-as-second-operand modes + // since, unlike sti/stqi, no immediate-displacement + // load opcodes were defined in LIR. + need(2); + if (mTokens[1].find("0x") == 0 || + mTokens[1].find("0x") == 0 || + mTokens[1].find_first_of("0123456789") == 0) { + return mLir->insLoad(mOpcode, + ref(mTokens[0]), + imm(mTokens[1])); + } + bad("immediate offset required for load"); + return NULL; // not reached +} + +LIns * +FragmentAssembler::assemble_call(const string &op) +{ + CallInfo *ci = new CallInfo(); + mCallInfos.push_back(ci); + LIns *args[MAXARGS]; + + // Assembler syntax for a call: + // + // call 0x1234 fastcall a b c + // + // requires at least 2 args, + // fn address immediate and ABI token. 
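// Illustrative aside, not part of the patch: the argument-packing loop later
// in this function builds CallInfo::_argtypes two bits at a time.  Assuming
// each ARGSIZE_* code fits in two bits, a line such as
//
//     res = call 0x1234 fastcall a b c
//
// pulls the arguments in reverse order (c, b, a), ORs each size code into the
// low bits and then shifts left by two, and finally ORs in the return size,
// so the return type always ends up in bits 0-1.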
+ + if (mTokens.size() < 2) + bad("need at least address and ABI code for " + op); + + string func = pop_front(mTokens); + string abi = pop_front(mTokens); + + AbiKind _abi; + if (abi == "fastcall") + _abi = ABI_FASTCALL; + else if (abi == "stdcall") + _abi = ABI_STDCALL; + else if (abi == "thiscall") + _abi = ABI_THISCALL; + else if (abi == "cdecl") + _abi = ABI_CDECL; + else + bad("call abi name '" + abi + "'"); + ci->_abi = _abi; + + if (mTokens.size() > MAXARGS) + bad("too many args to " + op); + + if (func.find("0x") == 0) { + ci->_address = imm(func); + + ci->_cse = 0; + ci->_fold = 0; + +#ifdef DEBUG + ci->_name = "fn"; +#endif + } else { + mParent->lookupFunction(func, ci); + if (ci == NULL) + bad("invalid function reference " + func); + if (_abi != ci->_abi) + bad("invalid calling convention for " + func); + } + + ci->_argtypes = 0; + + for (size_t i = 0; i < mTokens.size(); ++i) { + args[i] = ref(mTokens[mTokens.size() - (i+1)]); + ci->_argtypes |= args[i]->isQuad() ? ARGSIZE_F : ARGSIZE_LO; + ci->_argtypes <<= 2; + } + + // Select return type from opcode. + // FIXME: callh needs special treatment currently + // missing from here. + if (mOpcode == LIR_call) + ci->_argtypes |= ARGSIZE_LO; + else + ci->_argtypes |= ARGSIZE_F; + + return mLir->insCall(ci, args); +} + +LIns * +FragmentAssembler::assemble_guard() +{ + LIns *exitIns = do_skip(sizeof(LasmSideExit)); + LasmSideExit* exit = (LasmSideExit*) exitIns->payload(); + memset(exit, 0, sizeof(LasmSideExit)); + exit->from = mFragment; + exit->target = NULL; + exit->line = mLineno; + + LIns *guardRec = do_skip(sizeof(GuardRecord)); + GuardRecord *rec = (GuardRecord*) guardRec->payload(); + memset(rec, 0, sizeof(GuardRecord)); + rec->exit = exit; + exit->addGuard(rec); + + need(mOpcount); + + if (mOpcode != LIR_loop) + mReturnTypeBits |= RT_GUARD; + + LIns *ins_cond; + if (mOpcode == LIR_xt || mOpcode == LIR_xf) + ins_cond = ref(pop_front(mTokens)); + else + ins_cond = NULL; + + if (!mTokens.empty()) + bad("too many arguments"); + + return mLir->insGuard(mOpcode, ins_cond, guardRec); +} + +LIns * +FragmentAssembler::assemble_general() +{ + if (mOpcount == 0) { + // 0-ary ops may, or may not, have an immediate + // thing wedged in them; depends on the op. We + // are lax and set it if it's provided. + LIns *ins = mLir->ins0(mOpcode); + if (mTokens.size() > 0) { + assert(mTokens.size() == 1); + ins->initLInsI(mOpcode, imm(mTokens[0])); + } + return ins; + } else { + need(mOpcount); + if (mOpcount == 1) { + if (mOpcode == LIR_ret) + mReturnTypeBits |= RT_INT32; + if (mOpcode == LIR_fret) + mReturnTypeBits |= RT_FLOAT; + + return mLir->ins1(mOpcode, + ref(mTokens[0])); + } else if (mOpcount == 2) { + return mLir->ins2(mOpcode, + ref(mTokens[0]), + ref(mTokens[1])); + } else { + bad("too many operands"); + } + } + // Never get here. 
+ return NULL; +} + +void +FragmentAssembler::endFragment() +{ + if (mReturnTypeBits == 0) { cerr << "warning: no return type in fragment '" << mFragName << "'" << endl; - if (mReturnTypeBits != RT_INT32 && mReturnTypeBits != RT_FLOAT - && mReturnTypeBits != RT_GUARD) + } + if (mReturnTypeBits != RT_INT32 && mReturnTypeBits != RT_FLOAT && + mReturnTypeBits != RT_GUARD) { cerr << "warning: multiple return types in fragment '" << mFragName << "'" << endl; - + } LIns *exitIns = do_skip(sizeof(SideExit)); SideExit* exit = (SideExit*) exitIns->payload(); memset(exit, 0, sizeof(SideExit)); @@ -645,15 +674,15 @@ LirasmAssembler::endFragment() if (mParent->mFragmento->assm()->error() != nanojit::None) { cerr << "error during assembly: "; switch (mParent->mFragmento->assm()->error()) { - case nanojit::OutOMem: cerr << "OutOMem"; break; - case nanojit::StackFull: cerr << "StackFull"; break; - case nanojit::RegionFull: cerr << "RegionFull"; break; - case nanojit::MaxLength: cerr << "MaxLength"; break; - case nanojit::MaxExit: cerr << "MaxExit"; break; - case nanojit::MaxXJump: cerr << "MaxXJump"; break; - case nanojit::UnknownPrim: cerr << "UnknownPrim"; break; - case nanojit::UnknownBranch: cerr << "UnknownBranch"; break; - case nanojit::None: cerr << "None"; break; + case nanojit::OutOMem: cerr << "OutOMem"; break; + case nanojit::StackFull: cerr << "StackFull"; break; + case nanojit::RegionFull: cerr << "RegionFull"; break; + case nanojit::MaxLength: cerr << "MaxLength"; break; + case nanojit::MaxExit: cerr << "MaxExit"; break; + case nanojit::MaxXJump: cerr << "MaxXJump"; break; + case nanojit::UnknownPrim: cerr << "UnknownPrim"; break; + case nanojit::UnknownBranch: cerr << "UnknownBranch"; break; + case nanojit::None: cerr << "None"; break; } cerr << endl; std::exit(1); @@ -663,170 +692,156 @@ LirasmAssembler::endFragment() f = &mParent->mFragments[mFragName]; switch (mReturnTypeBits) { - case RT_FLOAT: - default: - f->rfloat = reinterpret_cast(mFragment->code()); - f->mReturnType = RT_FLOAT; - break; - case RT_INT32: - f->rint = reinterpret_cast(mFragment->code()); - f->mReturnType = RT_INT32; - break; case RT_GUARD: f->rguard = reinterpret_cast(mFragment->code()); f->mReturnType = RT_GUARD; break; + case RT_FLOAT: + f->rfloat = reinterpret_cast(mFragment->code()); + f->mReturnType = RT_FLOAT; + break; + default: + f->rint = reinterpret_cast(mFragment->code()); + f->mReturnType = RT_INT32; + break; } - delete mVerboseWriter; - delete mExprFilter; - delete mCseFilter; - delete mBufWriter; - for (size_t i = 0; i < mParent->mCallInfos.size(); ++i) - delete mParent->mCallInfos[i]; - mParent->mCallInfos.clear(); - mParent->mFragments[mFragName].mLabels = mLabels; - mLabels.clear(); } void -LirasmAssembler::assemble(istream &in) +FragmentAssembler::tokenizeLine(LirTokenStream &in, LirToken &token) { -#define OPDEF(op, number, args, repkind) \ - op_map[#op] = make_pair(LIR_##op, args); -#define OPDEF64(op, number, args, repkind) \ - op_map[#op] = make_pair(LIR_##op, args); -#include "nanojit/LIRopcode.tbl" -#undef OPDEF -#undef OPDEF64 + mTokens.clear(); + mTokens.push_back(token.data); - op_map["alloc"] = op_map["ialloc"]; - op_map["param"] = op_map["iparam"]; + while (in.get(token)) { + if (token.type == NEWLINE) + break; + mTokens.push_back(token.data); + } +} - bool singleFrag = false; - bool first = true; - while(read_and_tokenize_line(in)) { +void +FragmentAssembler::extract_any_label(string &lab, char lab_delim) +{ + if (mTokens.size() > 2 && mTokens[1].size() == 1 && mTokens[1][0] == 
lab_delim) { + lab = pop_front(mTokens); + pop_front(mTokens); // remove punctuation - if (mParent->mLirbuf->outOMem()) { - cerr << "lirbuf out of memory" << endl; - exit(1); + if (mLabels.find(lab) != mLabels.end()) + bad("duplicate label"); + } +} + +void +FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, const LirToken *firstToken) +{ + LirToken token; + while (true) { + if (firstToken) { + token = *firstToken; + firstToken = NULL; + } else if (!in.get(token)) { + if (!implicitBegin) + bad("unexpected end of file in fragment '" + mFragName + "'"); + break; } - - if (mTokens.empty()) + if (token.type == NEWLINE) continue; + if (token.type != NAME) + bad("unexpected token '" + token.data + "'"); - string op = pop_front(mTokens); - - if (op == ".patch") { - tokenize("."); - patch(); - continue; + string op = token.data; + if (op == ".begin") + bad("nested fragments are not supported"); + if (op == ".end") { + if (implicitBegin) + bad(".end without .begin"); + if (!in.eat(NEWLINE)) + bad("extra junk after .end"); + break; } - if (!singleFrag) { - if (op == ".begin") { - if (mTokens.size() != 1) - bad("missing fragment name"); - if (mInFrag) - bad("nested fragments are not supported"); - - mFragName = pop_front(mTokens); - - beginFragment(); - first = false; - continue; - } else if (op == ".end") { - if (!mInFrag) - bad("expecting .begin before .end"); - if (!mTokens.empty()) - bad("too many tokens"); - endFragment(); - continue; - } - } - if (first) { - first = false; - singleFrag = true; - mFragName = "main"; - - beginFragment(); - } + mLineno = token.lineno; + tokenizeLine(in, token); string lab; LIns *ins = NULL; - extract_any_label(op, lab, ':'); + extract_any_label(lab, ':'); /* Save label and do any back-patching of deferred forward-jumps. */ if (!lab.empty()) { ins = mLir->ins0(LIR_label); - typedef multimap mulmap; + typedef multimap mulmap; typedef mulmap::const_iterator ci; - pair range = mFwdJumps.equal_range(lab); + pair range = mFwdJumps.equal_range(lab); for (ci i = range.first; i != range.second; ++i) { i->second->setTarget(ins); } mFwdJumps.erase(lab); lab.clear(); } - extract_any_label(op, lab, '='); + extract_any_label(lab, '='); - if (op_map.find(op) == op_map.end()) + assert(!mTokens.empty()); + op = pop_front(mTokens); + if (mParent->mOpMap.find(op) == mParent->mOpMap.end()) bad("unknown instruction '" + op + "'"); - pair entry = op_map[op]; + pair entry = mParent->mOpMap[op]; mOpcode = entry.first; mOpcount = entry.second; switch (mOpcode) { // A few special opcode cases. 
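// Illustrative aside, not part of the patch: mOpMap supplies the opcode and
// its operand count; the cases below handle opcodes whose operands need
// special parsing (jumps, immediates, loads, stores, guards, calls), and
// everything else falls through to assemble_general().  For example,
// assuming the usual lirasm syntax,
//
//     sti val base 8
//
// reaches the LIR_sti case with mTokens == {"val", "base", "8"} and becomes
// mLir->insStorei(ref("val"), ref("base"), 8).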
- case LIR_j: - case LIR_jt: - case LIR_jf: - case LIR_ji: + case LIR_j: + case LIR_jt: + case LIR_jf: + case LIR_ji: ins = assemble_jump(); break; - case LIR_int: + case LIR_int: need(1); ins = mLir->insImm(imm(mTokens[0])); break; - case LIR_quad: + case LIR_quad: need(1); ins = mLir->insImmq(quad(mTokens[0])); break; - case LIR_sti: - case LIR_stqi: + case LIR_sti: + case LIR_stqi: need(3); ins = mLir->insStorei(ref(mTokens[0]), ref(mTokens[1]), imm(mTokens[2])); break; - case LIR_ld: - case LIR_ldc: - case LIR_ldq: - case LIR_ldqc: - case LIR_ldcb: - case LIR_ldcs: + case LIR_ld: + case LIR_ldc: + case LIR_ldq: + case LIR_ldqc: + case LIR_ldcb: + case LIR_ldcs: ins = assemble_load(); break; - case LIR_iparam: + case LIR_iparam: need(2); ins = mLir->insParam(imm(mTokens[0]), imm(mTokens[1])); break; - case LIR_ialloc: + case LIR_ialloc: need(1); ins = mLir->insAlloc(imm(mTokens[0])); break; - case LIR_skip: + case LIR_skip: need(1); { int32_t count = imm(mTokens[0]); @@ -836,20 +851,21 @@ LirasmAssembler::assemble(istream &in) } break; - case LIR_xt: - case LIR_xf: - case LIR_x: - case LIR_xbarrier: - case LIR_loop: + case LIR_xt: + case LIR_xf: + case LIR_x: + case LIR_xbarrier: + case LIR_loop: ins = assemble_guard(); break; - case LIR_call: - case LIR_callh: - case LIR_fcall: + case LIR_call: + case LIR_callh: + case LIR_fcall: ins = assemble_call(op); break; - default: + + default: ins = assemble_general(); break; } @@ -857,47 +873,43 @@ LirasmAssembler::assemble(istream &in) assert(ins); if (!lab.empty()) mLabels.insert(make_pair(lab, ins)); - } - if (mInFrag && singleFrag) - endFragment(); - if (mInFrag) - bad("unexpected EOF"); - if (mParent->mLirbuf->outOMem()) { - cerr << "lirbuf out of memory" << endl; - exit(1); - } -} - -bool -has_flag(vector &args, string const &flag) -{ - for (vector::iterator i = args.begin(); - i != args.end(); ++i) { - if (*i == flag) { - args.erase(i); - return true; + if (mParent->mLirbuf->outOMem()) { + cerr << "lirbuf out of memory" << endl; + exit(1); } } - return false; + endFragment(); } - Lirasm::Lirasm(bool verbose) { mVerbose = verbose; nanojit::AvmCore::config.tree_opt = true; mLogc.lcbits = 0; - mFragmento = new (&gc) Fragmento(&s_core, &mLogc, 32); + mFragmento = new (&gc) Fragmento(&mCore, &mLogc, 32); mFragmento->labels = NULL; mLirbuf = new (&gc) LirBuffer(mFragmento); #ifdef DEBUG if (mVerbose) { mLogc.lcbits = LC_Assembly; - mFragmento->labels = new (&gc) LabelMap(&s_core); + mFragmento->labels = new (&gc) LabelMap(&mCore); mLirbuf->names = new (&gc) LirNameMap(&gc, mFragmento->labels); } #endif + + // Populate the mOpMap table. +#define OPDEF(op, number, args, repkind) \ + mOpMap[#op] = make_pair(LIR_##op, args); +#define OPDEF64(op, number, args, repkind) \ + mOpMap[#op] = make_pair(LIR_##op, args); +#include "nanojit/LIRopcode.tbl" +#undef OPDEF +#undef OPDEF64 + + // TODO - These should alias to the appropriate platform-specific LIR opcode. 
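// Illustrative aside, not part of the patch: each OPDEF/OPDEF64 row in
// nanojit/LIRopcode.tbl expands, through the macros above, into one table
// entry.  A hypothetical row
//
//     OPDEF(add, 32, 2, Op2)
//
// would become
//
//     mOpMap["add"] = make_pair(LIR_add, 2);
//
// giving the assembler both the opcode enum and its operand count; the two
// assignments below then let the generic "alloc"/"param" mnemonics reuse the
// 32-bit entries.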
+ mOpMap["alloc"] = mOpMap["ialloc"]; + mOpMap["param"] = mOpMap["iparam"]; } Lirasm::~Lirasm() @@ -912,49 +924,120 @@ Lirasm::~Lirasm() delete mFragmento; } -LirasmAssembler::LirasmAssembler(Lirasm &lasm) -{ - mParent = &lasm; - mInFrag = false; - mLineno = 0; -} - void -LirasmAssembler::tokenize(string const &tok_sep) +Lirasm::lookupFunction(const string &name, CallInfo *&ci) { - vector::iterator i; - for (i = mTokens.begin(); i < mTokens.end(); i++) - { - string line = *i; - size_t start = 0; - size_t end = 0; - while((start = line.find_first_not_of(tok_sep, end)) != string::npos && - (end = line.find_first_of(tok_sep, start)) != string::npos) { - const string ss = line.substr(start, (end-start)); - i->erase(start, end-start+1); - mTokens.insert(i++, ss); - mTokens.insert(i++, tok_sep); + const size_t nfuns = sizeof(functions) / sizeof(functions[0]); + for (size_t i = 0; i < nfuns; i++) { + if (name == functions[i].name) { + *ci = functions[i].callInfo; + return; } } + + Fragments::const_iterator func = mFragments.find(name); + if (func != mFragments.end()) { + if (func->second.mReturnType == RT_FLOAT) { + CallInfo target = {(uintptr_t) func->second.rfloat, ARGSIZE_F, 0, + 0, nanojit::ABI_FASTCALL, func->first.c_str()}; + *ci = target; + + } else { + CallInfo target = {(uintptr_t) func->second.rint, ARGSIZE_LO, 0, + 0, nanojit::ABI_FASTCALL, func->first.c_str()}; + *ci = target; + } + } else { + ci = NULL; + } } void -LirasmAssembler::patch() +Lirasm::assemble(istream &in) { - if (mTokens[1] != "." || mTokens[3] != "->") + LirTokenStream ts(in); + bool first = true; + + LirToken token; + while (ts.get(token)) { + if (mLirbuf->outOMem()) { + cerr << "lirbuf out of memory" << endl; + exit(1); + } + if (token.type == NEWLINE) + continue; + if (token.type != NAME) + bad("unexpected token '" + token.data + "'"); + + const string &op = token.data; + if (op == ".patch") { + handlePatch(ts); + } else if (op == ".begin") { + string name; + if (!ts.getName(name)) + bad("expected fragment name after .begin"); + if (!ts.eat(NEWLINE)) + bad("extra junk after .begin " + name); + + FragmentAssembler assembler(*this, name); + assembler.assembleFragment(ts, false, NULL); + first = false; + } else if (op == ".end") { + bad(".end without .begin"); + } else if (first) { + FragmentAssembler assembler(*this, "main"); + assembler.assembleFragment(ts, true, &token); + break; + } else { + bad("unexpected stray opcode '" + op + "'"); + } + } + + if (mLirbuf->outOMem()) { + cerr << "lirbuf out of memory" << endl; + exit(1); + } +} + +void +Lirasm::handlePatch(LirTokenStream &in) +{ + string src, fragName, guardName, destName; + + if (!in.getName(src) || !in.eat(PUNCT, "->") || !in.getName(destName)) bad("incorrect syntax"); + + // Break the src at '.'. This is awkward but the syntax looks nice. 
+ size_t j = src.find('.'); + if (j == string::npos || j == 0 || j == src.size() - 1) + bad("incorrect syntax"); + fragName = src.substr(0, j); + guardName = src.substr(j + 1); + Fragments::iterator i; - if ((i=mParent->mFragments.find(mTokens[0])) == mParent->mFragments.end()) + if ((i=mFragments.find(fragName)) == mFragments.end()) bad("invalid fragment reference"); LirasmFragment *frag = &i->second; - if (frag->mLabels.find(mTokens[2]) == frag->mLabels.end()) + if (frag->mLabels.find(guardName) == frag->mLabels.end()) bad("invalid guard reference"); - LIns *ins = frag->mLabels.find(mTokens[2])->second; - if ((i=mParent->mFragments.find(mTokens[4])) == mParent->mFragments.end()) + LIns *ins = frag->mLabels.find(guardName)->second; + if ((i=mFragments.find(destName)) == mFragments.end()) bad("invalid guard reference"); ins->record()->exit->target = i->second.fragptr; - mParent->mFragmento->assm()->patch(ins->record()->exit); + mFragmento->assm()->patch(ins->record()->exit); +} + +bool +has_flag(vector &args, const string &flag) +{ + for (vector::iterator i = args.begin(); i != args.end(); ++i) { + if (*i == flag) { + args.erase(i); + return true; + } + } + return false; } int @@ -996,7 +1079,7 @@ main(int argc, char **argv) } Lirasm lasm(verbose); - LirasmAssembler(lasm).assemble(in); + lasm.assemble(in); Fragments::const_iterator i; if (execute) { diff --git a/js/src/nanojit/NativeSparc.h b/js/src/nanojit/NativeSparc.h index 13e0b8dee32..7c7b43866b9 100644 --- a/js/src/nanojit/NativeSparc.h +++ b/js/src/nanojit/NativeSparc.h @@ -214,7 +214,7 @@ namespace nanojit int offset = (c->_address) - ((int)_nIns) + 4; \ int i = 0x40000000 | ((offset >> 2) & 0x3FFFFFFF); \ IMM32(i); \ - verbose_only(asm_output("call %s",(c->_name));) \ + asm_output("call %s",(c->_name)); \ } while (0) #define Format_2_1(rd, op2, imm22) do { \ @@ -292,548 +292,548 @@ namespace nanojit #define ADDCC(rs1, rs2, rd) \ do { \ - asm_output("addcc %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(2, rd, 0x10, rs1, 0, rs2); \ + asm_output("addcc %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define ADD(rs1, rs2, rd) \ do { \ - asm_output("add %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(2, rd, 0, rs1, 0, rs2); \ + asm_output("add %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define AND(rs1, rs2, rd) \ do { \ - asm_output("and %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(2, rd, 0x1, rs1, 0, rs2); \ + asm_output("and %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define BA(a, dsp22) \ do { \ - asm_output("ba %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x8, 0x2, dsp22); \ + asm_output("ba %p", _nIns + dsp22 - 1); \ } while (0) #define BE(a, dsp22) \ do { \ - asm_output("be %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x1, 0x2, dsp22); \ + asm_output("be %p", _nIns + dsp22 - 1); \ } while (0) #define BG(a, dsp22) \ do { \ - asm_output("bg %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0xA, 0x2, dsp22); \ + asm_output("bg %p", _nIns + dsp22 - 1); \ } while (0) #define BGU(a, dsp22) \ do { \ - asm_output("bgu %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0xC, 0x2, dsp22); \ + asm_output("bgu %p", _nIns + dsp22 - 1); \ } while (0) #define BGE(a, dsp22) \ do { \ - asm_output("bge %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0xB, 0x2, dsp22); \ + asm_output("bge %p", _nIns + dsp22 - 1); \ } while (0) #define BL(a, dsp22) \ do { \ - asm_output("bl %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x3, 0x2, dsp22); \ + asm_output("bl %p", _nIns + dsp22 - 1); \ } while (0) #define BLE(a, dsp22) \ do { \ - 
asm_output("ble %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x2, 0x2, dsp22); \ + asm_output("ble %p", _nIns + dsp22 - 1); \ } while (0) #define BLEU(a, dsp22) \ do { \ - asm_output("bleu %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x4, 0x2, dsp22); \ + asm_output("bleu %p", _nIns + dsp22 - 1); \ } while (0) #define BCC(a, dsp22) \ do { \ - asm_output("bcc %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0xd, 0x2, dsp22); \ + asm_output("bcc %p", _nIns + dsp22 - 1); \ } while (0) #define BCS(a, dsp22) \ do { \ - asm_output("bcs %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x5, 0x2, dsp22); \ + asm_output("bcs %p", _nIns + dsp22 - 1); \ } while (0) #define BVC(a, dsp22) \ do { \ - asm_output("bvc %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0xf, 0x2, dsp22); \ + asm_output("bvc %p", _nIns + dsp22 - 1); \ } while (0) #define BVS(a, dsp22) \ do { \ - asm_output("bvc %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x7, 0x2, dsp22); \ + asm_output("bvc %p", _nIns + dsp22 - 1); \ } while (0) #define BNE(a, dsp22) \ do { \ - asm_output("bne %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x9, 0x2, dsp22); \ + asm_output("bne %p", _nIns + dsp22 - 1); \ } while (0) #define FABSS(rs2, rd) \ do { \ - asm_output("fabs %s, %s", gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, 0, 0x9, rs2); \ + asm_output("fabs %s, %s", gpn(rs2+32), gpn(rd+32)); \ } while (0) #define FADDD(rs1, rs2, rd) \ do { \ - asm_output("faddd %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, rs1, 0x42, rs2); \ + asm_output("faddd %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ } while (0) #define FBE(a, dsp22) \ do { \ - asm_output("fbe %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x9, 0x6, dsp22); \ + asm_output("fbe %p", _nIns + dsp22 - 1); \ } while(0) #define FBNE(a, dsp22) \ do { \ - asm_output("fbne %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x1, 0x6, dsp22); \ + asm_output("fbne %p", _nIns + dsp22 - 1); \ } while(0) #define FBUE(a, dsp22) \ do { \ - asm_output("fbue %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0xA, 0x6, dsp22); \ + asm_output("fbue %p", _nIns + dsp22 - 1); \ } while(0) #define FBG(a, dsp22) \ do { \ - asm_output("fng %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x6, 0x6, dsp22); \ + asm_output("fng %p", _nIns + dsp22 - 1); \ } while(0) #define FBUG(a, dsp22) \ do { \ - asm_output("fbug %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x5, 0x6, dsp22); \ + asm_output("fbug %p", _nIns + dsp22 - 1); \ } while(0) #define FBGE(a, dsp22) \ do { \ - asm_output("fbge %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0xB, 0x6, dsp22); \ + asm_output("fbge %p", _nIns + dsp22 - 1); \ } while(0) #define FBUGE(a, dsp22) \ do { \ - asm_output("fbuge %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0xC, 0x6, dsp22); \ + asm_output("fbuge %p", _nIns + dsp22 - 1); \ } while(0) #define FBL(a, dsp22) \ do { \ - asm_output("fbl %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x4, 0x6, dsp22); \ + asm_output("fbl %p", _nIns + dsp22 - 1); \ } while(0) #define FBLE(a, dsp22) \ do { \ - asm_output("fble %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0xD, 0x6, dsp22); \ + asm_output("fble %p", _nIns + dsp22 - 1); \ } while(0) #define FCMPD(rs1, rs2) \ do { \ - asm_output("fcmpd %s, %s", gpn(rs1+32), gpn(rs2+32)); \ Format_3_9(2, 0, 0, 0x35, rs1, 0x52, rs2); \ + asm_output("fcmpd %s, %s", gpn(rs1+32), gpn(rs2+32)); \ } while (0) #define FSUBD(rs1, rs2, rd) \ do { \ - asm_output("fsubd %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, rs1, 0x46, rs2); \ + asm_output("fsubd %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ } while (0) #define FMULD(rs1, 
rs2, rd) \ do { \ - asm_output("fmuld %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, rs1, 0x4a, rs2); \ + asm_output("fmuld %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ } while (0) #define FDIVD(rs1, rs2, rd) \ do { \ - asm_output("fdivd %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, rs1, 0x4e, rs2); \ + asm_output("fdivd %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ } while (0) #define FMOVD(rs2, rd) \ do { \ - asm_output("fmovd %s, %s", gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, 0, 0x2, rs2); \ + asm_output("fmovd %s, %s", gpn(rs2+32), gpn(rd+32)); \ } while (0) #define FNEGD(rs2, rd) \ do { \ - asm_output("fnegd %s, %s", gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, 0, 0x6, rs2); \ + asm_output("fnegd %s, %s", gpn(rs2+32), gpn(rd+32)); \ } while (0) #define FITOD(rs2, rd) \ do { \ - asm_output("fitod %s, %s", gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, 0, 0xc8, rs2); \ + asm_output("fitod %s, %s", gpn(rs2+32), gpn(rd+32)); \ } while (0) #define JMPL(rs1, rs2, rd) \ do { \ - asm_output("jmpl [%s + %s]", gpn(rs1), gpn(rs2)); \ Format_3_1(2, rd, 0x38, rs1, 0, rs2); \ + asm_output("jmpl [%s + %s]", gpn(rs1), gpn(rs2)); \ } while (0) #define JMPLI(rs1, simm13, rd) \ do { \ - asm_output("jmpl [%s + %d]", gpn(rs1), simm13); \ Format_3_1I(2, rd, 0x38, rs1, simm13); \ + asm_output("jmpl [%s + %d]", gpn(rs1), simm13); \ } while (0) #define LDF(rs1, rs2, rd) \ do { \ - asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd+32)); \ Format_3_1(3, rd, 0x20, rs1, 0, rs2); \ + asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd+32)); \ } while (0) #define LDFI(rs1, simm13, rd) \ do { \ - asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd+32)); \ Format_3_1I(3, rd, 0x20, rs1, simm13); \ + asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd+32)); \ } while (0) #define LDUB(rs1, rs2, rd) \ do { \ - asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(3, rd, 0x1, rs1, 0, rs2); \ + asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define LDUBI(rs1, simm13, rd) \ do { \ - asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd)); \ Format_3_1I(3, rd, 0x1, rs1, simm13); \ + asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd)); \ } while (0) #define LDUH(rs1, rs2, rd) \ do { \ - asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(3, rd, 0x2, rs1, 0, rs2); \ + asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define LDUHI(rs1, simm13, rd) \ do { \ - asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd)); \ Format_3_1I(3, rd, 0x2, rs1, simm13); \ + asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd)); \ } while (0) #define LDSW(rs1, rs2, rd) \ do { \ - asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(3, rd, 0x8, rs1, 0, rs2); \ + asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define LDSWI(rs1, simm13, rd) \ do { \ - asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd)); \ Format_3_1I(3, rd, 0x8, rs1, simm13); \ + asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd)); \ } while (0) #define MOVE(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("move %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 1, cc1, cc0, rs); \ + asm_output("move %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVNE(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movne %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 9, cc1, cc0, rs); \ + 
asm_output("movne %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVL(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movl %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 3, cc1, cc0, rs); \ + asm_output("movl %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVLE(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movle %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 2, cc1, cc0, rs); \ + asm_output("movle %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVG(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movg %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 0xa, cc1, cc0, rs); \ + asm_output("movg %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVGE(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movge %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 0xb, cc1, cc0, rs); \ + asm_output("movge %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVCS(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movcs %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 5, cc1, cc0, rs); \ + asm_output("movcs %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVLEU(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movleu %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 4, cc1, cc0, rs); \ + asm_output("movleu %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVGU(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movgu %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 0xc, cc1, cc0, rs); \ + asm_output("movgu %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVCC(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movcc %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 0xd, cc1, cc0, rs); \ + asm_output("movcc %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVVC(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movvc %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 0xf, cc1, cc0, rs); \ + asm_output("movvc %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVEI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("move %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 1, cc1, cc0, simm11); \ + asm_output("move %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVFEI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("move %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 9, cc1, cc0, simm11); \ + asm_output("move %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVNEI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("move %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 9, cc1, cc0, simm11); \ + asm_output("move %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVLI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("move %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 3, cc1, cc0, simm11); \ + asm_output("move %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVFLI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("move %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 4, cc1, cc0, simm11); \ + asm_output("move %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVLEI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("movle %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 2, cc1, cc0, simm11); \ + asm_output("movle %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVFLEI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("movle %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 0xd, cc1, cc0, simm11); \ + asm_output("movle %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVGI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("movg %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 0xa, cc1, cc0, simm11); \ + asm_output("movg %d, 
%s", simm11, gpn(rd)); \ } while (0) #define MOVFGI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("movg %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 6, cc1, cc0, simm11); \ + asm_output("movg %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVGEI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("movge %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 0xb, cc1, cc0, simm11); \ + asm_output("movge %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVFGEI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("movge %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 0xb, cc1, cc0, simm11); \ + asm_output("movge %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVLEUI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("movleu %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 4, cc1, cc0, simm11); \ + asm_output("movleu %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVGUI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("movgu %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 0xc, cc1, cc0, simm11); \ + asm_output("movgu %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVCCI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("movcc %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 0xd, cc1, cc0, simm11); \ + asm_output("movcc %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVVSI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("movvs %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 7, cc1, cc0, simm11); \ + asm_output("movvs %d, %s", simm11, gpn(rd)); \ } while (0) #define MULX(rs1, rs2, rd) \ do { \ - asm_output("mul %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(2, rd, 0x9, rs1, 0, rs2); \ + asm_output("mul %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define NOP() \ do { \ - asm_output("nop"); \ Format_2_1(0, 0x4, 0); \ + asm_output("nop"); \ } while (0) #define ORI(rs1, simm13, rd) \ do { \ - asm_output("or %s, %d, %s", gpn(rs1), simm13, gpn(rd)); \ Format_3_1I(2, rd, 0x2, rs1, simm13); \ + asm_output("or %s, %d, %s", gpn(rs1), simm13, gpn(rd)); \ } while (0) #define OR(rs1, rs2, rd) \ do { \ - asm_output("or %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(2, rd, 0x2, rs1, 0, rs2); \ + asm_output("or %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define ORN(rs1, rs2, rd) \ do { \ - asm_output("orn %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(2, rd, 0x6, rs1, 0, rs2); \ + asm_output("orn %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define ANDCC(rs1, rs2, rd) \ do { \ - asm_output("andcc %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(2, rd, 0x11, rs1, 0, rs2); \ + asm_output("andcc %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define RESTORE(rs1, rs2, rd) \ do { \ - asm_output("restore"); \ Format_3_1(2, rd, 0x3D, rs1, 0, rs2); \ + asm_output("restore"); \ } while (0) #define SAVEI(rs1, simm13, rd) \ do { \ - asm_output("save %s, %d, %s", gpn(rs1), simm13, gpn(rd)); \ Format_3_1I(2, rd, 0x3C, rs1, simm13); \ + asm_output("save %s, %d, %s", gpn(rs1), simm13, gpn(rd)); \ } while (0) #define SAVE(rs1, rs2, rd) \ do { \ - asm_output("save %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(2, rd, 0x3C, rs1, 0, rs2); \ + asm_output("save %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define SETHI(imm22, rd) \ do { \ - asm_output("sethi %p, %s", imm22, gpn(rd)); \ Format_2_1(rd, 0x4, (imm22 >> 10)); \ + asm_output("sethi %p, %s", imm22, gpn(rd)); \ } while (0) #define SLL(rs1, rs2, rd) \ do { \ - asm_output("sll %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); 
\ Format_3_5(2, rd, 0x25, rs1, 0, rs2); \ + asm_output("sll %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define SRA(rs1, rs2, rd) \ do { \ - asm_output("sra %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_5(2, rd, 0x27, rs1, 0, rs2); \ + asm_output("sra %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define SRL(rs1, rs2, rd) \ do { \ - asm_output("srl %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_5(2, rd, 0x26, rs1, 0, rs2); \ + asm_output("srl %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define STF(rd, rs1, rs2) \ do { \ - asm_output("st %s, [%s + %s]", gpn(rd+32), gpn(rs1), gpn(rs2)); \ Format_3_1(3, rd, 0x24, rs1, 0, rs2); \ + asm_output("st %s, [%s + %s]", gpn(rd+32), gpn(rs1), gpn(rs2)); \ } while (0) #define STFI(rd, simm13, rs1) \ do { \ - asm_output("st %s, [%s + %d]", gpn(rd+32), gpn(rs1), simm13); \ Format_3_1I(3, rd, 0x24, rs1, simm13); \ + asm_output("st %s, [%s + %d]", gpn(rd+32), gpn(rs1), simm13); \ } while (0) #define STW(rd, rs2, rs1) \ do { \ - asm_output("st %s, [%s + %s]", gpn(rd), gpn(rs1), gpn(rs2)); \ Format_3_1(3, rd, 0x4, rs1, 0, rs2); \ + asm_output("st %s, [%s + %s]", gpn(rd), gpn(rs1), gpn(rs2)); \ } while (0) #define STWI(rd, simm13, rs1) \ do { \ - asm_output("st %s, [%s + %d]", gpn(rd), gpn(rs1), simm13); \ Format_3_1I(3, rd, 0x4, rs1, simm13); \ + asm_output("st %s, [%s + %d]", gpn(rd), gpn(rs1), simm13); \ } while (0) #define SUBCC(rs1, rs2, rd) \ do { \ - asm_output("subcc %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(2, rd, 0x14, rs1, 0, rs2); \ + asm_output("subcc %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define SUB(rs1, rs2, rd) \ do { \ - asm_output("sub %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(2, rd, 0x4, rs1, 0, rs2); \ + asm_output("sub %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define SUBI(rs1, simm13, rd) \ do { \ - asm_output("sub %s, %d, %s", gpn(rs1), simm13, gpn(rd)); \ Format_3_1I(2, rd, 0x4, rs1, simm13); \ + asm_output("sub %s, %d, %s", gpn(rs1), simm13, gpn(rd)); \ } while (0) #define XOR(rs1, rs2, rd) \ do { \ - asm_output("xor %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(2, rd, 0x3, rs1, 0, rs2); \ + asm_output("xor %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) // Returns true if imm below 13-bit unsigned immediate) diff --git a/js/src/prmjtime.cpp b/js/src/prmjtime.cpp index 61b99b65bdc..986f480a30b 100644 --- a/js/src/prmjtime.cpp +++ b/js/src/prmjtime.cpp @@ -109,7 +109,7 @@ PRMJ_LocalGMTDifference() #if defined(XP_WIN) && !defined(WINCE) /* Windows does not follow POSIX. Updates to the - * TZ environment variable are not reflected + * TZ environment variable are not reflected * immediately on that platform as they are * on UNIX systems without this call. */ @@ -170,8 +170,8 @@ static const JSInt64 win2un = JSLL_INIT(0x19DB1DE, 0xD53E8000); #if defined(HAVE_GETSYSTEMTIMEASFILETIME) inline void LowResTime(LPFILETIME lpft) -{ - GetSystemTimeAsFileTime(lpft); +{ + GetSystemTimeAsFileTime(lpft); } #elif defined(HAVE_SYSTEMTIMETOFILETIME) inline void @@ -229,9 +229,9 @@ NowCalibrate() LowResTime(&ft); } while (memcmp(&ftStart,&ft, sizeof(ft)) == 0); timeEndPeriod(1); - + #ifdef WINCE - calibration.granularity = (FILETIME2INT64(ft) - + calibration.granularity = (FILETIME2INT64(ft) - FILETIME2INT64(ftStart))/10; #endif /* @@ -581,7 +581,7 @@ PRMJ_DSTOffset(JSInt64 local_time) #if defined(XP_WIN) && !defined(WINCE) /* Windows does not follow POSIX. 
Updates to the - * TZ environment variable are not reflected + * TZ environment variable are not reflected * immediately on that platform as they are * on UNIX systems without this call. */ diff --git a/js/src/resource.h b/js/src/resource.h index 9301810e444..59dbde3775e 100644 --- a/js/src/resource.h +++ b/js/src/resource.h @@ -4,7 +4,7 @@ // // Next default values for new objects -// +// #ifdef APSTUDIO_INVOKED #ifndef APSTUDIO_READONLY_SYMBOLS #define _APS_NEXT_RESOURCE_VALUE 101 diff --git a/js/src/trace-test.js b/js/src/trace-test.js index df5f7746560..00c2dc12148 100644 --- a/js/src/trace-test.js +++ b/js/src/trace-test.js @@ -3660,6 +3660,51 @@ function testComparisons() testComparisons.expected = "no failures reported!"; test(testComparisons); +function testBug504520() { + // A bug involving comparisons. + var arr = [1/0, 1/0, 1/0, 1/0, 1/0, 0]; + assertEq(arr.length > RUNLOOP, true); + + var s = ''; + for (var i = 0; i < arr.length; i++) + arr[i] >= 1/0 ? null : (s += i); + assertEq(s, '5'); +} +test(testBug504520); + +function testBug504520Harder() { + // test 1024 similar cases + var vals = [1/0, -1/0, 0, 0/0]; + var ops = ["===", "!==", "==", "!=", "<", ">", "<=", ">="]; + for each (var x in vals) { + for each (var y in vals) { + for each (var op in ops) { + for each (var z in vals) { + // Assume eval is correct. This depends on the global + // Infinity property not having been reassigned. + var xz = eval(x + op + z); + var yz = eval(y + op + z); + + var arr = [x, x, x, x, x, y]; + assertEq(arr.length > RUNLOOP, true); + var expected = [xz, xz, xz, xz, xz, yz]; + + // ?: looks superfluous but that's what we're testing here + var fun = eval( + '(function (arr, results) {\n' + + ' for (let i = 0; i < arr.length; i++)\n' + + ' results.push(arr[i]' + op + z + ' ? "true" : "false");\n' + + '});\n'); + var actual = []; + fun(arr, actual); + assertEq("" + actual, "" + expected); + } + } + } + } +} +test(testBug504520Harder); + function testCaseAbort() { var four = "4"; @@ -5497,6 +5542,21 @@ testOwnPropertyWithInOperator.jitstats = { }; test(testEliminatedGuardWithinAnchor); +function testNativeSetter() { + var re = /foo/; + var N = RUNLOOP + 10; + for (var i = 0; i < N; i++) + re.lastIndex = i; + assertEq(re.lastIndex, N - 1); +} +testNativeSetter.jitstats = { + recorderStarted: 1, + recorderAborted: 0, + traceTriggered: 1, + sideExitIntoInterpreter: 1 +}; +test(testNativeSetter); + /***************************************************************************** * * * _____ _ _ _____ ______ _____ _______ *