Mirror of https://github.com/mozilla/gecko-dev.git
Merge
This commit is contained in:
Commit 536de5d141
@@ -340,7 +340,7 @@ function execute(n, x) {
} catch (e if e == BREAK && x.target == n) {
break;
} catch (e if e == CONTINUE && x.target == n) {
continue;
// Must run the update expression.
}
n.update && getValue(execute(n.update, x));
}

@@ -476,6 +476,20 @@ function Statement(t, x) {
if (--i < 0)
throw t.newSyntaxError("Label not found");
} while (ss[i].label != label);

/*
* Both break and continue to label need to be handled specially
* within a labeled loop, so that they target that loop. If not in
* a loop, then break targets its labeled statement. Labels can be
* nested so we skip all labels immediately enclosing the nearest
* non-label statement.
*/
while (i < ss.length - 1 && ss[i+1].type == LABEL)
i++;
if (i < ss.length - 1 && ss[i+1].isLoop)
i++;
else if (tt == CONTINUE)
throw t.newSyntaxError("Invalid continue");
} else {
do {
if (--i < 0) {
@@ -348,12 +348,20 @@ check-sync-dirs = $(PYTHON) $(srcdir)/config/check-sync-dirs.py
check::
$(check-sync-dirs) $(srcdir)/config $(MOZ_SYNC_BUILD_FILES)/config
$(check-sync-dirs) $(srcdir)/build $(MOZ_SYNC_BUILD_FILES)/build

check-valgrind::
$(check-sync-dirs) $(srcdir)/config $(MOZ_SYNC_BUILD_FILES)/config
$(check-sync-dirs) $(srcdir)/build $(MOZ_SYNC_BUILD_FILES)/build
endif

ifdef ENABLE_JIT
check::
$(wildcard $(RUN_TEST_PROGRAM)) $(PYTHON) -u $(srcdir)/trace-test/trace-test.py \
--no-slow --no-progress --tinderbox $(DIST)/bin/js$(BIN_SUFFIX)

check-valgrind::
$(wildcard $(RUN_TEST_PROGRAM)) $(PYTHON) -u $(srcdir)/trace-test/trace-test.py \
--valgrind --no-slow --no-progress --tinderbox $(DIST)/bin/js$(BIN_SUFFIX)
endif

DIST_GARBAGE = config.cache config.log config.status \
@@ -2481,6 +2481,7 @@ i?86-*)
;;
x86_64*-*)
AC_DEFINE(AVMPLUS_AMD64)
AC_DEFINE(AVMPLUS_64BIT)
;;
arm*-*)
AC_DEFINE(AVMPLUS_ARM)
@@ -54,6 +54,7 @@ JITSTAT(returnLoopExits)
JITSTAT(mergedLoopExits)
JITSTAT(noCompatInnerTrees)
JITSTAT(blacklisted)
JITSTAT(cacheFlushed)
JITSTAT(archIsIA32)
JITSTAT(archIsAMD64)
JITSTAT(archIs64BIT)
@@ -52,6 +52,7 @@ CPPSRCS = \
testXDR.cpp \
testIntString.cpp \
testSameValue.cpp \
testDefineGetterSetterNonEnumerable.cpp \
$(NULL)

DEFINES += -DEXPORT_JS_API
@@ -0,0 +1,51 @@
#include "tests.h"
#include "jsxdrapi.h"

static JSBool
native(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
{
return JS_TRUE;
}

static const char PROPERTY_NAME[] = "foo";

BEGIN_TEST(testDefineGetterSetterNonEnumerable)
{
jsvalRoot vobj(cx);
JSObject *obj = JS_NewObject(cx, NULL, NULL, NULL);
CHECK(obj);
vobj = OBJECT_TO_JSVAL(obj);

jsvalRoot vget(cx);
JSFunction *funGet = JS_NewFunction(cx, native, 0, 0, NULL, "get");
CHECK(funGet);

jsvalRoot vset(cx);
JSFunction *funSet = JS_NewFunction(cx, native, 1, 0, NULL, "set");
CHECK(funSet);

CHECK(JS_DefineProperty(cx, JSVAL_TO_OBJECT(vobj), PROPERTY_NAME,
JSVAL_VOID,
JS_DATA_TO_FUNC_PTR(JSPropertyOp, jsval(vget)),
JS_DATA_TO_FUNC_PTR(JSPropertyOp, jsval(vset)),
JSPROP_GETTER | JSPROP_SETTER | JSPROP_ENUMERATE));

CHECK(JS_DefineProperty(cx, JSVAL_TO_OBJECT(vobj), PROPERTY_NAME,
JSVAL_VOID,
JS_DATA_TO_FUNC_PTR(JSPropertyOp, jsval(vget)),
JS_DATA_TO_FUNC_PTR(JSPropertyOp, jsval(vset)),
JSPROP_GETTER | JSPROP_SETTER | JSPROP_PERMANENT));

JSBool found = JS_FALSE;
uintN attrs = 0;
CHECK(JS_GetPropertyAttributes(cx, JSVAL_TO_OBJECT(vobj), PROPERTY_NAME,
&attrs, &found));
CHECK(found);
CHECK(attrs & JSPROP_GETTER);
CHECK(attrs & JSPROP_SETTER);
CHECK(attrs & JSPROP_PERMANENT);
CHECK(!(attrs & JSPROP_ENUMERATE));

return true;
}
END_TEST(testDefineGetterSetterNonEnumerable)
@@ -572,61 +572,51 @@ JS_ValueToSource(JSContext *cx, jsval v)
JS_PUBLIC_API(JSBool)
JS_ValueToNumber(JSContext *cx, jsval v, jsdouble *dp)
{
JSTempValueRooter tvr;

CHECK_REQUEST(cx);
JS_PUSH_SINGLE_TEMP_ROOT(cx, v, &tvr);
*dp = js_ValueToNumber(cx, &tvr.u.value);
JS_POP_TEMP_ROOT(cx, &tvr);
return !JSVAL_IS_NULL(tvr.u.value);

JSAutoTempValueRooter tvr(cx, v);
*dp = js_ValueToNumber(cx, tvr.addr());
return !JSVAL_IS_NULL(tvr.value());
}

JS_PUBLIC_API(JSBool)
JS_ValueToECMAInt32(JSContext *cx, jsval v, int32 *ip)
{
JSTempValueRooter tvr;

CHECK_REQUEST(cx);
JS_PUSH_SINGLE_TEMP_ROOT(cx, v, &tvr);
*ip = js_ValueToECMAInt32(cx, &tvr.u.value);
JS_POP_TEMP_ROOT(cx, &tvr);
return !JSVAL_IS_NULL(tvr.u.value);

JSAutoTempValueRooter tvr(cx, v);
*ip = js_ValueToECMAInt32(cx, tvr.addr());
return !JSVAL_IS_NULL(tvr.value());
}

JS_PUBLIC_API(JSBool)
JS_ValueToECMAUint32(JSContext *cx, jsval v, uint32 *ip)
{
JSTempValueRooter tvr;

CHECK_REQUEST(cx);
JS_PUSH_SINGLE_TEMP_ROOT(cx, v, &tvr);
*ip = js_ValueToECMAUint32(cx, &tvr.u.value);
JS_POP_TEMP_ROOT(cx, &tvr);
return !JSVAL_IS_NULL(tvr.u.value);

JSAutoTempValueRooter tvr(cx, v);
*ip = js_ValueToECMAUint32(cx, tvr.addr());
return !JSVAL_IS_NULL(tvr.value());
}

JS_PUBLIC_API(JSBool)
JS_ValueToInt32(JSContext *cx, jsval v, int32 *ip)
{
JSTempValueRooter tvr;

CHECK_REQUEST(cx);
JS_PUSH_SINGLE_TEMP_ROOT(cx, v, &tvr);
*ip = js_ValueToInt32(cx, &tvr.u.value);
JS_POP_TEMP_ROOT(cx, &tvr);
return !JSVAL_IS_NULL(tvr.u.value);

JSAutoTempValueRooter tvr(cx, v);
*ip = js_ValueToInt32(cx, tvr.addr());
return !JSVAL_IS_NULL(tvr.value());
}

JS_PUBLIC_API(JSBool)
JS_ValueToUint16(JSContext *cx, jsval v, uint16 *ip)
{
JSTempValueRooter tvr;

CHECK_REQUEST(cx);
JS_PUSH_SINGLE_TEMP_ROOT(cx, v, &tvr);
*ip = js_ValueToUint16(cx, &tvr.u.value);
JS_POP_TEMP_ROOT(cx, &tvr);
return !JSVAL_IS_NULL(tvr.u.value);

JSAutoTempValueRooter tvr(cx, v);
*ip = js_ValueToUint16(cx, tvr.addr());
return !JSVAL_IS_NULL(tvr.value());
}

JS_PUBLIC_API(JSBool)
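The hunk above replaces matched JS_PUSH_SINGLE_TEMP_ROOT / JS_POP_TEMP_ROOT pairs with the stack-scoped JSAutoTempValueRooter, so the GC root is released automatically on every exit path. A minimal sketch of the same RAII idea, assuming a hypothetical RootList in place of the engine's rooter chain:

// Hypothetical, simplified analogue of the JSAutoTempValueRooter pattern:
// the constructor registers a value with a root list and the destructor
// unregisters it, replacing manual push/pop pairs.
#include <vector>

struct RootList {
    std::vector<long *> roots;            // stand-in for the engine's rooter chain
};

class AutoValueRooter {
  public:
    AutoValueRooter(RootList &list, long v) : mList(list), mValue(v) {
        mList.roots.push_back(&mValue);   // "push" in the constructor
    }
    ~AutoValueRooter() {
        mList.roots.pop_back();           // "pop" runs on *every* return path
    }
    long *addr() { return &mValue; }      // like tvr.addr()
    long value() const { return mValue; } // like tvr.value()
  private:
    RootList &mList;
    long mValue;
};

bool convert(RootList &list, long v, double *dp) {
    AutoValueRooter tvr(list, v);         // rooted for the whole scope
    *dp = double(*tvr.addr());
    return tvr.value() != 0;              // early returns no longer leak a root
}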
@@ -850,10 +840,6 @@ JS_DestroyRuntime(JSRuntime *rt)
{
#ifdef DEBUG
/* Don't hurt everyone in leaky ol' Mozilla with a fatal JS_ASSERT! */
if (rt->nativeEnumerators) {
fprintf(stderr,
"JS engine warning: leak of native enumerators is detected.\n");
}
if (!JS_CLIST_IS_EMPTY(&rt->contextList)) {
JSContext *cx, *iter = NULL;
uintN cxcount = 0;

@@ -2626,6 +2612,7 @@ JS_NewExternalString(JSContext *cx, jschar *chars, size_t length, intN type)
if (!str)
return NULL;
str->initFlat(chars, length);
cx->updateMallocCounter((length + 1) * sizeof(jschar));
return str;
}

@@ -3045,7 +3032,6 @@ JS_DefineObject(JSContext *cx, JSObject *obj, const char *name, JSClass *clasp,
return NULL;
if (!DefineProperty(cx, obj, name, OBJECT_TO_JSVAL(nobj), NULL, NULL, attrs,
0, 0)) {
cx->weakRoots.newborn[GCX_OBJECT] = NULL;
return NULL;
}
return nobj;

@@ -3956,6 +3942,7 @@ JS_Enumerate(JSContext *cx, JSObject *obj)

ida = NULL;
iter_state = JSVAL_NULL;
JSAutoEnumStateRooter tvr(cx, obj, &iter_state);

/* Get the number of properties to enumerate. */
if (!obj->enumerate(cx, JSENUMERATE_INIT, &iter_state, &num_properties))

@@ -3996,7 +3983,7 @@ JS_Enumerate(JSContext *cx, JSObject *obj)
return SetIdArrayLength(cx, ida, i);

error:
if (iter_state != JSVAL_NULL)
if (!JSVAL_IS_NULL(iter_state))
obj->enumerate(cx, JSENUMERATE_DESTROY, &iter_state, 0);
if (ida)
JS_DestroyIdArray(cx, ida);
@@ -401,7 +401,7 @@ IndexToId(JSContext* cx, JSObject* obj, jsdouble index, JSBool* hole, jsid* idp,
JSBool createAtom = JS_FALSE)
{
if (index <= JSVAL_INT_MAX) {
*idp = INT_TO_JSID(index);
*idp = INT_TO_JSID(int(index));
return JS_TRUE;
}

@@ -555,22 +555,16 @@ js_SetLengthProperty(JSContext *cx, JSObject *obj, jsdouble length)
JSBool
js_HasLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp)
{
JSErrorReporter older;
JSTempValueRooter tvr;
jsid id;
JSBool ok;

older = JS_SetErrorReporter(cx, NULL);
JS_PUSH_SINGLE_TEMP_ROOT(cx, JSVAL_NULL, &tvr);
id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom);
ok = obj->getProperty(cx, id, &tvr.u.value);
JSErrorReporter older = JS_SetErrorReporter(cx, NULL);
JSAutoTempValueRooter tvr(cx, JSVAL_NULL);
jsid id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom);
JSBool ok = obj->getProperty(cx, id, tvr.addr());
JS_SetErrorReporter(cx, older);
if (ok) {
*lengthp = ValueIsLength(cx, &tvr.u.value);
ok = !JSVAL_IS_NULL(tvr.u.value);
}
JS_POP_TEMP_ROOT(cx, &tvr);
return ok;
if (!ok)
return false;

*lengthp = ValueIsLength(cx, tvr.addr());
return !JSVAL_IS_NULL(tvr.value());
}

JSBool
@@ -1718,7 +1712,7 @@ InitArrayElements(JSContext *cx, JSObject *obj, jsuint start, jsuint count, jsva
if (!dp)
return JS_FALSE;
tmp[0] = DOUBLE_TO_JSVAL(dp);
JSAutoTempValueRooter(cx, JS_ARRAY_LENGTH(tmp), tmp);
JSAutoTempValueRooter tvr(cx, JS_ARRAY_LENGTH(tmp), tmp);
JSAutoTempIdRooter idr(cx);
do {
tmp[1] = *vector++;
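The one-word difference above is a real bug fix: the old line constructed an unnamed JSAutoTempValueRooter temporary, which C++ destroys at the end of the full expression, so tmp was unrooted for the rest of the loop. Naming the object extends the root to the end of the enclosing block. A minimal illustration of the pitfall, with a hypothetical Guard class:

// An unnamed temporary is destroyed at the end of its full expression,
// so the "protection" ends immediately; a named object lives to the end
// of the block.
#include <cstdio>

struct Guard {
    Guard()  { std::puts("acquire"); }
    ~Guard() { std::puts("release"); }
};

int main() {
    Guard();   // temporary: prints acquire/release back to back
    Guard g;   // named object: released only when main returns
    std::puts("work happens here");
    return 0;
}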
@@ -1809,12 +1803,8 @@ array_join(JSContext *cx, uintN argc, jsval *vp)
static JSBool
array_reverse(JSContext *cx, uintN argc, jsval *vp)
{
JSObject *obj;
JSTempValueRooter tvr;
jsuint len, half, i;
JSBool ok, hole, hole2;

obj = JS_THIS_OBJECT(cx, vp);
jsuint len;
JSObject *obj = JS_THIS_OBJECT(cx, vp);
if (!obj || !js_GetLengthProperty(cx, obj, &len))
return JS_FALSE;
*vp = OBJECT_TO_JSVAL(obj);

@@ -1852,22 +1842,19 @@ array_reverse(JSContext *cx, uintN argc, jsval *vp)
return JS_TRUE;
}

ok = JS_TRUE;
JS_PUSH_SINGLE_TEMP_ROOT(cx, JSVAL_NULL, &tvr);
half = len / 2;
for (i = 0; i < half; i++) {
ok = JS_CHECK_OPERATION_LIMIT(cx) &&
GetArrayElement(cx, obj, i, &hole, &tvr.u.value) &&
GetArrayElement(cx, obj, len - i - 1, &hole2, vp) &&
SetOrDeleteArrayElement(cx, obj, len - i - 1, hole, tvr.u.value) &&
SetOrDeleteArrayElement(cx, obj, i, hole2, *vp);
if (!ok)
break;
JSAutoTempValueRooter tvr(cx, JSVAL_NULL);
for (jsuint i = 0, half = len / 2; i < half; i++) {
JSBool hole, hole2;
if (!JS_CHECK_OPERATION_LIMIT(cx) ||
!GetArrayElement(cx, obj, i, &hole, tvr.addr()) ||
!GetArrayElement(cx, obj, len - i - 1, &hole2, vp) ||
!SetOrDeleteArrayElement(cx, obj, len - i - 1, hole, tvr.value()) ||
!SetOrDeleteArrayElement(cx, obj, i, hole2, *vp)) {
return false;
}
}
JS_POP_TEMP_ROOT(cx, &tvr);

*vp = OBJECT_TO_JSVAL(obj);
return ok;
return true;
}

typedef struct MSortArgs {
@@ -2609,7 +2596,7 @@ array_unshift(JSContext *cx, uintN argc, jsval *vp)
argv = JS_ARGV(cx, vp);
if (length > 0) {
if (OBJ_IS_DENSE_ARRAY(cx, obj) && !js_PrototypeHasIndexedProperties(cx, obj) &&
!INDEX_TOO_SPARSE(obj, newlen + argc)) {
!INDEX_TOO_SPARSE(obj, unsigned(newlen + argc))) {
JS_ASSERT(newlen + argc == length + argc);
if (!EnsureCapacity(cx, obj, length + argc))
return JS_FALSE;

@@ -2810,8 +2797,7 @@ array_concat(JSContext *cx, uintN argc, jsval *vp)
JSObject *aobj, *nobj;
jsuint length, alength, slot;
uintN i;
JSBool hole, ok;
JSTempValueRooter tvr;
JSBool hole;

/* Treat our |this| object as the first argument; see ECMA 15.4.4.4. */
argv = JS_ARGV(cx, vp) - 1;

@@ -2849,14 +2835,12 @@ array_concat(JSContext *cx, uintN argc, jsval *vp)
length = 0;
}

MUST_FLOW_THROUGH("out");
JS_PUSH_SINGLE_TEMP_ROOT(cx, JSVAL_NULL, &tvr);
JSAutoTempValueRooter tvr(cx, JSVAL_NULL);

/* Loop over [0, argc] to concat args into nobj, expanding all Arrays. */
for (i = 0; i <= argc; i++) {
ok = JS_CHECK_OPERATION_LIMIT(cx);
if (!ok)
goto out;
if (!JS_CHECK_OPERATION_LIMIT(cx))
return false;
v = argv[i];
if (!JSVAL_IS_PRIMITIVE(v)) {
JSObject *wobj;

@@ -2864,30 +2848,25 @@ array_concat(JSContext *cx, uintN argc, jsval *vp)
aobj = JSVAL_TO_OBJECT(v);
wobj = js_GetWrappedObject(cx, aobj);
if (OBJ_IS_ARRAY(cx, wobj)) {
ok = aobj->getProperty(cx, ATOM_TO_JSID(cx->runtime->atomState.lengthAtom),
&tvr.u.value);
if (!ok)
goto out;
alength = ValueIsLength(cx, &tvr.u.value);
ok = !JSVAL_IS_NULL(tvr.u.value);
if (!ok)
goto out;
jsid id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom);
if (!aobj->getProperty(cx, id, tvr.addr()))
return false;
alength = ValueIsLength(cx, tvr.addr());
if (JSVAL_IS_NULL(tvr.value()))
return false;
for (slot = 0; slot < alength; slot++) {
ok = JS_CHECK_OPERATION_LIMIT(cx) &&
GetArrayElement(cx, aobj, slot, &hole,
&tvr.u.value);
if (!ok)
goto out;
if (!JS_CHECK_OPERATION_LIMIT(cx) ||
!GetArrayElement(cx, aobj, slot, &hole, tvr.addr())) {
return false;
}

/*
* Per ECMA 262, 15.4.4.4, step 9, ignore non-existent
* properties.
*/
if (!hole) {
ok = SetArrayElement(cx, nobj, length + slot,
tvr.u.value);
if (!ok)
goto out;
if (!hole &&
!SetArrayElement(cx, nobj, length+slot, tvr.value())) {
return false;
}
}
length += alength;

@@ -2895,17 +2874,12 @@ array_concat(JSContext *cx, uintN argc, jsval *vp)
}
}

ok = SetArrayElement(cx, nobj, length, v);
if (!ok)
goto out;
if (!SetArrayElement(cx, nobj, length, v))
return false;
length++;
}

ok = js_SetLengthProperty(cx, nobj, length);

out:
JS_POP_TEMP_ROOT(cx, &tvr);
return ok;
return js_SetLengthProperty(cx, nobj, length);
}

static JSBool
@@ -3412,14 +3386,11 @@ js_Array(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
JS_STATIC_ASSERT(JSSLOT_PRIVATE == JSSLOT_ARRAY_LENGTH);
JS_STATIC_ASSERT(JSSLOT_ARRAY_LENGTH + 1 == JSSLOT_ARRAY_COUNT);

#ifdef JS_TRACER

JSObject* FASTCALL
JSObject* JS_FASTCALL
js_NewEmptyArray(JSContext* cx, JSObject* proto)
{
JS_ASSERT(OBJ_IS_ARRAY(cx, proto));

JS_ASSERT(JS_ON_TRACE(cx));
JSObject* obj = js_NewGCObject(cx, GCX_OBJECT);
if (!obj)
return NULL;

@@ -3437,12 +3408,13 @@ js_NewEmptyArray(JSContext* cx, JSObject* proto)
obj->dslots = NULL;
return obj;
}
#ifdef JS_TRACER
JS_DEFINE_CALLINFO_2(extern, OBJECT, js_NewEmptyArray, CONTEXT, OBJECT, 0, 0)
#endif

JSObject* FASTCALL
js_NewUninitializedArray(JSContext* cx, JSObject* proto, uint32 len)
JSObject* JS_FASTCALL
js_NewArrayWithSlots(JSContext* cx, JSObject* proto, uint32 len)
{
JS_ASSERT(JS_ON_TRACE(cx));
JSObject* obj = js_NewEmptyArray(cx, proto);
if (!obj)
return NULL;

@@ -3451,9 +3423,9 @@ js_NewUninitializedArray(JSContext* cx, JSObject* proto, uint32 len)
return NULL;
return obj;
}
JS_DEFINE_CALLINFO_3(extern, OBJECT, js_NewUninitializedArray, CONTEXT, OBJECT, UINT32, 0, 0)

#endif /* JS_TRACER */
#ifdef JS_TRACER
JS_DEFINE_CALLINFO_3(extern, OBJECT, js_NewArrayWithSlots, CONTEXT, OBJECT, UINT32, 0, 0)
#endif

JSObject *
js_InitArrayClass(JSContext *cx, JSObject *obj)

@@ -97,6 +97,13 @@ js_InitArrayClass(JSContext *cx, JSObject *obj);
extern bool
js_InitContextBusyArrayTable(JSContext *cx);

/*
* Creates a new array with the given length and proto (NB: NULL is not
* translated to Array.prototype), with len slots preallocated.
*/
extern JSObject * JS_FASTCALL
js_NewArrayWithSlots(JSContext* cx, JSObject* proto, uint32 len);

extern JSObject *
js_NewArrayObject(JSContext *cx, jsuint length, jsval *vector,
JSBool holey = JS_FALSE);
@@ -123,7 +123,7 @@ struct JSNativeTraceInfo {
#define _JS_PTR_ARGSIZE nanojit::ARGSIZE_P
#define _JS_PTR_RETSIZE nanojit::ARGSIZE_P

class ClosureVarInfo;
struct ClosureVarInfo;

/*
* Supported types for builtin functions.

@@ -224,6 +224,7 @@ class ClosureVarInfo;
#define _JS_CTYPE_FRAGMENT _JS_CTYPE(nanojit::Fragment *, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_CLASS _JS_CTYPE(JSClass *, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_DOUBLEPTR _JS_CTYPE(double *, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_CHARPTR _JS_CTYPE(char *, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_APNPTR _JS_CTYPE(js_ArgsPrivateNative *, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_CVIPTR _JS_CTYPE(const ClosureVarInfo *, _JS_PTR, --, --, INFALLIBLE)

@@ -471,7 +472,7 @@ JS_DECLARE_CALLINFO(js_Array_dense_setelem)
JS_DECLARE_CALLINFO(js_Array_dense_setelem_int)
JS_DECLARE_CALLINFO(js_Array_dense_setelem_double)
JS_DECLARE_CALLINFO(js_NewEmptyArray)
JS_DECLARE_CALLINFO(js_NewUninitializedArray)
JS_DECLARE_CALLINFO(js_NewArrayWithSlots)
JS_DECLARE_CALLINFO(js_ArrayCompPush)

/* Defined in jsfun.cpp. */
@@ -95,6 +95,8 @@ FinishThreadData(JSThreadData *data)
/* All GC-related things must be already removed at this point. */
for (size_t i = 0; i != JS_ARRAY_LENGTH(data->scriptsToGC); ++i)
JS_ASSERT(!data->scriptsToGC[i]);
for (size_t i = 0; i != JS_ARRAY_LENGTH(data->nativeEnumCache); ++i)
JS_ASSERT(!data->nativeEnumCache[i]);
#endif

js_FinishGSNCache(&data->gsnCache);

@@ -117,14 +119,12 @@ PurgeThreadData(JSContext *cx, JSThreadData *data)
tm->reservedDoublePoolPtr = tm->reservedDoublePool;

/*
* If we are about to regenerate shapes, we have to flush the JIT cache, too.
* If we are about to regenerate shapes, we have to flush the JIT cache,
* which will eventually abort any current recording.
*/
if (cx->runtime->gcRegenShapes)
tm->needFlush = JS_TRUE;

if (tm->recorder)
tm->recorder->deepAbort();

/*
* We want to keep tm->reservedObjects after the GC. So, unless we are
* shutting down, we don't purge them here and rather mark them during

@@ -136,6 +136,8 @@ PurgeThreadData(JSContext *cx, JSThreadData *data)

/* Destroy eval'ed scripts. */
js_DestroyScriptsToGC(cx, data);

js_PurgeCachedNativeEnumerators(cx, data);
}

#ifdef JS_THREADSAFE

@@ -261,6 +263,12 @@ thread_purger(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 /* index */,
if (JS_CLIST_IS_EMPTY(&thread->contextList)) {
JS_ASSERT(cx->thread != thread);
js_DestroyScriptsToGC(cx, &thread->data);

/*
* The following is potentially suboptimal as it also zeros the cache
* in data, but the code simplicity wins here.
*/
js_PurgeCachedNativeEnumerators(cx, &thread->data);
DestroyThread(thread);
return JS_DHASH_REMOVE;
}

@@ -557,7 +565,8 @@ DumpEvalCacheMeter(JSContext *cx)
);
for (uintN i = 0; i < JS_ARRAY_LENGTH(table); ++i) {
fprintf(fp, "%-8.8s %llu\n",
table[i].name, *(uint64 *)((uint8 *)ecm + table[i].offset));
table[i].name,
(unsigned long long int) *(uint64 *)((uint8 *)ecm + table[i].offset));
}
fprintf(fp, "hit ratio %g%%\n", ecm->hit * 100. / ecm->probe);
fprintf(fp, "avg steps %g\n", double(ecm->step) / ecm->probe);
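The fprintf change above addresses a common printf portability trap: a 64-bit counter type may be typedef'd to unsigned long on LP64 platforms, which does not match %llu through varargs. The fix is an explicit cast so the argument type always matches the format. A small sketch, assuming the standard uint64_t:

// On LP64 Unix, uint64_t is often 'unsigned long', which %llu does not match.
// Casting to unsigned long long makes the vararg agree with the format
// specifier on every target.
#include <cstdio>
#include <cstdint>

int main() {
    uint64_t hits = 1234567890123ULL;
    // Risky: std::printf("%llu\n", hits);        // type may be unsigned long
    std::printf("%llu\n", (unsigned long long)hits); // always matches %llu
    return 0;
}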
@@ -101,7 +101,7 @@ namespace nanojit {
class LabelMap;
#endif
extern "C++" {
template<typename K> class DefaultHash;
template<typename K> struct DefaultHash;
template<typename K, typename V, typename H> class HashMap;
template<typename T> class Seq;
}

@@ -148,15 +148,15 @@ struct JSTraceMonitor {
* last-ditch GC and suppress calls to JS_ReportOutOfMemory.
*
* !tracecx && !recorder: not on trace
* !tracecx && recorder && !recorder->deepAborted: recording
* !tracecx && recorder && recorder->deepAborted: deep aborted
* !tracecx && recorder: recording
* tracecx && !recorder: executing a trace
* tracecx && recorder: executing inner loop, recording outer loop
*/
JSContext *tracecx;

CLS(VMAllocator) allocator; // A chunk allocator for LIR.
CLS(nanojit::CodeAlloc) codeAlloc; // A general allocator for native code.
CLS(VMAllocator) dataAlloc; /* A chunk allocator for LIR. */
CLS(VMAllocator) tempAlloc; /* A temporary chunk allocator. */
CLS(nanojit::CodeAlloc) codeAlloc; /* An allocator for native code. */
CLS(nanojit::Assembler) assembler;
CLS(nanojit::LirBuffer) lirbuf;
CLS(nanojit::LirBuffer) reLirBuf;

@@ -197,8 +197,10 @@ struct JSTraceMonitor {
*/
CLS(REHashMap) reFragments;

/* Keep a list of recorders we need to abort on cache flush. */
CLS(TraceRecorder) abortStack;
/*
* A temporary allocator for RE recording.
*/
CLS(VMAllocator) reTempAlloc;

#ifdef DEBUG
/* Fields needed for fragment/guard profiling. */

@@ -298,6 +300,20 @@ struct JSThreadData {
*/
size_t gcMallocBytes;

/*
* Cache of reusable JSNativeEnumerators mapped by shape identifiers (as
* stored in scope->shape). This cache is nulled by the GC and protected
* by gcLock.
*/
#define NATIVE_ENUM_CACHE_LOG2 8
#define NATIVE_ENUM_CACHE_MASK JS_BITMASK(NATIVE_ENUM_CACHE_LOG2)
#define NATIVE_ENUM_CACHE_SIZE JS_BIT(NATIVE_ENUM_CACHE_LOG2)

#define NATIVE_ENUM_CACHE_HASH(shape) \
((((shape) >> NATIVE_ENUM_CACHE_LOG2) ^ (shape)) & NATIVE_ENUM_CACHE_MASK)

jsuword nativeEnumCache[NATIVE_ENUM_CACHE_SIZE];

#ifdef JS_THREADSAFE
/*
* Deallocator task for this thread.
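NATIVE_ENUM_CACHE_HASH above folds the shape bits above the index width back into the low bits with xor, then masks down to a table index; it is a cheap way to spread nearby shape values across the 256 cache buckets. A standalone rendering of the same computation, with illustrative values:

// Fold high bits into low bits, then mask to the table size.
#include <cstdio>

const unsigned LOG2 = 8;           // NATIVE_ENUM_CACHE_LOG2
const unsigned SIZE = 1u << LOG2;  // NATIVE_ENUM_CACHE_SIZE == 256
const unsigned MASK = SIZE - 1;    // NATIVE_ENUM_CACHE_MASK == 0xff

unsigned cacheHash(unsigned shape) {
    return ((shape >> LOG2) ^ shape) & MASK;
}

int main() {
    // Shapes that differ only above bit 8 still land in different buckets.
    std::printf("%u %u\n", cacheHash(0x1234), cacheHash(0x2234)); // 38 22
    return 0;
}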
@@ -612,12 +628,6 @@ struct JSRuntime {
JSObject *anynameObject;
JSObject *functionNamespaceObject;

/*
* A helper list for the GC, so it can mark native iterator states. See
* js_TraceNativeEnumerators for details.
*/
JSNativeEnumerator *nativeEnumerators;

#ifndef JS_THREADSAFE
JSThreadData threadData;

@@ -642,20 +652,6 @@ struct JSRuntime {
/* Literal table maintained by jsatom.c functions. */
JSAtomState atomState;

/*
* Cache of reusable JSNativeEnumerators mapped by shape identifiers (as
* stored in scope->shape). This cache is nulled by the GC and protected
* by gcLock.
*/
#define NATIVE_ENUM_CACHE_LOG2 8
#define NATIVE_ENUM_CACHE_MASK JS_BITMASK(NATIVE_ENUM_CACHE_LOG2)
#define NATIVE_ENUM_CACHE_SIZE JS_BIT(NATIVE_ENUM_CACHE_LOG2)

#define NATIVE_ENUM_CACHE_HASH(shape) \
((((shape) >> NATIVE_ENUM_CACHE_LOG2) ^ (shape)) & NATIVE_ENUM_CACHE_MASK)

jsuword nativeEnumCache[NATIVE_ENUM_CACHE_SIZE];

/*
* Various metering fields are defined at the end of JSRuntime. In this
* way there is no need to recompile all the code that refers to other

@@ -866,6 +862,10 @@ typedef struct JSLocalRootStack {
#define JSTVU_WEAK_ROOTS (-4) /* u.weakRoots points to saved weak roots */
#define JSTVU_COMPILER (-5) /* u.compiler roots JSCompiler* */
#define JSTVU_SCRIPT (-6) /* u.script roots JSScript* */
#define JSTVU_ENUMERATOR (-7) /* a pointer to JSTempValueRooter points
to an instance of JSAutoEnumStateRooter
with u.object storing the enumeration
object */

/*
* Here single JSTVU_SINGLE covers both jsval and pointers to almost (see note

@@ -929,7 +929,6 @@ typedef struct JSLocalRootStack {
#define JS_PUSH_TEMP_ROOT_SCRIPT(cx,script_,tvr) \
JS_PUSH_TEMP_ROOT_COMMON(cx, script_, tvr, JSTVU_SCRIPT, script)


#define JSRESOLVE_INFER 0xffff /* infer bits from current bytecode */

struct JSContext {

@@ -1135,6 +1134,15 @@ struct JSContext {
return p;
}

inline void* mallocNoReport(size_t bytes) {
JS_ASSERT(bytes != 0);
void *p = runtime->malloc(bytes);
if (!p)
return NULL;
updateMallocCounter(bytes);
return p;
}

inline void* calloc(size_t bytes) {
JS_ASSERT(bytes != 0);
void *p = runtime->calloc(bytes);
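The new mallocNoReport above follows the same shape as the neighboring wrappers: allocate through the runtime, bump the malloc counter that feeds GC scheduling, but skip error reporting so the caller can recover quietly. A hedged sketch of that wrapper pattern, with all names hypothetical:

// Every allocation bumps a counter the GC scheduler consults; the
// "no report" variant leaves failure handling to the caller.
#include <cstdlib>
#include <cstdio>

struct Runtime {
    size_t mallocCounter = 0;
    void *alloc(size_t bytes) { return std::malloc(bytes); }
};

struct Context {
    Runtime *rt;

    void updateMallocCounter(size_t bytes) { rt->mallocCounter += bytes; }

    void *mallocNoReport(size_t bytes) {   // caller handles failure quietly
        void *p = rt->alloc(bytes);
        if (!p)
            return nullptr;
        updateMallocCounter(bytes);
        return p;
    }

    void *mallocReporting(size_t bytes) {  // reporting variant
        void *p = mallocNoReport(bytes);
        if (!p)
            std::fprintf(stderr, "out of memory\n");
        return p;
    }
};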
@@ -1276,7 +1284,7 @@ class JSAutoTempValueRooter

class JSAutoTempIdRooter
{
public:
public:
explicit JSAutoTempIdRooter(JSContext *cx, jsid id = INT_TO_JSID(0))
: mContext(cx) {
JS_PUSH_SINGLE_TEMP_ROOT(mContext, ID_TO_VALUE(id), &mTvr);

@@ -1289,11 +1297,66 @@ public:
jsid id() { return (jsid) mTvr.u.value; }
jsid * addr() { return (jsid *) &mTvr.u.value; }

private:
private:
JSContext *mContext;
JSTempValueRooter mTvr;
};

class JSAutoIdArray {
public:
JSAutoIdArray(JSContext *cx, JSIdArray *ida) : cx(cx), idArray(ida) {
if (ida)
JS_PUSH_TEMP_ROOT(cx, ida->length, ida->vector, &tvr);
}
~JSAutoIdArray() {
if (idArray) {
JS_POP_TEMP_ROOT(cx, &tvr);
JS_DestroyIdArray(cx, idArray);
}
}
bool operator!() {
return idArray == NULL;
}
jsid operator[](size_t i) const {
JS_ASSERT(idArray);
JS_ASSERT(i < size_t(idArray->length));
return idArray->vector[i];
}
size_t length() const {
return idArray->length;
}
private:
JSContext * const cx;
JSIdArray * const idArray;
JSTempValueRooter tvr;
};

/* The auto-root for enumeration object and its state. */
class JSAutoEnumStateRooter : public JSTempValueRooter
{
public:
JSAutoEnumStateRooter(JSContext *cx, JSObject *obj, jsval *statep)
: mContext(cx), mStatep(statep)
{
JS_ASSERT(obj);
JS_ASSERT(statep);
JS_PUSH_TEMP_ROOT_COMMON(cx, obj, this, JSTVU_ENUMERATOR, object);
}

~JSAutoEnumStateRooter() {
JS_POP_TEMP_ROOT(mContext, this);
}

void mark(JSTracer *trc) {
JS_CALL_OBJECT_TRACER(trc, u.object, "enumerator_obj");
js_MarkEnumeratorState(trc, u.object, *mStatep);
}

private:
JSContext *mContext;
jsval *mStatep;
};

class JSAutoResolveFlags
{
public:
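JSAutoIdArray above bundles two obligations into one scope: while alive it keeps the id vector rooted, and on destruction it both pops the root and calls JS_DestroyIdArray. A generic, self-contained analogue of the ownership side, with all names illustrative:

// The wrapper owns a heap array, exposes operator! for the failure case
// and operator[] for access, and frees the array exactly once at scope exit.
#include <cstddef>
#include <cstdio>

struct IdArray { size_t length; int *vector; };

IdArray *makeIds(size_t n) {
    IdArray *a = new IdArray;
    a->length = n;
    a->vector = new int[n];
    for (size_t i = 0; i < n; i++)
        a->vector[i] = int(i) * 10;
    return a;
}

class AutoIdArray {
  public:
    explicit AutoIdArray(IdArray *ida) : idArray(ida) {}
    ~AutoIdArray() {
        if (idArray) { delete[] idArray->vector; delete idArray; }
    }
    bool operator!() const { return idArray == nullptr; }
    int operator[](size_t i) const { return idArray->vector[i]; }
    size_t length() const { return idArray->length; }
  private:
    IdArray *const idArray;
};

int main() {
    AutoIdArray ida(makeIds(3));
    if (!ida)
        return 1;
    for (size_t i = 0; i < ida.length(); i++)
        std::printf("%d\n", ida[i]);
    return 0;   // array destroyed here, exactly once
}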
@@ -229,7 +229,7 @@ static intN
DaysInMonth(jsint year, jsint month)
{
JSBool leap = (DaysInYear(year) == 366);
intN result = DayFromMonth(month, leap) - DayFromMonth(month-1, leap);
intN result = intN(DayFromMonth(month, leap) - DayFromMonth(month-1, leap));
return result;
}

@@ -2312,26 +2312,23 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
JS_ASSERT(op != JSOP_CALLEE);
JS_ASSERT((cg->fun->flags & JSFUN_LAMBDA) && atom == cg->fun->atom);

switch (op) {
default:
/*
* Leave pn->pn_op == JSOP_NAME if cg->fun is heavyweight, as
* we cannot be sure cg->fun is not something of the form:
*
* var ff = (function f(s) { eval(s); return f; });
*
* where a caller invokes ff("var f = 42"). The result returned
* for such an invocation must be 42, since the callee name is
* lexically bound in an outer declarative environment from the
* function's activation. See jsfun.cpp:call_resolve.
*/
JS_ASSERT(op != JSOP_DELNAME);
if (!(cg->flags & TCF_FUN_HEAVYWEIGHT)) {
op = JSOP_CALLEE;
pn->pn_dflags |= PND_CONST;
}
break;
/*
* Leave pn->pn_op == JSOP_NAME if cg->fun is heavyweight, as we
* cannot be sure cg->fun is not something of the form:
*
* var ff = (function f(s) { eval(s); return f; });
*
* where a caller invokes ff("var f = 42"). The result returned for
* such an invocation must be 42, since the callee name is
* lexically bound in an outer declarative environment from the
* function's activation. See jsfun.cpp:call_resolve.
*/
JS_ASSERT(op != JSOP_DELNAME);
if (!(cg->flags & TCF_FUN_HEAVYWEIGHT)) {
op = JSOP_CALLEE;
pn->pn_dflags |= PND_CONST;
}

pn->pn_op = op;
pn->pn_dflags |= PND_BOUND;
return JS_TRUE;
@@ -429,7 +429,7 @@ struct JSCodeGenerator : public JSTreeContext
~JSCodeGenerator();

bool hasSharps() {
bool rv = flags & TCF_HAS_SHARPS;
bool rv = !!(flags & TCF_HAS_SHARPS);
JS_ASSERT((sharpSlotBase >= 0) == rv);
return rv;
}

@@ -81,43 +81,6 @@

#include "jsatominlines.h"

/*
* Reserved slot structure for Arguments objects:
*
* JSSLOT_PRIVATE - the corresponding frame until the frame exits.
* JSSLOT_ARGS_LENGTH - the number of actual arguments and a flag indicating
* whether arguments.length was overwritten.
* JSSLOT_ARGS_CALLEE - the arguments.callee value or JSVAL_HOLE if that was
* overwritten.
* JSSLOT_ARGS_COPY_START .. - room to store the corresponding arguments after
* the frame exists. The slot's value will be JSVAL_HOLE
* if arguments[i] was deleted or overwritten.
*/
const uint32 JSSLOT_ARGS_LENGTH = JSSLOT_PRIVATE + 1;
const uint32 JSSLOT_ARGS_CALLEE = JSSLOT_PRIVATE + 2;
const uint32 JSSLOT_ARGS_COPY_START = JSSLOT_PRIVATE + 3;

/* Number of extra fixed slots besides JSSLOT_PRIVATE. */
const uint32 ARGS_CLASS_FIXED_RESERVED_SLOTS = JSSLOT_ARGS_COPY_START -
JSSLOT_ARGS_LENGTH;

/*
* JSSLOT_ARGS_LENGTH stores ((argc << 1) | overwritten_flag) as int jsval.
* Thus (JS_ARGS_LENGTH_MAX << 1) | 1 must fit JSVAL_INT_MAX. To assert that
* we check first that the shift does not overflow uint32.
*/
JS_STATIC_ASSERT(JS_ARGS_LENGTH_MAX <= JS_BIT(30));
JS_STATIC_ASSERT(jsval((JS_ARGS_LENGTH_MAX << 1) | 1) <= JSVAL_INT_MAX);

static inline bool
IsOverriddenArgsLength(JSObject *obj)
{
JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_ArgumentsClass);

jsval v = obj->fslots[JSSLOT_ARGS_LENGTH];
return (JSVAL_TO_INT(v) & 1) != 0;
}

static inline void
SetOverriddenArgsLength(JSObject *obj)
{
@@ -136,7 +99,7 @@ InitArgsLengthSlot(JSObject *obj, uint32 argc)
JS_ASSERT(argc <= JS_ARGS_LENGTH_MAX);
JS_ASSERT(obj->fslots[JSSLOT_ARGS_LENGTH] == JSVAL_VOID);
obj->fslots[JSSLOT_ARGS_LENGTH] = INT_TO_JSVAL(argc << 1);
JS_ASSERT(!IsOverriddenArgsLength(obj));
JS_ASSERT(!js_IsOverriddenArgsLength(obj));
}

static inline uint32

@@ -225,7 +188,7 @@ js_GetArgsProperty(JSContext *cx, JSStackFrame *fp, jsid id, jsval *vp)
}
} else if (id == ATOM_TO_JSID(cx->runtime->atomState.lengthAtom)) {
JSObject *argsobj = JSVAL_TO_OBJECT(fp->argsobj);
if (argsobj && IsOverriddenArgsLength(argsobj))
if (argsobj && js_IsOverriddenArgsLength(argsobj))
return argsobj->getProperty(cx, id, vp);
*vp = INT_TO_JSVAL(jsint(fp->argc));
}

@@ -321,6 +284,8 @@ js_Arguments(JSContext *cx, JSObject *parent, uint32 argc, JSObject *callee,
double *argv, js_ArgsPrivateNative *apn)
{
JSObject *argsobj = NewArguments(cx, parent, argc, callee);
if (!argsobj)
return NULL;
apn->argv = argv;
SetArgsPrivateNative(argsobj, apn);
return argsobj;

@@ -559,7 +524,7 @@ ArgGetter(JSContext *cx, JSObject *obj, jsval idval, jsval *vp)
}
}
} else if (idval == ATOM_KEY(cx->runtime->atomState.lengthAtom)) {
if (!IsOverriddenArgsLength(obj))
if (!js_IsOverriddenArgsLength(obj))
*vp = INT_TO_JSVAL(GetArgsLength(obj));
} else {
JS_ASSERT(idval == ATOM_KEY(cx->runtime->atomState.calleeAtom));

@@ -639,7 +604,7 @@ args_resolve(JSContext *cx, JSObject *obj, jsval idval, uintN flags,
id = INT_JSVAL_TO_JSID(idval);
}
} else if (idval == ATOM_KEY(cx->runtime->atomState.lengthAtom)) {
if (!IsOverriddenArgsLength(obj))
if (!js_IsOverriddenArgsLength(obj))
id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom);

} else if (idval == ATOM_KEY(cx->runtime->atomState.calleeAtom)) {

@@ -1590,7 +1555,7 @@ js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp)
nupvars = flagsword >> 16;
fun->flags = uint16(flagsword);
fun->u.i.skipmin = uint16(firstword >> 2);
fun->u.i.wrapper = (firstword >> 1) & 1;
fun->u.i.wrapper = JSPackedBool((firstword >> 1) & 1);
}

/* do arguments and local vars */

@@ -2377,20 +2342,16 @@ js_InitFunctionClass(JSContext *cx, JSObject *obj)
return NULL;
fun = js_NewFunction(cx, proto, NULL, 0, JSFUN_INTERPRETED, obj, NULL);
if (!fun)
goto bad;
return NULL;
fun->u.i.script = js_NewScript(cx, 1, 1, 0, 0, 0, 0, 0);
if (!fun->u.i.script)
goto bad;
return NULL;
fun->u.i.script->code[0] = JSOP_STOP;
*fun->u.i.script->notes() = SRC_NULL;
#ifdef CHECK_SCRIPT_OWNER
fun->u.i.script->owner = NULL;
#endif
return proto;

bad:
cx->weakRoots.newborn[GCX_OBJECT] = NULL;
return NULL;
}

JSFunction *

@@ -2955,7 +2916,7 @@ get_local_names_enumerator(JSDHashTable *table, JSDHashEntryHdr *hdr,
entry->localKind == JSLOCAL_CONST ||
entry->localKind == JSLOCAL_UPVAR);
JS_ASSERT(entry->index < args->fun->u.i.nvars + args->fun->u.i.nupvars);
JS_ASSERT(args->nCopiedVars++ < args->fun->u.i.nvars + args->fun->u.i.nupvars);
JS_ASSERT(args->nCopiedVars++ < unsigned(args->fun->u.i.nvars + args->fun->u.i.nupvars));
i = args->fun->nargs;
if (entry->localKind == JSLOCAL_UPVAR)
i += args->fun->u.i.nvars;

@@ -334,6 +334,43 @@ js_GetArgsObject(JSContext *cx, JSStackFrame *fp);
extern void
js_PutArgsObject(JSContext *cx, JSStackFrame *fp);

/*
* Reserved slot structure for Arguments objects:
*
* JSSLOT_PRIVATE - the corresponding frame until the frame exits.
* JSSLOT_ARGS_LENGTH - the number of actual arguments and a flag indicating
* whether arguments.length was overwritten.
* JSSLOT_ARGS_CALLEE - the arguments.callee value or JSVAL_HOLE if that was
* overwritten.
* JSSLOT_ARGS_COPY_START .. - room to store the corresponding arguments after
* the frame exists. The slot's value will be JSVAL_HOLE
* if arguments[i] was deleted or overwritten.
*/
const uint32 JSSLOT_ARGS_LENGTH = JSSLOT_PRIVATE + 1;
const uint32 JSSLOT_ARGS_CALLEE = JSSLOT_PRIVATE + 2;
const uint32 JSSLOT_ARGS_COPY_START = JSSLOT_PRIVATE + 3;

/* Number of extra fixed slots besides JSSLOT_PRIVATE. */
const uint32 ARGS_CLASS_FIXED_RESERVED_SLOTS = JSSLOT_ARGS_COPY_START -
JSSLOT_ARGS_LENGTH;

/*
* JSSLOT_ARGS_LENGTH stores ((argc << 1) | overwritten_flag) as int jsval.
* Thus (JS_ARGS_LENGTH_MAX << 1) | 1 must fit JSVAL_INT_MAX. To assert that
* we check first that the shift does not overflow uint32.
*/
JS_STATIC_ASSERT(JS_ARGS_LENGTH_MAX <= JS_BIT(30));
JS_STATIC_ASSERT(jsval((JS_ARGS_LENGTH_MAX << 1) | 1) <= JSVAL_INT_MAX);

JS_INLINE bool
js_IsOverriddenArgsLength(JSObject *obj)
{
JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_ArgumentsClass);

jsval v = obj->fslots[JSSLOT_ARGS_LENGTH];
return (JSVAL_TO_INT(v) & 1) != 0;
}

extern JSBool
js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp);
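The comment above documents the packing of JSSLOT_ARGS_LENGTH: argc is shifted left one bit and the low bit records whether arguments.length was overwritten, so one integer slot carries both facts. Restated as standalone code:

#include <cassert>
#include <cstdint>

uint32_t encodeArgsLength(uint32_t argc) { return argc << 1; } // flag clear
void setOverridden(uint32_t &slot)       { slot |= 1; }        // low bit = flag
bool isOverridden(uint32_t slot)         { return (slot & 1) != 0; }
uint32_t getArgc(uint32_t slot)          { return slot >> 1; } // drop the flag

int main() {
    uint32_t slot = encodeArgsLength(7);
    assert(getArgc(slot) == 7 && !isOverridden(slot));
    setOverridden(slot);
    assert(getArgc(slot) == 7 && isOverridden(slot)); // argc survives the flag
    return 0;
}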
@@ -2852,7 +2852,6 @@ js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp)
JS_ASSERT(nslots >= fp->script->nfixed);
} else {
nslots = fp->script->nfixed;
JS_ASSERT_IF(!fp->regs->sp, nslots == 0);
}
TRACE_JSVALS(trc, nslots, fp->slots, "slot");
}

@@ -3036,6 +3035,9 @@ js_TraceContext(JSTracer *trc, JSContext *acx)
case JSTVU_SCRIPT:
js_TraceScript(trc, tvr->u.script);
break;
case JSTVU_ENUMERATOR:
static_cast<JSAutoEnumStateRooter *>(tvr)->mark(trc);
break;
default:
JS_ASSERT(tvr->count >= 0);
TRACE_JSVALS(trc, tvr->count, tvr->u.array, "tvr->u.array");

@@ -3095,7 +3097,6 @@ js_TraceRuntime(JSTracer *trc, JSBool allAtoms)
if (rt->gcLocksHash)
JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_traversal, trc);
js_TraceAtomState(trc, allAtoms);
js_TraceNativeEnumerators(trc);
js_TraceRuntimeNumberState(trc);

iter = NULL;

@@ -326,22 +326,6 @@ struct JSWeakRoots {

#define JS_CLEAR_WEAK_ROOTS(wr) (memset((wr), 0, sizeof(JSWeakRoots)))

/*
* Increase runtime->gcBytes by sz bytes to account for an allocation outside
* the GC that will be freed only after the GC is run. The function may run
* the last ditch GC to ensure that gcBytes does not exceed gcMaxBytes. It will
* fail if the latter is not possible.
*
* This function requires that runtime->gcLock is held on entry. On successful
* return the lock is still held and on failure it will be released with
* the error reported.
*/
extern JSBool
js_AddAsGCBytes(JSContext *cx, size_t sz);

extern void
js_RemoveAsGCBytes(JSRuntime* rt, size_t sz);

#ifdef JS_THREADSAFE
class JSFreePointerListTask : public JSBackgroundTask {
void *head;
@@ -997,49 +997,35 @@ JSClass js_NoSuchMethodClass = {
JS_STATIC_INTERPRET JSBool
js_OnUnknownMethod(JSContext *cx, jsval *vp)
{
JSObject *obj;
jsid id;
JSTempValueRooter tvr;
JSBool ok;

JS_ASSERT(!JSVAL_IS_PRIMITIVE(vp[1]));
obj = JSVAL_TO_OBJECT(vp[1]);
JS_PUSH_SINGLE_TEMP_ROOT(cx, JSVAL_NULL, &tvr);

MUST_FLOW_THROUGH("out");
id = ATOM_TO_JSID(cx->runtime->atomState.noSuchMethodAtom);
ok = js_GetMethod(cx, obj, id, JSGET_NO_METHOD_BARRIER, &tvr.u.value);
if (!ok)
goto out;
if (JSVAL_IS_PRIMITIVE(tvr.u.value)) {
vp[0] = tvr.u.value;
JSObject *obj = JSVAL_TO_OBJECT(vp[1]);
jsid id = ATOM_TO_JSID(cx->runtime->atomState.noSuchMethodAtom);
JSAutoTempValueRooter tvr(cx, JSVAL_NULL);
if (!js_GetMethod(cx, obj, id, JSGET_NO_METHOD_BARRIER, tvr.addr()))
return false;
if (JSVAL_IS_PRIMITIVE(tvr.value())) {
vp[0] = tvr.value();
} else {
#if JS_HAS_XML_SUPPORT
/* Extract the function name from function::name qname. */
if (!JSVAL_IS_PRIMITIVE(vp[0])) {
obj = JSVAL_TO_OBJECT(vp[0]);
ok = js_IsFunctionQName(cx, obj, &id);
if (!ok)
goto out;
if (!js_IsFunctionQName(cx, obj, &id))
return false;
if (id != 0)
vp[0] = ID_TO_VALUE(id);
}
#endif
obj = js_NewObjectWithGivenProto(cx, &js_NoSuchMethodClass,
NULL, NULL);
if (!obj) {
ok = JS_FALSE;
goto out;
}
obj->fslots[JSSLOT_FOUND_FUNCTION] = tvr.u.value;
if (!obj)
return false;
obj->fslots[JSSLOT_FOUND_FUNCTION] = tvr.value();
obj->fslots[JSSLOT_SAVED_ID] = vp[0];
vp[0] = OBJECT_TO_JSVAL(obj);
}
ok = JS_TRUE;

out:
JS_POP_TEMP_ROOT(cx, &tvr);
return ok;
return true;
}

static JS_REQUIRES_STACK JSBool

@@ -1502,7 +1488,7 @@ js_Execute(JSContext *cx, JSObject *chain, JSScript *script,
frame.callobj = down->callobj;
frame.argsobj = down->argsobj;
frame.varobj = down->varobj;
frame.fun = down->fun;
frame.fun = (script->staticLevel > 0) ? down->fun : NULL;
frame.thisv = down->thisv;
if (down->flags & JSFRAME_COMPUTED_THIS)
flags |= JSFRAME_COMPUTED_THIS;

@@ -1555,9 +1541,8 @@ js_Execute(JSContext *cx, JSObject *chain, JSScript *script,
} else {
sharps[0] = sharps[1] = JSVAL_VOID;
}
} else
}
#endif
JS_ASSERT_IF(down, script->nfixed == 0);
} else {
frame.slots = NULL;
}

@@ -1876,10 +1861,8 @@ js_InvokeConstructor(JSContext *cx, uintN argc, JSBool clampReturn, jsval *vp)

/* Now we have an object with a constructor method; call it. */
vp[1] = OBJECT_TO_JSVAL(obj);
if (!js_Invoke(cx, argc, vp, JSINVOKE_CONSTRUCT)) {
cx->weakRoots.newborn[GCX_OBJECT] = NULL;
if (!js_Invoke(cx, argc, vp, JSINVOKE_CONSTRUCT))
return JS_FALSE;
}

/* Check the return value and if it's primitive, force it to be obj. */
rval = *vp;
@@ -2554,7 +2537,7 @@ AssertValidPropertyCacheHit(JSContext *cx, JSScript *script, JSFrameRegs& regs,

JSObject *obj, *pobj;
JSProperty *prop;
bool ok;
JSBool ok;

if (JOF_OPMODE(*regs.pc) == JOF_NAME) {
ok = js_FindProperty(cx, ATOM_TO_JSID(atom), &obj, &pobj, &prop);

@@ -2805,18 +2788,9 @@ js_Interpret(JSContext *cx)
#endif /* !JS_THREADED_INTERP */

#ifdef JS_TRACER
/* We had better not be entering the interpreter from JIT-compiled code. */
TraceRecorder *tr = TRACE_RECORDER(cx);
SET_TRACE_RECORDER(cx, NULL);

/* If a recorder is pending and we try to re-enter the interpreter, flag
the recorder to be destroyed when we return. */
if (tr) {
if (tr->wasDeepAborted())
tr->removeFragmentReferences();
else
tr->pushAbortStack();
}
/* We cannot reenter the interpreter while recording. */
if (TRACE_RECORDER(cx))
js_AbortRecording(cx, "attempt to reenter interpreter while recording");
#endif

/* Check for too deep of a native thread stack. */

@@ -3308,15 +3282,6 @@ js_Interpret(JSContext *cx)
js_SetVersion(cx, originalVersion);
--cx->interpLevel;

#ifdef JS_TRACER
if (tr) {
SET_TRACE_RECORDER(cx, tr);
if (!tr->wasDeepAborted()) {
tr->popAbortStack();
tr->deepAbort();
}
}
#endif
return ok;

atom_not_defined:
@@ -99,7 +99,7 @@ js_CloseNativeIterator(JSContext *cx, JSObject *iterobj)
return;

/* Protect against failure to fully initialize obj. */
iterable = STOBJ_GET_PARENT(iterobj);
iterable = iterobj->getParent();
if (iterable) {
#if JS_HAS_XML_SUPPORT
uintN flags = JSVAL_TO_INT(STOBJ_GET_SLOT(iterobj, JSSLOT_ITER_FLAGS));

@@ -113,13 +113,33 @@ js_CloseNativeIterator(JSContext *cx, JSObject *iterobj)
STOBJ_SET_SLOT(iterobj, JSSLOT_ITER_STATE, JSVAL_NULL);
}

static void
iterator_trace(JSTracer *trc, JSObject *obj)
{
/*
* The GC marks iter_state during the normal slot scanning if
* JSVAL_IS_TRACEABLE(iter_state) is true duplicating the efforts of
* js_MarkEnumeratorState. But this is rare so we optimize for code
* simplicity.
*/
JSObject *iterable = obj->getParent();
if (!iterable) {
/* for (x in null) creates an iterator object with a null parent. */
return;
}
jsval iter_state = obj->fslots[JSSLOT_ITER_STATE];
js_MarkEnumeratorState(trc, iterable, iter_state);
}

JSClass js_IteratorClass = {
"Iterator",
JSCLASS_HAS_RESERVED_SLOTS(2) | /* slots for state and flags */
JSCLASS_HAS_CACHED_PROTO(JSProto_Iterator),
JSCLASS_HAS_CACHED_PROTO(JSProto_Iterator) |
JSCLASS_MARK_IS_TRACE,
JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, NULL,
JSCLASS_NO_OPTIONAL_MEMBERS
NULL, NULL, NULL, NULL,
NULL, NULL, JS_CLASS_TRACE(iterator_trace), NULL
};

static JSBool

@@ -940,7 +940,6 @@ js_ValueToNumber(JSContext *cx, jsval *vp)
const jschar *bp, *end, *ep;
jsdouble d, *dp;
JSObject *obj;
JSTempValueRooter tvr;

v = *vp;
for (;;) {

@@ -1004,12 +1003,11 @@ js_ValueToNumber(JSContext *cx, jsval *vp)
* vp roots obj so we cannot use it as an extra root for
* obj->defaultValue result when calling the hook.
*/
JS_PUSH_SINGLE_TEMP_ROOT(cx, v, &tvr);
if (!obj->defaultValue(cx, JSTYPE_NUMBER, &tvr.u.value))
JSAutoTempValueRooter tvr(cx, v);
if (!obj->defaultValue(cx, JSTYPE_NUMBER, tvr.addr()))
obj = NULL;
else
v = *vp = tvr.u.value;
JS_POP_TEMP_ROOT(cx, &tvr);
v = *vp = tvr.value();
if (!obj) {
*vp = JSVAL_NULL;
return 0.0;
js/src/jsobj.cpp (524 changed lines)
@@ -41,6 +41,8 @@
/*
* JS object implementation.
*/
#define __STDC_LIMIT_MACROS

#include <stdlib.h>
#include <string.h>
#include "jstypes.h"

@@ -71,6 +73,7 @@
#include "jsscript.h"
#include "jsscriptinlines.h"
#include "jsstaticcheck.h"
#include "jsstdint.h"
#include "jsstr.h"
#include "jstracer.h"
#include "jsdbgapi.h"

@@ -253,8 +256,10 @@ obj_getCount(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
if (JS_HAS_STRICT_OPTION(cx) && !ReportStrictSlot(cx, JSSLOT_COUNT))
return JS_FALSE;

/* Get the number of properties to enumerate. */
iter_state = JSVAL_NULL;
JSAutoEnumStateRooter tvr(cx, obj, &iter_state);

/* Get the number of properties to enumerate. */
ok = obj->enumerate(cx, JSENUMERATE_INIT, &iter_state, &num_properties);
if (!ok)
goto out;

@@ -267,8 +272,8 @@ obj_getCount(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
*vp = num_properties;

out:
if (iter_state != JSVAL_NULL)
ok = obj->enumerate(cx, JSENUMERATE_DESTROY, &iter_state, 0);
if (!JSVAL_IS_NULL(iter_state))
ok &= obj->enumerate(cx, JSENUMERATE_DESTROY, &iter_state, 0);
return ok;
}
@@ -1248,13 +1253,18 @@ obj_eval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
JSScript **bucket = NULL; /* avoid GCC warning with early decl&init */
#if JS_HAS_EVAL_THIS_SCOPE
JSObject *callerScopeChain = NULL, *callerVarObj = NULL;
JSObject *setCallerScopeChain = NULL;
JSBool setCallerVarObj = JS_FALSE;
JSBool setCallerScopeChain = JS_FALSE, setCallerVarObj = JS_FALSE;
#endif

fp = js_GetTopStackFrame(cx);
caller = js_GetScriptedCaller(cx, fp);
indirectCall = (caller && caller->regs && *caller->regs->pc != JSOP_EVAL);
if (!caller) {
JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
JSMSG_BAD_INDIRECT_CALL, js_eval_str);
return JS_FALSE;
}

indirectCall = (caller->regs && *caller->regs->pc != JSOP_EVAL);

/*
* This call to js_GetWrappedObject is safe because of the security checks

@@ -1296,7 +1306,7 @@ obj_eval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
* object, then we need to provide one for the compiler to stick any
* declared (var) variables into.
*/
if (caller && !caller->varobj && !js_GetCallObject(cx, caller))
if (!caller->varobj && !js_GetCallObject(cx, caller))
return JS_FALSE;

/* Accept an optional trailing argument that overrides the scope object. */

@@ -1309,39 +1319,45 @@ obj_eval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)

/* From here on, control must exit through label out with ok set. */
MUST_FLOW_THROUGH("out");
uintN staticLevel = caller->script->staticLevel + 1;
if (!scopeobj) {
#if JS_HAS_EVAL_THIS_SCOPE
/* If obj.eval(str), emulate 'with (obj) eval(str)' in the caller. */
/*
* If we see an indirect call, then run eval in the global scope. We do
* this so the compiler can make assumptions about what bindings may or
* may not exist in the current frame if it doesn't see 'eval'.
*/
if (indirectCall) {
/* Pretend that we're top level. */
staticLevel = 0;

callerScopeChain = js_GetScopeChain(cx, caller);
if (!callerScopeChain) {
ok = JS_FALSE;
goto out;
}

OBJ_TO_INNER_OBJECT(cx, obj);
if (!obj) {
ok = JS_FALSE;
goto out;
}
if (obj != callerScopeChain) {
ok = js_CheckPrincipalsAccess(cx, obj,
JS_StackFramePrincipals(cx, caller),
cx->runtime->atomState.evalAtom);
if (!ok)
goto out;

scopeobj = js_NewWithObject(cx, obj, callerScopeChain, -1);
if (!scopeobj) {
ok = JS_FALSE;
goto out;
}
ok = js_CheckPrincipalsAccess(cx, obj,
JS_StackFramePrincipals(cx, caller),
cx->runtime->atomState.evalAtom);
if (!ok)
goto out;

/* Set fp->scopeChain too, for the compiler. */
caller->scopeChain = fp->scopeChain = scopeobj;
/* NB: We know obj is a global object here. */
JS_ASSERT(!OBJ_GET_PARENT(cx, obj));
scopeobj = obj;

/* Remember scopeobj so we can null its private when done. */
setCallerScopeChain = scopeobj;
}
/* Set fp->scopeChain too, for the compiler. */
caller->scopeChain = fp->scopeChain = scopeobj;

/* Remember scopeobj so we can null its private when done. */
setCallerScopeChain = JS_TRUE;

callerVarObj = caller->varobj;
if (obj != callerVarObj) {

@@ -1358,12 +1374,10 @@ obj_eval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
* NB: This means that native callers (who reach this point through
* the C API) must use the two parameter form.
*/
if (caller) {
scopeobj = js_GetScopeChain(cx, caller);
if (!scopeobj) {
ok = JS_FALSE;
goto out;
}
scopeobj = js_GetScopeChain(cx, caller);
if (!scopeobj) {
ok = JS_FALSE;
goto out;
}
} else {
scopeobj = js_GetWrappedObject(cx, scopeobj);

@@ -1372,19 +1386,15 @@ obj_eval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
ok = JS_FALSE;
goto out;
}

ok = js_CheckPrincipalsAccess(cx, scopeobj,
JS_StackFramePrincipals(cx, caller),
cx->runtime->atomState.evalAtom);
if (!ok)
goto out;

scopeobj = js_NewWithObject(cx, scopeobj,
JS_GetGlobalForObject(cx, scopeobj), -1);
if (!scopeobj) {
ok = JS_FALSE;
goto out;
}
argv[1] = OBJECT_TO_JSVAL(scopeobj);
/* We're pretending that we're in global code. */
staticLevel = 0;
}

/* Ensure we compile this eval with the right object in the scope chain. */

@@ -1394,23 +1404,16 @@ obj_eval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
goto out;
}

tcflags = TCF_COMPILE_N_GO;
if (caller) {
tcflags |= TCF_PUT_STATIC_LEVEL(caller->script->staticLevel + 1);
principals = JS_EvalFramePrincipals(cx, fp, caller);
file = js_ComputeFilename(cx, caller, principals, &line);
} else {
principals = NULL;
file = NULL;
line = 0;
}
tcflags = TCF_COMPILE_N_GO | TCF_PUT_STATIC_LEVEL(staticLevel);
principals = JS_EvalFramePrincipals(cx, fp, caller);
file = js_ComputeFilename(cx, caller, principals, &line);

str = JSVAL_TO_STRING(argv[0]);
script = NULL;

/* Cache local eval scripts indexed by source qualified by scope. */
bucket = EvalCacheHash(cx, str);
if (caller->fun) {
if (!indirectCall && caller->fun) {
uintN count = 0;
JSScript **scriptp = bucket;

@@ -1475,7 +1478,9 @@ obj_eval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
}

if (!script) {
script = JSCompiler::compileScript(cx, scopeobj, caller, principals, tcflags,
JSStackFrame *callerFrame = (staticLevel != 0) ? caller : NULL;
script = JSCompiler::compileScript(cx, scopeobj, callerFrame,
principals, tcflags,
str->chars(), str->length(),
NULL, file, line, str);
if (!script) {

@@ -1486,8 +1491,7 @@ obj_eval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)

if (argc < 2) {
/* Execute using caller's new scope object (might be a Call object). */
if (caller)
scopeobj = caller->scopeChain;
scopeobj = caller->scopeChain;
}

/*

@@ -1508,11 +1512,8 @@ obj_eval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
out:
#if JS_HAS_EVAL_THIS_SCOPE
/* Restore OBJ_GET_PARENT(scopeobj) not callerScopeChain in case of Call. */
if (setCallerScopeChain) {
if (setCallerScopeChain)
caller->scopeChain = callerScopeChain;
JS_ASSERT(OBJ_GET_CLASS(cx, setCallerScopeChain) == &js_WithClass);
setCallerScopeChain->setPrivate(NULL);
}
if (setCallerVarObj)
caller->varobj = callerVarObj;
#endif
@ -1946,7 +1947,7 @@ obj_getPrototypeOf(JSContext *cx, uintN argc, jsval *vp)
|
|||
}
|
||||
|
||||
if (JSVAL_IS_PRIMITIVE(vp[2])) {
|
||||
char *bytes = js_DecompileValueGenerator(cx, -argc, vp[2], NULL);
|
||||
char *bytes = js_DecompileValueGenerator(cx, 0 - argc, vp[2], NULL);
|
||||
if (!bytes)
|
||||
return JS_FALSE;
|
||||
JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
|
||||
|
@ -2044,6 +2045,57 @@ obj_getOwnPropertyDescriptor(JSContext *cx, uintN argc, jsval *vp)
|
|||
return ok;
|
||||
}
|
||||
|
||||
static JSBool
|
||||
obj_keys(JSContext *cx, uintN argc, jsval *vp)
|
||||
{
|
||||
jsval v = argc == 0 ? JSVAL_VOID : vp[2];
|
||||
if (JSVAL_IS_PRIMITIVE(v)) {
|
||||
JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NOT_NONNULL_OBJECT);
|
||||
return JS_FALSE;
|
||||
}
|
||||
|
||||
JSObject *obj = JSVAL_TO_OBJECT(v);
|
||||
JSAutoIdArray ida(cx, JS_Enumerate(cx, obj));
|
||||
if (!ida)
|
||||
return JS_FALSE;
|
||||
|
||||
JSObject *proto;
|
||||
if (!js_GetClassPrototype(cx, NULL, INT_TO_JSID(JSProto_Array), &proto))
|
||||
return JS_FALSE;
|
||||
vp[1] = OBJECT_TO_JSVAL(proto);
|
||||
|
||||
JS_ASSERT(ida.length() <= UINT32_MAX);
|
||||
JSObject *aobj = js_NewArrayWithSlots(cx, proto, uint32(ida.length()));
|
||||
if (!aobj)
|
||||
return JS_FALSE;
|
||||
*vp = OBJECT_TO_JSVAL(aobj);
|
||||
|
||||
jsval *slots = aobj->dslots;
|
||||
size_t len = ida.length();
|
||||
JS_ASSERT(js_DenseArrayCapacity(aobj) >= len);
|
||||
for (size_t i = 0; i < len; i++) {
|
||||
jsid id = ida[i];
|
||||
if (JSID_IS_INT(id)) {
|
||||
if (!js_ValueToStringId(cx, INT_JSID_TO_JSVAL(id), &slots[i]))
|
||||
return JS_FALSE;
|
||||
} else {
|
||||
/*
|
||||
* Object-valued ids are a possibility admitted by SpiderMonkey for
|
||||
* the purposes of E4X. It's unclear whether they could ever be
|
||||
* detected here -- the "obvious" possibility, a property referred
|
||||
* to by a QName, actually appears as a string jsid -- but in the
|
||||
* interests of fidelity we pass object jsids through unchanged.
|
||||
*/
|
||||
slots[i] = ID_TO_VALUE(id);
|
||||
}
|
||||
}
|
||||
|
||||
JS_ASSERT(len <= UINT32_MAX);
|
||||
aobj->fslots[JSSLOT_ARRAY_COUNT] = len;
|
||||
|
||||
return JS_TRUE;
|
||||
}
|
||||
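The new obj_keys native above implements ES5's Object.keys over the engine's dense-array fast path. As a behavioral illustration only (not part of this patch), a JSAPI embedding of this era could exercise it as follows; cx and global are assumed to be an already-initialized context and global object, and the JS_EvaluateScript signature shown is the 1.8-era one.

#include "jsapi.h"
#include <string.h>
#include <stdio.h>

/* Hedged sketch: exercising Object.keys from an embedding. */
static JSBool
PrintKeys(JSContext *cx, JSObject *global)
{
    const char *src = "Object.keys({a: 1, b: 2}).join(',')";
    jsval rval;
    if (!JS_EvaluateScript(cx, global, src, strlen(src), "sketch.js", 1, &rval))
        return JS_FALSE;
    JSString *str = JS_ValueToString(cx, rval);
    if (!str)
        return JS_FALSE;
    printf("%s\n", JS_GetStringBytes(str));  /* expected output: a,b */
    return JS_TRUE;
}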

#if JS_HAS_OBJ_WATCHPOINT
const char js_watch_str[] = "watch";

@ -2092,6 +2144,7 @@ static JSFunctionSpec object_methods[] = {
static JSFunctionSpec object_static_methods[] = {
JS_FN("getPrototypeOf", obj_getPrototypeOf, 1,0),
JS_FN("getOwnPropertyDescriptor", obj_getOwnPropertyDescriptor,2,0),
JS_FN("keys", obj_keys, 1,0),
JS_FS_END
};

@ -2738,7 +2791,7 @@ block_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
}

JSBool
js_DefineBlockVariable(JSContext *cx, JSObject *obj, jsid id, int16 index)
js_DefineBlockVariable(JSContext *cx, JSObject *obj, jsid id, intN index)
{
JS_ASSERT(obj->getClass() == &js_BlockClass);
JS_ASSERT(!OBJ_IS_CLONED_BLOCK(obj));

@ -2779,7 +2832,6 @@ js_XDRBlockObject(JSXDRState *xdr, JSObject **objp)
JSObject *obj, *parent;
uint16 depth, count, i;
uint32 tmp;
JSTempValueRooter tvr;
JSScopeProperty *sprop;
jsid propid;
JSAtom *atom;

@ -2827,12 +2879,10 @@ js_XDRBlockObject(JSXDRState *xdr, JSObject **objp)
STOBJ_SET_PARENT(obj, parent);
}

JS_PUSH_SINGLE_TEMP_ROOT(cx, OBJECT_TO_JSVAL(obj), &tvr);
JSAutoTempValueRooter tvr(cx, obj);

if (!JS_XDRUint32(xdr, &tmp)) {
JS_POP_TEMP_ROOT(cx, &tvr);
return JS_FALSE;
}
if (!JS_XDRUint32(xdr, &tmp))
return false;

if (xdr->mode == JSXDR_DECODE) {
depth = (uint16)(tmp >> 16);

@ -2866,15 +2916,12 @@ js_XDRBlockObject(JSXDRState *xdr, JSObject **objp)
/* XDR the real id, then the shortid. */
if (!js_XDRStringAtom(xdr, &atom) ||
!JS_XDRUint16(xdr, (uint16 *)&shortid)) {
ok = JS_FALSE;
break;
return false;
}

if (xdr->mode == JSXDR_DECODE) {
if (!js_DefineBlockVariable(cx, obj, ATOM_TO_JSID(atom), shortid)) {
ok = JS_FALSE;
break;
}
if (!js_DefineBlockVariable(cx, obj, ATOM_TO_JSID(atom), shortid))
return false;
}
}

@ -2882,9 +2929,7 @@ js_XDRBlockObject(JSXDRState *xdr, JSObject **objp)
/* Do as the parser does and make this block scope shareable. */
OBJ_SCOPE(obj)->object = NULL;
}

JS_POP_TEMP_ROOT(cx, &tvr);
return ok;
return true;
}

#endif

@ -3390,29 +3435,22 @@ js_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto,
{
jsid id;
jsval cval, rval;
JSTempValueRooter argtvr, tvr;
JSObject *obj, *ctor;

JS_PUSH_TEMP_ROOT(cx, argc, argv, &argtvr);
JSAutoTempValueRooter argtvr(cx, argc, argv);

if (!js_GetClassId(cx, clasp, &id) ||
!js_FindClassObject(cx, parent, id, &cval)) {
JS_POP_TEMP_ROOT(cx, &argtvr);
return NULL;
}

if (JSVAL_IS_PRIMITIVE(cval)) {
js_ReportIsNotFunction(cx, &cval, JSV2F_CONSTRUCT | JSV2F_SEARCH_STACK);
JS_POP_TEMP_ROOT(cx, &argtvr);
return NULL;
}

/*
* Protect cval in case a crazy getter for .prototype uproots it. After
* this point, all control flow must exit through label out with obj set.
*/
JS_PUSH_SINGLE_TEMP_ROOT(cx, cval, &tvr);
MUST_FLOW_THROUGH("out");
/* Protect cval in case a crazy getter for .prototype uproots it. */
JSAutoTempValueRooter tvr(cx, cval);

/*
* If proto or parent are NULL, set them to Constructor.prototype and/or

@ -3424,8 +3462,7 @@ js_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto,
if (!proto) {
if (!ctor->getProperty(cx, ATOM_TO_JSID(cx->runtime->atomState.classPrototypeAtom),
&rval)) {
obj = NULL;
goto out;
return NULL;
}
if (JSVAL_IS_OBJECT(rval))
proto = JSVAL_TO_OBJECT(rval);

@ -3433,14 +3470,13 @@ js_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto,

obj = js_NewObject(cx, clasp, proto, parent);
if (!obj)
goto out;
return NULL;

if (!js_InternalConstruct(cx, obj, cval, argc, argv, &rval))
goto bad;
return NULL;

if (JSVAL_IS_PRIMITIVE(rval))
goto out;
obj = JSVAL_TO_OBJECT(rval);
return obj;

/*
* If the instance's class differs from what was requested, throw a type

@ -3449,24 +3485,16 @@ js_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto,
* private data set at this point, then the constructor was replaced and
* we should throw a type error.
*/
obj = JSVAL_TO_OBJECT(rval);
if (OBJ_GET_CLASS(cx, obj) != clasp ||
(!(~clasp->flags & (JSCLASS_HAS_PRIVATE |
JSCLASS_CONSTRUCT_PROTOTYPE)) &&
!obj->getPrivate())) {
JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
JSMSG_WRONG_CONSTRUCTOR, clasp->name);
goto bad;
return NULL;
}

out:
JS_POP_TEMP_ROOT(cx, &tvr);
JS_POP_TEMP_ROOT(cx, &argtvr);
return obj;

bad:
cx->weakRoots.newborn[GCX_OBJECT] = NULL;
obj = NULL;
goto out;
}

/* XXXbe if one adds props, deletes earlier props, adds more, the last added

@ -3746,7 +3774,8 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
if (sprop &&
pobj == obj &&
(sprop->attrs & (JSPROP_GETTER | JSPROP_SETTER))) {
sprop = OBJ_SCOPE(obj)->change(cx, sprop, attrs, sprop->attrs,
sprop = OBJ_SCOPE(obj)->change(cx, sprop, attrs,
JSPROP_GETTER | JSPROP_SETTER,
(attrs & JSPROP_GETTER)
? getter
: sprop->getter,

@ -4959,69 +4988,81 @@ out:
* in the object. Instead for the empty enumerator the code uses JSVAL_ZERO as
* the enumeration state.
*
* JSRuntime.nativeEnumCache caches the enumerators using scope's shape to
* JSThreadData.nativeEnumCache caches the enumerators using scope's shape to
* avoid repeated scanning of scopes for enumerable properties. The cache
* entry is either JSNativeEnumerator* or, for the empty enumerator, the shape
* value itself. The latter is stored as (shape << 1) | 1 to ensure that it is
* always different from JSNativeEnumerator* values.
*
* We cache the enumerators in the JSENUMERATE_INIT case of js_Enumerate, not
* during JSENUMERATE_DESTROY. The GC can invoke the latter case during the
* finalization when JSNativeEnumerator contains finalized ids and the
* enumerator must be freed.
*/
struct JSNativeEnumerator {
/*
* The index into the ids array. It runs from the length down to 1 when
* the enumerator is running. It is 0 when the enumerator is finished and
* can be reused on a cache hit. Its type is jsword, not uint32, for
* compatibility with js_CompareAndSwap.
*/
jsword cursor;

* can be reused on a cache hit.
*/
uint32 cursor;
uint32 length; /* length of ids array */
uint32 shape; /* "shape" number -- see jsscope.h */
JSNativeEnumerator *next; /* list linking */
jsid ids[1]; /* enumeration id array */

static inline size_t size(uint32 length) {
JS_ASSERT(length != 0);
return offsetof(JSNativeEnumerator, ids) +
(size_t) length * sizeof(jsid);
}

bool isFinished() const {
return cursor == 0;
}

void mark(JSTracer *trc) {
JS_ASSERT(length >= 1);
jsid *cursor = ids;
jsid *end = ids + length;
do {
js_TraceId(trc, *cursor);
} while (++cursor != end);
}
};
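The cache entry encoding described in the comment above stores either a JSNativeEnumerator pointer or a shape tagged into the low bit. A minimal standalone sketch of that encoding (names illustrative, not from the tree):

#include <cassert>
#include <stdint.h>

/* Hedged sketch of the (shape << 1) | 1 tagging used by the enumerator
 * cache: allocated pointers are at least 2-byte aligned, so their low
 * bit is 0, keeping tagged shapes disjoint from pointer entries. */
typedef uintptr_t CacheWord;

static CacheWord tagShape(uint32_t shape)   { return (CacheWord(shape) << 1) | 1; }
static bool      isTaggedShape(CacheWord w) { return (w & 1) != 0; }
static uint32_t  untagShape(CacheWord w)    { assert(isTaggedShape(w)); return uint32_t(w >> 1); }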

/* The tagging of shape values requires one bit. */
JS_STATIC_ASSERT((jsuword) SHAPE_OVERFLOW_BIT <=
((jsuword) 1 << (JS_BITS_PER_WORD - 1)));

static inline size_t
NativeEnumeratorSize(uint32 length)
static void
SetEnumeratorCache(JSContext *cx, jsuword *cachep, jsuword newcache)
{
JS_ASSERT(length != 0);
return offsetof(JSNativeEnumerator, ids) + (size_t) length * sizeof(jsid);
jsuword old = *cachep;
*cachep = newcache;
if (!(old & jsuword(1)) && old) {
/* Free the cached enumerator unless it is running. */
JSNativeEnumerator *ne = reinterpret_cast<JSNativeEnumerator *>(old);
if (ne->isFinished())
cx->free(ne);
}
}

/*
* This function is used to enumerate the properties of native JSObjects
* and those host objects that do not define a JSNewEnumerateOp-style iterator
* function.
*/
JSBool
js_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
jsval *statep, jsid *idp)
{
JSClass *clasp;
JSEnumerateOp enumerate;
JSNativeEnumerator *ne;
uint32 length, shape;
size_t allocated;
JSScope *scope;
jsuword *cachep, oldcache;
JSScopeProperty *sprop;
jsid *ids;
jsword newcursor;

clasp = OBJ_GET_CLASS(cx, obj);
enumerate = clasp->enumerate;
/* Here cx is JSTracer when enum_op is JSENUMERATE_MARK. */
JSClass *clasp = obj->getClass();
JSEnumerateOp enumerate = clasp->enumerate;
if (clasp->flags & JSCLASS_NEW_ENUMERATE) {
JS_ASSERT(enumerate != JS_EnumerateStub);
return ((JSNewEnumerateOp) enumerate)(cx, obj, enum_op, statep, idp);
}

switch (enum_op) {
case JSENUMERATE_INIT:
case JSENUMERATE_INIT: {
if (!enumerate(cx, obj))
return JS_FALSE;
return false;

/*
* The set of all property ids is pre-computed when the iterator is

@ -5029,51 +5070,45 @@ js_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
* during the iteration.
*
* Use a do-while(0) loop to avoid too many nested ifs. If ne is null
* after the loop, it indicates an empty enumerator. If allocated is
* not zero after the loop, we add the newly allocated ne to the cache
* and runtime->nativeEnumerators list.
* after the loop, it indicates an empty enumerator.
*/
ne = NULL;
length = 0;
allocated = (size_t) 0;
JS_LOCK_OBJ(cx, obj);
scope = OBJ_SCOPE(obj);
JSNativeEnumerator *ne;
uint32 length;
do {
uint32 shape = OBJ_SHAPE(obj);

ENUM_CACHE_METER(nativeEnumProbes);
shape = scope->shape;
cachep = &cx->runtime->
nativeEnumCache[NATIVE_ENUM_CACHE_HASH(shape)];
oldcache = *cachep;
jsuword *cachep = &JS_THREAD_DATA(cx)->
nativeEnumCache[NATIVE_ENUM_CACHE_HASH(shape)];
jsuword oldcache = *cachep;
if (oldcache & (jsuword) 1) {
if ((uint32) (oldcache >> 1) == shape) {
if (uint32(oldcache >> 1) == shape) {
/* scope has a shape with no enumerable properties. */
ne = NULL;
length = 0;
break;
}
} else if (oldcache != (jsuword) 0) {
/*
* We can safely read ne->shape without taking the GC lock as
* ne is deleted only when running the GC and ne->shape is
* read-only after initialization.
*/
ne = (JSNativeEnumerator *) *cachep;
} else if (oldcache != jsuword(0)) {
ne = reinterpret_cast<JSNativeEnumerator *>(oldcache);
JS_ASSERT(ne->length >= 1);
if (ne->shape == shape) {
/*
* Check that ne is not running with another enumerator
* and, if so, reuse and mark it as running from now.
*/
if (ne->shape == shape && ne->isFinished()) {
/* Mark ne as active. */
ne->cursor = ne->length;
length = ne->length;
if (js_CompareAndSwap(&ne->cursor, 0, length))
break;
length = 0;
JS_ASSERT(!ne->isFinished());
break;
}
ne = NULL;
}
ENUM_CACHE_METER(nativeEnumMisses);

JS_LOCK_OBJ(cx, obj);

/* Count all enumerable properties in object's scope. */
JS_ASSERT(length == 0);
for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
JSScope *scope = OBJ_SCOPE(obj);
length = 0;
for (JSScopeProperty *sprop = SCOPE_LAST_PROP(scope);
sprop;
sprop = sprop->parent) {
if ((sprop->attrs & JSPROP_ENUMERATE) &&
!(sprop->flags & SPROP_IS_ALIAS) &&
(!scope->hadMiddleDelete() || scope->has(sprop))) {

@ -5081,26 +5116,35 @@ js_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
}
}
if (length == 0) {
/*
* Cache the scope without enumerable properties unless its
* shape overflows, see bug 440834.
*/
if (shape < SHAPE_OVERFLOW_BIT)
*cachep = ((jsuword) shape << 1) | (jsuword) 1;
/*
* Cache the scope without enumerable properties unless its
* shape overflows, see bug 440834.
*/
JS_UNLOCK_SCOPE(cx, scope);
if (shape < SHAPE_OVERFLOW_BIT) {
SetEnumeratorCache(cx, cachep,
(jsuword(shape) << 1) | jsuword(1));
}
ne = NULL;
break;
}

allocated = NativeEnumeratorSize(length);
ne = (JSNativeEnumerator *) cx->malloc(allocated);
ne = (JSNativeEnumerator *)
cx->mallocNoReport(JSNativeEnumerator::size(length));
if (!ne) {
/* Report the OOM error outside the lock. */
JS_UNLOCK_SCOPE(cx, scope);
return JS_FALSE;
JS_ReportOutOfMemory(cx);
return false;
}
ne->cursor = length;
ne->length = length;
ne->shape = shape;
ids = ne->ids;
for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {

jsid *ids = ne->ids;
for (JSScopeProperty *sprop = SCOPE_LAST_PROP(scope);
sprop;
sprop = sprop->parent) {
if ((sprop->attrs & JSPROP_ENUMERATE) &&
!(sprop->flags & SPROP_IS_ALIAS) &&
(!scope->hadMiddleDelete() || scope->has(sprop))) {

@ -5109,124 +5153,98 @@ js_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
}
}
JS_ASSERT(ids == ne->ids + length);
JS_UNLOCK_SCOPE(cx, scope);

/*
* Do not cache enumerators for objects with a shape
* that had overflowed, see bug 440834.
*/
if (shape < SHAPE_OVERFLOW_BIT)
SetEnumeratorCache(cx, cachep, reinterpret_cast<jsuword>(ne));
} while (0);
JS_UNLOCK_SCOPE(cx, scope);

if (!ne) {
JS_ASSERT(length == 0);
JS_ASSERT(allocated == 0);
*statep = JSVAL_ZERO;
} else {
JS_ASSERT(length != 0);
JS_ASSERT(ne->cursor == (jsword) length);
if (allocated != 0) {
JS_LOCK_GC(cx->runtime);
if (!js_AddAsGCBytes(cx, allocated)) {
/* js_AddAsGCBytes releases the GC lock on failures. */
cx->free(ne);
return JS_FALSE;
}
ne->next = cx->runtime->nativeEnumerators;
cx->runtime->nativeEnumerators = ne;
JS_ASSERT(((jsuword) ne & (jsuword) 1) == (jsuword) 0);

/*
* Do not cache enumerators for objects with a shape
* that had overflowed, see bug 440834.
*/
if (shape < SHAPE_OVERFLOW_BIT)
*cachep = (jsuword) ne;
JS_UNLOCK_GC(cx->runtime);
}
JS_ASSERT(ne->cursor == length);
JS_ASSERT(!(reinterpret_cast<jsuword>(ne) & jsuword(1)));
*statep = PRIVATE_TO_JSVAL(ne);
}
if (idp)
*idp = INT_TO_JSVAL(length);
break;
}

case JSENUMERATE_NEXT:
case JSENUMERATE_DESTROY:
case JSENUMERATE_DESTROY: {
if (*statep == JSVAL_ZERO) {
*statep = JSVAL_NULL;
break;
}
ne = (JSNativeEnumerator *) JSVAL_TO_PRIVATE(*statep);
JSNativeEnumerator *ne = (JSNativeEnumerator *)
JSVAL_TO_PRIVATE(*statep);
JS_ASSERT(ne->length >= 1);
JS_ASSERT(ne->cursor >= 1);

/*
* We must not access ne->cursor when we set it to zero as it means
* that ne is free and another thread can grab it from the cache. So
* we set the state to JSVAL_ZERO in the NEXT case to avoid touching
* ne->length again in the DESTROY case.
*/
if (enum_op == JSENUMERATE_NEXT) {
newcursor = ne->cursor - 1;
uint32 newcursor = ne->cursor - 1;
*idp = ne->ids[newcursor];
ne->cursor = newcursor;
if (newcursor == 0)
*statep = JSVAL_ZERO;
if (newcursor != 0) {
ne->cursor = newcursor;
break;
}
} else {
/* The enumerator has not iterated over all ids. */
JS_ASSERT(enum_op == JSENUMERATE_DESTROY);
ne->cursor = 0;
}
*statep = JSVAL_ZERO;

/*
* Force on shutdown an extra GC cycle so all native enumerators
* on the rt->nativeEnumerators list will be removed when the GC
* calls js_TraceNativeEnumerators. See bug 499570.
*/
if (cx->runtime->state == JSRTS_LANDING)
cx->runtime->gcPoke = true;
jsuword *cachep = &JS_THREAD_DATA(cx)->
nativeEnumCache[NATIVE_ENUM_CACHE_HASH(ne->shape)];
if (reinterpret_cast<jsuword>(ne) == *cachep) {
/* Mark the cached iterator as available. */
ne->cursor = 0;
} else {
cx->free(ne);
}
break;
}
}
return JS_TRUE;
return true;
}

void
js_TraceNativeEnumerators(JSTracer *trc)
js_MarkEnumeratorState(JSTracer *trc, JSObject *obj, jsval state)
{
JSRuntime *rt;
JSNativeEnumerator **nep, *ne;
jsid *cursor, *end;
if (JSVAL_IS_TRACEABLE(state)) {
JS_CALL_TRACER(trc, JSVAL_TO_TRACEABLE(state),
JSVAL_TRACE_KIND(state), "enumerator_value");
} else if (obj->map->ops->enumerate == js_Enumerate &&
!(obj->getClass()->flags & JSCLASS_NEW_ENUMERATE)) {
/* Check if state stores JSNativeEnumerator. */
JS_ASSERT(JSVAL_IS_INT(state) ||
JSVAL_IS_NULL(state) ||
JSVAL_IS_VOID(state));
if (JSVAL_IS_INT(state) && state != JSVAL_ZERO)
((JSNativeEnumerator *) JSVAL_TO_PRIVATE(state))->mark(trc);
}
}

/*
* Purge native enumerators cached by shape id when the GC is about to
* re-number shapes due to shape generation overflow. Do this also when
* shutting down the runtime.
*/
rt = trc->context->runtime;
bool doGC = IS_GC_MARKING_TRACER(trc) &&
(rt->gcRegenShapes || rt->state == JSRTS_LANDING);
void
js_PurgeCachedNativeEnumerators(JSContext *cx, JSThreadData *data)
{
jsuword *cachep = &data->nativeEnumCache[0];
jsuword *end = cachep + JS_ARRAY_LENGTH(data->nativeEnumCache);
for (; cachep != end; ++cachep)
SetEnumeratorCache(cx, cachep, jsuword(0));

if (doGC) {
memset(&rt->nativeEnumCache, 0, sizeof rt->nativeEnumCache);
#ifdef JS_DUMP_ENUM_CACHE_STATS
printf("nativeEnumCache hit rate %g%%\n",
100.0 * (rt->nativeEnumProbes - rt->nativeEnumMisses) /
rt->nativeEnumProbes);
printf("nativeEnumCache hit rate %g%%\n",
100.0 * (cx->runtime->nativeEnumProbes -
cx->runtime->nativeEnumMisses) /
cx->runtime->nativeEnumProbes);
#endif
}

nep = &rt->nativeEnumerators;
while ((ne = *nep) != NULL) {
JS_ASSERT(ne->length != 0);
if (ne->cursor != 0) {
/* Trace ids of the running enumerator. */
cursor = ne->ids;
end = cursor + ne->length;
do {
js_TraceId(trc, *cursor);
} while (++cursor != end);
} else if (doGC) {
js_RemoveAsGCBytes(rt, NativeEnumeratorSize(ne->length));
*nep = ne->next;
trc->context->free(ne);
continue;
}
nep = &ne->next;
}
}

JSBool
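js_Enumerate above implements the three-phase JSENUMERATE_INIT/NEXT/DESTROY protocol. A hedged sketch of how a caller drives it, modeled on the state transitions visible in the code (the real callers live in the interpreter; ForEachId is illustrative):

/* Hedged sketch: per the code above, INIT yields JSVAL_ZERO for an empty
 * enumerator, NEXT flips an exhausted state to JSVAL_ZERO and then to
 * JSVAL_NULL, and DESTROY is for abandoning iteration early. */
static JSBool
ForEachId(JSContext *cx, JSObject *obj)
{
    jsval state;
    jsid id;
    if (!js_Enumerate(cx, obj, JSENUMERATE_INIT, &state, &id))
        return JS_FALSE;
    for (;;) {
        if (!js_Enumerate(cx, obj, JSENUMERATE_NEXT, &state, &id))
            return JS_FALSE;
        if (JSVAL_IS_NULL(state))    /* exhausted */
            break;
        /* ... visit id here ... */
    }
    return JS_TRUE;
}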
@ -257,8 +257,9 @@ struct JSObject {
}

JSBool defineProperty(JSContext *cx, jsid id, jsval value,
JSPropertyOp getter, JSPropertyOp setter,
uintN attrs) {
JSPropertyOp getter = JS_PropertyStub,
JSPropertyOp setter = JS_PropertyStub,
uintN attrs = JSPROP_ENUMERATE) {
return map->ops->defineProperty(cx, this, id, value, getter, setter, attrs);
}

@ -350,17 +351,25 @@ struct JSObject {
#define STOBJ_NSLOTS(obj) \
((obj)->dslots ? (uint32)(obj)->dslots[-1] : (uint32)JS_INITIAL_NSLOTS)

#define STOBJ_GET_SLOT(obj,slot) \
((slot) < JS_INITIAL_NSLOTS \
? (obj)->fslots[(slot)] \
: (JS_ASSERT((slot) < (uint32)(obj)->dslots[-1]), \
(obj)->dslots[(slot) - JS_INITIAL_NSLOTS]))
inline jsval&
STOBJ_GET_SLOT(JSObject *obj, uintN slot)
{
return (slot < JS_INITIAL_NSLOTS)
? obj->fslots[slot]
: (JS_ASSERT(slot < (uint32)obj->dslots[-1]),
obj->dslots[slot - JS_INITIAL_NSLOTS]);
}

#define STOBJ_SET_SLOT(obj,slot,value) \
((slot) < JS_INITIAL_NSLOTS \
? (obj)->fslots[(slot)] = (value) \
: (JS_ASSERT((slot) < (uint32)(obj)->dslots[-1]), \
(obj)->dslots[(slot) - JS_INITIAL_NSLOTS] = (value)))
inline void
STOBJ_SET_SLOT(JSObject *obj, uintN slot, jsval value)
{
if (slot < JS_INITIAL_NSLOTS) {
obj->fslots[slot] = value;
} else {
JS_ASSERT(slot < (uint32)obj->dslots[-1]);
obj->dslots[slot - JS_INITIAL_NSLOTS] = value;
}
}

inline JSClass*
STOBJ_GET_CLASS(const JSObject* obj)

@ -492,7 +501,7 @@ OBJ_IS_CLONED_BLOCK(JSObject *obj)
}

extern JSBool
js_DefineBlockVariable(JSContext *cx, JSObject *obj, jsid id, int16 index);
js_DefineBlockVariable(JSContext *cx, JSObject *obj, jsid id, intN index);

#define OBJ_BLOCK_COUNT(cx,obj) \
(OBJ_SCOPE(obj)->entryCount)

@ -872,7 +881,10 @@ js_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
jsval *statep, jsid *idp);

extern void
js_TraceNativeEnumerators(JSTracer *trc);
js_MarkEnumeratorState(JSTracer *trc, JSObject *obj, jsval state);

extern void
js_PurgeCachedNativeEnumerators(JSContext *cx, JSThreadData *data);

extern JSBool
js_CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode,
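Turning STOBJ_GET_SLOT/STOBJ_SET_SLOT from macros into inline functions, as the jsobj.h hunk above does, buys argument type checking and single evaluation at no runtime cost. A standalone illustration of the difference (Toy is a stand-in for JSObject's fslots/dslots split):

/* Hedged sketch: the macro evaluates its index argument twice, the
 * inline function exactly once, and only the latter type-checks. */
struct Toy {
    int fast[4];
    int *slow;
};

#define TOY_GET(t, i) ((i) < 4 ? (t)->fast[(i)] : (t)->slow[(i) - 4])

inline int toyGet(Toy *t, unsigned i) {
    return i < 4 ? t->fast[i] : t->slow[i - 4];
}

/* TOY_GET(&t, idx++) bumps idx twice; toyGet(&t, idx++) bumps it once. */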
@ -64,6 +64,11 @@

#include "jsatominlines.h"

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4351)
#endif

struct JSONParser
{
JSONParser(JSContext *cx)

@ -83,6 +88,10 @@ struct JSONParser
js::Vector<jschar, 8> buffer;
};

#ifdef _MSC_VER
#pragma warning(pop)
#endif

JSClass js_JSONClass = {
js_JSON_str,
JSCLASS_HAS_CACHED_PROTO(JSProto_JSON),

@ -97,7 +106,7 @@ js_json_parse(JSContext *cx, uintN argc, jsval *vp)
JSString *s = NULL;
jsval *argv = vp + 2;
jsval reviver = JSVAL_NULL;
JSAutoTempValueRooter(cx, 1, &reviver);
JSAutoTempValueRooter tvr(cx, 1, &reviver);

if (!JS_ConvertArguments(cx, argc, argv, "S / v", &s, &reviver))
return JS_FALSE;

@ -121,8 +130,8 @@ js_json_stringify(JSContext *cx, uintN argc, jsval *vp)
jsval *argv = vp + 2;
JSObject *replacer = NULL;
jsval space = JSVAL_NULL;
JSAutoTempValueRooter(cx, replacer);
JSAutoTempValueRooter(cx, 1, &space);
JSAutoTempValueRooter tvr(cx, replacer);
JSAutoTempValueRooter tvr2(cx, 1, &space);

// Must throw an Error if there isn't a first arg
if (!JS_ConvertArguments(cx, argc, argv, "v / o v", vp, &replacer, &space))
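The json.cpp fixes above replace unnamed JSAutoTempValueRooter temporaries with named locals. The unnamed form constructs a temporary that is destroyed at the end of the full expression, so nothing stays rooted across the calls that follow. A minimal standalone illustration of this C++ pitfall (Guard stands in for the rooter):

#include <cstdio>

struct Guard {
    Guard()  { std::puts("rooted"); }
    ~Guard() { std::puts("unrooted"); }
};

int main() {
    Guard();         /* temporary: "unrooted" prints immediately */
    Guard g;         /* named local: alive until end of scope */
    std::puts("work");
    return 0;        /* g is released here, after the work */
}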
@ -3097,10 +3097,8 @@
restore_scope:
/* Restore fp->scopeChain now that obj is defined in fp->varobj. */
fp->scopeChain = obj2;
if (!ok) {
cx->weakRoots.newborn[GCX_OBJECT] = NULL;
if (!ok)
goto error;
}
}
END_CASE(JSOP_DEFFUN)

@ -3148,10 +3146,8 @@
}
}

if (!ok) {
cx->weakRoots.newborn[GCX_OBJECT] = NULL;
if (!ok)
goto error;
}
END_CASE(JSOP_DEFFUN_FC)

BEGIN_CASE(JSOP_DEFLOCALFUN)
@ -1316,6 +1316,7 @@ LinkUseToDef(JSParseNode *pn, JSDefinition *dn, JSTreeContext *tc)
JS_ASSERT(pn != dn->dn_uses);
pn->pn_link = dn->dn_uses;
dn->dn_uses = pn;
dn->pn_dflags |= pn->pn_dflags & PND_USE2DEF_FLAGS;
pn->pn_used = true;
pn->pn_lexdef = dn;
}

@ -4309,7 +4310,6 @@ RebindLets(JSParseNode *pn, JSTreeContext *tc)
JSDefinition *dn = ALE_DEFN(ale);
dn->pn_type = TOK_NAME;
dn->pn_op = JSOP_NOP;
dn->pn_dflags |= pn->pn_dflags & PND_USE2DEF_FLAGS;
}
LinkUseToDef(pn, ALE_DEFN(ale), tc);
}
@ -2003,17 +2003,13 @@ CompileRegExpToAST(JSContext* cx, JSTokenStream* ts,
#ifdef JS_TRACER
typedef js::Vector<LIns *, 4, js::ContextAllocPolicy> LInsList;

/* Dummy GC for nanojit placement new. */
static GC gc;
static avmplus::AvmCore s_core = avmplus::AvmCore();

/* Return the cached fragment for the given regexp, or create one. */
static Fragment*
LookupNativeRegExp(JSContext* cx, uint16 re_flags,
const jschar* re_chars, size_t re_length)
{
JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
VMAllocator &alloc = *tm->allocator;
VMAllocator &alloc = *tm->dataAlloc;
REHashMap &table = *tm->reFragments;

REHashKey k(re_length, re_flags, re_chars);

@ -2274,6 +2270,7 @@ enumerateNextChars(JSContext *cx, RENode *node, CharSet &set)

class RegExpNativeCompiler {
private:
VMAllocator& tempAlloc;
JSContext* cx;
JSRegExp* re;
CompilerState* cs; /* RegExp to compile */

@ -2290,6 +2287,10 @@ class RegExpNativeCompiler {
LIns* state;
LIns* cpend;

bool outOfMemory() {
return tempAlloc.outOfMemory() || JS_TRACE_MONITOR(cx).dataAlloc->outOfMemory();
}

JSBool isCaseInsensitive() const { return (cs->flags & JSREG_FOLD) != 0; }

void targetCurrentPoint(LIns *ins)

@ -2480,7 +2481,6 @@ class RegExpNativeCompiler {

LIns* compileFlat(RENode *&node, LIns* pos, LInsList& fails)
{
VMAllocator *alloc = JS_TRACE_MONITOR(cx).allocator;
#ifdef USE_DOUBLE_CHAR_MATCH
if (node->u.flat.length == 1) {
if (node->next && node->next->op == REOP_FLAT &&

@ -2496,7 +2496,7 @@ class RegExpNativeCompiler {
} else {
size_t i;
for (i = 0; i < node->u.flat.length - 1; i += 2) {
if (alloc->outOfMemory())
if (outOfMemory())
return 0;
pos = compileFlatDoubleChar(((jschar*) node->kid)[i],
((jschar*) node->kid)[i+1],

@ -2514,7 +2514,7 @@ class RegExpNativeCompiler {
return compileFlatSingleChar(node->u.flat.chr, pos, fails);
} else {
for (size_t i = 0; i < node->u.flat.length; i++) {
if (alloc->outOfMemory())
if (outOfMemory())
return 0;
pos = compileFlatSingleChar(((jschar*) node->kid)[i], pos, fails);
if (!pos)

@ -2536,16 +2536,16 @@ class RegExpNativeCompiler {
*/
RECharSet *charSet = &re->classList[node->u.ucclass.index];
size_t bitmapLen = (charSet->length >> 3) + 1;
/* insSkip() can't hold large data blocks. */
/* Arbitrary size limit on bitmap. */
if (bitmapLen > 1024)
return NULL;
Allocator &alloc = *JS_TRACE_MONITOR(cx).dataAlloc;
/* The following line allocates charSet.u.bits if successful. */
if (!charSet->converted && !ProcessCharSet(cx, re, charSet))
return NULL;
LIns* skip = lirBufWriter->insSkip(bitmapLen);
if (JS_TRACE_MONITOR(cx).allocator->outOfMemory())
void* bitmapData = alloc.alloc(bitmapLen);
if (outOfMemory())
return NULL;
void* bitmapData = skip->payload();
memcpy(bitmapData, charSet->u.bits, bitmapLen);

LIns* to_fail = lir->insBranch(LIR_jf, lir->ins2(LIR_plt, pos, cpend), 0);

@ -2962,9 +2962,8 @@ class RegExpNativeCompiler {
*/
LIns *compileNode(RENode *node, LIns *pos, bool atEnd, LInsList &fails)
{
VMAllocator *alloc = JS_TRACE_MONITOR(cx).allocator;
for (; pos && node; node = node->next) {
if (alloc->outOfMemory())
if (outOfMemory())
return NULL;

bool childNextIsEnd = atEnd && !node->next;

@ -3040,7 +3039,7 @@ class RegExpNativeCompiler {
/* Failed to match on first character, so fail whole match. */
lir->ins0(LIR_regfence);
lir->ins1(LIR_ret, lir->insImm(0));
return !JS_TRACE_MONITOR(cx).allocator->outOfMemory();
return !outOfMemory();
}

/* Compile normal regular expressions that can match starting at any char. */

@ -3056,7 +3055,7 @@ class RegExpNativeCompiler {
lir->insStorei(lir->ins2(LIR_piadd, start, lir->insImmWord(2)), state,
offsetof(REGlobalData, skipped));

return !JS_TRACE_MONITOR(cx).allocator->outOfMemory();
return !outOfMemory();
}

inline LIns*

@ -3082,16 +3081,17 @@ class RegExpNativeCompiler {
lir->ins1(LIR_live, lirbuf->param1);
}

LIns* skip = lirBufWriter->insSkip(sizeof(GuardRecord) +
sizeof(SideExit) +
(re_length-1) * sizeof(jschar));
GuardRecord* guard = (GuardRecord *) skip->payload();
memset(guard, 0, sizeof(*guard));
SideExit* exit = (SideExit*)(guard+1);
Allocator &alloc = *JS_TRACE_MONITOR(cx).dataAlloc;

size_t len = (sizeof(GuardRecord) +
sizeof(VMSideExit) +
(re_length-1) * sizeof(jschar));
GuardRecord* guard = (GuardRecord *) alloc.alloc(len);
VMSideExit* exit = (VMSideExit*)(guard+1);
guard->exit = exit;
guard->exit->target = fragment;
fragment->lastIns = lir->insGuard(LIR_x, NULL, skip);
// guard->profCount is memset'd to zero
fragment->lastIns = lir->insGuard(LIR_x, NULL, guard);
// guard->profCount is calloc'd to zero
verbose_only(
guard->profGuardID = fragment->guardNumberer++;
guard->nextInFrag = fragment->guardsForFrag;

@ -3101,10 +3101,17 @@ class RegExpNativeCompiler {
}

public:
RegExpNativeCompiler(JSRegExp* re, CompilerState* cs, Fragment* fragment)
: re(re), cs(cs), fragment(fragment), lir(NULL), lirBufWriter(NULL) { }
RegExpNativeCompiler(JSContext* cx, JSRegExp* re, CompilerState* cs, Fragment* fragment)
: tempAlloc(*JS_TRACE_MONITOR(cx).reTempAlloc), cx(cx),
re(re), cs(cs), fragment(fragment), lir(NULL), lirBufWriter(NULL) { }

JSBool compile(JSContext* cx)
~RegExpNativeCompiler() {
/* Purge the tempAlloc used during recording. */
tempAlloc.reset();
JS_TRACE_MONITOR(cx).reLirBuf->clear();
}

JSBool compile()
{
GuardRecord* guard = NULL;
LIns* pos;

@ -3112,10 +3119,9 @@ class RegExpNativeCompiler {
size_t re_length;
JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
Assembler *assm = tm->assembler;
VMAllocator& alloc = *tm->allocator;
LIns* loopLabel = NULL;

if (alloc.outOfMemory() || js_OverfullJITCache(tm))
if (outOfMemory() || js_OverfullJITCache(tm))
return JS_FALSE;

re->source->getCharsAndLength(re_chars, re_length);

@ -3128,25 +3134,24 @@ class RegExpNativeCompiler {
return JS_FALSE;
}

this->cx = cx;
/* At this point we have an empty fragment. */
LirBuffer* lirbuf = fragment->lirbuf;
if (alloc.outOfMemory())
if (outOfMemory())
goto fail;
/* FIXME Use bug 463260 smart pointer when available. */
lir = lirBufWriter = new (&gc) LirBufWriter(lirbuf);
lir = lirBufWriter = new LirBufWriter(lirbuf);

/* FIXME Use bug 463260 smart pointer when available. */
#ifdef NJ_VERBOSE
debug_only_stmt(
if (js_LogController.lcbits & LC_TMRegexp) {
lir = verbose_filter = new (&gc) VerboseWriter(alloc, lir, lirbuf->names,
&js_LogController);
lir = verbose_filter = new VerboseWriter(tempAlloc, lir, lirbuf->names,
&js_LogController);
}
)
#endif
#ifdef DEBUG
lir = sanity_filter = new (&gc) SanityFilter(lir);
lir = sanity_filter = new SanityFilter(lir);
#endif

/*

@ -3189,9 +3194,9 @@ class RegExpNativeCompiler {

guard = insertGuard(loopLabel, re_chars, re_length);

if (alloc.outOfMemory())
if (outOfMemory())
goto fail;
::compile(assm, fragment, alloc verbose_only(, tm->labels));
::compile(assm, fragment verbose_only(, tempAlloc, tm->labels));
if (assm->error() != nanojit::None)
goto fail;

@ -3205,7 +3210,7 @@ class RegExpNativeCompiler {
#endif
return JS_TRUE;
fail:
if (alloc.outOfMemory() || js_OverfullJITCache(tm)) {
if (outOfMemory() || js_OverfullJITCache(tm)) {
delete lirBufWriter;
// recover profiling data from expiring Fragments
verbose_only(

@ -3221,6 +3226,9 @@ class RegExpNativeCompiler {
re->flags |= JSREG_NOCOMPILE;
delete lirBufWriter;
}
#ifdef DEBUG
delete sanity_filter;
#endif
#ifdef NJ_VERBOSE
debug_only_stmt( if (js_LogController.lcbits & LC_TMRegexp)
delete lir; )

@ -3238,14 +3246,14 @@ CompileRegExpToNative(JSContext* cx, JSRegExp* re, Fragment* fragment)
JSBool rv = JS_FALSE;
void* mark;
CompilerState state;
RegExpNativeCompiler rc(re, &state, fragment);
RegExpNativeCompiler rc(cx, re, &state, fragment);

JS_ASSERT(!fragment->code());
mark = JS_ARENA_MARK(&cx->tempPool);
if (!CompileRegExpToAST(cx, NULL, re->source, re->flags, state)) {
goto out;
}
rv = rc.compile(cx);
rv = rc.compile();
out:
JS_ARENA_RELEASE(&cx->tempPool, mark);
return rv;

@ -4084,8 +4092,7 @@ SimpleMatch(REGlobalData *gData, REMatchState *x, REOp op,
JS_ASSERT(charSet->converted);
ch = *x->cp;
index = ch >> 3;
if (charSet->length != 0 &&
ch <= charSet->length &&
if (ch <= charSet->length &&
(charSet->u.bits[index] & (1 << (ch & 0x7)))) {
result = x;
result->cp++;

@ -4100,8 +4107,7 @@ SimpleMatch(REGlobalData *gData, REMatchState *x, REOp op,
JS_ASSERT(charSet->converted);
ch = *x->cp;
index = ch >> 3;
if (charSet->length == 0 ||
ch > charSet->length ||
if (ch > charSet->length ||
!(charSet->u.bits[index] & (1 << (ch & 0x7)))) {
result = x;
result->cp++;

@ -4197,8 +4203,7 @@ ExecuteREBytecode(REGlobalData *gData, REMatchState *x)
goto bad;
matchCh1 = *x->cp;
k = matchCh1 >> 3;
if ((charSet->length == 0 ||
matchCh1 > charSet->length ||
if ((matchCh1 > charSet->length ||
!(charSet->u.bits[k] & (1 << (matchCh1 & 0x7)))) ^
charSet->sense) {
goto doAlt;

@ -4904,17 +4909,13 @@ js_ExecuteRegExp(JSContext *cx, JSRegExp *re, JSString *str, size_t *indexp,
ok = js_DefineProperty(cx, obj, id, val, \
JS_PropertyStub, JS_PropertyStub, \
JSPROP_ENUMERATE); \
if (!ok) { \
cx->weakRoots.newborn[GCX_OBJECT] = NULL; \
cx->weakRoots.newborn[GCX_STRING] = NULL; \
if (!ok) \
goto out; \
} \
}

matchstr = js_NewDependentString(cx, str, cp - str->chars(),
matchlen);
if (!matchstr) {
cx->weakRoots.newborn[GCX_OBJECT] = NULL;
ok = JS_FALSE;
goto out;
}

@ -4951,8 +4952,6 @@ js_ExecuteRegExp(JSContext *cx, JSRegExp *re, JSString *str, size_t *indexp,
res->moreLength * sizeof(JSSubString));
}
if (!morepar) {
cx->weakRoots.newborn[GCX_OBJECT] = NULL;
cx->weakRoots.newborn[GCX_STRING] = NULL;
ok = JS_FALSE;
goto out;
}

@ -4976,19 +4975,14 @@ js_ExecuteRegExp(JSContext *cx, JSRegExp *re, JSString *str, size_t *indexp,
str->chars(),
parsub->length);
if (!parstr) {
cx->weakRoots.newborn[GCX_OBJECT] = NULL;
cx->weakRoots.newborn[GCX_STRING] = NULL;
ok = JS_FALSE;
goto out;
}
ok = js_DefineProperty(cx, obj, INT_TO_JSID(num + 1), STRING_TO_JSVAL(parstr),
NULL, NULL, JSPROP_ENUMERATE);
}
if (!ok) {
cx->weakRoots.newborn[GCX_OBJECT] = NULL;
cx->weakRoots.newborn[GCX_STRING] = NULL;
if (!ok)
goto out;
}
}
if (parsub->index == -1) {
res->lastParen = js_EmptySubString;

@ -5830,3 +5824,19 @@ js_CloneRegExpObject(JSContext *cx, JSObject *obj, JSObject *parent)
return clone;
}

bool
js_ContainsRegExpMetaChars(const jschar *chars, size_t length)
{
for (size_t i = 0; i < length; ++i) {
jschar c = chars[i];
switch (c) {
/* Taken from the PatternCharacter production in 15.10.1. */
case '^': case '$': case '\\': case '.': case '*': case '+':
case '?': case '(': case ')': case '[': case ']': case '{':
case '}': case '|':
return true;
default:;
}
}
return false;
}
@ -195,6 +195,10 @@ js_ClearRegExpLastIndex(JSObject *obj)
obj->fslots[JSSLOT_REGEXP_LAST_INDEX] = JSVAL_ZERO;
}

/* Return whether the given character array contains RegExp meta-characters. */
extern bool
js_ContainsRegExpMetaChars(const jschar *chars, size_t length);

JS_END_EXTERN_C

#endif /* jsregexp_h___ */
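The helper declared above lets the String methods skip regexp compilation entirely when a pattern contains no metacharacters. A hedged sketch of the intended fast-path test; maxFlatLen is a hypothetical cap in the spirit of the sMaxFlatPatLen bound that appears later in jsstr.cpp:

/* Hedged sketch: a flat substring search is equivalent to a compiled
 * regexp when the pattern has no metacharacters and is short enough
 * that scanning for them is cheap. */
static bool
CanSearchFlat(const jschar *pat, size_t patlen, size_t maxFlatLen)
{
    return patlen <= maxFlatLen && !js_ContainsRegExpMetaChars(pat, patlen);
}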
@ -176,12 +176,21 @@ js_IsIdentifier(JSString *str)
return JS_TRUE;
}

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4351)
#endif

/* Initialize members that aren't initialized in |init|. */
JSTokenStream::JSTokenStream(JSContext *cx)
: tokens(), cursor(), lookahead(), ungetpos(), ungetbuf(), flags(), linelen(),
linepos(), file(), listenerTSData(), saveEOL(), tokenbuf(cx)
{}

#ifdef _MSC_VER
#pragma warning(pop)
#endif

bool
JSTokenStream::init(JSContext *cx, const jschar *base, size_t length,
FILE *fp, const char *fn, uintN ln)

@ -656,7 +665,7 @@ static JSBool
GetXMLEntity(JSContext *cx, JSTokenStream *ts)
{
ptrdiff_t offset, length, i;
int32 c, d;
int c, d;
JSBool ispair;
jschar *bp, digit;
char *bytes;

@ -858,7 +867,7 @@ JSTokenType
js_GetToken(JSContext *cx, JSTokenStream *ts)
{
JSTokenType tt;
int32 c, qc;
int c, qc;
JSToken *tp;
JSAtom *atom;
JSBool hadUnicodeEscape;
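The push/pop pragma pair above silences MSVC warning C4351, which fires when an array member is named in a constructor's initializer list: value-initializing the array was a behavior change in older MSVC, and the compiler warns even when zero-filling is exactly what is wanted. A minimal reproduction:

/* Hedged sketch: the pattern that triggers C4351. Naming bytes in the
 * initializer list value-initializes it (all zeros), which is the
 * intent, so the warning is suppressed rather than worked around. */
struct Buf {
    char bytes[16];
    Buf() : bytes() {}   /* C4351 on older MSVC; bytes is zero-filled */
};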
@ -297,6 +297,7 @@ JSScope::searchTable(jsid id, bool adding)
uint32 sizeMask;

JS_ASSERT(table);
JS_ASSERT(!JSVAL_IS_NULL(id));

/* Compute the primary hash address. */
METER(hashes);

@ -448,7 +449,8 @@ js_HashScopeProperty(JSDHashTable *table, const void *key)

#define SPROP_MATCH_PARAMS(sprop, aid, agetter, asetter, aslot, aattrs, \
aflags, ashortid) \
((sprop)->id == (aid) && \
(JS_ASSERT(!JSVAL_IS_NULL((sprop)->id)), JS_ASSERT(!JSVAL_IS_NULL(aid)), \
(sprop)->id == (aid) && \
SPROP_MATCH_PARAMS_AFTER_ID(sprop, agetter, asetter, aslot, aattrs, \
aflags, ashortid))

@ -585,6 +587,7 @@ InsertPropertyTreeChild(JSRuntime *rt, JSScopeProperty *parent,
uintN i;

JS_ASSERT(!parent || child->parent != parent);
JS_ASSERT(!JSVAL_IS_NULL(child->id));

if (!parent) {
table = &rt->propertyTreeHash;

@ -617,6 +620,7 @@ InsertPropertyTreeChild(JSRuntime *rt, JSScopeProperty *parent,
JS_RUNTIME_METER(rt, duplicatePropTreeNodes);
}
} else {
JS_ASSERT(!JSVAL_IS_NULL(parent->id));
childp = &parent->kids;
kids = *childp;
if (kids) {

@ -729,6 +733,7 @@ RemovePropertyTreeChild(JSRuntime *rt, JSScopeProperty *child)
*/
table = &rt->propertyTreeHash;
} else {
JS_ASSERT(!JSVAL_IS_NULL(parent->id));
kids = parent->kids;
if (KIDS_IS_CHUNKY(kids)) {
list = chunk = KIDS_TO_CHUNK(kids);

@ -832,6 +837,8 @@ GetPropertyTreeChild(JSContext *cx, JSScopeProperty *parent,
PropTreeKidsChunk *chunk;
uintN i, n;

JS_ASSERT(!JSVAL_IS_NULL(child->id));

rt = cx->runtime;
if (!parent) {
JS_LOCK_GC(rt);

@ -846,6 +853,8 @@ GetPropertyTreeChild(JSContext *cx, JSScopeProperty *parent,
if (sprop)
goto out;
} else {
JS_ASSERT(!JSVAL_IS_NULL(parent->id));

/*
* Because chunks are appended at the end and never deleted except by
* the GC, we can search without taking the runtime's GC lock. We may

@ -1013,8 +1022,27 @@ JSScope::reportReadOnlyScope(JSContext *cx)
void
JSScope::generateOwnShape(JSContext *cx)
{
if (object)
js_LeaveTraceIfGlobalObject(cx, object);
#ifdef JS_TRACER
if (object) {
js_LeaveTraceIfGlobalObject(cx, object);

/*
* The JIT must have arranged to re-guard after any unpredictable shape
* change, so if we are on trace here, we should already be prepared to
* bail off trace.
*/
JS_ASSERT_IF(JS_ON_TRACE(cx), cx->bailExit);

/*
* If we are recording, here is where we forget already-guarded shapes.
* Any subsequent property operation upon object on the trace currently
* being recorded will re-guard (and re-memoize).
*/
JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
if (TraceRecorder *tr = tm->recorder)
tr->forgetGuardedShapesForObject(object);
}
#endif

shape = js_GenerateShape(cx, false);
setOwnShape();

@ -1037,6 +1065,8 @@ JSScope::add(JSContext *cx, jsid id,
JS_ASSERT_IF(attrs & JSPROP_GETTER, getter);
JS_ASSERT_IF(attrs & JSPROP_SETTER, setter);

JS_ASSERT(!JSVAL_IS_NULL(id));

/*
* You can't add properties to a sealed scope. But note well that you can
* change property attributes in a sealed scope, even though that replaces

@ -1391,6 +1421,8 @@ JSScope::change(JSContext *cx, JSScopeProperty *sprop,

CHECK_ANCESTOR_LINE(this, true);

JS_ASSERT(!JSVAL_IS_NULL(sprop->id));

/* Allow only shared (slot-less) => unshared (slot-full) transition. */
attrs |= sprop->attrs & mask;
JS_ASSERT(!((attrs ^ sprop->attrs) & JSPROP_SHARED) ||

@ -1571,12 +1603,14 @@ JSScope::brandingShapeChange(JSContext *cx, uint32 slot, jsval v)
void
JSScope::deletingShapeChange(JSContext *cx, JSScopeProperty *sprop)
{
JS_ASSERT(!JSVAL_IS_NULL(sprop->id));
generateOwnShape(cx);
}

bool
JSScope::methodShapeChange(JSContext *cx, JSScopeProperty *sprop, jsval toval)
{
JS_ASSERT(!JSVAL_IS_NULL(sprop->id));
if (sprop->isMethod()) {
#ifdef DEBUG
jsval prev = LOCKED_OBJ_GET_SLOT(object, sprop->slot);

@ -1610,6 +1644,7 @@ JSScope::methodShapeChange(JSContext *cx, uint32 slot, jsval toval)
generateOwnShape(cx);
} else {
for (JSScopeProperty *sprop = lastProp; sprop; sprop = sprop->parent) {
JS_ASSERT(!JSVAL_IS_NULL(sprop->id));
if (sprop->slot == slot && (!hadMiddleDelete() || has(sprop)))
return methodShapeChange(cx, sprop, toval);
}

@ -1626,6 +1661,7 @@ JSScope::protoShapeChange(JSContext *cx)
void
JSScope::replacingShapeChange(JSContext *cx, JSScopeProperty *sprop, JSScopeProperty *newsprop)
{
JS_ASSERT(!JSVAL_IS_NULL(sprop->id));
if (shape == sprop->shape)
shape = newsprop->shape;
else

@ -1641,6 +1677,7 @@ JSScope::sealingShapeChange(JSContext *cx)
void
JSScope::shadowingShapeChange(JSContext *cx, JSScopeProperty *sprop)
{
JS_ASSERT(!JSVAL_IS_NULL(sprop->id));
generateOwnShape(cx);
}

@ -1665,6 +1702,7 @@ PrintPropertyGetterOrSetter(JSTracer *trc, char *buf, size_t bufsize)
JS_ASSERT(trc->debugPrinter == PrintPropertyGetterOrSetter);
sprop = (JSScopeProperty *)trc->debugPrintArg;
id = sprop->id;
JS_ASSERT(!JSVAL_IS_NULL(id));
name = trc->debugPrintIndex ? js_setter_str : js_getter_str;

if (JSID_IS_ATOM(id)) {

@ -1689,6 +1727,7 @@ PrintPropertyMethod(JSTracer *trc, char *buf, size_t bufsize)
JS_ASSERT(trc->debugPrinter == PrintPropertyMethod);
sprop = (JSScopeProperty *)trc->debugPrintArg;
id = sprop->id;
JS_ASSERT(!JSVAL_IS_NULL(id));

JS_ASSERT(JSID_IS_ATOM(id));
n = js_PutEscapedString(buf, bufsize - 1, ATOM_TO_STRING(JSID_TO_ATOM(id)), 0);

@ -1785,6 +1824,7 @@ DumpSubtree(JSContext *cx, JSScopeProperty *sprop, int level, FILE *fp)

fprintf(fp, "%*sid ", level, "");
v = ID_TO_VALUE(sprop->id);
JS_ASSERT(!JSVAL_IS_NULL(v));
if (JSID_IS_INT(sprop->id)) {
fprintf(fp, "%d", JSVAL_TO_INT(v));
} else {
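Several jsscope changes above thread JS_ASSERT calls into expression macros with the comma operator, so the checks run in DEBUG builds while the macro still yields its original value. A standalone illustration of the trick:

#include <cassert>

/* Hedged sketch of the comma-operator idiom used by SPROP_MATCH_PARAMS
 * and SCOPE_LAST_PROP: the assert runs for its side effect, and the
 * expression evaluates to the final operand. */
#define CHECKED_DEREF(p) (assert((p) != 0), *(p))

static int demo(int *p) {
    return CHECKED_DEREF(p);   /* asserts p is non-null, then yields *p */
}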
@ -50,6 +50,11 @@
#include "jsprvtd.h"
#include "jspubtd.h"

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4800)
#endif

JS_BEGIN_EXTERN_C

/*

@ -418,9 +423,14 @@ OBJ_SHAPE(JSObject *obj)
* A little information hiding for scope->lastProp, in case it ever becomes
* a tagged pointer again.
*/
#define SCOPE_LAST_PROP(scope) ((scope)->lastProp)
#define SCOPE_REMOVE_LAST_PROP(scope) ((scope)->lastProp = \
(scope)->lastProp->parent)
#define SCOPE_LAST_PROP(scope) \
(JS_ASSERT_IF((scope)->lastProp, !JSVAL_IS_NULL((scope)->lastProp->id)), \
(scope)->lastProp)
#define SCOPE_REMOVE_LAST_PROP(scope) \
(JS_ASSERT_IF((scope)->lastProp->parent, \
!JSVAL_IS_NULL((scope)->lastProp->parent->id)), \
(scope)->lastProp = (scope)->lastProp->parent)

/*
* Helpers for reinterpreting JSPropertyOp as JSObject* for scripted getters
* and setters.

@ -763,6 +773,7 @@ inline bool
JSScopeProperty::get(JSContext* cx, JSObject* obj, JSObject *pobj, jsval* vp)
{
JS_ASSERT(!SPROP_HAS_STUB_GETTER(this));
JS_ASSERT(!JSVAL_IS_NULL(this->id));

if (attrs & JSPROP_GETTER) {
JS_ASSERT(!isMethod());

@ -831,4 +842,8 @@ js_FinishPropertyTree(JSRuntime *rt);

JS_END_EXTERN_C

#ifdef _MSC_VER
#pragma warning(pop)
#endif

#endif /* jsscope_h___ */
@ -100,6 +100,10 @@ typedef JSUintPtr uintptr_t;
* we would require compiler assistance, and at the moment we don't need
* preprocessor-correctness.
*/
#ifdef _MSC_VER
#undef SIZE_MAX
#endif

#define INTPTR_MAX ((intptr_t) (UINTPTR_MAX >> 1))
#define INTPTR_MIN (intptr_t(uintptr_t(INTPTR_MAX) + uintptr_t(1)))
#define UINTPTR_MAX ((uintptr_t) -1)
js/src/jsstr.cpp
@ -78,14 +78,8 @@

#define JSSTRDEP_RECURSION_LIMIT 100

static JS_ALWAYS_INLINE JSBool
UWordInRootedValue(JSContext *cx, size_t i, jsval *vp)
{
if (i >= (size_t)JSVAL_INT_MAX)
return js_NewNumberInRootedValue(cx, i, vp);
*vp = INT_TO_JSVAL(i);
return JS_TRUE;
}
JS_STATIC_ASSERT(size_t(JSString::MAX_LENGTH) <= size_t(JSVAL_INT_MAX));
JS_STATIC_ASSERT(INT_FITS_IN_JSVAL(JSString::MAX_LENGTH));

static size_t
MinimizeDependentStrings(JSString *str, int level, JSString **basep)

@ -1049,108 +1043,141 @@ JS_DEFINE_CALLINFO_1(extern, INT32, js_String_p_charCodeAt0_int, STRING,
#endif

jsint
js_BoyerMooreHorspool(const jschar *text, jsint textlen,
const jschar *pat, jsint patlen,
jsint start)
js_BoyerMooreHorspool(const jschar *text, jsuint textlen,
const jschar *pat, jsuint patlen)
{
jsint i, j, k, m;
uint8 skip[BMH_CHARSET_SIZE];
jschar c;
uint8 skip[sBMHCharSetSize];

JS_ASSERT(0 < patlen && patlen <= BMH_PATLEN_MAX);
for (i = 0; i < BMH_CHARSET_SIZE; i++)
JS_ASSERT(0 < patlen && patlen <= sBMHPatLenMax);
for (jsuint i = 0; i < sBMHCharSetSize; i++)
skip[i] = (uint8)patlen;
m = patlen - 1;
for (i = 0; i < m; i++) {
c = pat[i];
if (c >= BMH_CHARSET_SIZE)
return BMH_BAD_PATTERN;
jsuint m = patlen - 1;
for (jsuint i = 0; i < m; i++) {
jschar c = pat[i];
if (c >= sBMHCharSetSize)
return sBMHBadPattern;
skip[c] = (uint8)(m - i);
}
for (k = start + m;
jschar c;
for (jsuint k = m;
k < textlen;
k += ((c = text[k]) >= BMH_CHARSET_SIZE) ? patlen : skip[c]) {
for (i = k, j = m; ; i--, j--) {
if (j < 0)
return i + 1;
k += ((c = text[k]) >= sBMHCharSetSize) ? patlen : skip[c]) {
for (jsuint i = k, j = m; ; i--, j--) {
if (text[i] != pat[j])
break;
if (j == 0)
return static_cast<jsint>(i); /* safe: max string size */
}
}
return -1;
}
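The rewritten js_BoyerMooreHorspool above precomputes, for every character that can appear in the pattern, how far the search window may shift when that character sits under the window's last position. A self-contained 8-bit version of the same skip-table idea, hedged and for illustration only (the real code works on jschar and bails out on characters outside its table):

#include <cassert>
#include <cstring>
#include <stdint.h>

static int
Horspool(const char *text, size_t textlen, const char *pat, size_t patlen)
{
    assert(patlen > 0 && patlen <= 255);   /* skip entries fit in uint8_t */
    if (patlen > textlen)
        return -1;
    uint8_t skip[256];
    memset(skip, (uint8_t) patlen, sizeof skip);   /* default: full shift */
    for (size_t i = 0; i < patlen - 1; i++)
        skip[(uint8_t) pat[i]] = (uint8_t) (patlen - 1 - i);
    for (size_t k = patlen - 1; k < textlen; k += skip[(uint8_t) text[k]]) {
        /* Compare backwards from the window's last character. */
        for (size_t i = k, j = patlen - 1; text[i] == pat[j]; i--, j--) {
            if (j == 0)
                return (int) i;                    /* match at offset i */
        }
    }
    return -1;
}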

static JS_ALWAYS_INLINE jsint
StringMatch(const jschar *text, jsuint textlen,
const jschar *pat, jsuint patlen)
{
if (patlen == 0)
return 0;
if (textlen < patlen)
return -1;

/* XXX tune the BMH threshold (512) */
if (textlen >= 512 && patlen <= sBMHPatLenMax) {
jsint index = js_BoyerMooreHorspool(text, textlen, pat, patlen);
if (index != sBMHBadPattern)
return index;
}

const jschar *textend = text + textlen - (patlen - 1);
const jschar *patend = pat + patlen;
const jschar p0 = *pat;
const jschar *t = text;
uint8 fixup;

/* Credit: Duff */
switch ((textend - text) & 7) {
do {
case 0: if (*t++ == p0) { fixup = 8; goto match; }
case 7: if (*t++ == p0) { fixup = 7; goto match; }
case 6: if (*t++ == p0) { fixup = 6; goto match; }
case 5: if (*t++ == p0) { fixup = 5; goto match; }
case 4: if (*t++ == p0) { fixup = 4; goto match; }
case 3: if (*t++ == p0) { fixup = 3; goto match; }
case 2: if (*t++ == p0) { fixup = 2; goto match; }
case 1: if (*t++ == p0) { fixup = 1; goto match; }
continue;
do {
if (*t++ == p0) {
match:
for (const jschar *p1 = pat + 1, *t1 = t;
p1 != patend;
++p1, ++t1) {
if (*p1 != *t1)
goto failed_match;
}
return t - text - 1;
}
failed_match:;
} while (--fixup > 0);
} while (t != textend);
}
return -1;
}
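StringMatch above unrolls its first-character scan eight ways with Duff's device: the switch jumps into the middle of the unrolled loop to consume the length remainder up front, and fixup records how far into the block a candidate match was found. For reference, the canonical form of the device (hedged, illustrative):

#include <cstddef>

/* Hedged sketch: classic Duff's device. The switch dispatches into the
 * unrolled do-while to handle count % 8 on the first pass. */
static void
duff_copy(char *to, const char *from, size_t count)
{
    if (count == 0)
        return;
    size_t n = (count + 7) / 8;
    switch (count % 8) {
      case 0: do { *to++ = *from++;
      case 7:      *to++ = *from++;
      case 6:      *to++ = *from++;
      case 5:      *to++ = *from++;
      case 4:      *to++ = *from++;
      case 3:      *to++ = *from++;
      case 2:      *to++ = *from++;
      case 1:      *to++ = *from++;
              } while (--n > 0);
    }
}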
|
static JSBool
str_indexOf(JSContext *cx, uintN argc, jsval *vp)
{
    jsval t;
    JSString *str, *str2;
    const jschar *text, *pat;
    jsint i, j, index, textlen, patlen;
    jsdouble d;

    t = vp[1];
    if (JSVAL_IS_STRING(t) && argc != 0 && JSVAL_IS_STRING(vp[2])) {
        str = JSVAL_TO_STRING(t);
        str2 = JSVAL_TO_STRING(vp[2]);
    } else {
        str = NormalizeThis(cx, vp);
        if (!str)
            return JS_FALSE;
    JSString *str;
    NORMALIZE_THIS(cx, vp, str);

        str2 = ArgToRootedString(cx, argc, vp, 0);
        if (!str2)
            return JS_FALSE;
    }
    JSString *patstr = ArgToRootedString(cx, argc, vp, 0);
    if (!patstr)
        return JS_FALSE;

    text = str->chars();
    textlen = (jsint) str->length();
    pat = str2->chars();
    patlen = (jsint) str2->length();
    const jschar *text = str->chars();
    jsuint textlen = str->length();
    const jschar *pat = patstr->chars();
    jsuint patlen = patstr->length();

    jsuint start;
    if (argc > 1) {
        d = js_ValueToNumber(cx, &vp[3]);
        if (JSVAL_IS_NULL(vp[3]))
            return JS_FALSE;
        d = js_DoubleToInteger(d);
        if (d < 0)
            i = 0;
        else if (d > textlen)
            i = textlen;
        else
            i = (jsint)d;
    } else {
        i = 0;
    }
    if (patlen == 0) {
        *vp = INT_TO_JSVAL(i);
        return JS_TRUE;
    }

    /* XXX tune the BMH threshold (512) */
    if (textlen - i >= 512 && (jsuint)(patlen - 2) <= BMH_PATLEN_MAX - 2) {
        index = js_BoyerMooreHorspool(text, textlen, pat, patlen, i);
        if (index != BMH_BAD_PATTERN)
            goto out;
    }

    index = -1;
    j = 0;
    while (i + j < textlen) {
        if (text[i + j] == pat[j]) {
            if (++j == patlen) {
                index = i;
                break;
        jsval indexVal = vp[3];
        if (JSVAL_IS_INT(indexVal)) {
            jsint i = JSVAL_TO_INT(indexVal);
            if (i <= 0) {
                start = 0;
            } else if (jsuint(i) > textlen) {
                start = 0;
                textlen = 0;
            } else {
                start = i;
                text += start;
                textlen -= start;
            }
        } else {
            i++;
            j = 0;
            jsdouble d = js_ValueToNumber(cx, &vp[3]);
            if (JSVAL_IS_NULL(vp[3]))
                return JS_FALSE;
            d = js_DoubleToInteger(d);
            if (d <= 0) {
                start = 0;
            } else if (d > textlen) {
                start = 0;
                textlen = 0;
            } else {
                start = (jsint)d;
                text += start;
                textlen -= start;
            }
        }
    } else {
        start = 0;
    }

  out:
    *vp = INT_TO_JSVAL(index);
    return JS_TRUE;
    jsint match = StringMatch(text, textlen, pat, patlen);
    *vp = INT_TO_JSVAL((match == -1) ? -1 : start + match);
    return true;
}

static JSBool
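[Editor's note: both the old and new str_indexOf bodies above clamp the optional position argument the same way: ToInteger, then clamp into [0, textlen]. A small sketch of just that step, under the assumption that NaN and negative values floor to 0 (clamp_start is a hypothetical helper, not a tree function):

    #include <cstddef>

    // Mirror of the position clamping in str_indexOf: NaN and negatives
    // become 0, values past the end saturate at textlen, fractions are
    // truncated toward zero (which is ToInteger for positive values).
    static size_t clamp_start(double d, size_t textlen)
    {
        if (!(d > 0))                    // catches NaN, 0, and negatives
            return 0;
        if (d > (double) textlen)
            return textlen;
        return (size_t) d;
    }
]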
@ -1274,7 +1301,17 @@ str_trimRight(JSContext *cx, uintN argc, jsval *vp)
 * Perl-inspired string functions.
 */

/* Utility to extract the re/reobj pair from vp and manage the reference count. */
/*
 * RegExpGuard factors logic out of String regexp operations. After each
 * operation completes, RegExpGuard data members become available, according to
 * the comments below.
 *
 * Notes on parameters to RegExpGuard member functions:
 * - 'optarg' indicates in which argument position RegExp flags will be found,
 *   if present. This is a Mozilla extension and not part of any ECMA spec.
 * - 'flat' indicates that the given pattern string will not be interpreted as
 *   a regular expression, hence regexp meta-characters are ignored.
 */
class RegExpGuard
{
    RegExpGuard(const RegExpGuard &);
@ -1285,52 +1322,89 @@ class RegExpGuard
    JSRegExp *mRe;

  public:
    RegExpGuard() : mRe(NULL) {}

    /*
     * 'optarg' indicates in which argument position the flags will be found,
     * if present. This is a Mozilla extension and not part of any ECMA spec.
     *
     * If 'flat' is set, the first argument is to be converted to a string to
     * match in a "flat" sense (without regular expression metachars having
     * special meanings) UNLESS the first arg is a RegExp object. This is the
     * case with String.prototype.replace.
     */
    bool
    initFromArgs(JSContext *cx, uintN optarg, bool flat, uintN argc, jsval *vp)
    {
        mCx = cx;
        if (argc != 0 && VALUE_IS_REGEXP(cx, vp[2])) {
            mReobj = JSVAL_TO_OBJECT(vp[2]);
            mRe = (JSRegExp *) mReobj->getPrivate();
            HOLD_REGEXP(cx, mRe);
        } else {
            JSString *src = ArgToRootedString(cx, argc, vp, 0);
            if (!src)
                return false;
            JSString *opt;
            if (optarg < argc) {
                opt = js_ValueToString(cx, vp[2 + optarg]);
                if (!opt)
                    return false;
            } else {
                opt = NULL;
            }
            mRe = js_NewRegExpOpt(cx, src, opt, flat);
            if (!mRe)
                return false;
            mReobj = NULL;
        }
        return true;
    }
    RegExpGuard(JSContext *cx) : mCx(cx), mRe(NULL) {}

    ~RegExpGuard() {
        if (mRe)
            DROP_REGEXP(mCx, mRe);
    }

    JSObject *reobj() const { return mReobj; }
    JSRegExp *re() const { return mRe; }
    /* init must succeed in order to call tryFlatMatch or normalizeRegExp. */
    bool
    init(uintN argc, jsval *vp)
    {
        jsval patval = vp[2];
        if (argc != 0 && VALUE_IS_REGEXP(mCx, patval)) {
            mReobj = JSVAL_TO_OBJECT(patval);
            mRe = (JSRegExp *) mReobj->getPrivate();
            HOLD_REGEXP(mCx, mRe);
        } else {
            patstr = ArgToRootedString(mCx, argc, vp, 0);
            if (!patstr)
                return false;
        }
        return true;
    }

    /*
     * Upper bound on the number of characters we are willing to potentially
     * waste on searching for RegExp meta-characters.
     */
    static const size_t sMaxFlatPatLen = 256;

    /*
     * Attempt to match |patstr| with |textstr|. Return false if flat matching
     * could not be used.
     */
    bool
    tryFlatMatch(JSString *textstr, bool flat, uintN optarg, uintN argc)
    {
        if (mRe)
            return false;
        patstr->getCharsAndLength(pat, patlen);
        if (optarg < argc ||
            (!flat &&
             (patlen > sMaxFlatPatLen || js_ContainsRegExpMetaChars(pat, patlen)))) {
            return false;
        }
        textstr->getCharsAndLength(text, textlen);
        match = StringMatch(text, textlen, pat, patlen);
        return true;
    }

    /* Data available on successful return from |tryFlatMatch|. */
    JSString *patstr;
    const jschar *pat;
    size_t patlen;
    const jschar *text;
    size_t textlen;
    jsint match;

    /* If the pattern is not already a regular expression, make it so. */
    bool
    normalizeRegExp(bool flat, uintN optarg, uintN argc, jsval *vp)
    {
        /* If we don't have a RegExp, build RegExp from pattern string. */
        if (mRe)
            return true;
        JSString *opt;
        if (optarg < argc) {
            opt = js_ValueToString(mCx, vp[2 + optarg]);
            if (!opt)
                return false;
        } else {
            opt = NULL;
        }
        mRe = js_NewRegExpOpt(mCx, patstr, opt, flat);
        if (!mRe)
            return false;
        mReobj = NULL;
        return true;
    }

    /* Data available on successful return from |normalizeRegExp|. */
    JSObject *reobj() const { return mReobj; }  /* nullable */
    JSRegExp *re() const { return mRe; }        /* non-null */
};

/* js_ExecuteRegExp indicates success in two ways, based on the 'test' flag. */
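[Editor's note: tryFlatMatch above falls back to the regexp path whenever js_ContainsRegExpMetaChars finds a metacharacter in the pattern. A sketch of what such a scan looks like; the exact character set below is an assumption, not copied from the tree:

    #include <stdint.h>
    #include <stddef.h>

    // Return true if pat[0..len) contains any regexp metacharacter, in
    // which case flat matching must not be used. Character list assumed.
    static bool contains_regexp_metachars(const uint16_t *pat, size_t len)
    {
        for (size_t i = 0; i < len; i++) {
            switch (pat[i]) {
              case '^': case '$': case '\\': case '.': case '*': case '+':
              case '?': case '(': case ')': case '[': case ']':
              case '{': case '}': case '|':
                return true;
              default:
                break;
            }
        }
        return false;
    }
]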
@ -1378,8 +1452,8 @@ DoMatch(JSContext *cx, jsval *vp, JSString *str, const RegExpGuard &g,
        }
    } else {
        /* single match */
        bool testSingle = flags & TEST_SINGLE_BIT,
             callbackOnSingle = flags & CALLBACK_ON_SINGLE_BIT;
        bool testSingle = !!(flags & TEST_SINGLE_BIT),
             callbackOnSingle = !!(flags & CALLBACK_ON_SINGLE_BIT);
        size_t i = 0;
        if (!js_ExecuteRegExp(cx, g.re(), str, &i, testSingle, vp))
            return false;
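[Editor's note: the !! added above is the usual way to collapse a masked flag word into a genuine 0/1 before it lands in a bool; with a mask above bit 0, a plain assignment draws truncation/performance warnings on some compilers (e.g. MSVC's C4800). A toy illustration:

    // flags & bit has integer type with value 0 or bit; !!(flags & bit)
    // is exactly 0 or 1, which is what a bool (or packed bitfield) wants.
    static bool flag_set(unsigned flags, unsigned bit)
    {
        return !!(flags & bit);
    }
]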
@ -1398,10 +1472,7 @@ DoMatch(JSContext *cx, jsval *vp, JSString *str, const RegExpGuard &g,
static bool
MatchCallback(JSContext *cx, size_t count, void *p)
{
    if (count >= JSVAL_INT_MAX) {
        js_ReportAllocationOverflow(cx);
        return false;
    }
    JS_ASSERT(count <= JSVAL_INT_MAX);  /* by max string length */

    jsval &arrayval = *static_cast<jsval *>(p);
    JSObject *arrayobj = JSVAL_TO_OBJECT(arrayval);
@ -1423,7 +1494,29 @@ MatchCallback(JSContext *cx, size_t count, void *p)
    jsval v = STRING_TO_JSVAL(matchstr);

    JSAutoResolveFlags rf(cx, JSRESOLVE_QUALIFIED | JSRESOLVE_ASSIGNING);
    return arrayobj->setProperty(cx, INT_TO_JSID(count), &v);
    return !!arrayobj->setProperty(cx, INT_TO_JSID(count), &v);
}

static bool
BuildFlatMatchArray(JSContext *cx, JSString *textstr, const RegExpGuard &g,
                    jsval *vp)
{
    if (g.match < 0) {
        *vp = JSVAL_NULL;
        return true;
    }

    /* For this non-global match, produce a RegExp.exec-style array. */
    JSObject *obj = js_NewSlowArrayObject(cx);
    if (!obj)
        return false;
    *vp = OBJECT_TO_JSVAL(obj);

    return obj->defineProperty(cx, INT_TO_JSID(0), STRING_TO_JSVAL(g.patstr)) &&
           obj->defineProperty(cx, ATOM_TO_JSID(cx->runtime->atomState.indexAtom),
                               INT_TO_JSVAL(g.match)) &&
           obj->defineProperty(cx, ATOM_TO_JSID(cx->runtime->atomState.inputAtom),
                               STRING_TO_JSVAL(textstr));
}

static JSBool
@ -1432,15 +1525,19 @@ str_match(JSContext *cx, uintN argc, jsval *vp)
    JSString *str;
    NORMALIZE_THIS(cx, vp, str);

    RegExpGuard g;
    if (!g.initFromArgs(cx, 1, false, argc, vp))
    RegExpGuard g(cx);
    if (!g.init(argc, vp))
        return false;
    if (g.tryFlatMatch(str, false, 1, argc))
        return BuildFlatMatchArray(cx, str, g, vp);
    if (!g.normalizeRegExp(false, 1, argc, vp))
        return false;

    JSAutoTempValueRooter array(cx, JSVAL_NULL);
    if (!DoMatch(cx, vp, str, g, MatchCallback, array.addr(), MATCH_ARGS))
        return false;

    /* When not global, DoMatch will leave the (RegEx.exec()) in *vp. */
    /* When not global, DoMatch will leave |RegEx.exec()| in *vp. */
    if (g.re()->flags & JSREG_GLOB)
        *vp = array.value();
    return true;
@ -1452,8 +1549,14 @@ str_search(JSContext *cx, uintN argc, jsval *vp)
    JSString *str;
    NORMALIZE_THIS(cx, vp, str);

    RegExpGuard g;
    if (!g.initFromArgs(cx, 1, false, argc, vp))
    RegExpGuard g(cx);
    if (!g.init(argc, vp))
        return false;
    if (g.tryFlatMatch(str, false, 1, argc)) {
        *vp = INT_TO_JSVAL(g.match);
        return true;
    }
    if (!g.normalizeRegExp(false, 1, argc, vp))
        return false;

    size_t i = 0;
@ -1461,13 +1564,14 @@ str_search(JSContext *cx, uintN argc, jsval *vp)
        return false;

    if (*vp == JSVAL_TRUE)
        return UWordInRootedValue(cx, cx->regExpStatics.leftContext.length, vp);
    *vp = INT_TO_JSVAL(-1);
        *vp = INT_TO_JSVAL(cx->regExpStatics.leftContext.length);
    else
        *vp = INT_TO_JSVAL(-1);
    return true;
}

struct ReplaceData {
    ReplaceData(JSContext *cx) : cb(cx) {}
    ReplaceData(JSContext *cx) : g(cx), cb(cx) {}
    JSString      *str;      /* 'this' parameter object as a string */
    RegExpGuard   g;         /* regexp parameter object and private data */
    JSObject      *lambda;   /* replacement function object or null */
@ -1730,16 +1834,41 @@ ReplaceCallback(JSContext *cx, size_t count, void *p)
    return true;
}

static bool
BuildFlatReplacement(JSContext *cx, JSString *textstr, JSString *repstr,
                     const RegExpGuard &g, jsval *vp)
{
    if (g.match == -1) {
        *vp = STRING_TO_JSVAL(textstr);
        return true;
    }

    const jschar *rep;
    size_t replen;
    repstr->getCharsAndLength(rep, replen);

    JSCharBuffer cb(cx);
    if (!cb.reserve(g.textlen - g.patlen + replen) ||
        !cb.append(g.text, static_cast<size_t>(g.match)) ||
        !cb.append(rep, replen) ||
        !cb.append(g.text + g.match + g.patlen, g.text + g.textlen)) {
        return false;
    }

    JSString *str = js_NewStringFromCharBuffer(cx, cb);
    if (!str)
        return false;
    *vp = STRING_TO_JSVAL(str);
    return true;
}

static JSBool
str_replace(JSContext *cx, uintN argc, jsval *vp)
{
    ReplaceData rdata(cx);

    NORMALIZE_THIS(cx, vp, rdata.str);

    if (!rdata.g.initFromArgs(cx, 2, true, argc, vp))
        return false;

    /* Extract replacement string/function. */
    if (argc >= 2 && JS_TypeOfValue(cx, vp[3]) == JSTYPE_FUNCTION) {
        rdata.lambda = JSVAL_TO_OBJECT(vp[3]);
        rdata.repstr = NULL;
@ -1758,6 +1887,15 @@ str_replace(JSContext *cx, uintN argc, jsval *vp)
                          rdata.dollarEnd);
    }

    if (!rdata.g.init(argc, vp))
        return false;
    if (!rdata.dollar && !rdata.lambda &&
        rdata.g.tryFlatMatch(rdata.str, true, 2, argc)) {
        return BuildFlatReplacement(cx, rdata.str, rdata.repstr, rdata.g, vp);
    }
    if (!rdata.g.normalizeRegExp(true, 2, argc, vp))
        return false;

    rdata.index = 0;
    rdata.leftIndex = 0;
    rdata.calledBack = false;
@ -3134,9 +3272,6 @@ js_ValueToCharBuffer(JSContext *cx, jsval v, JSCharBuffer &cb)
JS_FRIEND_API(JSString *)
js_ValueToSource(JSContext *cx, jsval v)
{
    JSTempValueRooter tvr;
    JSString *str;

    if (JSVAL_IS_VOID(v))
        return ATOM_TO_STRING(cx->runtime->atomState.void0Atom);
    if (JSVAL_IS_STRING(v))
@ -3152,16 +3287,11 @@ js_ValueToSource(JSContext *cx, jsval v)
        return js_ValueToString(cx, v);
    }

    JS_PUSH_SINGLE_TEMP_ROOT(cx, JSVAL_NULL, &tvr);
    if (!js_TryMethod(cx, JSVAL_TO_OBJECT(v),
                      cx->runtime->atomState.toSourceAtom,
                      0, NULL, &tvr.u.value)) {
        str = NULL;
    } else {
        str = js_ValueToString(cx, tvr.u.value);
    }
    JS_POP_TEMP_ROOT(cx, &tvr);
    return str;
    JSAtom *atom = cx->runtime->atomState.toSourceAtom;
    JSAutoTempValueRooter tvr(cx, JSVAL_NULL);
    if (!js_TryMethod(cx, JSVAL_TO_OBJECT(v), atom, 0, NULL, tvr.addr()))
        return NULL;
    return js_ValueToString(cx, tvr.value());
}

/*
@ -142,7 +142,11 @@ struct JSString {
    ATOMIZED =    JSSTRING_BIT(JS_BITS_PER_WORD - 3),
    DEFLATED =    JSSTRING_BIT(JS_BITS_PER_WORD - 4),

#if JS_BITS_PER_WORD > 32
    LENGTH_BITS = 28,
#else
    LENGTH_BITS = JS_BITS_PER_WORD - 4,
#endif
    LENGTH_MASK = JSSTRING_BITMASK(LENGTH_BITS),

    /*
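[Editor's note: one way to see what the hunk above changes: on 64-bit words, JS_BITS_PER_WORD - 4 would allow 60 length bits, so the patch caps LENGTH_BITS at 28, which is also exactly what 32-bit builds get. A compile-time sketch with standalone types standing in for the JSSTRING_* macros:

    #include <stdint.h>
    #include <assert.h>

    // Four flag bits live at the top of the word; the length occupies
    // the low LENGTH_BITS bits, capped at 28 even on 64-bit words.
    static const size_t    BITS_PER_WORD = sizeof(uintptr_t) * 8;
    static const size_t    LENGTH_BITS   = BITS_PER_WORD > 32 ? 28 : BITS_PER_WORD - 4;
    static const uintptr_t LENGTH_MASK   = (uintptr_t(1) << LENGTH_BITS) - 1;

    int main()
    {
        // Either way, the maximum string length is (1 << 28) - 1 chars.
        assert(LENGTH_MASK == (uintptr_t(1) << 28) - 1);
        return 0;
    }
]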
@ -676,20 +680,17 @@ js_CompareStrings(JSString *str1, JSString *str2);

/*
 * Boyer-Moore-Horspool superlinear search for pat:patlen in text:textlen.
 * The patlen argument must be positive and no greater than BMH_PATLEN_MAX.
 * The start argument tells where in text to begin the search.
 * The patlen argument must be positive and no greater than sBMHPatLenMax.
 *
 * Return the index of pat in text, or -1 if not found.
 */
#define BMH_CHARSET_SIZE 256    /* ISO-Latin-1 */
#define BMH_PATLEN_MAX   255    /* skip table element is uint8 */

#define BMH_BAD_PATTERN  (-2)   /* return value if pat is not ISO-Latin-1 */
static const jsuint sBMHCharSetSize = 256;  /* ISO-Latin-1 */
static const jsuint sBMHPatLenMax   = 255;  /* skip table element is uint8 */
static const jsint  sBMHBadPattern  = -2;   /* return value if pat is not ISO-Latin-1 */

extern jsint
js_BoyerMooreHorspool(const jschar *text, jsint textlen,
                      const jschar *pat, jsint patlen,
                      jsint start);
js_BoyerMooreHorspool(const jschar *text, jsuint textlen,
                      const jschar *pat, jsuint patlen);

extern size_t
js_strlen(const jschar *s);
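[Editor's note: for reference, a compact Boyer-Moore-Horspool sketch obeying the contract documented above: patlen in [1, sBMHPatLenMax] so skips fit a uint8, a 256-entry (ISO-Latin-1) skip table, sBMHBadPattern (-2) when the pattern has a char outside the table, otherwise the match index or -1. Standard types stand in for jschar/jsuint:

    #include <stdint.h>

    // Boyer-Moore-Horspool: on a mismatch, shift by the skip distance of
    // the text char aligned with the last pattern char.
    static int32_t
    bmh(const uint16_t *text, uint32_t textlen,
        const uint16_t *pat, uint32_t patlen)
    {
        uint8_t skip[256];
        for (uint32_t i = 0; i < 256; i++)
            skip[i] = (uint8_t) patlen;
        uint32_t m = patlen - 1;
        for (uint32_t i = 0; i < m; i++) {
            if (pat[i] >= 256)
                return -2;                    /* sBMHBadPattern */
            skip[pat[i]] = (uint8_t)(m - i);
        }
        if (pat[m] >= 256)
            return -2;

        for (uint32_t k = m; k < textlen;
             k += (text[k] < 256) ? skip[text[k]] : patlen) {
            for (uint32_t i = k, j = m; ; i--, j--) {
                if (text[i] != pat[j])
                    break;
                if (j == 0)
                    return (int32_t) i;       /* match at i */
            }
        }
        return -1;                            /* not found */
    }
]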
|
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -47,6 +47,7 @@
#include "jstypes.h"
#include "jsbuiltins.h"
#include "jscntxt.h"
#include "jsdhash.h"
#include "jsinterp.h"
#include "jslock.h"
#include "jsnum.h"
@ -157,19 +158,20 @@ public:

/*
 * Tracker is used to keep track of values being manipulated by the interpreter
 * during trace recording.
 * during trace recording. Note that tracker pages aren't necessarily the
 * same size as OS pages, they just are a moderate-sized chunk of memory.
 */
class Tracker {
    struct Page {
        struct Page*    next;
        jsuword         base;
        nanojit::LIns*  map[1];
    struct TrackerPage {
        struct TrackerPage* next;
        jsuword             base;
        nanojit::LIns*      map[1];
    };
    struct Page* pagelist;
    struct TrackerPage* pagelist;

    jsuword             getPageBase(const void* v) const;
    struct Page*        findPage(const void* v) const;
    struct Page*        addPage(const void* v);
    jsuword             getTrackerPageBase(const void* v) const;
    struct TrackerPage* findTrackerPage(const void* v) const;
    struct TrackerPage* addTrackerPage(const void* v);
public:
    Tracker();
    ~Tracker();
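[Editor's note: a sketch of the page-binned lookup the renamed TrackerPage implements: mask the low bits of the address to pick a bin, then walk that bin's list. The chunk size below is an assumption; the real TrackerPage size is defined elsewhere in jstracer:

    #include <stdint.h>

    static const uintptr_t CHUNK_SZB = 4096;   // assumed, not the tree's value

    struct TrackerPage {
        TrackerPage *next;
        uintptr_t    base;                     // address & ~(CHUNK_SZB - 1)
        void        *map[CHUNK_SZB / sizeof(void *)];
    };

    static uintptr_t pageBase(const void *v)
    {
        return (uintptr_t) v & ~(CHUNK_SZB - 1);
    }

    // Find the page covering v, or 0 if nothing in that chunk is tracked.
    static TrackerPage *findTrackerPage(TrackerPage *list, const void *v)
    {
        uintptr_t base = pageBase(v);
        for (TrackerPage *p = list; p; p = p->next) {
            if (p->base == base)
                return p;
        }
        return 0;
    }
]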
@ -510,7 +512,7 @@ struct FrameInfo {
    void set_argc(uint16 argc, bool constructing) {
        this->argc = uint32(argc) | (constructing ? CONSTRUCTING_FLAG: 0);
    }
    uint16 get_argc() const { return argc & ~CONSTRUCTING_FLAG; }
    uint16 get_argc() const { return uint16(argc & ~CONSTRUCTING_FLAG); }
    bool is_constructing() const { return (argc & CONSTRUCTING_FLAG) != 0; }

    // The typemap just before the callee is called.
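[Editor's note: the packing these accessors implement, spelled out; the uint16 cast added by the hunk makes the narrowing of the masked 32-bit value explicit and warning-free. CONSTRUCTING_FLAG's value here is illustrative, not the tree's definition:

    #include <stdint.h>

    static const uint32_t CONSTRUCTING_FLAG = 0x10000;  // assumed bit

    // argc fits in 16 bits; the flag rides above it in the same word.
    static uint32_t pack_argc(uint16_t argc, bool constructing)
    {
        return uint32_t(argc) | (constructing ? CONSTRUCTING_FLAG : 0);
    }

    static uint16_t unpack_argc(uint32_t v)
    {
        return uint16_t(v & ~CONSTRUCTING_FLAG);  // explicit narrowing
    }
]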
@ -690,14 +692,14 @@ enum TypeConsensus
    TypeConsensus_Bad       /* Typemaps are not compatible */
};

class TraceRecorder : public avmplus::GCObject {
class TraceRecorder {
    VMAllocator&            tempAlloc;
    JSContext*              cx;
    JSTraceMonitor*         traceMonitor;
    JSObject*               globalObj;
    JSObject*               lexicalBlock;
    Tracker                 tracker;
    Tracker                 nativeFrameTracker;
    char*                   entryTypeMap;
    unsigned                callDepth;
    JSAtom**                atoms;
    VMSideExit*             anchor;
@ -722,7 +724,6 @@ class TraceRecorder : public avmplus::GCObject {
    nanojit::LIns*          inner_sp_ins;
    nanojit::LIns*          native_rval_ins;
    nanojit::LIns*          newobj_ins;
    bool                    deepAborted;
    bool                    trashSelf;
    Queue<nanojit::Fragment*> whichTreesToTrash;
    Queue<jsbytecode*>      cfgMerges;
@ -731,8 +732,6 @@ class TraceRecorder : public avmplus::GCObject {
    JSSpecializedNative*    pendingSpecializedNative;
    jsval*                  pendingUnboxSlot;
    nanojit::LIns*          pendingGuardCondition;
    TraceRecorder*          nextRecorderToAbort;
    bool                    wasRootFragment;
    jsbytecode*             outer;      /* outer trace header PC */
    uint32                  outerArgc;  /* outer trace deepest frame argc */
    bool                    loop;
@ -839,9 +838,17 @@ class TraceRecorder : public avmplus::GCObject {
    JS_REQUIRES_STACK JSRecordingStatus unary(nanojit::LOpcode op);
    JS_REQUIRES_STACK JSRecordingStatus binary(nanojit::LOpcode op);

    JS_REQUIRES_STACK void guardShape(nanojit::LIns* obj_ins, JSObject* obj,
                                      uint32 shape, const char* guardName,
                                      nanojit::LIns* map_ins, VMSideExit* exit);
    JS_REQUIRES_STACK JSRecordingStatus guardShape(nanojit::LIns* obj_ins, JSObject* obj,
                                                   uint32 shape, const char* name,
                                                   nanojit::LIns* map_ins, VMSideExit* exit);

    JSDHashTable guardedShapeTable;

#ifdef DEBUG
    void dumpGuardedShapes(const char* prefix);
#endif

    void forgetGuardedShapes();

    inline nanojit::LIns* map(nanojit::LIns *obj_ins);
    JS_REQUIRES_STACK bool map_is_native(JSObjectMap* map, nanojit::LIns* map_ins,
@ -981,6 +988,19 @@ class TraceRecorder : public avmplus::GCObject {
    JS_REQUIRES_STACK jsatomid getFullIndex(ptrdiff_t pcoff = 0);

public:

    inline void*
    operator new(size_t size)
    {
        return calloc(1, size);
    }

    inline void
    operator delete(void *p)
    {
        free(p);
    }

    JS_REQUIRES_STACK
    TraceRecorder(JSContext* cx, VMSideExit*, nanojit::Fragment*, TreeInfo*,
                  unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap,
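[Editor's note: the class-local operator new above is what lets the recorder's many pointer members start out NULL: calloc zero-fills the storage before the constructor runs, so members the init list never mentions keep those zero bytes. A tiny demonstration of the idiom:

    #include <cstddef>
    #include <cstdlib>

    struct Zeroed {
        void *p;        // no initializer anywhere...
        int   n;
        void *operator new(std::size_t size) { return std::calloc(1, size); }
        void  operator delete(void *q)       { std::free(q); }
    };

    // ...yet after 'new Zeroed', z->p == NULL and z->n == 0, because the
    // allocation function zero-filled the memory the object occupies.
]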
@ -988,6 +1008,8 @@ public:
                  uint32 outerArgc);
    ~TraceRecorder();

    bool outOfMemory();

    static JS_REQUIRES_STACK JSRecordingStatus monitorRecording(JSContext* cx, TraceRecorder* tr,
                                                                JSOp op);
@ -1011,11 +1033,11 @@ public:
     * The instruction is suitable for use as the final argument of a single
     * call to LirBuffer::insGuard; do not reuse the returned value.
     */
    JS_REQUIRES_STACK nanojit::LIns* createGuardRecord(VMSideExit* exit);
    JS_REQUIRES_STACK nanojit::GuardRecord* createGuardRecord(VMSideExit* exit);

    nanojit::Fragment* getFragment() const { return fragment; }
    TreeInfo* getTreeInfo() const { return treeInfo; }
    JS_REQUIRES_STACK void compile(JSTraceMonitor* tm);
    JS_REQUIRES_STACK bool compile(JSTraceMonitor* tm);
    JS_REQUIRES_STACK bool closeLoop(TypeConsensus &consensus);
    JS_REQUIRES_STACK bool closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus &consensus);
    JS_REQUIRES_STACK void endLoop();
@ -1026,10 +1048,6 @@ public:
    JS_REQUIRES_STACK void prepareTreeCall(VMFragment* inner);
    JS_REQUIRES_STACK void emitTreeCall(VMFragment* inner, VMSideExit* exit);
    unsigned getCallDepth() const;
    void pushAbortStack();
    void popAbortStack();
    void removeFragmentReferences();
    void deepAbort();

    JS_REQUIRES_STACK JSRecordingStatus record_EnterFrame();
    JS_REQUIRES_STACK JSRecordingStatus record_LeaveFrame();
@ -1038,8 +1056,21 @@ public:
    JS_REQUIRES_STACK JSRecordingStatus record_DefLocalFunSetSlot(uint32 slot, JSObject* obj);
    JS_REQUIRES_STACK JSRecordingStatus record_NativeCallComplete();

    bool wasDeepAborted() { return deepAborted; }
    TreeInfo* getTreeInfo() { return treeInfo; }
    void forgetGuardedShapesForObject(JSObject* obj);

#ifdef DEBUG
    void tprint(const char *format, int count, nanojit::LIns *insa[]);
    void tprint(const char *format);
    void tprint(const char *format, nanojit::LIns *ins);
    void tprint(const char *format, nanojit::LIns *ins1, nanojit::LIns *ins2);
    void tprint(const char *format, nanojit::LIns *ins1, nanojit::LIns *ins2, nanojit::LIns *ins3);
    void tprint(const char *format, nanojit::LIns *ins1, nanojit::LIns *ins2, nanojit::LIns *ins3,
                nanojit::LIns *ins4);
    void tprint(const char *format, nanojit::LIns *ins1, nanojit::LIns *ins2, nanojit::LIns *ins3,
                nanojit::LIns *ins4, nanojit::LIns *ins5);
    void tprint(const char *format, nanojit::LIns *ins1, nanojit::LIns *ins2, nanojit::LIns *ins3,
                nanojit::LIns *ins4, nanojit::LIns *ins5, nanojit::LIns *ins6);
#endif

#define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) \
    JS_REQUIRES_STACK JSRecordingStatus record_##op();
@ -1068,11 +1099,11 @@ public:

#define TRACE_ARGS_(x,args)                                                   \
    JS_BEGIN_MACRO                                                            \
        TraceRecorder* tr_ = TRACE_RECORDER(cx);                              \
        if (tr_ && !tr_->wasDeepAborted()) {                                  \
        if (TraceRecorder* tr_ = TRACE_RECORDER(cx)) {                        \
            JSRecordingStatus status = tr_->record_##x args;                  \
            if (STATUS_ABORTS_RECORDING(status)) {                            \
                js_AbortRecording(cx, #x);                                    \
                if (TRACE_RECORDER(cx))                                       \
                    js_AbortRecording(cx, #x);                                \
                if (status == JSRS_ERROR)                                     \
                    goto error;                                               \
            }                                                                 \
@ -5466,20 +5466,14 @@ xml_attribute(JSContext *cx, uintN argc, jsval *vp)
static JSBool
xml_attributes(JSContext *cx, uintN argc, jsval *vp)
{
    jsval name;
    JSObject *qn;
    JSTempValueRooter tvr;
    JSBool ok;

    name = ATOM_KEY(cx->runtime->atomState.starAtom);
    qn = ToAttributeName(cx, name);
    jsval name = ATOM_KEY(cx->runtime->atomState.starAtom);
    JSObject *qn = ToAttributeName(cx, name);
    if (!qn)
        return JS_FALSE;
    name = OBJECT_TO_JSVAL(qn);
    JS_PUSH_SINGLE_TEMP_ROOT(cx, name, &tvr);
    ok = GetProperty(cx, JS_THIS_OBJECT(cx, vp), name, vp);
    JS_POP_TEMP_ROOT(cx, &tvr);
    return ok;

    JSAutoTempValueRooter tvr(cx, name);
    return GetProperty(cx, JS_THIS_OBJECT(cx, vp), name, vp);
}

static JSXML *
@ -40,16 +40,25 @@
DEPTH     = ..
topsrcdir = @top_srcdir@
srcdir    = @srcdir@
VPATH     = @srcdir@

VPATH     = $(srcdir)
VPATH    += $(srcdir)/../nanojit

include $(DEPTH)/config/autoconf.mk

PROGRAM = lirasm$(BIN_SUFFIX)
CPPSRCS = lirasm.cpp
CPPSRCS = lirasm.cpp \
          Assembler.cpp \
          Allocator.cpp \
          CodeAlloc.cpp \
          Containers.cpp \
          Fragmento.cpp \
          LIR.cpp \
          RegAlloc.cpp \
          avmplus.cpp \
          Native$(NANOJIT_ARCH).cpp \
          $(NULL)

DEFINES += -DEXPORT_JS_API

LIBS = $(NSPR_LIBS) $(EDITLINE_LIBS) $(DEPTH)/$(LIB_PREFIX)js_static.$(LIB_SUFFIX)

LOCAL_INCLUDES += -I$(topsrcdir) -I..
@ -63,12 +72,6 @@ CFLAGS += -EHsc
CXXFLAGS += -EHsc
endif

ifdef MOZ_SHARK
CFLAGS += -F/System/Library/PrivateFrameworks
CXXFLAGS += -F/System/Library/PrivateFrameworks
LDFLAGS += -F/System/Library/PrivateFrameworks -framework CHUD
endif

check::
	$(srcdir)/testlirc.sh
|
@ -57,20 +57,65 @@
|
|||
#include <assert.h>
|
||||
|
||||
#include "nanojit/nanojit.h"
|
||||
#include "jstracer.h"
|
||||
|
||||
using namespace nanojit;
|
||||
using namespace std;
|
||||
|
||||
static avmplus::GC gc;
|
||||
/* Allocator SPI implementation. */
|
||||
|
||||
void*
|
||||
nanojit::Allocator::allocChunk(size_t nbytes)
|
||||
{
|
||||
void *p = malloc(nbytes);
|
||||
if (!p)
|
||||
exit(1);
|
||||
return p;
|
||||
}
|
||||
|
||||
void
|
||||
nanojit::Allocator::freeChunk(void *p) {
|
||||
free(p);
|
||||
}
|
||||
|
||||
void
|
||||
nanojit::Allocator::postReset() {
|
||||
}
|
||||
|
||||
|
||||
struct LasmSideExit : public SideExit {
|
||||
size_t line;
|
||||
};
|
||||
|
||||
typedef JS_FASTCALL int32_t (*RetInt)();
|
||||
typedef JS_FASTCALL double (*RetFloat)();
|
||||
typedef JS_FASTCALL GuardRecord* (*RetGuard)();
|
||||
|
||||
/* LIR SPI implementation */
|
||||
|
||||
void
|
||||
nanojit::StackFilter::getTops(LIns* guard, int& spTop, int& rpTop)
|
||||
{
|
||||
spTop = 0;
|
||||
rpTop = 0;
|
||||
}
|
||||
|
||||
#if defined NJ_VERBOSE
|
||||
void
|
||||
nanojit::LirNameMap::formatGuard(LIns *i, char *out)
|
||||
{
|
||||
LasmSideExit *x;
|
||||
|
||||
x = (LasmSideExit *)i->record()->exit;
|
||||
sprintf(out,
|
||||
"%s: %s %s -> line=%d (GuardID=%03d)",
|
||||
formatRef(i),
|
||||
lirNames[i->opcode()],
|
||||
i->oprnd1() ? formatRef(i->oprnd1()) : "",
|
||||
x->line,
|
||||
i->record()->profGuardID);
|
||||
}
|
||||
#endif
|
||||
|
||||
typedef FASTCALL int32_t (*RetInt)();
|
||||
typedef FASTCALL double (*RetFloat)();
|
||||
typedef FASTCALL GuardRecord* (*RetGuard)();
|
||||
|
||||
struct Function {
|
||||
const char *name;
|
||||
|
@ -211,11 +256,15 @@ public:
    void assemble(istream &in);
    void lookupFunction(const string &name, CallInfo *&ci);

    Fragmento *mFragmento;
    LirBuffer *mLirbuf;
    verbose_only( LabelMap *mLabelMap; )
    LogControl mLogc;
    avmplus::AvmCore mCore;
    Allocator mAlloc;
    CodeAlloc mCodeAlloc;
    bool mVerbose;
    Fragments mFragments;
    Assembler mAssm;
    map<string, pair<LOpcode, size_t> > mOpMap;

    void bad(const string &msg) {
@ -225,8 +274,6 @@ public:

private:
    void handlePatch(LirTokenStream &in);

    avmplus::AvmCore mCore;
};

class FragmentAssembler {
@ -234,14 +281,19 @@ public:
    FragmentAssembler(Lirasm &parent, const string &fragmentName);
    ~FragmentAssembler();

    void assembleFragment(LirTokenStream &in, bool implicitBegin, const LirToken *firstToken);
    void assembleFragment(LirTokenStream &in,
                          bool implicitBegin,
                          const LirToken *firstToken);

private:
    static uint32_t sProfId;
    // Prohibit copying.
    FragmentAssembler(const FragmentAssembler &);
    FragmentAssembler & operator=(const FragmentAssembler &);
    LasmSideExit *createSideExit();
    GuardRecord *createGuardRecord(LasmSideExit *exit);

    Lirasm *mParent;
    Lirasm &mParent;
    const string mFragName;
    Fragment *mFragment;
    vector<CallInfo*> mCallInfos;
|
|||
void tokenizeLine(LirTokenStream &in, LirToken &token);
|
||||
void need(size_t);
|
||||
LIns *ref(const string &);
|
||||
LIns *do_skip(size_t);
|
||||
LIns *assemble_call(const string &);
|
||||
LIns *assemble_general();
|
||||
LIns *assemble_guard();
|
||||
|
@ -353,6 +404,9 @@ dep_u32(char *&buf, uint32_t word, uint32_t &cksum)
void
dump_srecords(ostream &out, Fragment *frag)
{
    // FIXME: Disabled until we work out a sane way to walk through
    // code chunks under the new CodeAlloc regime.
    /*
    // Write S-records. Can only do 4-byte addresses at the moment.

    // FIXME: this presently dumps out the entire set of code pages
@ -395,26 +449,34 @@ dump_srecords(ostream &out, Fragment *frag)
            out << string(buf) << endl;
        }
    }
    */
}

FragmentAssembler::FragmentAssembler(Lirasm &parent, const string &fragmentName)
    : mParent(&parent), mFragName(fragmentName)
{
    mFragment = new (&gc) Fragment(NULL);
    mFragment->lirbuf = mParent->mLirbuf;
    mFragment->anchor = mFragment;
    mFragment->root = mFragment;
    mParent->mFragments[mFragName].fragptr = mFragment;

    mBufWriter = new (&gc) LirBufWriter(mParent->mLirbuf);
    mCseFilter = new (&gc) CseFilter(mBufWriter, &gc);
    mExprFilter = new (&gc) ExprFilter(mCseFilter);

uint32_t
FragmentAssembler::sProfId = 0;

FragmentAssembler::FragmentAssembler(Lirasm &parent, const string &fragmentName)
    : mParent(parent), mFragName(fragmentName)
{
    mFragment = new Fragment(NULL verbose_only(, sProfId++));
    mFragment->lirbuf = mParent.mLirbuf;
    mFragment->root = mFragment;
    mParent.mFragments[mFragName].fragptr = mFragment;

    mBufWriter = new LirBufWriter(mParent.mLirbuf);
    mCseFilter = new CseFilter(mBufWriter, mParent.mAlloc);
    mExprFilter = new ExprFilter(mCseFilter);
    mVerboseWriter = NULL;
    mLir = mExprFilter;

#ifdef DEBUG
    if (mParent->mVerbose) {
        mVerboseWriter = new (&gc) VerboseWriter(&gc, mExprFilter, mParent->mLirbuf->names, &mParent->mLogc);
    if (mParent.mVerbose) {
        mVerboseWriter = new VerboseWriter(mParent.mAlloc,
                                           mExprFilter,
                                           mParent.mLirbuf->names,
                                           &mParent.mLogc);
        mLir = mVerboseWriter;
    }
#endif
@ -431,11 +493,9 @@ FragmentAssembler::~FragmentAssembler()
    delete mExprFilter;
    delete mCseFilter;
    delete mBufWriter;

    for (size_t i = 0; i < mCallInfos.size(); ++i)
        delete mCallInfos[i];
}


void
FragmentAssembler::bad(const string &msg)
{
@ -460,14 +520,6 @@ FragmentAssembler::ref(const string &lab)
    return mLabels.find(lab)->second;
}

LIns *
FragmentAssembler::do_skip(size_t i)
{
    LIns *s = mLir->insSkip(i);
    memset(s->payload(), 0xba, i);
    return s;
}

LIns *
FragmentAssembler::assemble_jump()
{
@ -513,7 +565,7 @@ FragmentAssembler::assemble_load()
LIns *
FragmentAssembler::assemble_call(const string &op)
{
    CallInfo *ci = new CallInfo();
    CallInfo *ci = new (mParent.mAlloc) CallInfo();
    mCallInfos.push_back(ci);
    LIns *args[MAXARGS];
@ -530,9 +582,13 @@ FragmentAssembler::assemble_call(const string &op)
    string func = pop_front(mTokens);
    string abi = pop_front(mTokens);

    AbiKind _abi;
    if (abi == "fastcall")
    AbiKind _abi = ABI_CDECL;
    if (abi == "fastcall") {
#ifdef NO_FASTCALL
        bad("no fastcall support");
#endif
        _abi = ABI_FASTCALL;
    }
    else if (abi == "stdcall")
        _abi = ABI_STDCALL;
    else if (abi == "thiscall")
@ -556,7 +612,7 @@ FragmentAssembler::assemble_call(const string &op)
        ci->_name = "fn";
#endif
    } else {
        mParent->lookupFunction(func, ci);
        mParent.lookupFunction(func, ci);
        if (ci == NULL)
            bad("invalid function reference " + func);
        if (_abi != ci->_abi)
@ -574,7 +630,7 @@ FragmentAssembler::assemble_call(const string &op)
    // Select return type from opcode.
    // FIXME: callh needs special treatment currently
    // missing from here.
    if (mOpcode == LIR_call)
    if (mOpcode == LIR_icall)
        ci->_argtypes |= ARGSIZE_LO;
    else
        ci->_argtypes |= ARGSIZE_F;
@ -582,26 +638,36 @@ FragmentAssembler::assemble_call(const string &op)
    return mLir->insCall(ci, args);
}

LIns *
FragmentAssembler::assemble_guard()
LasmSideExit*
FragmentAssembler::createSideExit()
{
    LIns *exitIns = do_skip(sizeof(LasmSideExit));
    LasmSideExit* exit = (LasmSideExit*) exitIns->payload();
    LasmSideExit* exit = new (mParent.mAlloc) LasmSideExit();
    memset(exit, 0, sizeof(LasmSideExit));
    exit->from = mFragment;
    exit->target = NULL;
    exit->line = mLineno;
    return exit;
}

    LIns *guardRec = do_skip(sizeof(GuardRecord));
    GuardRecord *rec = (GuardRecord*) guardRec->payload();
GuardRecord*
FragmentAssembler::createGuardRecord(LasmSideExit *exit)
{
    GuardRecord *rec = new (mParent.mAlloc) GuardRecord();
    memset(rec, 0, sizeof(GuardRecord));
    rec->exit = exit;
    exit->addGuard(rec);
    return rec;
}


LIns *
FragmentAssembler::assemble_guard()
{
    GuardRecord* guard = createGuardRecord(createSideExit());

    need(mOpcount);

    if (mOpcode != LIR_loop)
        mReturnTypeBits |= RT_GUARD;
    mReturnTypeBits |= RT_GUARD;

    LIns *ins_cond;
    if (mOpcode == LIR_xt || mOpcode == LIR_xf)
@ -612,7 +678,7 @@ FragmentAssembler::assemble_guard()
    if (!mTokens.empty())
        bad("too many arguments");

    return mLir->insGuard(mOpcode, ins_cond, guardRec);
    return mLir->insGuard(mOpcode, ins_cond, guard);
}

LIns *
@ -662,25 +728,17 @@ FragmentAssembler::endFragment()
        cerr << "warning: multiple return types in fragment '"
             << mFragName << "'" << endl;
    }
    LIns *exitIns = do_skip(sizeof(SideExit));
    SideExit* exit = (SideExit*) exitIns->payload();
    memset(exit, 0, sizeof(SideExit));
    exit->guards = NULL;
    exit->from = exit->target = mFragment;
    mFragment->lastIns = mLir->insGuard(LIR_loop, NULL, exitIns);

    ::compile(mParent->mFragmento->assm(), mFragment);
    mFragment->lastIns =
        mLir->insGuard(LIR_x, NULL, createGuardRecord(createSideExit()));

    if (mParent->mFragmento->assm()->error() != nanojit::None) {
    ::compile(&mParent.mAssm, mFragment, mParent.mAlloc
              verbose_only(, mParent.mLabelMap));

    if (mParent.mAssm.error() != nanojit::None) {
        cerr << "error during assembly: ";
        switch (mParent->mFragmento->assm()->error()) {
          case nanojit::OutOMem: cerr << "OutOMem"; break;
        switch (mParent.mAssm.error()) {
          case nanojit::StackFull: cerr << "StackFull"; break;
          case nanojit::RegionFull: cerr << "RegionFull"; break;
          case nanojit::MaxLength: cerr << "MaxLength"; break;
          case nanojit::MaxExit: cerr << "MaxExit"; break;
          case nanojit::MaxXJump: cerr << "MaxXJump"; break;
          case nanojit::UnknownPrim: cerr << "UnknownPrim"; break;
          case nanojit::UnknownBranch: cerr << "UnknownBranch"; break;
          case nanojit::None: cerr << "None"; break;
        }
@ -689,24 +747,24 @@ FragmentAssembler::endFragment()
    }

    LirasmFragment *f;
    f = &mParent->mFragments[mFragName];
    f = &mParent.mFragments[mFragName];

    switch (mReturnTypeBits) {
      case RT_GUARD:
        f->rguard = reinterpret_cast<RetGuard>(mFragment->code());
      case RT_GUARD:
        f->rguard = (RetGuard)((uintptr_t)mFragment->code());
        f->mReturnType = RT_GUARD;
        break;
      case RT_FLOAT:
        f->rfloat = reinterpret_cast<RetFloat>(mFragment->code());
      case RT_FLOAT:
        f->rfloat = (RetFloat)((uintptr_t)mFragment->code());
        f->mReturnType = RT_FLOAT;
        break;
      default:
        f->rint = reinterpret_cast<RetInt>(mFragment->code());
      default:
        f->rint = (RetInt)((uintptr_t)mFragment->code());
        f->mReturnType = RT_INT32;
        break;
    }

    mParent->mFragments[mFragName].mLabels = mLabels;
    mParent.mFragments[mFragName].mLabels = mLabels;
}

void
@ -731,7 +789,7 @@ FragmentAssembler::extract_any_label(string &lab, char lab_delim)

        if (mLabels.find(lab) != mLabels.end())
            bad("duplicate label");
        }
    }
}

void
@ -786,10 +844,10 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons

        assert(!mTokens.empty());
        op = pop_front(mTokens);
        if (mParent->mOpMap.find(op) == mParent->mOpMap.end())
        if (mParent.mOpMap.find(op) == mParent.mOpMap.end())
            bad("unknown instruction '" + op + "'");

        pair<LOpcode, size_t> entry = mParent->mOpMap[op];
        pair<LOpcode, size_t> entry = mParent.mOpMap[op];
        mOpcode = entry.first;
        mOpcount = entry.second;
@ -842,24 +900,17 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
            break;

          case LIR_skip:
            need(1);
            {
                int32_t count = imm(mTokens[0]);
                if (uint32_t(count) > NJ_MAX_SKIP_PAYLOAD_SZB)
                    bad("oversize skip");
                ins = do_skip(count);
            }
            bad("skip instruction is deprecated");
            break;

          case LIR_xt:
          case LIR_xf:
          case LIR_x:
          case LIR_xbarrier:
          case LIR_loop:
            ins = assemble_guard();
            break;

          case LIR_call:
          case LIR_icall:
          case LIR_callh:
          case LIR_fcall:
            ins = assemble_call(op);
@ -874,27 +925,23 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
        if (!lab.empty())
            mLabels.insert(make_pair(lab, ins));

        if (mParent->mLirbuf->outOMem()) {
            cerr << "lirbuf out of memory" << endl;
            exit(1);
        }
    }
    endFragment();
}

Lirasm::Lirasm(bool verbose)
Lirasm::Lirasm(bool verbose) :
    mAssm(mCodeAlloc, mAlloc, &mCore, &mLogc)
{
    mVerbose = verbose;
    nanojit::AvmCore::config.tree_opt = true;
    mLogc.lcbits = 0;
    mFragmento = new (&gc) Fragmento(&mCore, &mLogc, 32);
    mFragmento->labels = NULL;
    mLirbuf = new (&gc) LirBuffer(mFragmento);

    mLirbuf = new (mAlloc) LirBuffer(mAlloc);
#ifdef DEBUG
    if (mVerbose) {
        mLogc.lcbits = LC_Assembly;
        mFragmento->labels = new (&gc) LabelMap(&mCore);
        mLirbuf->names = new (&gc) LirNameMap(&gc, mFragmento->labels);
        mLabelMap = new (mAlloc) LabelMap(mAlloc, &mLogc);
        mLirbuf->names = new (mAlloc) LirNameMap(mAlloc, mLabelMap);
    }
#endif
|
|||
{
|
||||
Fragments::iterator i;
|
||||
for (i = mFragments.begin(); i != mFragments.end(); ++i) {
|
||||
i->second.fragptr->releaseCode(mFragmento);
|
||||
delete i->second.fragptr;
|
||||
}
|
||||
delete mLirbuf;
|
||||
delete mFragmento->labels;
|
||||
delete mFragmento;
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
Lirasm::lookupFunction(const string &name, CallInfo *&ci)
|
||||
{
|
||||
|
@ -938,13 +982,17 @@ Lirasm::lookupFunction(const string &name, CallInfo *&ci)
|
|||
Fragments::const_iterator func = mFragments.find(name);
|
||||
if (func != mFragments.end()) {
|
||||
if (func->second.mReturnType == RT_FLOAT) {
|
||||
CallInfo target = {(uintptr_t) func->second.rfloat, ARGSIZE_F, 0,
|
||||
0, nanojit::ABI_FASTCALL, func->first.c_str()};
|
||||
CallInfo target = {(uintptr_t) func->second.rfloat,
|
||||
ARGSIZE_F, 0, 0,
|
||||
nanojit::ABI_FASTCALL
|
||||
verbose_only(, func->first.c_str()) };
|
||||
*ci = target;
|
||||
|
||||
} else {
|
||||
CallInfo target = {(uintptr_t) func->second.rint, ARGSIZE_LO, 0,
|
||||
0, nanojit::ABI_FASTCALL, func->first.c_str()};
|
||||
CallInfo target = {(uintptr_t) func->second.rint,
|
||||
ARGSIZE_LO, 0, 0,
|
||||
nanojit::ABI_FASTCALL
|
||||
verbose_only(, func->first.c_str()) };
|
||||
*ci = target;
|
||||
}
|
||||
} else {
|
||||
|
@ -960,10 +1008,7 @@ Lirasm::assemble(istream &in)

    LirToken token;
    while (ts.get(token)) {
        if (mLirbuf->outOMem()) {
            cerr << "lirbuf out of memory" << endl;
            exit(1);
        }

        if (token.type == NEWLINE)
            continue;
        if (token.type != NAME)
@ -992,11 +1037,6 @@ Lirasm::assemble(istream &in)
            bad("unexpected stray opcode '" + op + "'");
        }
    }

    if (mLirbuf->outOMem()) {
        cerr << "lirbuf out of memory" << endl;
        exit(1);
    }
}

void
@ -1025,7 +1065,7 @@ Lirasm::handlePatch(LirTokenStream &in)
        bad("invalid guard reference");
    ins->record()->exit->target = i->second.fragptr;

    mFragmento->assm()->patch(ins->record()->exit);
    mAssm.patch(ins->record()->exit);
}

bool
@ -7,5 +7,5 @@ c = int 67
sti c ptr 2
zero = int 0
sti zero ptr 3
ss = call puts cdecl ptr
ss = icall puts cdecl ptr
ret ss
@ -8,17 +8,17 @@ c = int 67
sti c ptr 2
zero = int 0
sti zero ptr 3
ss = call puts cdecl ptr
ss = icall puts cdecl ptr
ret ss
.end

.begin b
rr = call a fastcall
rr = icall a fastcall
ret rr
.end

.begin main
ans = call b fastcall
ans = icall b fastcall
five = int 5
res = add five ans
ret res
@ -10,6 +10,6 @@ ret avg
.begin main
oneh = int 100
twoh = int 200
res = call avg fastcall twoh oneh
res = icall avg fastcall twoh oneh
ret res
.end
@ -39,6 +39,8 @@

#include "nanojit.h"

#ifdef FEATURE_NANOJIT

namespace nanojit
{
    Allocator::Allocator()
@ -90,3 +92,5 @@ namespace nanojit
        current_limit = (char*)mem + chunkbytes;
    }
}

#endif // FEATURE_NANOJIT
@ -99,9 +99,19 @@ inline void* operator new(size_t size, nanojit::Allocator &a) {
    return a.alloc(size);
}

/** global new overload enabling this pattern: new (allocator) T(...) */
inline void* operator new(size_t size, nanojit::Allocator *a) {
    return a->alloc(size);
}

/** global new[] overload enabling this pattern: new (allocator) T[] */
inline void* operator new[](size_t size, nanojit::Allocator& a) {
    return a.alloc(size);
}

/** global new[] overload enabling this pattern: new (allocator) T[] */
inline void* operator new[](size_t size, nanojit::Allocator* a) {
    return a->alloc(size);
}

#endif // __nanojit_Allocator__
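[Editor's note: with these overloads in scope, arena allocation reads like ordinary placement new, and nothing is freed per-object; the Allocator reclaims the whole arena at once. The diff's own call sites use the same pattern (e.g. "new (mParent.mAlloc) CallInfo()" above). A usage sketch:

    // Both spellings route through Allocator::alloc:
    nanojit::Allocator alloc;
    CallInfo *ci  = new (alloc) CallInfo();   // reference overload
    char     *buf = new (&alloc) char[64];    // pointer overload (new[])
    // No per-object delete: the memory lives until the Allocator
    // itself is reset or destroyed.
]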
@ -83,7 +83,7 @@ namespace nanojit
        LInsp i = in->read();
        const char* str = _names->formatIns(i);
        char* cpy = new (_alloc) char[strlen(str)+1];
        strcpy(cpy, str);
        VMPI_strcpy(cpy, str);
        _strs.insert(cpy);
        return i;
    }
@ -95,16 +95,21 @@ namespace nanojit
     *
     * - merging paths ( build a graph? ), possibly use external rep to drive codegen
     */
    Assembler::Assembler(CodeAlloc& codeAlloc, Allocator& alloc, AvmCore *core, LogControl* logc)
        : codeList(0)
    Assembler::Assembler(CodeAlloc& codeAlloc, Allocator& alloc, AvmCore* core, LogControl* logc)
        : codeList(NULL)
        , alloc(alloc)
        , _codeAlloc(codeAlloc)
        , _thisfrag(NULL)
        , _branchStateMap(alloc)
        , _patches(alloc)
        , _labels(alloc)
        , _epilogue(NULL)
        , _err(None)
        , config(core->config)
    {
        VMPI_memset(&_stats, 0, sizeof(_stats));
        nInit(core);
        (void)logc;
        verbose_only( _logc = logc; )
        verbose_only( _outputCache = 0; )
        verbose_only( outlineEOL[0] = '\0'; )
@ -152,10 +157,10 @@ namespace nanojit
            Register r = nRegisterAllocFromSet(set);
            return r;
        }
        counter_increment(steals);

        // nothing free, steal one
        // LSRA says pick the one with the furthest use
        counter_increment(steals);
        LIns* vic = findVictim(allow);
        NanoAssert(vic);
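[Editor's note: the eviction policy the comment names, as a standalone sketch: among registers permitted by 'allow', evict the one whose next use is furthest away (the linear-scan heuristic). nextUse[] is a stand-in for what the real findVictim derives from the active list:

    #include <stdint.h>

    // Pick the victim: allowed, and with the most distant next use.
    static int find_victim(const int32_t nextUse[], int nregs, uint32_t allow)
    {
        int best = -1;
        for (int r = 0; r < nregs; r++) {
            if (!(allow & (1u << r)))
                continue;
            if (best < 0 || nextUse[r] > nextUse[best])
                best = r;
        }
        return best;   // -1 if 'allow' was empty
    }
]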
@ -204,7 +209,7 @@ namespace nanojit
        arReset();
    }

#ifdef _DEBUG
    #ifdef _DEBUG
    void Assembler::pageValidate()
    {
        if (error()) return;
@ -212,16 +217,13 @@ namespace nanojit
        NanoAssertMsg(_inExit ? containsPtr(exitStart, exitEnd, _nIns) : containsPtr(codeStart, codeEnd, _nIns),
                      "Native instruction pointer overstep paging bounds; check overrideProtect for last instruction");
    }
#endif

    #endif

#endif

#ifdef _DEBUG

    void Assembler::resourceConsistencyCheck()
    {
        if (error()) return;
        NanoAssert(!error());

#ifdef NANOJIT_IA32
        NanoAssert((_allocator.active[FST0] && _fpuStkDepth == -1) ||
@ -267,7 +269,7 @@ namespace nanojit
    {
        // check registers
        RegAlloc *regs = &_allocator;
        uint32_t managed = regs->managed;
        RegisterMask managed = regs->managed;
        Register r = FirstReg;
        while(managed)
        {
@ -385,7 +387,7 @@ namespace nanojit
            // Existing reservation with a known register allocated, but
            // the register is not allowed.
            RegisterMask prefer = hint(ins, allow);
#ifdef AVMPLUS_IA32
#ifdef NANOJIT_IA32
            if (((rmask(r)&XmmRegs) && !(allow&XmmRegs)) ||
                ((rmask(r)&x87Regs) && !(allow&x87Regs)))
            {
@ -396,6 +398,15 @@ namespace nanojit
                ins->setReg(r);
                _allocator.addActive(r, ins);
            } else
#elif defined(NANOJIT_PPC)
            if (((rmask(r)&GpRegs) && !(allow&GpRegs)) ||
                ((rmask(r)&FpRegs) && !(allow&FpRegs)))
            {
                evict(r, ins);
                r = registerAlloc(prefer);
                ins->setReg(r);
                _allocator.addActive(r, ins);
            } else
#endif
            {
                // The post-state register holding 'ins' is 's', the pre-state
@ -426,7 +437,6 @@ namespace nanojit
        return r;
    }


    int Assembler::findMemFor(LIns *ins)
    {
        if (!ins->isUsed())
@ -783,7 +793,7 @@ namespace nanojit
        // save entry point pointers
        frag->fragEntry = fragEntry;
        frag->setCode(_nIns);
        // PERFM_NVPROF("code", CodeAlloc::size(codeList));
        PERFM_NVPROF("code", CodeAlloc::size(codeList));

#ifdef NANOJIT_IA32
        NanoAssertMsgf(_fpuStkDepth == 0,"_fpuStkDepth %d\n",_fpuStkDepth);
@ -837,7 +847,6 @@ namespace nanojit
        #define countlir_label() _nvprof("lir-label",1)
        #define countlir_xcc() _nvprof("lir-xcc",1)
        #define countlir_x() _nvprof("lir-x",1)
        #define countlir_loop() _nvprof("lir-loop",1)
        #define countlir_call() _nvprof("lir-call",1)
    #else
        #define countlir_live()
@ -863,7 +872,6 @@ namespace nanojit
        #define countlir_label()
        #define countlir_xcc()
        #define countlir_x()
        #define countlir_loop()
        #define countlir_call()
    #endif

@ -871,13 +879,13 @@ namespace nanojit
    {
        NanoAssert(_thisfrag->nStaticExits == 0);

        // trace must end with LIR_x, LIR_loop, LIR_[f]ret, LIR_xtbl, or LIR_[f]live
        // trace must end with LIR_x, LIR_[f]ret, LIR_xtbl, or LIR_[f]live
        NanoAssert(reader->pos()->isop(LIR_x) ||
                   reader->pos()->isop(LIR_ret) ||
                   reader->pos()->isop(LIR_fret) ||
                   reader->pos()->isop(LIR_xtbl) ||
                   reader->pos()->isop(LIR_live) ||
                   reader->pos()->isop(LIR_flive));
                   reader->pos()->isop(LIR_flive) ||
                   reader->pos()->isop(LIR_live));

        InsList pending_lives(alloc);

@ -958,11 +966,12 @@ namespace nanojit
                        break;
                    }

                    case LIR_ret:
                    case LIR_fret:
                    case LIR_ret: {
                        countlir_ret();
                        asm_ret(ins);
                        break;
                    }

                    // allocate some stack space. the value of this instruction
                    // is the address of the stack space.
@ -1340,7 +1349,7 @@ namespace nanojit
                        asm_call(ins);
                }
            }

#ifdef NJ_VERBOSE
            // We have to do final LIR printing inside this loop. If we do it
            // before this loop, we end up printing a lot of dead LIR
@ -1356,12 +1365,28 @@ namespace nanojit
            // field in another machine instruction).
            //
            if (_logc->lcbits & LC_Assembly) {
                outputf("    %s", _thisfrag->lirbuf->names->formatIns(ins));
                // Special case: a guard condition won't get printed next time
                // around the loop, so do it now.
                LirNameMap* names = _thisfrag->lirbuf->names;
                outputf("    %s", names->formatIns(ins));
                if (ins->isGuard() && ins->oprnd1()) {
                    outputf("    %s       # handled by the guard",
                            _thisfrag->lirbuf->names->formatIns(ins->oprnd1()));
                    // Special case: code is generated for guard conditions at
                    // the same time that code is generated for the guard
                    // itself. If the condition is only used by the guard, we
                    // must print it now otherwise it won't get printed. So
                    // we do print it now, with an explanatory comment. If
                    // the condition *is* used again we'll end up printing it
                    // twice, but that's ok.
                    outputf("    %s       # codegen'd with the %s",
                            names->formatIns(ins->oprnd1()), lirNames[op]);

                } else if (ins->isop(LIR_cmov) || ins->isop(LIR_qcmov)) {
                    // Likewise for cmov conditions.
                    outputf("    %s       # codegen'd with the %s",
                            names->formatIns(ins->oprnd1()), lirNames[op]);

                } else if (ins->isop(LIR_mod)) {
                    // There's a similar case when a div feeds into a mod.
                    outputf("    %s       # codegen'd with the mod",
                            names->formatIns(ins->oprnd1()));
                }
            }
#endif
@ -1385,7 +1410,7 @@ namespace nanojit
        underrunProtect(si->count * sizeof(NIns*) + 20);
        _nIns = reinterpret_cast<NIns*>(uintptr_t(_nIns) & ~(sizeof(NIns*) - 1));
        for (uint32_t i = 0; i < si->count; ++i) {
            _nIns = (NIns*) (((uint8*) _nIns) - sizeof(NIns*));
            _nIns = (NIns*) (((intptr_t) _nIns) - sizeof(NIns*));
            *(NIns**) _nIns = target;
        }
        si->table = (NIns**) _nIns;
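[Editor's note: what this hunk's loop is building, in isolation: the assembler emits backwards, so it aligns the cursor down to pointer size and then prepends one slot per switch target; the table pointer ends up at the lowest address. emit_table below is illustrative, not a nanojit API:

    #include <stdint.h>

    // Prepend a count-entry jump table below 'cursor', pointer-aligned.
    static void **emit_table(char *&cursor, uint32_t count, void *target)
    {
        cursor = (char *)((uintptr_t) cursor & ~(sizeof(void *) - 1));
        for (uint32_t i = 0; i < count; i++) {
            cursor -= sizeof(void *);
            *(void **) cursor = target;   // placeholder, patched later
        }
        return (void **) cursor;          // == si->table in the real code
    }
]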
@ -1437,13 +1462,19 @@ namespace nanojit
            else
                findRegFor(op1, i->isop(LIR_flive) ? FpRegs : GpRegs);
        }

        // clear this list since we have now dealt with those lifetimes. extending
        // their lifetimes again later (earlier in the code) serves no purpose.
        pending_lives.clear();
    }

    void Assembler::arFree(uint32_t idx)
    {
        verbose_only( printActivationState(" >FP"); )

        AR &ar = _activation;
        LIns *i = ar.entry[idx];
        NanoAssert(i != 0);
        do {
            ar.entry[idx] = 0;
            idx--;
@ -1451,51 +1482,43 @@ namespace nanojit
    }

#ifdef NJ_VERBOSE
    void Assembler::printActivationState()
    void Assembler::printActivationState(const char* what)
    {
        bool verbose_activation = false;
        if (!verbose_activation)
        if (!(_logc->lcbits & LC_Activation))
            return;

#ifdef NANOJIT_ARM
        // @todo Why is this here?!? This routine should be indep. of platform
        verbose_only(
            if (_logc->lcbits & LC_Assembly) {
                char* s = &outline[0];
                memset(s, ' ', 51);  s[51] = '\0';
                s += strlen(s);
                sprintf(s, " SP ");
                s += strlen(s);
                for(uint32_t i=_activation.lowwatermark; i<_activation.tos;i++) {
                    LInsp ins = _activation.entry[i];
                    if (ins && ins !=_activation.entry[i+1]) {
                        sprintf(s, "%d(%s) ", 4*i, _thisfrag->lirbuf->names->formatRef(ins));
                        s += strlen(s);
                    }
                }
                output(&outline[0]);
            }
        )
#else
        verbose_only(
            char* s = &outline[0];
            if (_logc->lcbits & LC_Assembly) {
                memset(s, ' ', 51);  s[51] = '\0';
                s += strlen(s);
                sprintf(s, " ebp ");
                s += strlen(s);
        char* s = &outline[0];
        VMPI_memset(s, ' ', 45);  s[45] = '\0';
        s += VMPI_strlen(s);
        VMPI_sprintf(s, "%s", what);
        s += VMPI_strlen(s);

                for(uint32_t i=_activation.lowwatermark; i<_activation.tos;i++) {
                    LInsp ins = _activation.entry[i];
                    if (ins) {
                        sprintf(s, "%d(%s) ", -4*i,_thisfrag->lirbuf->names->formatRef(ins));
                        s += strlen(s);
        int32_t max = _activation.tos < NJ_MAX_STACK_ENTRY ? _activation.tos : NJ_MAX_STACK_ENTRY;
        for(int32_t i = _activation.lowwatermark; i < max; i++) {
            LIns *ins = _activation.entry[i];
            if (ins) {
                const char* n = _thisfrag->lirbuf->names->formatRef(ins);
                if (ins->isop(LIR_alloc)) {
                    int32_t count = ins->size()>>2;
                    VMPI_sprintf(s," %d-%d(%s)", 4*i, 4*(i+count-1), n);
                    count += i-1;
                    while (i < count) {
                        NanoAssert(_activation.entry[i] == ins);
                        i++;
                    }
                }
                output(&outline[0]);
                else if (ins->isQuad()) {
                    VMPI_sprintf(s," %d+(%s)", 4*i, n);
                    NanoAssert(_activation.entry[i+1] == ins);
                    i++;
                }
                else {
                    VMPI_sprintf(s," %d(%s)", 4*i, n);
                }
            }
        )
#endif
            s += VMPI_strlen(s);
        }
        output(&outline[0]);
    }
#endif

@ -1515,6 +1538,7 @@ namespace nanojit
        int32_t start = ar.lowwatermark;
        int32_t i = 0;
        NanoAssert(start>0);
        verbose_only( printActivationState(" <FP"); )

        if (size == 1) {
            // easy most common case -- find a hole, or make the frame bigger
@@ -1783,77 +1807,77 @@ namespace nanojit
return a;
}

#ifdef NJ_VERBOSE
// "outline" must be able to hold the output line in addition to the
// outlineEOL buffer, which is concatenated onto outline just before it
// is printed.
char Assembler::outline[8192];
char Assembler::outlineEOL[512];
#ifdef NJ_VERBOSE
// "outline" must be able to hold the output line in addition to the
// outlineEOL buffer, which is concatenated onto outline just before it
// is printed.
char Assembler::outline[8192];
char Assembler::outlineEOL[512];

void Assembler::outputForEOL(const char* format, ...)
void Assembler::outputForEOL(const char* format, ...)
{
va_list args;
va_start(args, format);
outlineEOL[0] = '\0';
vsprintf(outlineEOL, format, args);
}

void Assembler::outputf(const char* format, ...)
{
va_list args;
va_start(args, format);
outline[0] = '\0';

// Format the output string and remember the number of characters
// that were written.
uint32_t outline_len = vsprintf(outline, format, args);

// Add the EOL string to the output, ensuring that we leave enough
// space for the terminating NULL character, then reset it so it
// doesn't repeat on the next outputf.
VMPI_strncat(outline, outlineEOL, sizeof(outline)-(outline_len+1));
outlineEOL[0] = '\0';

output(outline);
}

void Assembler::output(const char* s)
{
if (_outputCache)
{
va_list args;
va_start(args, format);
outlineEOL[0] = '\0';
vsprintf(outlineEOL, format, args);
char* str = new (alloc) char[VMPI_strlen(s)+1];
VMPI_strcpy(str, s);
_outputCache->insert(str);
}

void Assembler::outputf(const char* format, ...)
else
{
va_list args;
va_start(args, format);
outline[0] = '\0';

// Format the output string and remember the number of characters
// that were written.
uint32_t outline_len = vsprintf(outline, format, args);

// Add the EOL string to the output, ensuring that we leave enough
// space for the terminating NULL character, then reset it so it
// doesn't repeat on the next outputf.
strncat(outline, outlineEOL, sizeof(outline)-(outline_len+1));
outlineEOL[0] = '\0';

output(outline);
_logc->printf("%s\n", s);
}
}

void Assembler::output(const char* s)
{
if (_outputCache)
{
char* str = new (alloc) char[VMPI_strlen(s)+1];
strcpy(str, s);
_outputCache->insert(str);
}
else
{
_logc->printf("%s\n", s);
}
}
void Assembler::output_asm(const char* s)
{
if (!(_logc->lcbits & LC_Assembly))
return;

void Assembler::output_asm(const char* s)
{
if (!(_logc->lcbits & LC_Assembly))
return;
// Add the EOL string to the output, ensuring that we leave enough
// space for the terminating NULL character, then reset it so it
// doesn't repeat on the next outputf.
VMPI_strncat(outline, outlineEOL, sizeof(outline)-(strlen(outline)+1));
outlineEOL[0] = '\0';

// Add the EOL string to the output, ensuring that we leave enough
// space for the terminating NULL character, then reset it so it
// doesn't repeat on the next outputf.
strncat(outline, outlineEOL, sizeof(outline)-(strlen(outline)+1));
outlineEOL[0] = '\0';
output(s);
}

output(s);
}

char* Assembler::outputAlign(char *s, int col)
{
int len = strlen(s);
int add = ((col-len)>0) ? col-len : 1;
memset(&s[len], ' ', add);
s[col] = '\0';
return &s[col];
}
#endif // verbose
char* Assembler::outputAlign(char *s, int col)
{
int len = (int)VMPI_strlen(s);
int add = ((col-len)>0) ? col-len : 1;
VMPI_memset(&s[len], ' ', add);
s[col] = '\0';
return &s[col];
}
#endif // NJ_VERBOSE

uint32_t CallInfo::_count_args(uint32_t mask) const
{
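The sizeof(outline)-(outline_len+1) bound used in outputf above is what keeps the appended EOL text from overflowing the buffer: strncat writes at most that many characters plus a terminating NUL, so the total never exceeds sizeof(outline). A minimal standalone sketch of the same sizing rule (plain C++, not nanojit code; the buffer size and strings are illustrative):

    #include <cstdio>
    #include <cstring>

    int main() {
        char line[32];
        const char* eol = " <= spilled";
        // Format the main text, remembering how many chars were written.
        size_t len = std::snprintf(line, sizeof(line), "mov eax, ebx");
        // Append at most sizeof(line)-(len+1) chars so the terminating
        // NUL always fits, exactly as the outputf code above does.
        std::strncat(line, eol, sizeof(line) - (len + 1));
        std::puts(line);
    }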
@@ -1893,3 +1917,4 @@ namespace nanojit
return labels.get(label);
}
}
#endif /* FEATURE_NANOJIT */

@@ -157,7 +157,7 @@ namespace nanojit
void output_asm(const char* s);

bool outputAddr, vpad[3]; // if outputAddr=true then next asm instr. will include address in output
void printActivationState();
void printActivationState(const char* what);

StringList* _outputCache;

@@ -176,13 +176,11 @@ namespace nanojit
void beginAssembly(Fragment *frag);

void releaseRegisters();

void patch(GuardRecord *lr);
void patch(SideExit *exit);
#ifdef NANOJIT_IA32
void patch(SideExit *exit, SwitchInfo* si);
#endif

AssmError error() { return _err; }
void setError(AssmError e) { _err = e; }

@@ -244,7 +242,7 @@ namespace nanojit

Allocator& alloc;
CodeAlloc& _codeAlloc;
DWB(Fragment*) _thisfrag;
Fragment* _thisfrag;
RegAllocMap _branchStateMap;
NInsMap _patches;
LabelStateMap _labels;

@@ -275,7 +273,6 @@ namespace nanojit
void asm_spilli(LInsp i, bool pop);
void asm_spill(Register rr, int d, bool pop, bool quad);
void asm_load64(LInsp i);
void asm_pusharg(LInsp p);
void asm_ret(LInsp p);
void asm_quad(LInsp i);
void asm_fcond(LInsp i);

@@ -286,7 +283,6 @@ namespace nanojit
void asm_cmov(LInsp i);
void asm_param(LInsp i);
void asm_int(LInsp i);
void asm_short(LInsp i);
void asm_qlo(LInsp i);
void asm_qhi(LInsp i);
void asm_fneg(LInsp ins);

@@ -297,7 +293,6 @@ namespace nanojit
Register asm_prep_fcall(Reservation *rR, LInsp ins);
void asm_nongp_copy(Register r, Register s);
void asm_call(LInsp);
void asm_arg(ArgSize, LInsp, Register);
Register asm_binop_rhs_reg(LInsp ins);
NIns* asm_branch(bool branchOnFalse, LInsp cond, NIns* targ);
void asm_switch(LIns* ins, NIns* target);

@@ -84,8 +84,9 @@ namespace nanojit
}

CodeList* CodeAlloc::firstBlock(CodeList* term) {
char* end = (char*)alignUp(term, bytesPerPage);
return (CodeList*) (end - bytesPerAlloc);
// use uintptr_t, rather than char*, to avoid "increases required alignment" warning
uintptr_t end = (uintptr_t)alignUp(term, bytesPerPage);
return (CodeList*) (end - (uintptr_t)bytesPerAlloc);
}

int round(size_t x) {

@@ -166,7 +167,7 @@ namespace nanojit
CodeList *coalescedBlock = blk->higher;

if ( coalescedBlock->size() >= minAllocSize ) {
// Unlink higher from the available block chain.
// Unlink coalescedBlock from the available block chain.
if ( availblocks == coalescedBlock ) {
removeBlock(availblocks);
}

@@ -183,7 +184,7 @@ namespace nanojit
}
}

// combine blk->higher into blk (destroy blk->higher)
// combine blk->higher into blk (destroy coalescedBlock)
blk->higher = higher;
higher->lower = blk;
}

@@ -299,12 +300,20 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
}

#elif defined AVMPLUS_UNIX
#ifdef ANDROID
void CodeAlloc::flushICache(CodeList* &blocks) {
for (CodeList *b = blocks; b != 0; b = b->next) {
cacheflush((int)b->start(), (int)b->start()+b->size(), 0);
}
}
#else
// fixme: __clear_cache is a libgcc feature, test for libgcc or gcc
void CodeAlloc::flushICache(CodeList* &blocks) {
for (CodeList *b = blocks; b != 0; b = b->next) {
__clear_cache((char*)b->start(), (char*)b->start()+b->size());
}
}
#endif
#endif // AVMPLUS_MAC && NANOJIT_PPC

void CodeAlloc::addBlock(CodeList* &blocks, CodeList* b) {
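Both branches above flush the instruction cache over each block's [start, start+size) range: cacheflush is the Android/Bionic call, while __clear_cache is the libgcc primitive (exposed by GCC-compatible compilers as __builtin___clear_cache). A hedged standalone sketch of the libgcc path (the buffer here is plain data, so the flush is a no-op demonstration):

    #include <cstddef>

    // Flush the CPU instruction cache for [buf, buf+len), as the
    // non-Android Unix path of flushICache does per code block.
    static void flush_icache(char* buf, size_t len) {
        __builtin___clear_cache(buf, buf + len);
    }

    int main() {
        char code[64] = {0};
        flush_icache(code, sizeof(code));
    }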
@@ -42,12 +42,9 @@

namespace nanojit
{
// Temporary tracemonkey hack until namespaces are sorted out.
using namespace MMgc;

/** return true if ptr is in the range [start, end) */
/** return true if ptr is in the range [start, end] */
inline bool containsPtr(const NIns* start, const NIns* end, const NIns* ptr) {
return ptr >= start && ptr < end;
return ptr >= start && ptr <= end;
}

/**
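The change above widens containsPtr from the half-open range [start, end) to the inclusive range [start, end], so a pointer exactly at end now counts as contained. A small self-contained illustration of the two predicates (illustrative types and values, not nanojit code):

    #include <cassert>

    template <typename T>
    bool containsHalfOpen(const T* start, const T* end, const T* p) {
        return p >= start && p < end;   // old behaviour: [start, end)
    }

    template <typename T>
    bool containsInclusive(const T* start, const T* end, const T* p) {
        return p >= start && p <= end;  // new behaviour: [start, end]
    }

    int main() {
        int buf[4];
        assert(!containsHalfOpen(buf, buf + 3, buf + 3));
        assert(containsInclusive(buf, buf + 3, buf + 3)); // end is now included
    }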
@@ -39,6 +39,8 @@

#include "nanojit.h"

#ifdef FEATURE_NANOJIT

namespace nanojit
{
BitSet::BitSet(Allocator& allocator, int nbits)

@@ -89,3 +91,5 @@ namespace nanojit
bits = bits2;
}
}

#endif // FEATURE_NANOJIT

@@ -279,6 +279,7 @@ namespace nanojit
, nbuckets(nbuckets)
, buckets(new (a) Seq<Node>*[nbuckets])
{
NanoAssert(nbuckets > 0);
clear();
}

@@ -40,7 +40,6 @@
* ***** END LICENSE BLOCK ***** */

#include "nanojit.h"
#undef MEMORY_INFO

namespace nanojit
{

@@ -38,13 +38,6 @@
* ***** END LICENSE BLOCK ***** */

#include "nanojit.h"
#include <stdio.h>
#include <ctype.h>

#ifdef PERFM
#include "../vprof/vprof.h"
#endif /* PERFM */

namespace nanojit
{

@@ -114,7 +107,7 @@ namespace nanojit
names(NULL),
#endif
abi(ABI_FASTCALL), state(NULL), param1(NULL), sp(NULL), rp(NULL),
_allocator(alloc), _bytesAllocated(0)
_allocator(alloc)
{
clear();
}

@@ -253,10 +246,10 @@ namespace nanojit
return ins;
}

LInsp LirBufWriter::insGuard(LOpcode op, LInsp c, LInsp data)
LInsp LirBufWriter::insGuard(LOpcode op, LInsp c, GuardRecord *gr)
{
debug_only( if (LIR_x == op || LIR_xbarrier == op) NanoAssert(!c); )
return ins2(op, c, data);
return ins2(op, c, (LIns*)gr);
}

LInsp LirBufWriter::insBranch(LOpcode op, LInsp condition, LInsp toLabel)

@@ -785,7 +778,7 @@ namespace nanojit
return out->ins3(v, oprnd1, oprnd2, oprnd3);
}

LIns* ExprFilter::insGuard(LOpcode v, LInsp c, LInsp x)
LIns* ExprFilter::insGuard(LOpcode v, LInsp c, GuardRecord *gr)
{
if (v == LIR_xt || v == LIR_xf) {
if (c->isconst()) {

@@ -801,7 +794,7 @@ namespace nanojit
// so assert in debug builds.
NanoAssertMsg(0, "Constantly false guard detected");
#endif
return out->insGuard(LIR_x, NULL, x);
return out->insGuard(LIR_x, NULL, gr);
}
}
else {

@@ -814,7 +807,7 @@ namespace nanojit
}
}
}
return out->insGuard(v, c, x);
return out->insGuard(v, c, gr);
}

LIns* ExprFilter::insBranch(LOpcode v, LIns *c, LIns *t)

@@ -834,6 +827,19 @@ namespace nanojit
return out->insBranch(v, c, t);
}

LIns* ExprFilter::insLoad(LOpcode op, LIns* base, int32_t off) {
if (base->isconstp() && !isS8(off)) {
// if the effective address is constant, then transform:
// ld const[bigconst] => ld (const+bigconst)[0]
// note: we don't do this optimization for <8bit field offsets,
// under the assumption that we're more likely to CSE-match the
// constant base address if we don't const-fold small offsets.
uintptr_t p = (uintptr_t)base->constvalp() + off;
return out->insLoad(op, insImmPtr((void*)p), 0);
}
return out->insLoad(op, base, off);
}

LIns* LirWriter::ins_eq0(LIns* oprnd1)
{
return ins2i(LIR_eq, oprnd1, 0);

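The new ExprFilter::insLoad above folds a constant base plus a large displacement into a single absolute address, while deliberately leaving sub-8-bit offsets alone so the shared base stays CSE-able. A standalone sketch of the same transform (plain integers standing in for LIR operands; all names are illustrative, not nanojit API):

    #include <cstdint>
    #include <cstdio>

    static bool isS8(int32_t v) { return v == (int32_t)(int8_t)v; }

    // Computes the (base, offset) pair a load would actually use.
    static void foldAddress(uintptr_t base, int32_t off,
                            uintptr_t* outBase, int32_t* outOff) {
        if (!isS8(off)) {
            // ld const[bigconst] => ld (const+bigconst)[0]
            *outBase = base + off;
            *outOff  = 0;
        } else {
            // keep small field offsets so the base stays CSE-able
            *outBase = base;
            *outOff  = off;
        }
    }

    int main() {
        uintptr_t b; int32_t o;
        foldAddress(0x10000, 0x200, &b, &o);
        std::printf("0x%lx[%d]\n", (unsigned long)b, o); // 0x10200[0]
    }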
@@ -1146,7 +1152,6 @@ namespace nanojit
NanoAssert(i->isLInsOp3());
return hash3(op, i->oprnd1(), i->oprnd2(), i->oprnd3());
}
NanoAssert(0);
}

inline bool LInsHashSet::equals(LInsp a, LInsp b)

@@ -1188,7 +1193,6 @@ namespace nanojit
NanoAssert(a->isLInsOp3());
return a->oprnd1() == b->oprnd1() && a->oprnd2() == b->oprnd2() && a->oprnd3() == b->oprnd3();
}
NanoAssert(0);
}

void LInsHashSet::grow()

@@ -1444,7 +1448,7 @@ namespace nanojit
total++;

// first handle side-effect instructions
if (!i->isCse())
if (i->isStmt())
{
live.add(i,0);
if (i->isGuard())

@@ -1531,7 +1535,7 @@ namespace nanojit

void LirNameMap::copyName(LInsp i, const char *s, int suffix) {
char s2[200];
if (isdigit(s[VMPI_strlen(s)-1])) {
if (VMPI_isdigit(s[VMPI_strlen(s)-1])) {
// if s ends with a digit, add '_' to clarify the suffix
VMPI_sprintf(s2,"%s_%d", s, suffix);
} else {

@@ -1704,19 +1708,19 @@ namespace nanojit
formatGuard(i, s);
break;

case LIR_add:
case LIR_add: case LIR_qiadd:
case LIR_iaddp: case LIR_qaddp:
case LIR_sub:
case LIR_mul:
case LIR_mul:
case LIR_div:
case LIR_fadd:
case LIR_fsub:
case LIR_fmul:
case LIR_fmul:
case LIR_fdiv:
case LIR_and:
case LIR_or:
case LIR_and: case LIR_qiand:
case LIR_or: case LIR_qior:
case LIR_xor: case LIR_qxor:
case LIR_lsh:
case LIR_lsh: case LIR_qilsh:
case LIR_rsh: case LIR_qirsh:
case LIR_ush: case LIR_qursh:
case LIR_eq: case LIR_qeq:

@@ -1733,10 +1737,6 @@ namespace nanojit
case LIR_fle:
case LIR_fgt:
case LIR_fge:
case LIR_qiadd:
case LIR_qiand:
case LIR_qilsh:
case LIR_qior:
VMPI_sprintf(s, "%s = %s %s, %s", formatRef(i), lirNames[op],
formatRef(i->oprnd1()),
formatRef(i->oprnd2()));

@@ -1876,7 +1876,7 @@ namespace nanojit
return out->insLoad(v,base,disp);
}

LInsp CseFilter::insGuard(LOpcode v, LInsp c, LInsp x)
LInsp CseFilter::insGuard(LOpcode v, LInsp c, GuardRecord *gr)
{
// LIR_xt and LIR_xf guards are CSEable. Note that we compare the
// opcode and condition when determining if two guards are equivalent

@@ -1902,9 +1902,9 @@ namespace nanojit
LInsp found = exprs.find1(v, c, k);
if (found)
return 0;
return exprs.add(out->insGuard(v,c,x), k);
return exprs.add(out->insGuard(v,c,gr), k);
}
return out->insGuard(v, c, x);
return out->insGuard(v, c, gr);
}

LInsp CseFilter::insCall(const CallInfo *ci, LInsp args[])

@@ -1920,7 +1920,7 @@ namespace nanojit
return out->insCall(ci, args);
}

void compile(Assembler* assm, Fragment* frag, Allocator& alloc verbose_only(, LabelMap* labels))
void compile(Assembler* assm, Fragment* frag verbose_only(, Allocator& alloc, LabelMap* labels))
{
verbose_only(
LogControl *logc = assm->_logc;

@@ -51,8 +51,6 @@
*/
namespace nanojit
{
using namespace MMgc;

enum LOpcode
#if defined(_MSC_VER) && _MSC_VER >= 1400
#pragma warning(disable:4480) // nonstandard extension used: specifying underlying type for enum

@@ -930,7 +928,7 @@ namespace nanojit

GuardRecord *LIns::record() const {
NanoAssert(isGuard());
return (GuardRecord*)oprnd2()->payload();
return (GuardRecord*)oprnd2();
}

int32_t LIns::disp() const {

@@ -1012,15 +1010,14 @@ namespace nanojit
return toLInsC()->ci;
}

// make it a GCObject so we can explicitly delete it early
class LirWriter : public GCObject
class LirWriter
{
public:
LirWriter *out;

virtual ~LirWriter() {}
LirWriter(LirWriter* out)
: out(out) {}
virtual ~LirWriter() {}

virtual LInsp ins0(LOpcode v) {
return out->ins0(v);

@@ -1034,8 +1031,8 @@ namespace nanojit
virtual LInsp ins3(LOpcode v, LIns* a, LIns* b, LIns* c) {
return out->ins3(v, a, b, c);
}
virtual LInsp insGuard(LOpcode v, LIns *c, LIns *x) {
return out->insGuard(v, c, x);
virtual LInsp insGuard(LOpcode v, LIns *c, GuardRecord *gr) {
return out->insGuard(v, c, gr);
}
virtual LInsp insBranch(LOpcode v, LInsp condition, LInsp to) {
return out->insBranch(v, condition, to);

@@ -1070,6 +1067,13 @@ namespace nanojit
virtual LInsp insSkip(size_t size) {
return out->insSkip(size);
}
void insAssert(LIns* expr) {
#if defined DEBUG
LIns* branch = insBranch(LIR_jt, expr, NULL);
ins0(LIR_dbreak);
branch->setTarget(ins0(LIR_label));
#endif
}

// convenience functions

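In debug builds the insAssert helper above lowers to a jump-if-true around a LIR_dbreak: when the expression holds, the branch skips the trap; when it is false, execution falls into the break. The control flow is equivalent to this C++ sketch (debug_break is a stand-in for whatever LIR_dbreak ultimately compiles to):

    #include <cassert>

    // Stand-in for the trap LIR_dbreak would emit.
    static void debug_break() { assert(!"LIR assertion failed"); }

    // Equivalent of: branch = insBranch(LIR_jt, expr, NULL);
    //                ins0(LIR_dbreak);
    //                branch->setTarget(ins0(LIR_label));
    static void insAssertEquivalent(bool expr) {
    #ifdef DEBUG
        if (!expr)           // LIR_jt jumps past the dbreak when expr is true
            debug_break();   // LIR_dbreak: trap on a false assertion
    #endif
        (void)expr;          // release builds emit nothing
    }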
@@ -1203,8 +1207,8 @@ namespace nanojit
}
}

LIns* insGuard(LOpcode op, LInsp cond, LIns *x) {
return add_flush(out->insGuard(op,cond,x));
LIns* insGuard(LOpcode op, LInsp cond, GuardRecord *gr) {
return add_flush(out->insGuard(op,cond,gr));
}

LIns* insBranch(LOpcode v, LInsp condition, LInsp to) {

@@ -1262,8 +1266,9 @@ namespace nanojit
LIns* ins1(LOpcode v, LIns* a);
LIns* ins2(LOpcode v, LIns* a, LIns* b);
LIns* ins3(LOpcode v, LIns* a, LIns* b, LIns* c);
LIns* insGuard(LOpcode, LIns *cond, LIns *);
LIns* insGuard(LOpcode, LIns *cond, GuardRecord *);
LIns* insBranch(LOpcode, LIns *cond, LIns *target);
LIns* insLoad(LOpcode op, LInsp base, int32_t off);
};

// @todo, this could be replaced by a generic HashMap or HashSet, if we had one

@@ -1319,7 +1324,7 @@ namespace nanojit
LIns* ins3(LOpcode v, LInsp, LInsp, LInsp);
LIns* insLoad(LOpcode op, LInsp cond, int32_t d);
LIns* insCall(const CallInfo *call, LInsp args[]);
LIns* insGuard(LOpcode op, LInsp cond, LIns *x);
LIns* insGuard(LOpcode op, LInsp cond, GuardRecord *gr);
};

class LirBuffer

@@ -1395,7 +1400,7 @@ namespace nanojit
LInsp insImmq(uint64_t imm);
LInsp insImmf(double d);
LInsp insCall(const CallInfo *call, LInsp args[]);
LInsp insGuard(LOpcode op, LInsp cond, LIns *x);
LInsp insGuard(LOpcode op, LInsp cond, GuardRecord *gr);
LInsp insBranch(LOpcode v, LInsp condition, LInsp to);
LInsp insAlloc(int32_t size);
LInsp insSkip(size_t);

@@ -1432,14 +1437,11 @@ namespace nanojit
LInsp pos() {
return _i;
}
void setpos(LIns *i) {
_i = i;
}
};

class Assembler;

void compile(Assembler *assm, Fragment *frag, Allocator& alloc verbose_only(, LabelMap*));
void compile(Assembler *assm, Fragment *frag verbose_only(, Allocator& alloc, LabelMap*));
verbose_only(void live(Allocator& alloc, Fragment *frag, LirBuffer *lirbuf);)

class StackFilter: public LirFilter

@@ -74,7 +74,7 @@
OPDEF(start, 0, 0, Op0) // start of a fragment
OPDEF(regfence, 1, 0, Op0) // register fence, no register allocation is allowed across this meta instruction
OPDEF(skip, 2, 1, Sk) // holds blobs ("payloads") of data; also links pages
OPDEF(unused3, 3,-1, None)
OPDEF(dbreak, 3, 0, Op0)
OPDEF(unused4, 4,-1, None)
OPDEF(unused5, 5,-1, None)
OPDEF(unused6, 6,-1, None)

@@ -223,6 +223,7 @@ OPDEF64(i2q, 26, 1, Op1) // sign-extend i32 to i64
OPDEF64(u2q, 27, 1, Op1) // zero-extend u32 to u64
OPDEF64(i2f, 28, 1, Op1) // convert a signed 32-bit integer to a float
OPDEF64(u2f, 29, 1, Op1) // convert an unsigned 32-bit integer to a float

OPDEF64(unused30_64, 30,-1, None)
OPDEF64(unused31_64, 31,-1, None)
OPDEF64(unused32_64, 32,-1, None)

@@ -59,7 +59,7 @@
#elif defined(NANOJIT_ARM)
#include "NativeARM.h"
#elif defined(NANOJIT_PPC)
#include "NativePpc.h"
#include "NativePPC.h"
#elif defined(NANOJIT_SPARC)
#include "NativeSparc.h"
#elif defined(NANOJIT_X64)

@@ -69,8 +69,6 @@
#endif

namespace nanojit {
const size_t NJ_PAGE_SIZE = 1 << NJ_LOG2_PAGE_SIZE;

class Fragment;
struct SideExit;
struct SwitchInfo;

@@ -121,9 +119,9 @@ namespace nanojit {
if (_logc->lcbits & LC_Assembly) { \
outline[0]='\0'; \
if (outputAddr) \
sprintf(outline, "%010lx ", (unsigned long)_nIns); \
VMPI_sprintf(outline, "%010lx ", (unsigned long)_nIns); \
else \
memset(outline, (int)' ', 10+3); \
VMPI_memset(outline, (int)' ', 10+3); \
sprintf(&outline[13], ##__VA_ARGS__); \
Assembler::outputAlign(outline, 35); \
_allocator.formatRegisters(outline, _thisfrag); \

@@ -554,18 +554,6 @@ Assembler::genEpilogue()
return _nIns;
}

/*
* This should never be called; ARM only uses the longer form.
* TODO: We should delete this as it is never called from outside this file. It
* should be declared in the DECLARE_PLATFORM_ASSEMBLER block of each native
* back-end where required.
*/
void
Assembler::asm_arg(ArgSize sz, LInsp p, Register r)
{
NanoAssert(0);
}

/*
* asm_arg will encode the specified argument according to the current ABI, and
* will update r and stkd as appropriate so that the next argument can be

@@ -56,9 +56,6 @@

namespace nanojit
{

const int NJ_LOG2_PAGE_SIZE = 12; // 4K

// only d0-d6 are actually used; we'll use d7 as s14-s15 for i2f/u2f/etc.
#define NJ_VFP_MAX_REGISTERS 8
#define NJ_MAX_REGISTERS (11 + NJ_VFP_MAX_REGISTERS)

@@ -93,7 +90,7 @@ typedef enum {
LR = 14,
PC = 15,

// FP regs
// VFP regs (we currently only use D0-D6 and S14)
D0 = 16,
D1 = 17,
D2 = 18,

@@ -105,8 +102,8 @@ typedef enum {
// D7 is still listed here for completeness and to facilitate assertions.
D7 = 23,

FirstFloatReg = 16,
LastFloatReg = 22,
FirstFloatReg = D0,
LastFloatReg = D6,

FirstReg = 0,
LastReg = 22, // This excludes D7 from the register allocator.

@@ -461,6 +458,7 @@ enum {
// _d = 0 - _r
#define RSBS(_d,_r) ALUi(AL, rsb, 1, _d, _r, 0)

// MVN
// _d = ~_r (one's complement)
#define MVN(_d,_r) ALUr(AL, mvn, 0, _d, 0, _r)
#define MVNis_chk(_d,_op2imm,_stat,_chk) ALUi_chk(AL, mvn, _stat, _d, 0, op2imm, _chk)

@@ -73,18 +73,18 @@ namespace nanojit
* see http://developer.apple.com/documentation/developertools/Conceptual/LowLevelABI/index.html
* stack layout (higher address going down)
* sp -> out linkage area
* out parameter area
* local variables
* saved registers
* sp' -> in linkage area
* in parameter area
* out parameter area
* local variables
* saved registers
* sp' -> in linkage area
* in parameter area
*
* linkage area layout:
* PPC32 PPC64
* sp+0 sp+0 saved sp
* sp+4 sp+8 saved cr
* sp+8 sp+16 saved lr
* sp+12 sp+24 reserved
* sp+0 sp+0 saved sp
* sp+4 sp+8 saved cr
* sp+8 sp+16 saved lr
* sp+12 sp+24 reserved
*/

const int linkage_size = 6*sizeof(void*);

@@ -108,7 +108,7 @@ namespace nanojit
}

NIns *patchEntry = _nIns;
MR(FP,SP); // save SP to use as a FP
MR(FP,SP); // save SP to use as a FP
STP(FP, cr_offset, SP); // cheat and save our FP in linkage.cr
STP(R0, lr_offset, SP); // save LR in linkage.lr
MFLR(R0);

@@ -142,9 +142,8 @@ namespace nanojit

void Assembler::asm_ld(LIns *ins) {
LIns* base = ins->oprnd1();
LIns* disp = ins->oprnd2();
int d = ins->disp();
Register rr = prepResultReg(ins, GpRegs);
int d = disp->constval();
Register ra = getBaseReg(base, d, GpRegs);

#if !PEDANTIC

@@ -197,7 +196,7 @@ namespace nanojit
Register rr = prepResultReg(ins, FpRegs);
#endif

int dr = ins->oprnd2()->constval();
int dr = ins->disp();
Register ra = getBaseReg(base, dr, GpRegs);

#ifdef NANOJIT_64BIT

@@ -265,7 +264,7 @@ namespace nanojit
#if !PEDANTIC && !defined NANOJIT_64BIT
if (value->isop(LIR_quad) && isS16(dr) && isS16(dr+4)) {
// quad constant and short offset
uint64_t q = value->constvalq();
uint64_t q = value->imm64();
STW(R0, dr, ra); // hi
asm_li(R0, int32_t(q>>32)); // hi
STW(R0, dr+4, ra); // lo

@@ -507,7 +506,7 @@ namespace nanojit

#if !PEDANTIC
if (b->isconst()) {
int32_t d = b->constval();
int32_t d = b->imm32();
if (isS16(d)) {
if (condop >= LIR_eq && condop <= LIR_ge) {
CMPWI(cr, ra, d);

@@ -562,7 +561,7 @@ namespace nanojit

void Assembler::asm_ret(LIns *ins) {
genEpilogue();
assignSavedParams();
assignSavedRegs();
LIns *value = ins->oprnd1();
Register r = ins->isop(LIR_ret) ? R3 : F1;
findSpecificRegFor(value, r);

@@ -582,9 +581,9 @@ namespace nanojit
}
else if (i->isconst()) {
if (!resv->arIndex) {
reserveFree(i);
i->resv()->clear();
}
asm_li(r, i->constval());
asm_li(r, i->imm32());
}
else {
d = findMemFor(i);

@@ -596,10 +595,9 @@ namespace nanojit
} else {
LWZ(r, d, FP);
}
verbose_only(
if (_verbose)
outputf(" restore %s",_thisfrag->lirbuf->names->formatRef(i));
)
verbose_only( if (_logc->lcbits & LC_RegAlloc) {
outputForEOL(" <= restore %s",
_thisfrag->lirbuf->names->formatRef(i)); } )
}
}

@@ -609,13 +607,7 @@ namespace nanojit

void Assembler::asm_int(LIns *ins) {
Register rr = prepResultReg(ins, GpRegs);
asm_li(rr, ins->constval());
}

void Assembler::asm_short(LIns *ins) {
int32_t val = ins->imm16();
Register rr = prepResultReg(ins, GpRegs);
LI(rr, val);
asm_li(rr, ins->imm32());
}

void Assembler::asm_fneg(LIns *ins) {

@@ -625,8 +617,8 @@ namespace nanojit
}

void Assembler::asm_param(LIns *ins) {
uint32_t a = ins->imm8();
uint32_t kind = ins->imm8b();
uint32_t a = ins->paramArg();
uint32_t kind = ins->paramKind();
if (kind == 0) {
// ordinary param
// first eight args always in R3..R10 for PPC

@@ -652,7 +644,7 @@ namespace nanojit

bool indirect;
if (!(indirect = call->isIndirect())) {
verbose_only(if (_verbose)
verbose_only(if (_logc->lcbits & LC_Assembly)
outputf(" %p:", _nIns);
)
br((NIns*)call->_address, 1);

@@ -723,7 +715,7 @@ namespace nanojit
#endif
// arg goes in specific register
if (p->isconst()) {
asm_li(r, p->constval());
asm_li(r, p->imm32());
} else {
Reservation* rA = getresv(p);
if (rA) {

@@ -802,7 +794,7 @@ namespace nanojit
Register ra = findRegFor(lhs, GpRegs);

if (rhs->isconst()) {
int32_t rhsc = rhs->constval();
int32_t rhsc = rhs->imm32();
if (isS16(rhsc)) {
// ppc arith immediate ops sign-extend the imm16 value
switch (op) {

@@ -874,10 +866,10 @@ namespace nanojit
XOR(rr, ra, rb);
break;
case LIR_sub: SUBF(rr, rb, ra); break;
case LIR_lsh: SLW(rr, ra, R0); ANDI(R0, rb, 31); break;
case LIR_rsh: SRAW(rr, ra, R0); ANDI(R0, rb, 31); break;
case LIR_ush: SRW(rr, ra, R0); ANDI(R0, rb, 31); break;
case LIR_mul: MULLW(rr, ra, rb); break;
case LIR_lsh: SLW(rr, ra, R0); ANDI(R0, rb, 31); break;
case LIR_rsh: SRAW(rr, ra, R0); ANDI(R0, rb, 31); break;
case LIR_ush: SRW(rr, ra, R0); ANDI(R0, rb, 31); break;
case LIR_mul: MULLW(rr, ra, rb); break;
#ifdef NANOJIT_64BIT
case LIR_qilsh:
SLD(rr, ra, R0);

@@ -927,8 +919,8 @@ namespace nanojit

#if defined NANOJIT_64BIT && !PEDANTIC
FCFID(r, r); // convert to double
LFD(r, d, SP); // load into fpu register
STD(v, d, SP); // save int64
LFD(r, d, SP); // load into fpu register
STD(v, d, SP); // save int64
EXTSW(v, v); // extend sign destructively, ok since oprnd1 only is 32bit
#else
FSUB(r, r, F0);

@@ -950,8 +942,8 @@ namespace nanojit

#if defined NANOJIT_64BIT && !PEDANTIC
FCFID(r, r); // convert to double
LFD(r, d, SP); // load into fpu register
STD(v, d, SP); // save int64
LFD(r, d, SP); // load into fpu register
STD(v, d, SP); // save int64
CLRLDI(v, v, 32); // zero-extend destructively
#else
FSUB(r, r, F0);

@@ -1005,7 +997,7 @@ namespace nanojit
int32_t hi, lo;
} w;
};
d = ins->constvalf();
d = ins->imm64f();
LFD(r, 12, SP);
STW(R0, 12, SP);
asm_li(R0, w.hi);

@@ -1013,7 +1005,7 @@ namespace nanojit
asm_li(R0, w.lo);
}
else {
int64_t q = ins->constvalq();
int64_t q = ins->imm64();
if (isS32(q)) {
asm_li(r, int32_t(q));
return;

@@ -1094,7 +1086,7 @@ namespace nanojit
#endif
if (pc - instr - br_size < top) {
// really do need a page break
verbose_only(if (_verbose) outputf("newpage %p:", pc);)
verbose_only(if (_logc->lcbits & LC_Assembly) outputf("newpage %p:", pc);)
codeAlloc();
}
// now emit the jump, but make sure we won't need another page break.

@@ -1105,7 +1097,7 @@ namespace nanojit
}
#else
if (pc - instr < top) {
verbose_only(if (_verbose) outputf("newpage %p:", pc);)
verbose_only(if (_logc->lcbits & LC_Assembly) outputf("newpage %p:", pc);)
codeAlloc();
// this jump will call underrunProtect again, but since we're on a new
// page, nothing will happen.

@@ -1116,17 +1108,19 @@ namespace nanojit

void Assembler::asm_cmov(LIns *ins) {
NanoAssert(ins->isop(LIR_cmov) || ins->isop(LIR_qcmov));
LIns* cond = ins->oprnd1();
NanoAssert(cond->isCmp());
LIns* iftrue = ins->oprnd2();
LIns* cond = ins->oprnd1();
LIns* iftrue = ins->oprnd2();
LIns* iffalse = ins->oprnd3();

NanoAssert(cond->isCmp());
NanoAssert(iftrue->isQuad() == iffalse->isQuad());

// fixme: we could handle fpu registers here, too, since we're just branching
Register rr = prepResultReg(ins, GpRegs);
findSpecificRegFor(iftrue, rr);
Register rf = findRegFor(iffalse, GpRegs & ~rmask(rr));
NIns *after = _nIns;
verbose_only(if (_verbose) outputf("%p:",after);)
verbose_only(if (_logc->lcbits & LC_Assembly) outputf("%p:",after);)
MR(rr, rf);
asm_branch(false, cond, after);
}

@@ -1138,9 +1132,9 @@ namespace nanojit
prefer = rmask(R3);
else if (op == LIR_fcall)
prefer = rmask(F1);
else if (op == LIR_iparam) {
if (i->imm8() < 8) {
prefer = rmask(argRegs[i->imm8()]);
else if (op == LIR_param) {
if (i->paramArg() < 8) {
prefer = rmask(argRegs[i->paramArg()]);
}
}
// narrow the allow set to whatever is preferred and also free

@@ -1297,12 +1291,6 @@ namespace nanojit
void Assembler::nFragExit(LIns*) {
TODO(nFragExit);
}

NIns* Assembler::asm_adjustBranch(NIns*, NIns*) {
TODO(asm_adjustBranch);
return 0;
}

} // namespace nanojit

#endif // FEATURE_NANOJIT && NANOJIT_PPC

@@ -54,8 +54,6 @@

namespace nanojit
{
const int NJ_LOG2_PAGE_SIZE = 12; // 4K

#define NJ_MAX_STACK_ENTRY 256
#define NJ_ALIGN_STACK 16

@@ -204,8 +202,8 @@ namespace nanojit
PPC_mulli = 0x1C000000, // multiply low immediate
PPC_mullw = 0x7C0001D6, // multiply low word
PPC_neg = 0x7C0000D0, // negate
PPC_nor = 0x7C0000F8, // nor
PPC_or = 0x7C000378, // or
PPC_nor = 0x7C0000F8, // nor
PPC_or = 0x7C000378, // or
PPC_ori = 0x60000000, // or immediate
PPC_oris = 0x64000000, // or immediate shifted
PPC_rlwinm = 0x54000000, // rotate left word then and with mask

@@ -226,7 +224,7 @@ namespace nanojit
PPC_stdx = 0x7C00012A, // store doubleword indexed
PPC_stfd = 0xD8000000, // store floating-point double
PPC_stfdx = 0x7C0005AE, // store floating-point double indexed
PPC_stw = 0x90000000, // store word
PPC_stw = 0x90000000, // store word
PPC_stwu = 0x94000000, // store word with update
PPC_stwux = 0x7C00016E, // store word with update indexed
PPC_stwx = 0x7C00012E, // store word indexed

@@ -277,11 +275,11 @@ namespace nanojit
void underrunProtect(int bytes); \
void nativePageReset(); \
void nativePageSetup(); \
void br(NIns *addr, int link); \
void br_far(NIns *addr, int link); \
void br(NIns *addr, int link); \
void br_far(NIns *addr, int link); \
void asm_regarg(ArgSize, LIns*, Register); \
void asm_li(Register r, int32_t imm); \
void asm_li32(Register r, int32_t imm); \
void asm_li(Register r, int32_t imm); \
void asm_li32(Register r, int32_t imm); \
void asm_li64(Register r, uint64_t imm); \
void asm_cmp(LOpcode op, LIns *a, LIns *b, ConditionRegister); \
NIns* asm_branch_far(bool onfalse, LIns *cond, NIns * const targ); \

@@ -470,15 +468,15 @@ namespace nanojit
#ifdef NANOJIT_64BIT
#define LP(r, d, b) LD(r, d, b)
#define STP(r, d, b) STD(r, d, b)
#define STPU(r, d, b) STDU(r, d, b)
#define STPX(s, a, b) STDX(s, a, b)
#define STPUX(s, a, b) STDUX(s, a, b)
#define STPU(r, d, b) STDU(r, d, b)
#define STPX(s, a, b) STDX(s, a, b)
#define STPUX(s, a, b) STDUX(s, a, b)
#else
#define LP(r, d, b) LWZ(r, d, b)
#define STP(r, d, b) STW(r, d, b)
#define STPU(r, d, b) STWU(r, d, b)
#define STPX(s, a, b) STWX(s, a, b)
#define STPUX(s, a, b) STWUX(s, a, b)
#define STPU(r, d, b) STWU(r, d, b)
#define STPX(s, a, b) STWX(s, a, b)
#define STPUX(s, a, b) STWUX(s, a, b)
#endif

#define LFD(r, d, b) FMEMd(lfd, r, d, b)

@@ -1,6 +1,5 @@
/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: t; tab-width: 4 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*

@@ -66,10 +65,9 @@ namespace nanojit

static const int kLinkageAreaSize = 68;
static const int kcalleeAreaSize = 80; // The max size.
static const int NJ_PAGE_SIZE_SPARC = 8192; // Use sparc page size here.

#define TODO(x) do{ verbose_only(outputf(#x);) NanoAssertMsgf(false, "%s", #x); } while(0)
#define BIT_ROUND_UP(v,q) ( (((uintptr_t)v)+(q)-1) & ~((q)-1) )
#define TODO(x) do{ verbose_only(outputf(#x);) NanoAssertMsgf(false, "%s", #x); } while(0)

void Assembler::nInit(AvmCore* core)
{

@@ -97,8 +95,11 @@ namespace nanojit
SETHI(frameSize, G1);
}

verbose_only( verbose_outputf(" %p:",_nIns); )
verbose_only( asm_output(" patch entry:"); )
verbose_only(
if (_logc->lcbits & LC_Assembly) {
outputf(" %p:",_nIns);
output(" patch entry:");
})
NIns *patchEntry = _nIns;

// The frame size in SAVE is faked. We will still re-calculate SP later.

@@ -160,11 +161,19 @@ namespace nanojit
ArgSize sizes[MAXARGS];
uint32_t argc = call->get_sizes(sizes);

NanoAssert(ins->isop(LIR_call) || ins->isop(LIR_fcall));
NanoAssert(ins->isop(LIR_pcall) || ins->isop(LIR_fcall));
verbose_only(if (_logc->lcbits & LC_Assembly)
outputf(" %p:", _nIns);
)
CALL(call);
bool indirect = call->isIndirect();
if (!indirect) {
CALL(call);
}
else {
argc--;
Register r = findSpecificRegFor(ins->arg(argc), I0);
JMPL(G0, I0, 15);
}

uint32_t GPRIndex = O0;
uint32_t offset = kLinkageAreaSize; // start of parameters stack position.

@@ -218,7 +227,7 @@ namespace nanojit
a.clear();
a.free = GpRegs | FpRegs;
debug_only( a.managed = a.free; )
}
}

void Assembler::nPatchBranch(NIns* branch, NIns* location)
{

@@ -276,7 +285,7 @@ namespace nanojit
verbose_only(if (_logc->lcbits & LC_RegAlloc) {
outputf(" remat %s size %d", _thisfrag->lirbuf->names->formatRef(i), i->size());
})
}
}
else if (i->isconst()) {
if (!i->getArIndex()) {
i->markAsClear();

@@ -293,7 +302,7 @@ namespace nanojit
verbose_only(if (_logc->lcbits & LC_RegAlloc) {
outputf(" restore %s", _thisfrag->lirbuf->names->formatRef(i));
})
}
}
}

void Assembler::asm_store32(LIns *value, int dr, LIns *base)

@@ -521,7 +530,7 @@ namespace nanojit
LOpcode condop = cond->opcode();

// LIR_ov recycles the flags set by arithmetic ops
if ((condop == LIR_ov))
if (condop == LIR_ov)
return;

LInsp lhs = cond->oprnd1();

@@ -983,18 +992,12 @@ namespace nanojit
FCMPD(rLhs, rRhs);
}

/** no longer called by patch/unpatch
NIns* Assembler::asm_adjustBranch(NIns* at, NIns* target)
verbose_only(
void Assembler::asm_inc_m32(uint32_t* pCtr)
{
NIns* was;
was = (NIns*)(((*(uint32_t*)&at[0] & 0x3FFFFF) << 10) | (*(uint32_t*)&at[1] & 0x3FF ));
*(uint32_t*)&at[0] &= 0xFFC00000;
*(uint32_t*)&at[0] |= ((intptr_t)target >> 10) & 0x3FFFFF;
*(uint32_t*)&at[1] &= 0xFFFFFC00;
*(uint32_t*)&at[1] |= (intptr_t)target & 0x3FF;
return was;
// TODO(asm_inc_m32);
}
*/
)

void Assembler::nativePageReset()
{

@@ -1007,8 +1010,10 @@ namespace nanojit

void Assembler::nativePageSetup()
{
if (!_nIns) codeAlloc(codeStart, codeEnd, _nIns);
if (!_nExitIns) codeAlloc(exitStart, exitEnd, _nExitIns);
if (!_nIns)
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
if (!_nExitIns)
codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
}

void

@@ -1019,9 +1024,9 @@ namespace nanojit
// We are done with the current page. Tell Valgrind that new code
// has been generated.
if (_inExit)
codeAlloc(exitStart, exitEnd, _nIns);
codeAlloc(exitStart, exitEnd, _nIns verbose_only(, exitBytes));
else
codeAlloc(codeStart, codeEnd, _nIns);
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
JMP_long_nocheck((intptr_t)eip);
}
}

@@ -67,11 +67,6 @@

namespace nanojit
{
#ifdef MMGC_SPARC
const int NJ_LOG2_PAGE_SIZE = 12; // 4K
#else
const int NJ_LOG2_PAGE_SIZE = 13; // 8K
#endif
const int NJ_MAX_REGISTERS = 30; // L0 - L7, I0 - I5, F2 - F14

const int LARGEST_UNDERRUN_PROT = 32; // largest value passed to underrunProtect

@@ -357,7 +357,7 @@ namespace nanojit
case LIR_rsh: xop = X64_sari; break;
case LIR_lsh: xop = X64_shli; break;
}
int shift = ins->oprnd2()->imm32() & 255;
int shift = ins->oprnd2()->imm32() & 63;
emit8(rexrb(xop | uint64_t(rr&7)<<48, (Register)0, rr), shift);
if (rr != ra)
MR(rr, ra);
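The mask change above matters because x86-64 hardware reduces 64-bit shift counts mod 64; masking the immediate with 255 could encode counts of 64 or more that the CPU would silently wrap anyway. A quick standalone check of the mod-64 rule the new code encodes (illustrative only; C++ leaves an unmasked over-wide shift undefined, so we show the masked form):

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t v = 1;
        int rawCount = 65;          // out-of-range shift request
        int shift = rawCount & 63;  // what the new emitter encodes (and what
                                    // the hardware does to 64-bit shifts)
        std::printf("%llu\n", (unsigned long long)(v << shift)); // 2, not 0
    }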
@@ -423,14 +423,53 @@ namespace nanojit
MR(rr, ra);
}

void Assembler::asm_div_mod(LIns *ins) {
LIns *div;
if (ins->opcode() == LIR_mod) {
// LIR_mod expects the LIR_div to be near
div = ins->oprnd1();
prepResultReg(ins, rmask(RDX));
} else {
div = ins;
evictIfActive(RDX);
}

NanoAssert(div->isop(LIR_div));

LIns *lhs = div->oprnd1();
LIns *rhs = div->oprnd2();

prepResultReg(div, rmask(RAX));

Register rhsReg = findRegFor(rhs, (GpRegs ^ (rmask(RAX)|rmask(RDX))));
Register lhsReg = lhs->isUnusedOrHasUnknownReg()
? findSpecificRegFor(lhs, RAX)
: lhs->getReg();
emitr(X64_idiv, rhsReg);
emit8(rexrb(X64_sari | uint64_t(RDX&7)<<48, (Register)0, RDX), 31);
MR(RDX, RAX);
if (RAX != lhsReg)
MR(RAX, lhsReg);
}

// binary op with integer registers
void Assembler::asm_arith(LIns *ins) {
Register rr, ra, rb;
LOpcode op = ins->opcode();
if ((op & ~LIR64) >= LIR_lsh && (op & ~LIR64) <= LIR_ush) {

switch (ins->opcode() & ~LIR64) {
case LIR_lsh:
case LIR_rsh:
case LIR_ush:
asm_shift(ins);
return;
case LIR_mod:
case LIR_div:
asm_div_mod(ins);
return;
default:
break;
}

LIns *b = ins->oprnd2();
if (isImm32(b)) {
asm_arith_imm(ins);

@@ -58,7 +58,6 @@

namespace nanojit
{
const int NJ_LOG2_PAGE_SIZE = 12; // 4K
#define NJ_MAX_STACK_ENTRY 256
#define NJ_ALIGN_STACK 16

@@ -194,6 +193,7 @@ namespace nanojit
X64_divsd = 0xC05E0F40F2000005LL, // divide scalar double r /= b
X64_mulsd = 0xC0590F40F2000005LL, // multiply scalar double r *= b
X64_addsd = 0xC0580F40F2000005LL, // add scalar double r += b
X64_idiv = 0xF8F7400000000003LL, // 32bit signed div (rax = rdx:rax/r, rdx=rdx:rax%r)
X64_imul = 0xC0AF0F4000000004LL, // 32bit signed mul r *= b
X64_imuli = 0xC069400000000003LL, // 32bit signed mul r = b * imm32
X64_imul8 = 0x00C06B4000000004LL, // 32bit signed mul r = b * imm8

@@ -371,6 +371,7 @@ namespace nanojit
void asm_cmp_imm(LIns*);\
void fcmp(LIns*, LIns*);\
NIns* asm_fbranch(bool, LIns*, NIns*);\
void asm_div_mod(LIns *i);\
int max_stk_used;

#define swapptrs() { NIns* _tins = _nIns; _nIns=_nExitIns; _nExitIns=_tins; }

@@ -38,6 +38,7 @@
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "nanojit.h"

#ifdef _MAC
// for MakeDataExecutable

@@ -49,11 +50,10 @@
#include <errno.h>
#include <stdlib.h>
#endif
#include "nanojit.h"

namespace nanojit
{
#ifdef FEATURE_NANOJIT
#if defined FEATURE_NANOJIT && defined NANOJIT_IA32

#ifdef NJ_VERBOSE
const char *regNames[] = {

@@ -80,7 +80,7 @@ namespace nanojit
void Assembler::nInit(AvmCore* core)
{
(void) core;
OSDep::getDate();
VMPI_getDate();
}

void Assembler::nBeginAssembly() {

@@ -114,7 +114,7 @@ namespace nanojit

return fragEntry;
}

void Assembler::nFragExit(LInsp guard)
{
SideExit *exit = guard->record()->exit;

@@ -192,8 +192,9 @@ namespace nanojit
const int32_t pushsize = 4*istack + 8*fargs; // actual stack space used

#if _MSC_VER
// msc is slack, and MIR doesn't do anything extra, so let's use this
// call-site alignment to at least have code size parity with MIR.
// msc only provides 4-byte alignment, anything more than 4 on windows
// x86-32 requires dynamic ESP alignment in prolog/epilog and static
// esp-alignment here.
uint32_t align = 4;//NJ_ALIGN_STACK;
#else
uint32_t align = NJ_ALIGN_STACK;

@@ -386,7 +387,7 @@ namespace nanojit
}
}

void Assembler::asm_restore(LInsp i, Reservation *unused, Register r)
void Assembler::asm_restore(LInsp i, Reservation* /*unused*/, Register r)
{
uint32_t arg;
uint32_t abi_regcount;

@@ -405,10 +406,15 @@ namespace nanojit
else if (i->isop(LIR_param) && i->paramKind() == 0 &&
(arg = i->paramArg()) >= (abi_regcount = max_abi_regs[_thisfrag->lirbuf->abi])) {
// incoming arg is on stack, can restore it from there instead of spilling
NanoAssert(0);
if (!i->getArIndex()) {
i->markAsClear();
}
// compute position of argument relative to ebp. higher argument
// numbers are at higher positive offsets. the first abi_regcount
// arguments are in registers, rest on stack. +8 accommodates the
// return address and saved ebp value. assuming abi_regcount == 0:
// low-addr ebp
// [frame...][saved-ebp][return-addr][arg0][arg1]...
int d = (arg - abi_regcount) * sizeof(intptr_t) + 8;
LD(r, d, FP);
}

@@ -586,7 +592,7 @@ namespace nanojit
if (rmask(rv) & XmmRegs) {
SSE_STQ(dr, rb, rv);
} else {
FSTQ(pop, dr, rb);
FSTQ(pop?1:0, dr, rb);
}
}

@@ -686,6 +692,36 @@ namespace nanojit
JMP(exit);
}

// This generates a 'test' or 'cmp' instruction for a condition, which
// causes the condition codes to be set appropriately. It's used with
// conditional branches, conditional moves, and when generating
// conditional values. For example:
//
// LIR: eq1 = eq a, 0
// LIR: xf1: xf eq1 -> ...
// asm: test edx, edx # generated by this function
// asm: je ...
//
// If this is the only use of eq1, then on entry 'cond' is *not* marked as
// used, and we do not allocate a register for it. That's because its
// result ends up in the condition codes rather than a normal register.
// This doesn't get recorded in the regstate and so the asm code that
// consumes the result (eg. a conditional branch like 'je') must follow
// shortly after.
//
// If eq1 is instead used again later, we will also generate code
// (eg. in asm_cond()) to compute it into a normal register, something
// like this:
//
// LIR: eq1 = eq a, 0
// LIR: test edx, edx
// asm: sete ebx
// asm: movzx ebx, ebx
//
// In this case we end up computing the condition twice, but that's ok, as
// it's just as short as testing eq1's value in the code generated for the
// guard.
//
void Assembler::asm_cmp(LIns *cond)
{
LOpcode condop = cond->opcode();

@@ -771,10 +807,7 @@ namespace nanojit
LInsp lhs = ins->oprnd1();

if (op == LIR_mod) {
/* LIR_mod expects the LIR_div to be near (no interference from the register allocator) */
findSpecificRegFor(lhs, EDX);
prepResultReg(ins, rmask(EDX));
evictIfActive(EAX);
asm_div_mod(ins);
return;
}

@@ -786,6 +819,8 @@ namespace nanojit

switch (op) {
case LIR_div:
// Nb: if the div feeds into a mod it will be handled by
// asm_div_mod() rather than here.
forceReg = true;
rb = findRegFor(rhs, (GpRegs ^ (rmask(EAX)|rmask(EDX))));
allow = rmask(EAX);

@@ -804,7 +839,7 @@ namespace nanojit
}
break;
case LIR_add:
case LIR_iaddp:
case LIR_addp:
if (lhs->isop(LIR_alloc) && rhs->isconst()) {
// add alloc+const, use lea
Register rr = prepResultReg(ins, allow);

@@ -866,9 +901,8 @@ namespace nanojit
SHR(rr, rb);
break;
case LIR_div:
case LIR_mod:
DIV(rb);
CDQ();
DIV(rb);
CDQ();
break;
default:
NanoAssertMsg(0, "Unsupported");

@@ -917,6 +951,35 @@ namespace nanojit
MR(rr,ra);
}

// This is called when we have a mod(div(divLhs, divRhs)) sequence.
void Assembler::asm_div_mod(LInsp mod)
{
LInsp div = mod->oprnd1();

// LIR_mod expects the LIR_div to be near (no interference from the register allocator)

NanoAssert(mod->isop(LIR_mod));
NanoAssert(div->isop(LIR_div));

LInsp divLhs = div->oprnd1();
LInsp divRhs = div->oprnd2();

prepResultReg(mod, rmask(EDX));
prepResultReg(div, rmask(EAX));

Register rDivRhs = findRegFor(divRhs, (GpRegs ^ (rmask(EAX)|rmask(EDX))));

Register rDivLhs = ( divLhs->isUnusedOrHasUnknownReg()
? findSpecificRegFor(divLhs, EAX)
: divLhs->getReg() );

DIV(rDivRhs);
CDQ(); // sign-extend EAX into EDX:EAX

if ( EAX != rDivLhs )
MR(EAX, rDivLhs);
}
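asm_div_mod above pins the operands the way x86 idiv requires: the dividend lives in EDX:EAX (CDQ sign-extends EAX into EDX), the quotient comes back in EAX, and the remainder in EDX. A minimal C++ model of that register contract (illustrative, not the emitter):

    #include <cstdint>
    #include <cstdio>

    // Models: CDQ; IDIV r  --  quotient -> EAX, remainder -> EDX.
    static void idiv_model(int32_t eax, int32_t r,
                           int32_t* quotient /*EAX*/, int32_t* remainder /*EDX*/) {
        int64_t edx_eax = (int64_t)eax;   // CDQ: sign-extend EAX into EDX:EAX
        *quotient  = (int32_t)(edx_eax / r);
        *remainder = (int32_t)(edx_eax % r);
    }

    int main() {
        int32_t q, rem;
        idiv_model(-7, 2, &q, &rem);
        std::printf("q=%d rem=%d\n", q, rem); // q=-3 rem=-1 (truncating, like idiv)
    }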
void Assembler::asm_neg_not(LInsp ins)
{
LOpcode op = ins->opcode();

@@ -1105,6 +1168,13 @@ namespace nanojit
// 1.0 is extremely frequent and worth special-casing!
static const double k_ONE = 1.0;
LDSDm(rr, &k_ONE);
} else if (d && d == (int)d) {
// can fit in 32bits? then use cvt which is faster
Register gr = registerAlloc(GpRegs);
SSE_CVTSI2SD(rr, gr);
SSE_XORPDr(rr,rr); // zero rr to ensure no dependency stalls
LDi(gr, (int)d);
_allocator.addFree(gr);
} else {
findMemFor(ins);
const int d = disp(ins);

@@ -1337,10 +1407,9 @@ namespace nanojit
if (lhs->isUnusedOrHasUnknownReg()) {
ra = findSpecificRegFor(lhs, rr);
} else if ((rmask(lhs->getReg()) & XmmRegs) == 0) {
/* We need this case on AMD64, because it's possible that
* an earlier instruction has done a quadword load and reserved a
* GPR. If so, ask for a new register.
*/
// We need this case on AMD64, because it's possible that
// an earlier instruction has done a quadword load and reserved a
// GPR. If so, ask for a new register.
ra = findRegFor(lhs, XmmRegs);
} else {
// lhs already has a register assigned but maybe not from the allow set

@@ -1402,6 +1471,7 @@ namespace nanojit
// todo support int value in memory
Register gr = findRegFor(ins->oprnd1(), GpRegs);
SSE_CVTSI2SD(rr, gr);
SSE_XORPDr(rr,rr); // zero rr to ensure no dependency stalls
}
else
{

@@ -1410,7 +1480,7 @@ namespace nanojit
}
}

Register Assembler::asm_prep_fcall(Reservation *unused, LInsp ins)
Register Assembler::asm_prep_fcall(Reservation* /*unused*/, LInsp ins)
{
Register rr;
if (ins->isUsed() && (rr = ins->getReg(), isKnownReg(rr)) && (rmask(rr) & XmmRegs)) {

@@ -1451,6 +1521,7 @@ namespace nanojit
SSE_ADDSDm(rr, &k_NEGONE);

SSE_CVTSI2SD(rr, gr);
SSE_XORPDr(rr,rr); // zero rr to ensure no dependency stalls

LIns* op1 = ins->oprnd1();
Register xr;

@@ -1653,8 +1724,7 @@ namespace nanojit
)

void Assembler::nativePageReset()
{
}
{}

void Assembler::nativePageSetup()
{

@@ -42,6 +42,7 @@
#define __nanojit_Nativei386__

#ifdef PERFM
#define DOPROF
#include "../vprof/vprof.h"
#define count_instr() _nvprof("x86",1)
#define count_ret() _nvprof("x86-ret",1); count_instr();

@@ -90,7 +91,6 @@

namespace nanojit
{
const int NJ_LOG2_PAGE_SIZE = 12; // 4K
const int NJ_MAX_REGISTERS = 24; // gpregs, x87 regs, xmm regs

#define NJ_MAX_STACK_ENTRY 256

@@ -174,8 +174,11 @@ namespace nanojit
void nativePageSetup();\
void underrunProtect(int);\
void asm_farg(LInsp);\
void asm_cmp(LIns *cond);\
void asm_fcmp(LIns *cond);
void asm_arg(ArgSize, LIns*, Register);\
void asm_pusharg(LInsp);\
void asm_fcmp(LIns *cond); \
void asm_cmp(LIns *cond); \
void asm_div_mod(LIns *cond);

#define swapptrs() { NIns* _tins = _nIns; _nIns=_nExitIns; _nExitIns=_tins; }

@@ -184,7 +187,7 @@ namespace nanojit
*((int32_t*)_nIns) = (int32_t)(i)

#define MODRMs(r,d,b,l,i) \
NanoAssert(unsigned(r)<8 && unsigned(b)<8 && unsigned(i)<8); \
NanoAssert(unsigned(i)<8 && unsigned(b)<8 && unsigned(r)<8); \
if ((d) == 0 && (b) != EBP) { \
_nIns -= 2; \
_nIns[0] = (uint8_t) ( 0<<6 | (r)<<3 | 4); \

@@ -313,15 +316,15 @@ namespace nanojit
#define ALUmi(c,d,b,i) \
underrunProtect(10); \
NanoAssert(((unsigned)b)<8); \
if (isS8(i)) { \
if (isS8(i)) { \
*(--_nIns) = uint8_t(i); \
MODRMm((c>>3),(d),(b)); \
*(--_nIns) = uint8_t(0x83); \
} else { \
IMM32(i); \
} else { \
IMM32(i); \
MODRMm((c>>3),(d),(b)); \
*(--_nIns) = uint8_t(0x81); \
}
}

#define ALU2(c,d,s) \
underrunProtect(3); \

@@ -373,7 +376,7 @@ namespace nanojit
#define LEA(r,d,b) do { count_alu(); ALUm(0x8d, r,d,b); asm_output("lea %s,%d(%s)",gpn(r),d,gpn(b)); } while(0)
// lea %r, d(%i*4)
// This addressing mode is not supported by the MODRMSIB macro.
#define LEAmi4(r,d,i) do { count_alu(); IMM32(d); *(--_nIns) = (2<<6)|(i<<3)|5; *(--_nIns) = (0<<6)|(r<<3)|4; *(--_nIns) = 0x8d; asm_output("lea %s, %p(%s*4)", gpn(r), (void*)d, gpn(i)); } while(0)
#define LEAmi4(r,d,i) do { count_alu(); IMM32(d); *(--_nIns) = (2<<6)|((uint8_t)i<<3)|5; *(--_nIns) = (0<<6)|((uint8_t)r<<3)|4; *(--_nIns) = 0x8d; asm_output("lea %s, %p(%s*4)", gpn(r), (void*)d, gpn(i)); } while(0)

#define CDQ() do { SARi(EDX, 31); MR(EDX, EAX); } while(0)

@@ -392,7 +395,7 @@ namespace nanojit
#define SETBE(r) do { count_alu(); ALU2(0x0f96,(r),(r)); asm_output("setbe %s",gpn(r)); } while(0)
#define SETA(r) do { count_alu(); ALU2(0x0f97,(r),(r)); asm_output("seta %s",gpn(r)); } while(0)
#define SETAE(r) do { count_alu(); ALU2(0x0f93,(r),(r)); asm_output("setae %s",gpn(r)); } while(0)
#define SETO(r) do { count_alu(); ALU2(0x0f92,(r),(r)); asm_output("seto %s",gpn(r)); } while(0)
#define SETO(r) do { count_alu(); ALU2(0x0f92,(r),(r)); asm_output("seto %s",gpn(r)); } while(0)

#define MREQ(dr,sr) do { count_alu(); ALU2(0x0f44,dr,sr); asm_output("cmove %s,%s", gpn(dr),gpn(sr)); } while(0)
#define MRNE(dr,sr) do { count_alu(); ALU2(0x0f45,dr,sr); asm_output("cmovne %s,%s", gpn(dr),gpn(sr)); } while(0)

@@ -447,7 +450,7 @@ namespace nanojit
// load 8-bit, zero extend
// note, only 5-bit offsets (!) are supported for this, but that's all we need at the moment
// (movzx actually allows larger offsets mode but 5-bit gives us advantage in Thumb mode)
#define LD8Z(r,d,b) do { NanoAssert((d)>=0&&(d)<=31); ALU2m(0x0fb6,r,d,b); asm_output("movzx %s,%d(%s)", gpn(r),d,gpn(b)); } while(0)
#define LD8Z(r,d,b) do { count_ld(); NanoAssert((d)>=0&&(d)<=31); ALU2m(0x0fb6,r,d,b); asm_output("movzx %s,%d(%s)", gpn(r),d,gpn(b)); } while(0)

#define LD8Zdm(r,addr) do { \
count_ld(); \

@@ -56,9 +56,14 @@ namespace nanojit
continue;
NanoAssertMsg(!isFree(r), "Coding error; register is both free and active! " );

s += strlen(s);
if (ins->isop(LIR_param) && ins->paramKind()==1 && r == Assembler::savedRegs[ins->paramArg()]) {
// don't print callee-saved regs that aren't used
continue;
}

s += VMPI_strlen(s);
const char* rname = ins->isQuad() ? fpn(r) : gpn(r);
sprintf(s, " %s(%s)", rname, names->formatRef(ins));
VMPI_sprintf(s, " %s(%s)", rname, names->formatRef(ins));
}
}
#endif /* NJ_VERBOSE */

@@ -46,7 +46,7 @@ namespace nanojit
{
inline RegisterMask rmask(Register r)
{
return 1 << r;
return RegisterMask(1) << r;
}

class RegAlloc
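The rmask change avoids a classic truncation bug: the literal 1 is a (32-bit) int, so the shift happens at int width and overflows before the result is widened, which breaks once register numbers reach 32. Promoting the literal first keeps the whole shift in the RegisterMask type. A minimal demonstration (assuming a 64-bit mask type; the &31 on the old form models the wraparound without invoking undefined behaviour):

    #include <cstdint>
    #include <cstdio>

    typedef uint64_t RegisterMask;

    int main() {
        int r = 33;                                   // a register index >= 32
        RegisterMask narrow = 1 << (r & 31);          // old form: int-width shift, wrong bit
        RegisterMask wide   = RegisterMask(1) << r;   // new form: 64-bit shift, correct bit
        std::printf("%llx vs %llx\n",
                    (unsigned long long)narrow, (unsigned long long)wide);
    }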
@@ -121,7 +121,7 @@ namespace nanojit

debug_only( uint32_t countActive(); )
debug_only( bool isConsistent(Register r, LIns* v); )
debug_only( RegisterMask managed; ) // bitfield of 0..NJ_MAX_REGISTERS denoting which are under our management
debug_only( RegisterMask managed; ) // bitfield denoting which are under our management

LIns* active[LastReg + 1]; // active[r] = OP that defines r
int32_t usepri[LastReg + 1]; // used priority. lower = more likely to spill.

@@ -48,10 +48,6 @@
using namespace avmplus;

Config AvmCore::config;
static GC _gc;
GC* AvmCore::gc = &_gc;
GCHeap GC::heap;
String* AvmCore::k_str[] = { (String*)"" };

void
avmplus::AvmLog(char const *msg, ...) {

@ -39,6 +39,7 @@
|
|||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include <ctype.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#if defined(AVMPLUS_UNIX) || defined(AVMPLUS_OS2)
|
||||
|
@@ -57,10 +58,17 @@
#endif
#endif

#define FASTCALL JS_FASTCALL
#if defined(_MSC_VER) && defined(_M_IX86)
#define FASTCALL __fastcall
#elif defined(__GNUC__) && defined(__i386__) && \
((__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
#define FASTCALL __attribute__((fastcall))
#else
#define FASTCALL
#define NO_FASTCALL
#endif

#if defined(JS_NO_FASTCALL)
#define NJ_NO_FASTCALL
#if defined(NO_FASTCALL)
#if defined(AVMPLUS_IA32)
#define SIMULATE_FASTCALL(lr, state_ptr, frag_ptr, func_addr) \
asm volatile( \

@@ -70,7 +78,7 @@
: "memory", "cc" \
);
#endif /* defined(AVMPLUS_IA32) */
#endif /* defined(JS_NO_FASTCALL) */
#endif /* defined(NO_FASTCALL) */

#ifdef WIN32
#include <windows.h>
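Since the FASTCALL block above only selects the register-based x86 convention when the toolchain supports it, a small self-contained sketch of the same detection pattern in use may help; the add2 callee here is hypothetical, not from the tree:

// With MSVC or GCC on i386, fastcall passes the first integer args in
// ECX/EDX instead of on the stack; elsewhere the annotation compiles away.
#if defined(_MSC_VER) && defined(_M_IX86)
#define FASTCALL __fastcall
#elif defined(__GNUC__) && defined(__i386__)
#define FASTCALL __attribute__((fastcall))
#else
#define FASTCALL
#endif

int FASTCALL add2(int a, int b) { return a + b; }  // hypothetical callee

int main() { return add2(2, 3) == 5 ? 0 : 1; }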
@@ -159,143 +167,24 @@ static __inline__ unsigned long long rdtsc(void)

struct JSContext;

namespace MMgc {

class GC;

class GCObject
{
public:
inline void*
operator new(size_t size, GC* gc)
{
return calloc(1, size);
}

static void operator delete (void *gcObject)
{
free(gcObject);
}
};

#define MMGC_SUBCLASS_DECL : public avmplus::GCObject

class GCFinalizedObject : public GCObject
{
public:
static void operator delete (void *gcObject)
{
free(gcObject);
}
};

class GCHeap
{
public:
int32_t kNativePageSize;

GCHeap()
{
#if defined _SC_PAGE_SIZE
kNativePageSize = sysconf(_SC_PAGE_SIZE);
#else
kNativePageSize = 4096; // @todo: what is this?
#endif
}

inline void*
Alloc(uint32_t pages)
{
#ifdef XP_WIN
return VirtualAlloc(NULL,
pages * kNativePageSize,
MEM_COMMIT | MEM_RESERVE,
PAGE_EXECUTE_READWRITE);
#elif defined AVMPLUS_UNIX
/**
 * Don't use normal heap with mprotect+PROT_EXEC for executable code.
 * SELinux and friends don't allow this.
 */
return mmap(NULL,
pages * kNativePageSize,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_PRIVATE | MAP_ANON,
-1,
0);
#else
return valloc(pages * kNativePageSize);
#endif
}

inline void
Free(void* p, uint32_t pages)
{
#ifdef XP_WIN
VirtualFree(p, 0, MEM_RELEASE);
#elif defined AVMPLUS_UNIX
#if defined SOLARIS
munmap((char*)p, pages * kNativePageSize);
#else
munmap(p, pages * kNativePageSize);
#endif
#else
free(p);
#endif
}

};

class GC
{
static GCHeap heap;

public:
/**
 * flags to be passed as second argument to alloc
 */
enum AllocFlags
{
kZero=1,
kContainsPointers=2,
kFinalize=4,
kRCObject=8
};

static inline void*
Alloc(uint32_t bytes, int flags=kZero)
{
if (flags & kZero)
return calloc(1, bytes);
else
return malloc(bytes);
}

static inline void
Free(void* p)
{
free(p);
}

static inline GCHeap*
GetGCHeap()
{
return &heap;
}
};
}

#define DWB(x) x
#define DRCWB(x) x
#define WB(gc, container, addr, value) do { *(addr) = (value); } while(0)
#define WBRC(gc, container, addr, value) do { *(addr) = (value); } while(0)

#define MMGC_MEM_TYPE(x)
#ifdef PERFM
# define PERFM_NVPROF(n,v) _nvprof(n,v)
# define PERFM_NTPROF(n) _ntprof(n)
# define PERFM_TPROF_END() _tprof_end()
#else
# define PERFM_NVPROF(n,v)
# define PERFM_NTPROF(n)
# define PERFM_TPROF_END()
#endif

#define VMPI_strlen strlen
#define VMPI_strcat strcat
#define VMPI_strncat strncat
#define VMPI_strcpy strcpy
#define VMPI_sprintf sprintf
#define VMPI_memset memset
#define VMPI_isdigit isdigit
#define VMPI_getDate()

extern void VMPI_setPageProtection(void *address,
size_t size,
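The GCHeap::Alloc comment above explains why the stub maps fresh pages with PROT_EXEC instead of calling mprotect on heap memory: SELinux-style policies reject making existing writable heap executable, but allow an anonymous mapping created executable up front. A minimal Unix-only sketch of that allocation, with error handling reduced to an assert:

#include <sys/mman.h>
#include <cassert>
#include <cstring>

int main()
{
    const size_t size = 4096;  // assume one 4 KiB page for the sketch
    // Fresh anonymous mapping, executable from the start.
    void* p = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANON, -1, 0);
    assert(p != MAP_FAILED);
    memset(p, 0xC3, size);     // e.g. fill with x86 `ret` opcodes
    munmap(p, size);
    return 0;
}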
@@ -304,42 +193,10 @@ extern void VMPI_setPageProtection(void *address,

namespace avmplus {

using namespace MMgc;

typedef int FunctionID;

extern void AvmLog(char const *msg, ...);

class String
{
};

typedef class String AvmString;

class StringNullTerminatedUTF8
{
const char* cstr;

public:
StringNullTerminatedUTF8(GC* gc, String* s)
{
cstr = strdup((const char*)s);
}

~StringNullTerminatedUTF8()
{
free((void*)cstr);
}

inline
const char* c_str()
{
return cstr;
}
};

typedef String* Stringp;

class Config
{
public:
@@ -450,8 +307,6 @@ namespace avmplus {
AvmConsole console;

static Config config;
static GC* gc;
static String* k_str[];

#ifdef AVMPLUS_IA32
static inline bool
@@ -483,457 +338,8 @@ namespace avmplus {
return config.verbose;
}

static inline GC*
GetGC()
{
return gc;
}

static inline String* newString(const char* cstr) {
return (String*)strdup(cstr);
}

static inline void freeString(String* str) {
return free((char*)str);
}
};

class OSDep
{
public:
static inline void
getDate()
{
}
};

/**
 * The List<T> template implements a simple List, which can
 * be templated to support different types.
 *
 * Elements can be added to the end, modified in the middle,
 * but no holes are allowed. That is, for set(n, v) to work,
 * size() > n must hold.
 *
 * Note that [] operators are provided and you can violate the
 * set properties using these operators; if you want a real
 * list don't use the [] operators, and if you want a general-purpose
 * array do use them.
 */

enum ListElementType {
LIST_NonGCObjects = 0,
LIST_GCObjects = 1,
LIST_RCObjects = 2
};

template <typename T, ListElementType kElementType>
class List
{
public:
enum { kInitialCapacity = 128 };

List(GC *_gc, uint32_t _capacity=kInitialCapacity) : data(NULL), len(0), capacity(0)
{
ensureCapacity(_capacity);
}

~List()
{
//clear();
destroy();
// zero out in case we are part of an RCObject
len = 0;
}

inline void destroy()
{
if (data)
free(data);
}

const T *getData() const { return data; }

// 'this' steals the guts of 'that' and 'that' gets reset.
void become(List& that)
{
this->destroy();

this->data = that.data;
this->len = that.len;
this->capacity = that.capacity;

that.data = 0;
that.len = 0;
that.capacity = 0;
}

uint32_t add(T value)
{
if (len >= capacity) {
grow();
}
wb(len++, value);
return len-1;
}

inline bool isEmpty() const
{
return len == 0;
}

inline uint32_t size() const
{
return len;
}

inline T get(uint32_t index) const
{
AvmAssert(index < len);
return *(T*)(data + index);
}

void set(uint32_t index, T value)
{
AvmAssert(index < capacity);
if (index >= len)
{
len = index+1;
}
AvmAssert(len <= capacity);
wb(index, value);
}

void add(const List<T, kElementType>& l)
{
ensureCapacity(len+l.size());
// FIXME: make RCObject version
AvmAssert(kElementType != LIST_RCObjects);
arraycopy(l.getData(), 0, data, len, l.size());
len += l.size();
}

inline void clear()
{
zero_range(0, len);
len = 0;
}

int indexOf(T value) const
{
for(uint32_t i=0; i<len; i++)
if (get(i) == value)
return i;
return -1;
}

int lastIndexOf(T value) const
{
for(int32_t i=len-1; i>=0; i--)
if (get(i) == value)
return i;
return -1;
}

inline T last() const
{
return get(len-1);
}

T removeLast()
{
if(isEmpty())
return undef_list_val();
T t = get(len-1);
set(len-1, undef_list_val());
len--;
return t;
}

inline T operator[](uint32_t index) const
{
AvmAssert(index < capacity);
return get(index);
}

void ensureCapacity(uint32_t cap)
{
if (cap > capacity) {
if (data == NULL) {
data = (T*)calloc(1, factor(cap));
} else {
data = (T*)realloc(data, factor(cap));
zero_range(capacity, cap - capacity);
}
capacity = cap;
}
}

void insert(uint32_t index, T value, uint32_t count = 1)
{
AvmAssert(index <= len);
AvmAssert(count > 0);
ensureCapacity(len+count);
memmove(data + index + count, data + index, factor(len - index));
wbzm(index, index+count, value);
len += count;
}

T removeAt(uint32_t index)
{
T old = get(index);
// dec the refcount on the one we're removing
wb(index, undef_list_val());
memmove(data + index, data + index + 1, factor(len - index - 1));
len--;
return old;
}

private:
void grow()
{
// growth is fast at first, then slows at larger list sizes.
uint32_t newMax = 0;
const uint32_t curMax = capacity;
if (curMax == 0)
newMax = kInitialCapacity;
else if(curMax > 15)
newMax = curMax * 3/2;
else
newMax = curMax * 2;

ensureCapacity(newMax);
}

void arraycopy(const T* src, int srcStart, T* dst, int dstStart, int nbr)
{
// we have 2 cases, either closing a gap or opening it.
if ((src == dst) && (srcStart > dstStart) )
{
for(int i=0; i<nbr; i++)
dst[i+dstStart] = src[i+srcStart];
}
else
{
for(int i=nbr-1; i>=0; i--)
dst[i+dstStart] = src[i+srcStart];
}
}

inline void do_wb_nongc(T* slot, T value)
{
*slot = value;
}

inline void do_wb_gc(GCObject** slot, const GCObject** value)
{
*slot = (GCObject*)*value;
}

void wb(uint32_t index, T value)
{
AvmAssert(index < capacity);
AvmAssert(data != NULL);
T* slot = &data[index];
do_wb_nongc(slot, value);
}

// multiple wb calls with the same value, assuming the existing value is all zero bits, i.e.
// for (uint32_t u = index; u < index_end; ++u)
//     wb(u, value);
void wbzm(uint32_t index, uint32_t index_end, T value)
{
AvmAssert(index < capacity);
AvmAssert(index_end <= capacity);
AvmAssert(index < index_end);
AvmAssert(data != NULL);
T* slot = data + index;
for ( ; index < index_end; ++index, ++slot)
do_wb_nongc(slot, value);
}

inline uint32_t factor(uint32_t index) const
{
return index * sizeof(T);
}

void zero_range(uint32_t _first, uint32_t _count)
{
memset(data + _first, 0, factor(_count));
}

// stuff that needs specialization based on the type
static inline T undef_list_val();

private:
List(const List& toCopy); // unimplemented
void operator=(const List& that); // unimplemented

// ------------------------ DATA SECTION BEGIN
private:
T* data;
uint32_t len;
uint32_t capacity;
// ------------------------ DATA SECTION END

};

// stuff that needs specialization based on the type
template<typename T, ListElementType kElementType>
/* static */ inline T List<T, kElementType>::undef_list_val() { return T(0); }

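The grow() comment above ("growth is fast at first, then slows at larger list sizes") is easiest to see with numbers; this is a standalone restatement of the same policy, not the class itself:

#include <cstdint>
#include <cstdio>

// List<T>::grow() doubles small capacities and grows by 1.5x past 15
// elements; with kInitialCapacity = 128 the 1.5x branch dominates.
static uint32_t nextCapacity(uint32_t curMax)
{
    const uint32_t kInitialCapacity = 128;
    if (curMax == 0)
        return kInitialCapacity;
    if (curMax > 15)
        return curMax * 3 / 2;
    return curMax * 2;
}

int main()
{
    uint32_t c = 0;
    for (int i = 0; i < 5; i++) {
        c = nextCapacity(c);
        printf("%u\n", c);  // 128, 192, 288, 432, 648
    }
    return 0;
}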
/**
 * The SortedMap<K,T> template implements an object that
 * maps keys to values. The keys are sorted
 * from smallest to largest in the map. Time of operations
 * is as follows:
 *   put() is O(1) if the key is higher than any existing
 *         key; O(logN) if the key already exists,
 *         and O(N) otherwise.
 *   get() is an O(logN) binary search.
 *
 * No duplicates are allowed.
 */
template <class K, class T, ListElementType valType>
class SortedMap : public GCObject
{
public:
enum { kInitialCapacity= 64 };

SortedMap(GC* gc, int _capacity=kInitialCapacity)
: keys(gc, _capacity), values(gc, _capacity)
{
}

bool isEmpty() const
{
return keys.size() == 0;
}

int size() const
{
return keys.size();
}

void clear()
{
keys.clear();
values.clear();
}

void destroy()
{
keys.destroy();
values.destroy();
}

T put(K k, T v)
{
if (keys.size() == 0 || k > keys.last())
{
keys.add(k);
values.add(v);
return (T)v;
}
else
{
int i = find(k);
if (i >= 0)
{
T old = values[i];
keys.set(i, k);
values.set(i, v);
return old;
}
else
{
i = -i - 1; // recover the insertion point
AvmAssert(keys.size() != (uint32_t)i);
keys.insert(i, k);
values.insert(i, v);
return v;
}
}
}

T get(K k) const
{
int i = find(k);
return i >= 0 ? values[i] : 0;
}

bool get(K k, T& v) const
{
int i = find(k);
if (i >= 0)
{
v = values[i];
return true;
}
return false;
}

bool containsKey(K k) const
{
int i = find(k);
return (i >= 0) ? true : false;
}

T remove(K k)
{
int i = find(k);
return removeAt(i);
}

T removeAt(int i)
{
T old = values.removeAt(i);
keys.removeAt(i);
return old;
}

T removeFirst() { return isEmpty() ? (T)0 : removeAt(0); }
T removeLast() { return isEmpty() ? (T)0 : removeAt(keys.size()-1); }
T first() const { return isEmpty() ? (T)0 : values[0]; }
T last() const { return isEmpty() ? (T)0 : values[keys.size()-1]; }

K firstKey() const { return isEmpty() ? 0 : keys[0]; }
K lastKey() const { return isEmpty() ? 0 : keys[keys.size()-1]; }

// iterator
T at(int i) const { return values[i]; }
K keyAt(int i) const { return keys[i]; }

int findNear(K k) const {
int i = find(k);
return i >= 0 ? i : -i-2;
}
protected:
List<K, LIST_NonGCObjects> keys;
List<T, valType> values;

int find(K k) const
{
int lo = 0;
int hi = keys.size()-1;

while (lo <= hi)
{
int i = (lo + hi)/2;
K m = keys[i];
if (k > m)
lo = i + 1;
else if (k < m)
hi = i - 1;
else
return i; // key found
}
return -(lo + 1); // key not found, lo is the insertion point
}
};
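find() above returns -(insertion point + 1) on a miss, the same encoding Java's Arrays.binarySearch uses, which is why put() recovers the slot with i = -i - 1; the extra offset of one keeps "miss before index 0" distinguishable from "hit at index 0". A tiny standalone illustration:

#include <cassert>
#include <vector>

// Standalone restatement of SortedMap::find(): index on a hit,
// -(insertion point + 1) on a miss.
static int find(const std::vector<int>& keys, int k)
{
    int lo = 0, hi = int(keys.size()) - 1;
    while (lo <= hi) {
        int i = (lo + hi) / 2;
        if (k > keys[i])      lo = i + 1;
        else if (k < keys[i]) hi = i - 1;
        else                  return i;   // key found
    }
    return -(lo + 1);                     // miss: lo is the insertion point
}

int main()
{
    std::vector<int> keys = {10, 20, 30};
    assert(find(keys, 20) == 1);   // hit at index 1
    int i = find(keys, 25);        // miss
    assert(i == -3);               // insertion point 2, encoded as -(2 + 1)
    assert(-i - 1 == 2);           // the inversion put() performs
    return 0;
}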

#define GCSortedMap SortedMap

/**
 * Bit vectors are an efficient method of keeping True/False information
 * on a set of items or conditions. Class BitSet provides functions
@@ -40,63 +40,34 @@
#ifndef __nanojit_h__
#define __nanojit_h__

#include <stddef.h>
#include "avmplus.h"

#ifdef FEATURE_NANOJIT

#ifdef AVMPLUS_IA32
#define NANOJIT_IA32
#elif AVMPLUS_ARM
#define NANOJIT_ARM
#elif AVMPLUS_PPC
#define NANOJIT_PPC
#elif AVMPLUS_SPARC
#define NANOJIT_SPARC
#elif AVMPLUS_AMD64
#define NANOJIT_X64
#define NANOJIT_64BIT
#if defined AVMPLUS_IA32
#define NANOJIT_IA32
#elif defined AVMPLUS_ARM
#define NANOJIT_ARM
#elif defined AVMPLUS_PPC
#define NANOJIT_PPC
#elif defined AVMPLUS_SPARC
#define NANOJIT_SPARC
#elif defined AVMPLUS_AMD64
#define NANOJIT_X64
#else
#error "unknown nanojit architecture"
#error "unknown nanojit architecture"
#endif

/*
If we're using MMGC, using operator delete on a GCFinalizedObject is problematic:
in particular, calling it from inside a dtor is risky because the dtor for the sub-object
might already have been called, wrecking its vtable and ending up in the wrong version
of operator delete (the global version rather than the class-specific one). Calling GC::Free
directly is fine (since it ignores the vtable), so we macro-ize to make the distinction.
#ifdef AVMPLUS_64BIT
#define NANOJIT_64BIT
#endif

macro-ization of operator new isn't strictly necessary, but is done to bottleneck both
sides of the new/delete pair to forestall future needs.
*/
#ifdef MMGC_API

// separate overloads because GCObject and GCFinalizedObjects have different dtors
// (GCFinalizedObject's is virtual, GCObject's is not)
inline void mmgc_delete(GCObject* o)
{
GC* g = GC::GetGC(o);
if (g->Collecting())
g->Free(o);
else
delete o;
}

inline void mmgc_delete(GCFinalizedObject* o)
{
GC* g = GC::GetGC(o);
if (g->Collecting())
g->Free(o);
else
delete o;
}

#define NJ_NEW(gc, cls) new (gc) cls
#define NJ_DELETE(obj) do { mmgc_delete(obj); } while (0)
#if defined NANOJIT_64BIT
#define IF_64BIT(...) __VA_ARGS__
#define UNLESS_64BIT(...)
#else
#define NJ_NEW(gc, cls) new (gc) cls
#define NJ_DELETE(obj) do { delete obj; } while (0)
#define IF_64BIT(...)
#define UNLESS_64BIT(...) __VA_ARGS__
#endif

// Embed no-op macros that let Valgrind work with the JIT.
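The IF_64BIT/UNLESS_64BIT pair introduced above is a standard variadic-macro idiom for compiling whole statements in or out by word size; a minimal sketch of the pattern in use (the printf payloads are placeholders only):

#include <cstdio>

#define NANOJIT_64BIT   // assume a 64-bit build for this sketch
#if defined NANOJIT_64BIT
#define IF_64BIT(...) __VA_ARGS__
#define UNLESS_64BIT(...)
#else
#define IF_64BIT(...)
#define UNLESS_64BIT(...) __VA_ARGS__
#endif

int main()
{
    // Exactly one of these statements survives preprocessing:
    IF_64BIT(printf("64-bit path\n");)
    UNLESS_64BIT(printf("32-bit path\n");)
    return 0;
}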
@@ -116,10 +87,7 @@ namespace nanojit
* START AVM bridging definitions
* -------------------------------------------
*/
class Fragment;
typedef avmplus::AvmCore AvmCore;
typedef avmplus::OSDep OSDep;
typedef avmplus::GCSortedMap<const void*,Fragment*,avmplus::LIST_GCObjects> FragmentMap;

const uint32_t MAXARGS = 8;

@@ -168,8 +136,12 @@ namespace nanojit
}

#ifdef AVMPLUS_VERBOSE
#define NJ_VERBOSE 1
#define NJ_PROFILE 1
#ifndef NJ_VERBOSE_DISABLED
#define NJ_VERBOSE 1
#endif
#ifndef NJ_PROFILE_DISABLED
#define NJ_PROFILE 1
#endif
#endif

#ifdef MOZ_NO_VARADIC_MACROS

@@ -188,7 +160,7 @@ namespace nanojit
#endif /*NJ_VERBOSE*/

#ifdef _DEBUG
#define debug_only(x) x
#define debug_only(x) x
#else
#define debug_only(x)
#endif /* DEBUG */
@@ -0,0 +1,8 @@
try {
for (var j = 0; j < 2; ++j) {
if (j == 1) {
++(null[2]);
}
}
} catch(e) {
}
@@ -0,0 +1,14 @@
// Make sure the arch flags are valid on startup, even if nothing has
// been traced yet. We don't know what arch the user is building on,
// but presumably we want at least 1 flag to be set on all supported
// platforms.

if (HAVE_TM) {
assertEq(jitstats.archIsIA32 ||
jitstats.archIs64BIT ||
jitstats.archIsARM ||
jitstats.archIsSPARC ||
jitstats.archIsPPC ||
jitstats.archIsAMD64,
1);
}
@@ -0,0 +1,14 @@
var count = 0;

function f() {
arguments.length--;
for (var i = 0; i < arguments.length; ++i) {
++count;
}
}

f(1, 2);
f(1, 2);
f(2, 2);

assertEq(count, 3);
@@ -0,0 +1,12 @@
// don't crash

var q;

function f() {
while (arguments.length > 0) {
q = arguments[arguments.length-1];
arguments.length--;
}
}

f(1, 2, 3, 4, 5);
@@ -2,7 +2,11 @@ function testEliminatedGuardWithinAnchor() {
for (let i = 0; i < 5; ++i) { i / (i * i); }
return "ok";
}

assertEq(testEliminatedGuardWithinAnchor(), "ok");
checkStats({
sideExitIntoInterpreter: 3
});

if (HAVE_TM) {
checkStats({
sideExitIntoInterpreter: (jitstats.archIsARM ? 1 : 3)
});
}
@@ -0,0 +1,9 @@
for (var j = 0; j < 7; ++j)
({x: function () {}}).x;

checkStats({
recorderStarted: 1,
recorderAborted: 0,
traceCompleted: 1,
sideExitIntoInterpreter: 1
});
@@ -0,0 +1,9 @@
for (var j = 0; j < 7; ++j)
uneval({x: function () {}});

checkStats({
recorderStarted: 1,
recorderAborted: 0,
traceCompleted: 1,
sideExitIntoInterpreter: 4
});
@@ -1,4 +1,4 @@
// |trace-test| TMFLAGS: full,fragprofile,treevis
// |trace-test| TMFLAGS: full,fragprofile,treevis; valgrind

function testRegExpTest() {
var r = /abc/;
@@ -1,4 +1,4 @@
// |trace-test| TMFLAGS: full,fragprofile,treevis
// |trace-test| TMFLAGS: full,fragprofile,treevis; valgrind

/* Test the proper operation of the left shift operator. This is especially
 * important on ARM as an explicit mask is required at the native instruction
@@ -1,4 +1,4 @@
// |trace-test| TMFLAGS: full,fragprofile,treevis
// |trace-test| TMFLAGS: full,fragprofile,treevis; valgrind

function testSideExitInConstructor() {
var FCKConfig = {};
@@ -30,20 +30,22 @@ def _relpath(path, start=None):
os.path.relpath = _relpath

class Test:
    def __init__(self, path, slow, allow_oom, tmflags):
    def __init__(self, path, slow, allow_oom, tmflags, valgrind):
        """ path        path to test file
            slow        True means the test is slow-running
            allow_oom   True means OOM should not be considered a failure """
            allow_oom   True means OOM should not be considered a failure
            valgrind    True means test should run under valgrind """
        self.path = path
        self.slow = slow
        self.allow_oom = allow_oom
        self.tmflags = tmflags
        self.valgrind = valgrind

    COOKIE = '|trace-test|'

    @classmethod
    def from_file(cls, path):
        slow = allow_oom = False
    def from_file(cls, path, options):
        slow = allow_oom = valgrind = False
        tmflags = ''

        line = open(path).readline()

@@ -67,10 +69,12 @@ class Test:
                slow = True
            elif name == 'allow-oom':
                allow_oom = True
            elif name == 'valgrind':
                valgrind = options.valgrind
            else:
                print('warning: unrecognized |trace-test| attribute %s'%part)

        return cls(path, slow, allow_oom, tmflags)
        return cls(path, slow, allow_oom, tmflags, valgrind or options.valgrind_all)

def find_tests(dir, substring = None):
    ans = []

@@ -102,6 +106,18 @@ def run_test(test, lib_dir):
    else:
        env = None
    cmd = get_test_cmd(test.path, lib_dir)

    if (test.valgrind and
        any([os.path.exists(os.path.join(d, 'valgrind'))
             for d in os.environ['PATH'].split(os.pathsep)])):
        valgrind_prefix = [ 'valgrind',
                            '--smc-check=all',
                            '--error-exitcode=1',
                            '--leak-check=full']
        if os.uname()[0] == 'Darwin':
            valgrind_prefix += ['--dsymutil=yes']
        cmd = valgrind_prefix + cmd

    if OPTIONS.show_cmd:
        print(cmd)
    # close_fds is not supported on Windows and will cause a ValueError.

@@ -112,6 +128,8 @@ def run_test(test, lib_dir):
    if OPTIONS.show_output:
        sys.stdout.write(out)
        sys.stdout.write(err)
    if test.valgrind:
        sys.stdout.write(err)
    return (check_output(out, err, p.returncode, test.allow_oom), out, err)

def check_output(out, err, rc, allow_oom):

@@ -126,7 +144,7 @@ def check_output(out, err, rc, allow_oom):
    if rc != 0:
        # Allow a non-zero exit code if we want to allow OOM, but only if we
        # actually got OOM.
        return allow_oom and ': out of memory\n' in err
        return allow_oom and ': out of memory' in err

    return True

@@ -172,7 +190,7 @@ def run_tests(tests, test_dir, lib_dir):
    except KeyboardInterrupt:
        pass

    if pb:
    if pb:
        pb.finish()

    if failures:

@@ -230,6 +248,10 @@ if __name__ == '__main__':
                  help='Retest using test list file [FILE]')
    op.add_option('-g', '--debug', dest='debug', action='store_true',
                  help='Run test in gdb')
    op.add_option('--valgrind', dest='valgrind', action='store_true',
                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
                  help='Run all tests with valgrind, if valgrind is in $PATH.')
    (OPTIONS, args) = op.parse_args()
    if len(args) < 1:
        op.error('missing JS_SHELL argument')

@@ -276,8 +298,9 @@ if __name__ == '__main__':
    if not test_list:
        print >> sys.stderr, "No tests found matching command line arguments."
        sys.exit(0)

    test_list = [ Test.from_file(_) for _ in test_list ]
    test_list = [ Test.from_file(_, OPTIONS) for _ in test_list ]

    if not OPTIONS.run_slow:
        test_list = [ _ for _ in test_list if not _.slow ]

@@ -293,4 +316,11 @@ if __name__ == '__main__':
        call(cmd)
        sys.exit()

    run_tests(test_list, test_dir, lib_dir)
    try:
        run_tests(test_list, test_dir, lib_dir)
    except OSError:
        if not os.path.exists(JS):
            print >> sys.stderr, "JS shell argument: file does not exist: '%s'"%JS
            sys.exit(1)
        else:
            raise
@@ -168,7 +168,7 @@ int _histEntryValue (void* id, int64_t value);

#define NUM_EVARS 4

typedef enum {
enum {
LOCK_IS_FREE = 0,
LOCK_IS_TAKEN = 1
};

@@ -0,0 +1,91 @@
/*
 * Any copyright is dedicated to the Public Domain.
 * http://creativecommons.org/licenses/publicdomain/
 */

var gTestfile = '15.2.3.14-01.js';
//-----------------------------------------------------------------------------
var BUGNUMBER = 307791;
var summary = 'ES5 Object.keys(O)';
var actual = '';
var expect = '';

printBugNumber(BUGNUMBER);
printStatus(summary);

function arraysEqual(a1, a2)
{
return a1.length === a2.length &&
a1.every(function(v, i) { return v === a2[i]; });
}

/**************
 * BEGIN TEST *
 **************/

assertEq(Object.keys.length, 1);

var o, keys;

o = { a: 3, b: 2 };
keys = Object.keys(o);
assertEq(arraysEqual(keys, ["a", "b"]), true,
"" + keys);

o = { get a() { return 17; }, b: 2 };
keys = Object.keys(o);
assertEq(arraysEqual(keys, ["a", "b"]), true,
"" + keys);

o = { __iterator__: function() { return Iterator({a: 2, b: 3}); } };
keys = Object.keys(o);
assertEq(arraysEqual(keys, ["__iterator__"]), true,
"" + keys);

o = { a: 1, b: 2 };
delete o.a;
o.a = 3;
keys = Object.keys(o);
assertEq(arraysEqual(keys, ["b", "a"]), true,
"" + keys);

o = [0, 1, 2];
keys = Object.keys(o);
assertEq(arraysEqual(keys, ["0", "1", "2"]), true,
"" + keys);

o = /./.exec("abc");
keys = Object.keys(o);
assertEq(arraysEqual(keys, ["0", "index", "input"]), true,
"" + keys);

o = { a: 1, b: 2, c: 3 };
delete o.b;
o.b = 5;
keys = Object.keys(o);
assertEq(arraysEqual(keys, ["a", "c", "b"]), true,
"" + keys);

function f() { }
f.prototype.p = 1;
o = new f();
o.g = 1;
keys = Object.keys(o);
assertEq(arraysEqual(keys, ["g"]), true,
"" + keys);

if (typeof Namespace !== "undefined" && typeof QName !== "undefined")
{
var o2 = {};
var qn = new QName(new Namespace("foo"), "v");
o2.f = 1;
o2[qn] = 3;
o2.baz = 4;
var keys2 = Object.keys(o2);
assertEq(arraysEqual(keys2, ["f", "foo::v", "baz"]), true,
"" + keys2);
}

/******************************************************************************/

printStatus("All tests passed!");
@@ -274,18 +274,31 @@ function escapeString (str)
}

/*
 * assertEq(actual, expected)
 * Throw if the two arguments are not ===
 * see https://bugzilla.mozilla.org/show_bug.cgi?id=480199
 * assertEq(actual, expected [, message])
 * Throw if the two arguments are not the same. The sameness of two values
 * is determined as follows. If both values are zero, they are the same iff
 * their signs are the same. Otherwise, if both values are NaN, they are the
 * same. Otherwise, they are the same if they compare equal using ===.
 * see https://bugzilla.mozilla.org/show_bug.cgi?id=480199 and
 * https://bugzilla.mozilla.org/show_bug.cgi?id=515285
 */
if (typeof assertEq == 'undefined')
{
var assertEq =
function (actual, expected)
function (actual, expected, message)
{
if (actual !== expected)
function SameValue(v1, v2)
{
throw new TypeError('Assertion failed: got "' + actual + '", expected "' + expected);
if (v1 === 0 && v2 === 0)
return 1 / v1 === 1 / v2;
if (v1 !== v1 && v2 !== v2)
return true;
return v1 === v2;
}
if (!SameValue(actual, expected))
{
throw new TypeError('Assertion failed: got "' + actual + '", expected "' + expected +
(message ? ": " + message : ""));
}
};
}
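SameValue above differs from === in exactly two cases: it separates +0 from -0 (via the sign of 1/x) and treats NaN as the same as NaN. A standalone restatement of those rules in C++ terms, for illustration only:

#include <cmath>
#include <cassert>

// The two special cases of the shell.js SameValue, using signbit where the
// JS version divides 1 by the value to expose the zero's sign.
static bool sameValue(double a, double b)
{
    if (a == 0 && b == 0)
        return std::signbit(a) == std::signbit(b);  // +0 vs -0
    if (std::isnan(a) && std::isnan(b))
        return true;                                // NaN is the same as NaN
    return a == b;
}

int main()
{
    assert(!sameValue(0.0, -0.0));
    assert(sameValue(std::nan(""), std::nan("")));
    assert(sameValue(1.5, 1.5));
    return 0;
}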
@@ -389,7 +389,7 @@ nsFastLoadFileReader::HasMuxedDocument(const char* aURISpec, PRBool *aResult)
nsDocumentMapReadEntry* docMapEntry =
static_cast<nsDocumentMapReadEntry*>
(PL_DHashTableOperate(&mFooter.mDocumentMap, aURISpec,
PL_DHASH_LOOKUP));
PL_DHASH_LOOKUP));

*aResult = PL_DHASH_ENTRY_IS_BUSY(docMapEntry);
return NS_OK;

@@ -401,7 +401,7 @@ nsFastLoadFileReader::StartMuxedDocument(nsISupports* aURI, const char* aURISpec
nsDocumentMapReadEntry* docMapEntry =
static_cast<nsDocumentMapReadEntry*>
(PL_DHashTableOperate(&mFooter.mDocumentMap, aURISpec,
PL_DHASH_LOOKUP));
PL_DHASH_LOOKUP));

// If the spec isn't in the map, return NS_ERROR_NOT_AVAILABLE so the
// FastLoad service can try for a file update.

@@ -412,7 +412,7 @@ nsFastLoadFileReader::StartMuxedDocument(nsISupports* aURI, const char* aURISpec
nsURIMapReadEntry* uriMapEntry =
static_cast<nsURIMapReadEntry*>
(PL_DHashTableOperate(&mFooter.mURIMap, key,
PL_DHASH_ADD));
PL_DHASH_ADD));
if (!uriMapEntry)
return NS_ERROR_OUT_OF_MEMORY;

@@ -441,7 +441,7 @@ nsFastLoadFileReader::SelectMuxedDocument(nsISupports* aURI,
nsURIMapReadEntry* uriMapEntry =
static_cast<nsURIMapReadEntry*>
(PL_DHashTableOperate(&mFooter.mURIMap, key,
PL_DHASH_LOOKUP));
PL_DHASH_LOOKUP));

// If the URI isn't in the map, return NS_ERROR_NOT_AVAILABLE so the
// FastLoad service can try selecting the file updater.

@@ -504,7 +504,7 @@ nsFastLoadFileReader::EndMuxedDocument(nsISupports* aURI)
nsURIMapReadEntry* uriMapEntry =
static_cast<nsURIMapReadEntry*>
(PL_DHashTableOperate(&mFooter.mURIMap, key,
PL_DHASH_LOOKUP));
PL_DHASH_LOOKUP));

// If the URI isn't in the map, return NS_ERROR_NOT_AVAILABLE so the
// FastLoad service can try to end a select on its file updater.

@@ -771,8 +771,8 @@ nsFastLoadFileReader::ReadFooter(nsFastLoadFooter *aFooter)
nsDocumentMapReadEntry* entry =
static_cast<nsDocumentMapReadEntry*>
(PL_DHashTableOperate(&aFooter->mDocumentMap,
info.mURISpec,
PL_DHASH_ADD));
info.mURISpec,
PL_DHASH_ADD));
if (!entry) {
nsMemory::Free((void*) info.mURISpec);
return NS_ERROR_OUT_OF_MEMORY;

@@ -1363,7 +1363,7 @@ nsFastLoadFileWriter::HasMuxedDocument(const char* aURISpec, PRBool *aResult)
nsDocumentMapWriteEntry* docMapEntry =
static_cast<nsDocumentMapWriteEntry*>
(PL_DHashTableOperate(&mDocumentMap, aURISpec,
PL_DHASH_LOOKUP));
PL_DHASH_LOOKUP));

*aResult = PL_DHASH_ENTRY_IS_BUSY(docMapEntry);
return NS_OK;

@@ -1383,7 +1383,7 @@ nsFastLoadFileWriter::StartMuxedDocument(nsISupports* aURI,
nsDocumentMapWriteEntry* docMapEntry =
static_cast<nsDocumentMapWriteEntry*>
(PL_DHashTableOperate(&mDocumentMap, aURISpec,
PL_DHASH_ADD));
PL_DHASH_ADD));
if (!docMapEntry)
return NS_ERROR_OUT_OF_MEMORY;

@@ -1392,7 +1392,7 @@ nsFastLoadFileWriter::StartMuxedDocument(nsISupports* aURI,
mCurrentDocumentMapEntry =
static_cast<nsDocumentMapWriteEntry*>
(PL_DHashTableOperate(&mDocumentMap, saveURISpec,
PL_DHASH_LOOKUP));
PL_DHASH_LOOKUP));
NS_ASSERTION(PL_DHASH_ENTRY_IS_BUSY(mCurrentDocumentMapEntry),
"mCurrentDocumentMapEntry lost during table growth?!");

@@ -1465,8 +1465,8 @@ nsFastLoadFileWriter::SelectMuxedDocument(nsISupports* aURI,
docMapEntry =
static_cast<nsDocumentMapWriteEntry*>
(PL_DHashTableOperate(&mDocumentMap,
uriMapEntry->mURISpec,
PL_DHASH_LOOKUP));
uriMapEntry->mURISpec,
PL_DHASH_LOOKUP));
NS_ASSERTION(PL_DHASH_ENTRY_IS_BUSY(docMapEntry), "lost mDocMapEntry!?");
uriMapEntry->mDocMapEntry = docMapEntry;
uriMapEntry->mGeneration = mDocumentMap.generation;

@@ -1600,7 +1600,7 @@ nsFastLoadFileWriter::AddDependency(nsIFile* aFile)
nsDependencyMapEntry* entry =
static_cast<nsDependencyMapEntry*>
(PL_DHashTableOperate(&mDependencyMap, path.get(),
PL_DHASH_ADD));
PL_DHASH_ADD));
if (!entry)
return NS_ERROR_OUT_OF_MEMORY;

@@ -2097,7 +2097,7 @@ nsFastLoadFileWriter::WriteObjectCommon(nsISupports* aObject,
nsSharpObjectMapEntry* entry =
static_cast<nsSharpObjectMapEntry*>
(PL_DHashTableOperate(&mObjectMap, aObject,
PL_DHASH_ADD));
PL_DHASH_ADD));
if (!entry) {
aObject->Release();
return NS_ERROR_OUT_OF_MEMORY;

@@ -2355,7 +2355,7 @@ nsFastLoadFileUpdater::CopyReadDocumentMapEntryToUpdater(PLDHashTable *aTable,
nsDocumentMapWriteEntry* writeEntry =
static_cast<nsDocumentMapWriteEntry*>
(PL_DHashTableOperate(&updater->mDocumentMap, spec,
PL_DHASH_ADD));
PL_DHASH_ADD));
if (!writeEntry) {
nsMemory::Free(spec);
return PL_DHASH_STOP;

@@ -2463,7 +2463,7 @@ nsFastLoadFileUpdater::Open(nsFastLoadFileReader* aReader)
nsSharpObjectMapEntry* writeEntry =
static_cast<nsSharpObjectMapEntry*>
(PL_DHashTableOperate(&mObjectMap, key,
PL_DHASH_ADD));
PL_DHASH_ADD));
if (!writeEntry)
return NS_ERROR_OUT_OF_MEMORY;