Merge tracemonkey to mozilla-central.

This commit is contained in:
Robert Sayre 2010-05-04 13:27:55 -04:00
Parents 83d152e07e 8ac8eea08e
Commit a8da5c275d
105 changed files with 4133 additions and 3137 deletions

View file

@ -6859,9 +6859,9 @@ nsWindowSH::NewResolve(nsIXPConnectWrappedNative *wrapper, JSContext *cx,
// Since we always create the undeclared property here without giving the
// interpreter a chance to report applicable strict mode warnings,
// we must take care to check those warnings here.
JSString *str = JSVAL_TO_STRING(id);
if (!::js_CheckUndeclaredVarAssignment(cx) ||
if ((!(flags & JSRESOLVE_QUALIFIED) &&
!js_CheckUndeclaredVarAssignment(cx, id)) ||
!::JS_DefineUCProperty(cx, obj, ::JS_GetStringChars(str),
::JS_GetStringLength(str), JSVAL_VOID,
JS_PropertyStub, JS_PropertyStub,
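
The new guard narrows when the undeclared-variable check fires. A minimal sketch of the control flow it encodes (helper name hypothetical, arguments as in the hunk above): only unqualified assignments such as x = 1 reach js_CheckUndeclaredVarAssignment; qualified ones such as window.x = 1 arrive with JSRESOLVE_QUALIFIED set and skip it.

static JSBool
CheckNewResolveStrictWarning(JSContext *cx, uintN flags, jsval id)
{
    // Qualified resolves (obj.x = ...) never take the undeclared-var path.
    if (flags & JSRESOLVE_QUALIFIED)
        return JS_TRUE;
    // Unqualified: let the engine report applicable strict mode warnings.
    return js_CheckUndeclaredVarAssignment(cx, id);
}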

View file

@ -715,9 +715,9 @@ _isActiveHook(JSDContext* jsdc, JSScript *script, JSDExecHook* jsdhook)
JSTrapStatus
jsd_TrapHandler(JSContext *cx, JSScript *script, jsbytecode *pc, jsval *rval,
void *closure)
jsval closure)
{
JSDExecHook* jsdhook = (JSDExecHook*) JSVAL_TO_PRIVATE(((jsval)closure));
JSDExecHook* jsdhook = (JSDExecHook*) JSVAL_TO_PRIVATE(closure);
JSD_ExecutionHookProc hook;
void* hookData;
JSDContext* jsdc;
@ -799,8 +799,8 @@ jsd_SetExecutionHook(JSDContext* jsdc,
jsdhook->callerdata = callerdata;
if( ! JS_SetTrap(jsdc->dumbContext, jsdscript->script,
(jsbytecode*)pc, jsd_TrapHandler,
(void*) PRIVATE_TO_JSVAL(jsdhook)) )
(jsbytecode*)pc, jsd_TrapHandler,
PRIVATE_TO_JSVAL(jsdhook)) )
{
free(jsdhook);
JSD_UNLOCK();
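
The hunks above track an API change: trap closures are now jsvals rather than void pointers, so the GC can mark them directly. A sketch of the migrated convention (hook body hypothetical; signatures as in this patch):

static JSTrapStatus
ExampleTrapHandler(JSContext *cx, JSScript *script, jsbytecode *pc,
                   jsval *rval, jsval closure)
{
    JSDExecHook *jsdhook = (JSDExecHook *) JSVAL_TO_PRIVATE(closure);
    /* ... dispatch through jsdhook ... */
    return JSTRAP_CONTINUE;
}

// Registration packs the private pointer straight into a jsval:
//   JS_SetTrap(cx, script, pc, ExampleTrapHandler, PRIVATE_TO_JSVAL(jsdhook));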

File diff not shown because it is too large.

View file

@ -41,6 +41,7 @@
#include "jscntxt.h"
#include "jsapi.h"
#include "jshashtable.h"
#include "prlink.h"
#include "ffi.h"
@ -237,29 +238,42 @@ enum TypeCode {
TYPE_struct
};
// Descriptor of one field in a StructType. The name of the field is stored
// as the key to the hash entry.
struct FieldInfo
{
// We need to provide a copy constructor because of Vector.
FieldInfo() {}
FieldInfo(const FieldInfo& other)
{
JS_NOT_REACHED("shouldn't be copy constructing FieldInfo");
}
String mName;
JSObject* mType;
size_t mOffset;
JSObject* mType; // CType of the field
size_t mIndex; // index of the field in the struct (first is 0)
size_t mOffset; // offset of the field in the struct, in bytes
};
// Hash policy for FieldInfos.
struct FieldHashPolicy
{
typedef JSString* Key;
typedef Key Lookup;
static uint32 hash(const Lookup &l) {
const jschar* s = l->chars();
size_t n = l->length();
uint32 hash = 0;
for (; n > 0; s++, n--)
hash = hash * 33 + *s;
return hash;
}
static JSBool match(const Key &k, const Lookup &l) {
if (k == l)
return true;
if (k->length() != l->length())
return false;
return memcmp(k->chars(), l->chars(), k->length() * sizeof(jschar)) == 0;
}
};
// Just like JSPropertySpec, but with a Unicode name.
struct PropertySpec
{
const jschar* name;
size_t namelen;
uint8 flags;
JSPropertyOp getter;
JSPropertyOp setter;
};
typedef HashMap<JSString*, FieldInfo, FieldHashPolicy, SystemAllocPolicy> FieldInfoHash;
// Descriptor of ABI, return type, argument types, and variadicity for a
// FunctionType.
@ -395,8 +409,7 @@ enum Int64FunctionSlot {
namespace CType {
JSObject* Create(JSContext* cx, JSObject* typeProto, JSObject* dataProto,
TypeCode type, JSString* name, jsval size, jsval align, ffi_type* ffiType,
PropertySpec* ps);
TypeCode type, JSString* name, jsval size, jsval align, ffi_type* ffiType);
JSObject* DefineBuiltin(JSContext* cx, JSObject* parent, const char* propName,
JSObject* typeProto, JSObject* dataProto, const char* name, TypeCode type,
@ -416,8 +429,7 @@ namespace CType {
}
namespace PointerType {
JSObject* CreateInternal(JSContext* cx, JSObject* ctor, JSObject* baseType,
JSString* name);
JSObject* CreateInternal(JSContext* cx, JSObject* baseType);
JSObject* GetBaseType(JSContext* cx, JSObject* obj);
}
@ -429,11 +441,16 @@ namespace ArrayType {
JSObject* GetBaseType(JSContext* cx, JSObject* obj);
size_t GetLength(JSContext* cx, JSObject* obj);
bool GetSafeLength(JSContext* cx, JSObject* obj, size_t* result);
ffi_type* BuildFFIType(JSContext* cx, JSObject* obj);
}
namespace StructType {
Array<FieldInfo>* GetFieldInfo(JSContext* cx, JSObject* obj);
FieldInfo* LookupField(JSContext* cx, JSObject* obj, jsval idval);
JSBool DefineInternal(JSContext* cx, JSObject* typeObj, JSObject* fieldsObj);
const FieldInfoHash* GetFieldInfo(JSContext* cx, JSObject* obj);
const FieldInfo* LookupField(JSContext* cx, JSObject* obj, jsval idval);
JSObject* BuildFieldsArray(JSContext* cx, JSObject* obj);
ffi_type* BuildFFIType(JSContext* cx, JSObject* obj);
}
namespace FunctionType {
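
A hypothetical use of the new FieldInfoHash under the declarations above; lookupForAdd/add/lookup are the stock js::HashMap operations, and name/fieldType stand in for a field's key string and CType object:

FieldInfoHash fields;
if (!fields.init())
    return false;

FieldInfo info = { fieldType, 0, 0 };              // fieldType: assumed CType
FieldInfoHash::AddPtr p = fields.lookupForAdd(name);
if (!p && !fields.add(p, name, info))              // name: JSString* key
    return false;

// Lookups hash the characters, so two distinct JSString* objects with equal
// contents reach the same entry (see FieldHashPolicy::match above).
if (FieldInfoHash::Ptr q = fields.lookup(name))
    JS_ASSERT(q->value.mType == fieldType);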

View file

@ -253,7 +253,7 @@ Library::Declare(JSContext* cx, uintN argc, jsval* vp)
root.setObject(typeObj);
// Make a function pointer type.
typeObj = PointerType::CreateInternal(cx, NULL, typeObj, NULL);
typeObj = PointerType::CreateInternal(cx, typeObj);
if (!typeObj)
return JS_FALSE;
root.setObject(typeObj);

View file

@ -256,6 +256,62 @@ static struct {
/*40*/ JSOP_STOP,
},
};
static struct {
jsbytecode incelem[7];
jsbytecode eleminc[15];
} incelem_imacros = {
{
/* 0*/ JSOP_DUP2,
/* 1*/ JSOP_GETELEM,
/* 2*/ JSOP_POS,
/* 3*/ JSOP_ONE,
/* 4*/ JSOP_ADD,
/* 5*/ JSOP_SETELEM,
/* 6*/ JSOP_STOP,
},
{
/* 0*/ JSOP_DUP2,
/* 1*/ JSOP_GETELEM,
/* 2*/ JSOP_POS,
/* 3*/ JSOP_DUP,
/* 4*/ JSOP_ONE,
/* 5*/ JSOP_ADD,
/* 6*/ JSOP_PICK, 3,
/* 8*/ JSOP_PICK, 3,
/*10*/ JSOP_PICK, 2,
/*12*/ JSOP_SETELEM,
/*13*/ JSOP_POP,
/*14*/ JSOP_STOP,
},
};
static struct {
jsbytecode decelem[7];
jsbytecode elemdec[15];
} decelem_imacros = {
{
/* 0*/ JSOP_DUP2,
/* 1*/ JSOP_GETELEM,
/* 2*/ JSOP_POS,
/* 3*/ JSOP_ONE,
/* 4*/ JSOP_SUB,
/* 5*/ JSOP_SETELEM,
/* 6*/ JSOP_STOP,
},
{
/* 0*/ JSOP_DUP2,
/* 1*/ JSOP_GETELEM,
/* 2*/ JSOP_POS,
/* 3*/ JSOP_DUP,
/* 4*/ JSOP_ONE,
/* 5*/ JSOP_SUB,
/* 6*/ JSOP_PICK, 3,
/* 8*/ JSOP_PICK, 3,
/*10*/ JSOP_PICK, 2,
/*12*/ JSOP_SETELEM,
/*13*/ JSOP_POP,
/*14*/ JSOP_STOP,
},
};
static struct {
jsbytecode String[38];
} call_imacros = {
@ -744,16 +800,16 @@ uint8 js_opcode2extra[JSOP_LIMIT] = {
0, /* JSOP_VOID */
0, /* JSOP_INCNAME */
0, /* JSOP_INCPROP */
0, /* JSOP_INCELEM */
3, /* JSOP_INCELEM */
0, /* JSOP_DECNAME */
0, /* JSOP_DECPROP */
0, /* JSOP_DECELEM */
3, /* JSOP_DECELEM */
0, /* JSOP_NAMEINC */
0, /* JSOP_PROPINC */
0, /* JSOP_ELEMINC */
3, /* JSOP_ELEMINC */
0, /* JSOP_NAMEDEC */
0, /* JSOP_PROPDEC */
0, /* JSOP_ELEMDEC */
3, /* JSOP_ELEMDEC */
1, /* JSOP_GETPROP */
0, /* JSOP_SETPROP */
0, /* JSOP_GETELEM */
@ -963,6 +1019,10 @@ uint8 js_opcode2extra[JSOP_LIMIT] = {
|| x == JSOP_MOD \
|| x == JSOP_NEG \
|| x == JSOP_POS \
|| x == JSOP_INCELEM \
|| x == JSOP_DECELEM \
|| x == JSOP_ELEMINC \
|| x == JSOP_ELEMDEC \
|| x == JSOP_GETPROP \
|| x == JSOP_CALL \
|| x == JSOP_ITER \
@ -986,6 +1046,10 @@ js_GetImacroStart(jsbytecode* pc) {
if (size_t(pc - add_imacros.obj_any) < 38) return add_imacros.obj_any;
if (size_t(pc - add_imacros.obj_obj) < 72) return add_imacros.obj_obj;
if (size_t(pc - unary_imacros.sign) < 41) return unary_imacros.sign;
if (size_t(pc - incelem_imacros.incelem) < 7) return incelem_imacros.incelem;
if (size_t(pc - incelem_imacros.eleminc) < 15) return incelem_imacros.eleminc;
if (size_t(pc - decelem_imacros.decelem) < 7) return decelem_imacros.decelem;
if (size_t(pc - decelem_imacros.elemdec) < 15) return decelem_imacros.elemdec;
if (size_t(pc - call_imacros.String) < 38) return call_imacros.String;
if (size_t(pc - new_imacros.String) < 38) return new_imacros.String;
if (size_t(pc - apply_imacros.apply0) < 8) return apply_imacros.apply0;

View file

@ -298,6 +298,60 @@
.end unary
.igroup incelem JSOP_INCELEM,JSOP_ELEMINC
.imacro incelem # obj id
dup2 # obj id obj id
getelem # obj id val
pos # obj id n
one # obj id n 1
add # obj id m
setelem # m
stop
.end
.imacro eleminc # obj id
dup2 # obj id obj id
getelem # obj id val
pos # obj id n
dup # obj id n n
one # obj id n n 1
add # obj id n m
pick 3 # id n m obj
pick 3 # n m obj id
pick 2 # n obj id m
setelem # n m
pop # n
stop
.end
.end incelem
.igroup decelem JSOP_DECELEM,JSOP_ELEMDEC
.imacro decelem # obj id
dup2 # obj id obj id
getelem # obj id val
pos # obj id n
one # obj id n 1
sub # obj id m
setelem # m
stop
.end
.imacro elemdec # obj id
dup2 # obj id obj id
getelem # obj id val
pos # obj id n
dup # obj id n n
one # obj id n n 1
sub # obj id n m
pick 3 # id n m obj
pick 3 # n m obj id
pick 2 # n obj id m
setelem # n m
pop # n
stop
.end
.end decelem
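
In C++ terms, the two imacro flavors in each group differ only in which value survives; a sketch with hypothetical getelem/setelem helpers standing in for the ops (the extra dup/pick shuffling in eleminc/elemdec exists precisely to keep the old value n alive):

double getelem(JSObject *obj, jsid id);            // hypothetical helpers
void   setelem(JSObject *obj, jsid id, double v);

double incelem(JSObject *obj, jsid id) {           // ++obj[id]
    double m = getelem(obj, id) + 1;
    setelem(obj, id, m);
    return m;                                      // yields the new value
}

double eleminc(JSObject *obj, jsid id) {           // obj[id]++
    double n = getelem(obj, id);
    setelem(obj, id, n + 1);
    return n;                                      // yields the old value
}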
.igroup call JSOP_CALL
.imacro String # String this obj

View file

@ -24,3 +24,52 @@ BEGIN_TEST(testContexts_IsRunning)
return ok;
}
END_TEST(testContexts_IsRunning)
#ifdef JS_THREADSAFE
#include "prthread.h"
struct ThreadData {
JSRuntime *rt;
JSObject *obj;
const char *code;
bool ok;
};
BEGIN_TEST(testContexts_bug561444)
{
const char *code = "<a><b/></a>.b.@c = '';";
EXEC(code);
jsrefcount rc = JS_SuspendRequest(cx);
{
ThreadData data = {rt, global, code, false};
PRThread *thread =
PR_CreateThread(PR_USER_THREAD, threadMain, &data,
PR_PRIORITY_NORMAL, PR_LOCAL_THREAD, PR_JOINABLE_THREAD, 0);
CHECK(thread);
PR_JoinThread(thread);
CHECK(data.ok);
}
JS_ResumeRequest(cx, rc);
return true;
}
static void threadMain(void *arg) {
ThreadData *d = (ThreadData *) arg;
JSContext *cx = JS_NewContext(d->rt, 8192);
if (!cx)
return;
JS_BeginRequest(cx);
{
jsvalRoot v(cx);
if (!JS_EvaluateScript(cx, d->obj, d->code, strlen(d->code), __FILE__, __LINE__, v.addr()))
return;
}
JS_DestroyContext(cx);
d->ok = true;
}
END_TEST(testContexts_bug561444)
#endif
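
Reduced to its core, the request-suspension pattern this test exercises looks like the sketch below (error handling elided; worker stands for the PRThread created in the test): the main thread gives up its request so the second thread can enter one against the same runtime.

jsrefcount depth = JS_SuspendRequest(cx);  // release this thread's request
/* worker thread: JS_NewContext(rt, 8192), JS_BeginRequest, evaluate, destroy */
PR_JoinThread(worker);                     // block until the worker finishes
JS_ResumeRequest(cx, depth);               // re-enter our request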

View file

@ -5,10 +5,11 @@ static int emptyTrapCallCount = 0;
static JSTrapStatus
EmptyTrapHandler(JSContext *cx, JSScript *script, jsbytecode *pc, jsval *rval,
void *closure)
jsval closure)
{
JS_GC(cx);
++emptyTrapCallCount;
if (JSVAL_IS_STRING(closure))
++emptyTrapCallCount;
return JSTRAP_CONTINUE;
}
@ -49,8 +50,8 @@ BEGIN_TEST(testTrap_gc)
static const char trapClosureText[] = "some trap closure";
JSString *trapClosure = JS_NewStringCopyZ(cx, trapClosureText);
CHECK(trapClosure);
JS_SetTrap(cx, script, line2, EmptyTrapHandler, trapClosure);
JS_SetTrap(cx, script, line6, EmptyTrapHandler, trapClosure);
JS_SetTrap(cx, script, line2, EmptyTrapHandler, STRING_TO_JSVAL(trapClosure));
JS_SetTrap(cx, script, line6, EmptyTrapHandler, STRING_TO_JSVAL(trapClosure));
JS_GC(cx);

View file

@ -592,9 +592,6 @@ JSRuntime::init(uint32 maxbytes)
debuggerLock = JS_NEW_LOCK();
if (!debuggerLock)
return false;
deallocatorThread = new JSBackgroundThread();
if (!deallocatorThread || !deallocatorThread->init())
return false;
#endif
return propertyTree.init() && js_InitThreads(this);
}
@ -643,10 +640,6 @@ JSRuntime::~JSRuntime()
JS_DESTROY_CONDVAR(titleSharingDone);
if (debuggerLock)
JS_DESTROY_LOCK(debuggerLock);
if (deallocatorThread) {
deallocatorThread->cancel();
delete deallocatorThread;
}
#endif
propertyTree.finish();
}
@ -5514,39 +5507,23 @@ JS_SetRegExpInput(JSContext *cx, JSString *input, JSBool multiline)
CHECK_REQUEST(cx);
/* No locking required, cx is thread-private and input must be live. */
res = &cx->regExpStatics;
res->clearRoots();
res->input = input;
res->multiline = multiline;
cx->runtime->gcPoke = JS_TRUE;
}
JS_PUBLIC_API(void)
JS_ClearRegExpStatics(JSContext *cx)
{
JSRegExpStatics *res;
/* No locking required, cx is thread-private and input must be live. */
res = &cx->regExpStatics;
res->input = NULL;
res->multiline = JS_FALSE;
res->parenCount = 0;
res->lastMatch = res->lastParen = js_EmptySubString;
res->leftContext = res->rightContext = js_EmptySubString;
if (res->moreParens) {
cx->free(res->moreParens);
res->moreParens = NULL;
}
cx->runtime->gcPoke = JS_TRUE;
cx->regExpStatics.clear();
}
JS_PUBLIC_API(void)
JS_ClearRegExpRoots(JSContext *cx)
{
JSRegExpStatics *res;
/* No locking required, cx is thread-private and input must be live. */
res = &cx->regExpStatics;
res->input = NULL;
cx->runtime->gcPoke = JS_TRUE;
cx->regExpStatics.clearRoots();
}
/* TODO: compile, execute, get/set other statics... */
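
Embedder-visible behavior is meant to stay the same; the bodies above now delegate to the new JSRegExpStatics methods, with clear() apparently resetting all match state and clearRoots() dropping only the GC roots. A usage sketch:

JS_SetRegExpInput(cx, input, JS_TRUE);  // roots |input| in cx->regExpStatics
/* ... execute some regular expressions ... */
JS_ClearRegExpStatics(cx);              // delegates to regExpStatics.clear()
JS_ClearRegExpRoots(cx);                // delegates to regExpStatics.clearRoots()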

View file

@ -112,7 +112,7 @@ JSVAL_IS_OBJECT(jsval v)
static JS_ALWAYS_INLINE JSBool
JSVAL_IS_INT(jsval v)
{
return v & JSVAL_INT;
return (JSBool)(v & JSVAL_INT);
}
static JS_ALWAYS_INLINE JSBool
@ -270,7 +270,7 @@ BOOLEAN_TO_JSVAL(JSBool b)
/* A private data pointer (2-byte-aligned) can be stored as an int jsval. */
#define JSVAL_TO_PRIVATE(v) ((void *)((v) & ~JSVAL_INT))
#define PRIVATE_TO_JSVAL(p) ((jsval)(p) | JSVAL_INT)
#define PRIVATE_TO_JSVAL(p) ((jsval)(ptrdiff_t)(p) | JSVAL_INT)
/* Property attributes, set in JSPropertySpec and passed to API functions. */
#define JSPROP_ENUMERATE 0x01 /* property is visible to for/in loop */
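
Why this tagging scheme works at all: a 2-byte-aligned pointer always has its low bit clear, so PRIVATE_TO_JSVAL can reuse that bit as the JSVAL_INT tag, and the added ptrdiff_t cast keeps the pointer-to-integer conversion well-defined on 64-bit builds. A round-trip sketch (payload type hypothetical):

struct MyHookData { int payload; };          // hypothetical, 2-byte aligned

static void
RoundTripPrivate(MyHookData *data)
{
    jsval v = PRIVATE_TO_JSVAL(data);        // low bit becomes the int tag
    JS_ASSERT(JSVAL_IS_INT(v));              // the test above now returns JSBool
    MyHookData *back = (MyHookData *) JSVAL_TO_PRIVATE(v);
    JS_ASSERT(back == data);                 // masking the tag restores the pointer
}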

View file

@ -42,15 +42,22 @@
* JS array class.
*
* Array objects begin as "dense" arrays, optimized for index-only property
* access over a vector of slots (obj->dslots) with high load factor. Array
* methods optimize for denseness by testing that the object's class is
* access over a vector of slots with high load factor. Array methods
* optimize for denseness by testing that the object's class is
* &js_ArrayClass, and can then directly manipulate the slots for efficiency.
*
* We track these pieces of metadata for arrays in dense mode:
* - the array's length property as a uint32, in JSSLOT_ARRAY_LENGTH,
* - the number of indices that are filled (non-holes), in JSSLOT_ARRAY_COUNT,
* - the net number of slots starting at dslots (capacity), in dslots[-1] if
* dslots is non-NULL.
* - The array's length property as a uint32, accessible with
* getArrayLength(), setDenseArrayLength().
* - The number of indices that are filled (non-holes), accessible with
* {get,set}DenseArrayCount().
* - The number of element slots (capacity), gettable with
* getDenseArrayCapacity().
* - The minimum of length and capacity (minLenCap). There are no explicit
* setters, it's updated automatically by setDenseArrayLength() and
* setDenseArrayCapacity(). There are also no explicit getters, the only
* user is TraceRecorder which can access it directly because it's a
* friend.
*
* In dense mode, holes in the array are represented by JSVAL_HOLE. The final
* slot in fslots is unused.
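
The bulk of this file is a mechanical translation from direct slot access to the new accessors; summarized as a reading aid (not part of the patch):

// Old direct-slot idiom               New accessor (this patch)
// obj->dslots[i]                 ->   obj->getDenseArrayElement(i)
// obj->dslots[i] = v             ->   obj->setDenseArrayElement(i, v)
// js_DenseArrayCapacity(obj)     ->   obj->getDenseArrayCapacity()
// obj->getArrayCount()           ->   obj->getDenseArrayCount()
// obj->setArrayLength(n)         ->   obj->setDenseArrayLength(n)   [dense]
//                                     obj->setSlowArrayLength(n)    [slow]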
@ -120,10 +127,10 @@ INDEX_TOO_BIG(jsuint index)
return index > JS_BIT(29) - 1;
}
#define INDEX_TOO_SPARSE(array, index) \
(INDEX_TOO_BIG(index) || \
((index) > js_DenseArrayCapacity(array) && (index) >= MIN_SPARSE_INDEX && \
(index) > ((array)->getArrayCount() + 1) * 4))
#define INDEX_TOO_SPARSE(array, index) \
(INDEX_TOO_BIG(index) || \
((index) > array->getDenseArrayCapacity() && (index) >= MIN_SPARSE_INDEX && \
(index) > ((array)->getDenseArrayCount() + 1) * 4))
JS_STATIC_ASSERT(sizeof(JSScopeProperty) > 4 * sizeof(jsval));
@ -315,67 +322,63 @@ BigIndexToId(JSContext *cx, JSObject *obj, jsuint index, JSBool createAtom,
return JS_TRUE;
}
static JSBool
ResizeSlots(JSContext *cx, JSObject *obj, uint32 oldlen, uint32 newlen,
bool initializeAllSlots = true)
bool
JSObject::resizeDenseArrayElements(JSContext *cx, uint32 oldcap, uint32 newcap,
bool initializeAllSlots)
{
jsval *slots, *newslots;
JS_ASSERT(isDenseArray());
if (newlen == 0) {
if (obj->dslots) {
cx->free(obj->dslots - 1);
obj->dslots = NULL;
}
if (newcap == 0) {
freeDenseArrayElements(cx);
return JS_TRUE;
}
if (newlen > MAX_DSLOTS_LENGTH) {
if (newcap > MAX_DSLOTS_LENGTH) {
js_ReportAllocationOverflow(cx);
return JS_FALSE;
}
slots = obj->dslots ? obj->dslots - 1 : NULL;
newslots = (jsval *) cx->realloc(slots, (size_t(newlen) + 1) * sizeof(jsval));
jsval *slots = dslots ? dslots - 1 : NULL;
jsval *newslots = (jsval *) cx->realloc(slots, (size_t(newcap) + 1) * sizeof(jsval));
if (!newslots)
return JS_FALSE;
return false;
obj->dslots = newslots + 1;
js_SetDenseArrayCapacity(obj, newlen);
dslots = newslots + 1;
setDenseArrayCapacity(newcap);
if (initializeAllSlots) {
for (slots = obj->dslots + oldlen; slots < obj->dslots + newlen; slots++)
*slots = JSVAL_HOLE;
for (uint32 i = oldcap; i < newcap; i++)
setDenseArrayElement(i, JSVAL_HOLE);
}
return JS_TRUE;
return true;
}
/*
* When a dense array with CAPACITY_DOUBLING_MAX or fewer slots needs to grow,
* double its capacity, to push() N elements in amortized O(N) time.
*
* Above this limit, grow by 12.5% each time. Speed is still amortized O(N),
* with a higher constant factor, and we waste less space.
*/
#define CAPACITY_DOUBLING_MAX (1024 * 1024)
/*
* Round up all large allocations to a multiple of this (1MB), so as not to
* waste space if malloc gives us 1MB-sized chunks (as jemalloc does).
*/
#define CAPACITY_CHUNK (1024 * 1024 / sizeof(jsval))
static JSBool
EnsureCapacity(JSContext *cx, JSObject *obj, uint32 newcap,
bool initializeAllSlots = true)
bool
JSObject::ensureDenseArrayElements(JSContext *cx, uint32 newcap, bool initializeAllSlots)
{
uint32 oldcap = js_DenseArrayCapacity(obj);
/*
* When a dense array with CAPACITY_DOUBLING_MAX or fewer slots needs to
* grow, double its capacity, to push() N elements in amortized O(N) time.
*
* Above this limit, grow by 12.5% each time. Speed is still amortized
* O(N), with a higher constant factor, and we waste less space.
*/
static const size_t CAPACITY_DOUBLING_MAX = 1024 * 1024;
/*
* Round up all large allocations to a multiple of this (1MB), so as not
* to waste space if malloc gives us 1MB-sized chunks (as jemalloc does).
*/
static const size_t CAPACITY_CHUNK = 1024 * 1024 / sizeof(jsval);
uint32 oldcap = getDenseArrayCapacity();
if (newcap > oldcap) {
/*
* If this overflows uint32, newcap is very large. nextsize will end
* up being less than newcap, the code below will thus disregard it,
* and ResizeSlots will fail.
* and resizeDenseArrayElements() will fail.
*
* The way we use dslots[-1] forces a few +1s and -1s here. For
* example, (oldcap * 2 + 1) produces the sequence 7, 15, 31, 63, ...
@ -391,20 +394,20 @@ EnsureCapacity(JSContext *cx, JSObject *obj, uint32 newcap,
actualCapacity = JS_ROUNDUP(actualCapacity + 1, CAPACITY_CHUNK) - 1; /* -1 for dslots[-1] */
else if (actualCapacity < ARRAY_CAPACITY_MIN)
actualCapacity = ARRAY_CAPACITY_MIN;
if (!ResizeSlots(cx, obj, oldcap, actualCapacity, initializeAllSlots))
return JS_FALSE;
if (!resizeDenseArrayElements(cx, oldcap, actualCapacity, initializeAllSlots))
return false;
if (!initializeAllSlots) {
/*
* Initialize the slots caller didn't actually ask for.
*/
for (jsval *slots = obj->dslots + newcap;
slots < obj->dslots + actualCapacity;
slots++) {
*slots = JSVAL_HOLE;
for (uint32 i = newcap; i < actualCapacity; i++) {
setDenseArrayElement(i, JSVAL_HOLE);
}
}
}
return JS_TRUE;
return true;
}
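
The capacity schedule those two constants produce, pulled out into a sketch (the real function additionally guards against uint32 overflow, rounds large sizes up to CAPACITY_CHUNK multiples, and reserves the dslots[-1] header word):

static uint32
NextCapacitySketch(uint32 oldcap)
{
    if (oldcap <= CAPACITY_DOUBLING_MAX)
        return oldcap * 2 + 1;        // 7, 15, 31, 63, ... (2^n with the header)
    return oldcap + (oldcap >> 3);    // above the cutoff, grow by 12.5%
}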
static bool
@ -449,8 +452,9 @@ GetArrayElement(JSContext *cx, JSObject *obj, jsdouble index, JSBool *hole,
jsval *vp)
{
JS_ASSERT(index >= 0);
if (obj->isDenseArray() && index < js_DenseArrayCapacity(obj) &&
(*vp = obj->dslots[jsuint(index)]) != JSVAL_HOLE) {
if (obj->isDenseArray() && index < obj->getDenseArrayCapacity() &&
(*vp = obj->getDenseArrayElement(jsuint(index))) != JSVAL_HOLE) {
*hole = JS_FALSE;
return JS_TRUE;
}
@ -495,13 +499,13 @@ SetArrayElement(JSContext *cx, JSObject *obj, jsdouble index, jsval v)
jsuint idx = jsuint(index);
if (!INDEX_TOO_SPARSE(obj, idx)) {
JS_ASSERT(idx + 1 > idx);
if (!EnsureCapacity(cx, obj, idx + 1))
if (!obj->ensureDenseArrayElements(cx, idx + 1))
return JS_FALSE;
if (idx >= obj->getArrayLength())
obj->setArrayLength(idx + 1);
if (obj->dslots[idx] == JSVAL_HOLE)
obj->incArrayCountBy(1);
obj->dslots[idx] = v;
obj->setDenseArrayLength(idx + 1);
if (obj->getDenseArrayElement(idx) == JSVAL_HOLE)
obj->incDenseArrayCountBy(1);
obj->setDenseArrayElement(idx, v);
return JS_TRUE;
}
}
@ -526,10 +530,10 @@ DeleteArrayElement(JSContext *cx, JSObject *obj, jsdouble index)
if (obj->isDenseArray()) {
if (index <= jsuint(-1)) {
jsuint idx = jsuint(index);
if (!INDEX_TOO_SPARSE(obj, idx) && idx < js_DenseArrayCapacity(obj)) {
if (obj->dslots[idx] != JSVAL_HOLE)
obj->decArrayCountBy(1);
obj->dslots[idx] = JSVAL_HOLE;
if (!INDEX_TOO_SPARSE(obj, idx) && idx < obj->getDenseArrayCapacity()) {
if (obj->getDenseArrayElement(idx) != JSVAL_HOLE)
obj->decDenseArrayCountBy(1);
obj->setDenseArrayElement(idx, JSVAL_HOLE);
return JS_TRUE;
}
}
@ -648,21 +652,26 @@ array_length_setter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
return false;
if (oldlen < newlen) {
obj->setArrayLength(newlen);
if (obj->isDenseArray())
obj->setDenseArrayLength(newlen);
else
obj->setSlowArrayLength(newlen);
return true;
}
if (obj->isDenseArray()) {
/* Don't reallocate if we're not actually shrinking our slots. */
jsuint capacity = js_DenseArrayCapacity(obj);
if (capacity > newlen && !ResizeSlots(cx, obj, capacity, newlen))
jsuint capacity = obj->getDenseArrayCapacity();
if (capacity > newlen && !obj->resizeDenseArrayElements(cx, capacity, newlen))
return false;
obj->setDenseArrayLength(newlen);
} else if (oldlen - newlen < (1 << 24)) {
do {
--oldlen;
if (!JS_CHECK_OPERATION_LIMIT(cx) || !DeleteArrayElement(cx, obj, oldlen))
return false;
} while (oldlen != newlen);
obj->setSlowArrayLength(newlen);
} else {
/*
* We are going to remove a lot of indexes in a presumably sparse
@ -689,9 +698,9 @@ array_length_setter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
return false;
}
}
obj->setSlowArrayLength(newlen);
}
obj->setArrayLength(newlen);
return true;
}
@ -708,8 +717,8 @@ IsDenseArrayId(JSContext *cx, JSObject *obj, jsid id)
return id == ATOM_TO_JSID(cx->runtime->atomState.lengthAtom) ||
(js_IdIsIndex(id, &i) &&
obj->getArrayLength() != 0 &&
i < js_DenseArrayCapacity(obj) &&
obj->dslots[i] != JSVAL_HOLE);
i < obj->getDenseArrayCapacity() &&
obj->getDenseArrayElement(i) != JSVAL_HOLE);
}
static JSBool
@ -752,7 +761,7 @@ js_GetDenseArrayElementValue(JSContext *cx, JSObject *obj, JSProperty *prop,
JS_ASSERT(id == ATOM_TO_JSID(cx->runtime->atomState.lengthAtom));
return IndexToValue(cx, obj->getArrayLength(), vp);
}
*vp = obj->dslots[i];
*vp = obj->getDenseArrayElement(i);
return JS_TRUE;
}
@ -772,8 +781,8 @@ array_getProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
if (!obj->isDenseArray())
return js_GetProperty(cx, obj, id, vp);
if (!js_IdIsIndex(ID_TO_VALUE(id), &i) || i >= js_DenseArrayCapacity(obj) ||
obj->dslots[i] == JSVAL_HOLE) {
if (!js_IdIsIndex(ID_TO_VALUE(id), &i) || i >= obj->getDenseArrayCapacity() ||
obj->getDenseArrayElement(i) == JSVAL_HOLE) {
JSObject *obj2;
JSProperty *prop;
JSScopeProperty *sprop;
@ -800,7 +809,7 @@ array_getProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
return JS_TRUE;
}
*vp = obj->dslots[i];
*vp = obj->getDenseArrayElement(i);
return JS_TRUE;
}
@ -813,7 +822,7 @@ slowarray_addProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
return JS_TRUE;
length = obj->getArrayLength();
if (index >= length)
obj->setArrayLength(index + 1);
obj->setSlowArrayLength(index + 1);
return JS_TRUE;
}
@ -864,14 +873,14 @@ array_setProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
return js_SetProperty(cx, obj, id, vp);
}
if (!EnsureCapacity(cx, obj, i + 1))
if (!obj->ensureDenseArrayElements(cx, i + 1))
return JS_FALSE;
if (i >= obj->getArrayLength())
obj->setArrayLength(i + 1);
if (obj->dslots[i] == JSVAL_HOLE)
obj->incArrayCountBy(1);
obj->dslots[i] = *vp;
obj->setDenseArrayLength(i + 1);
if (obj->getDenseArrayElement(i) == JSVAL_HOLE)
obj->incDenseArrayCountBy(1);
obj->setDenseArrayElement(i, *vp);
return JS_TRUE;
}
@ -919,20 +928,20 @@ dense_grow(JSContext* cx, JSObject* obj, jsint i, jsval v)
* If needed, grow the array as long it remains dense, otherwise fall off trace.
*/
jsuint u = jsuint(i);
jsuint capacity = js_DenseArrayCapacity(obj);
if ((u >= capacity) && (INDEX_TOO_SPARSE(obj, u) || !EnsureCapacity(cx, obj, u + 1)))
jsuint capacity = obj->getDenseArrayCapacity();
if ((u >= capacity) && (INDEX_TOO_SPARSE(obj, u) || !obj->ensureDenseArrayElements(cx, u + 1)))
return JS_FALSE;
if (obj->dslots[u] == JSVAL_HOLE) {
if (obj->getDenseArrayElement(u) == JSVAL_HOLE) {
if (js_PrototypeHasIndexedProperties(cx, obj))
return JS_FALSE;
if (u >= obj->getArrayLength())
obj->setArrayLength(u + 1);
obj->incArrayCountBy(1);
obj->setDenseArrayLength(u + 1);
obj->incDenseArrayCountBy(1);
}
obj->dslots[u] = v;
obj->setDenseArrayElement(u, v);
return JS_TRUE;
}
@ -1037,10 +1046,10 @@ array_deleteProperty(JSContext *cx, JSObject *obj, jsval id, jsval *rval)
return JS_TRUE;
}
if (js_IdIsIndex(id, &i) && i < js_DenseArrayCapacity(obj) &&
obj->dslots[i] != JSVAL_HOLE) {
obj->decArrayCountBy(1);
obj->dslots[i] = JSVAL_HOLE;
if (js_IdIsIndex(id, &i) && i < obj->getDenseArrayCapacity() &&
obj->getDenseArrayElement(i) != JSVAL_HOLE) {
obj->decDenseArrayCountBy(1);
obj->setDenseArrayElement(i, JSVAL_HOLE);
}
*rval = JSVAL_TRUE;
@ -1118,12 +1127,12 @@ array_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
switch (enum_op) {
case JSENUMERATE_INIT:
JS_ASSERT(obj->isDenseArray());
capacity = js_DenseArrayCapacity(obj);
capacity = obj->getDenseArrayCapacity();
if (idp)
*idp = INT_TO_JSVAL(obj->getArrayCount());
*idp = INT_TO_JSVAL(obj->getDenseArrayCount());
ii = NULL;
for (i = 0; i != capacity; ++i) {
if (obj->dslots[i] == JSVAL_HOLE) {
if (obj->getDenseArrayElement(i) == JSVAL_HOLE) {
if (!ii) {
ii = (JSIndexIterState *)
cx->malloc(offsetof(JSIndexIterState, holes) +
@ -1214,9 +1223,7 @@ slowarray_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
static void
array_finalize(JSContext *cx, JSObject *obj)
{
if (obj->dslots)
cx->free(obj->dslots - 1);
obj->dslots = NULL;
obj->freeDenseArrayElements(cx);
}
static void
@ -1229,11 +1236,11 @@ array_trace(JSTracer *trc, JSObject *obj)
JS_ASSERT(obj->isDenseArray());
obj->traceProtoAndParent(trc);
capacity = js_DenseArrayCapacity(obj);
capacity = obj->getDenseArrayCapacity();
for (i = 0; i < capacity; i++) {
v = obj->dslots[i];
v = obj->getDenseArrayElement(i);
if (JSVAL_IS_TRACEABLE(v)) {
JS_SET_TRACING_INDEX(trc, "array_dslots", i);
JS_SET_TRACING_INDEX(trc, "dense_array_elems", i);
js_CallGCMarker(trc, JSVAL_TO_TRACEABLE(v), JSVAL_TRACE_KIND(v));
}
}
@ -1266,7 +1273,8 @@ JSClass js_ArrayClass = {
"Array",
JSCLASS_HAS_RESERVED_SLOTS(2) |
JSCLASS_HAS_CACHED_PROTO(JSProto_Array) |
JSCLASS_NEW_ENUMERATE,
JSCLASS_NEW_ENUMERATE |
JSCLASS_CONSTRUCT_PROTOTYPE,
JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
JS_EnumerateStub, JS_ResolveStub, js_TryValueOf, array_finalize,
array_getObjectOps, NULL, NULL, NULL,
@ -1289,7 +1297,7 @@ JSClass js_SlowArrayClass = {
JSBool
js_MakeArraySlow(JSContext *cx, JSObject *obj)
{
JS_ASSERT(obj->getClass() == &js_ArrayClass);
JS_ASSERT(obj->isDenseArray());
/*
* Create a native scope. All slow arrays other than Array.prototype get
@ -1310,15 +1318,17 @@ js_MakeArraySlow(JSContext *cx, JSObject *obj)
if (!scope)
return JS_FALSE;
uint32 capacity = js_DenseArrayCapacity(obj);
uint32 capacity = obj->getDenseArrayCapacity();
if (capacity) {
scope->freeslot = obj->numSlots() + JS_INITIAL_NSLOTS;
obj->dslots[-1] = JS_INITIAL_NSLOTS + capacity;
// XXX: changing the capacity like this is awful. Bug 558263 will remove
// the need for this.
obj->setDenseArrayCapacity(JS_INITIAL_NSLOTS + capacity);
} else {
scope->freeslot = obj->numSlots();
}
/* Create new properties pointing to existing values in dslots */
/* Create new properties pointing to existing elements. */
for (uint32 i = 0; i < capacity; i++) {
jsid id;
JSScopeProperty *sprop;
@ -1326,8 +1336,8 @@ js_MakeArraySlow(JSContext *cx, JSObject *obj)
if (!JS_ValueToId(cx, INT_TO_JSVAL(i), &id))
goto out_bad;
if (obj->dslots[i] == JSVAL_HOLE) {
obj->dslots[i] = JSVAL_VOID;
if (obj->getDenseArrayElement(i) == JSVAL_HOLE) {
obj->setDenseArrayElement(i, JSVAL_VOID);
continue;
}
@ -1338,13 +1348,13 @@ js_MakeArraySlow(JSContext *cx, JSObject *obj)
}
/*
* Render our formerly-reserved count property GC-safe. We do not need to
* make the length slot GC-safe because it is the private slot (this is
* statically asserted within JSObject) where the implementation can store
* an arbitrary value.
* Render our formerly-reserved non-private properties GC-safe. We do not
* need to make the length slot GC-safe because it is the private slot
* (this is statically asserted within JSObject) where the implementation
* can store an arbitrary value.
*/
JS_ASSERT(js_SlowArrayClass.flags & JSCLASS_HAS_PRIVATE);
obj->voidDenseArrayCount();
obj->voidDenseOnlyArraySlots();
/* Make sure we preserve any flags borrowing bits in classword. */
obj->classword ^= (jsuword) &js_ArrayClass;
@ -1654,34 +1664,34 @@ InitArrayElements(JSContext *cx, JSObject *obj, jsuint start, jsuint count, jsva
jsuint newlen = start + count;
JS_ASSERT(jsdouble(start) + count == jsdouble(newlen));
if (!EnsureCapacity(cx, obj, newlen))
if (!obj->ensureDenseArrayElements(cx, newlen))
return JS_FALSE;
if (newlen > obj->getArrayLength())
obj->setArrayLength(newlen);
obj->setDenseArrayLength(newlen);
JS_ASSERT(count < size_t(-1) / sizeof(jsval));
if (targetType == TargetElementsMayContainValues) {
jsuint valueCount = 0;
for (jsuint i = 0; i < count; i++) {
if (obj->dslots[start + i] != JSVAL_HOLE)
if (obj->getDenseArrayElement(start + i) != JSVAL_HOLE)
valueCount++;
}
JS_ASSERT(obj->getArrayCount() >= valueCount);
obj->decArrayCountBy(valueCount);
JS_ASSERT(obj->getDenseArrayCount() >= valueCount);
obj->decDenseArrayCountBy(valueCount);
}
memcpy(obj->dslots + start, vector, sizeof(jsval) * count);
memcpy(obj->getDenseArrayElements() + start, vector, sizeof(jsval) * count);
if (vectorType == SourceVectorAllValues) {
obj->incArrayCountBy(count);
obj->incDenseArrayCountBy(count);
} else {
jsuint valueCount = 0;
for (jsuint i = 0; i < count; i++) {
if (obj->dslots[start + i] != JSVAL_HOLE)
if (obj->getDenseArrayElement(start + i) != JSVAL_HOLE)
valueCount++;
}
obj->incArrayCountBy(valueCount);
obj->incDenseArrayCountBy(valueCount);
}
JS_ASSERT_IF(count != 0, obj->dslots[newlen - 1] != JSVAL_HOLE);
JS_ASSERT_IF(count != 0, obj->getDenseArrayElement(newlen - 1) != JSVAL_HOLE);
return JS_TRUE;
}
@ -1726,53 +1736,34 @@ InitArrayObject(JSContext *cx, JSObject *obj, jsuint length, const jsval *vector
{
JS_ASSERT(obj->isArray());
obj->setArrayLength(length);
if (vector) {
if (!EnsureCapacity(cx, obj, length))
JS_ASSERT(obj->isDenseArray());
obj->setDenseArrayLength(length);
if (!obj->ensureDenseArrayElements(cx, length))
return JS_FALSE;
jsuint count = length;
if (!holey) {
memcpy(obj->dslots, vector, length * sizeof (jsval));
memcpy(obj->getDenseArrayElements(), vector, length * sizeof (jsval));
} else {
for (jsuint i = 0; i < length; i++) {
if (vector[i] == JSVAL_HOLE)
--count;
obj->dslots[i] = vector[i];
obj->setDenseArrayElement(i, vector[i]);
}
}
obj->setArrayCount(count);
obj->setDenseArrayCount(count);
} else {
obj->setArrayCount(0);
if (obj->isDenseArray()) {
obj->setDenseArrayLength(length);
obj->setDenseArrayCount(0);
} else {
obj->setSlowArrayLength(length);
}
}
return JS_TRUE;
}
#ifdef JS_TRACER
static JSString* FASTCALL
Array_p_join(JSContext* cx, JSObject* obj, JSString *str)
{
AutoValueRooter tvr(cx);
if (!array_toString_sub(cx, obj, JS_FALSE, str, tvr.addr())) {
SetBuiltinError(cx);
return NULL;
}
return JSVAL_TO_STRING(tvr.value());
}
static JSString* FASTCALL
Array_p_toString(JSContext* cx, JSObject* obj)
{
AutoValueRooter tvr(cx);
if (!array_toString_sub(cx, obj, JS_FALSE, NULL, tvr.addr())) {
SetBuiltinError(cx);
return NULL;
}
return JSVAL_TO_STRING(tvr.value());
}
#endif
/*
* Perl-inspired join, reverse, and sort.
*/
@ -1805,7 +1796,7 @@ array_reverse(JSContext *cx, uintN argc, jsval *vp)
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj)) {
/* An empty array or an array with no elements is already reversed. */
if (len == 0 || !obj->dslots)
if (len == 0 || obj->getDenseArrayCapacity() == 0)
return JS_TRUE;
/*
@ -1817,15 +1808,14 @@ array_reverse(JSContext *cx, uintN argc, jsval *vp)
* holes in the array at its start) and ensure that the capacity is
* sufficient to hold all the elements in the array if it were full.
*/
if (!EnsureCapacity(cx, obj, len))
if (!obj->ensureDenseArrayElements(cx, len))
return JS_FALSE;
jsval* lo = &obj->dslots[0];
jsval* hi = &obj->dslots[len - 1];
uint32 lo = 0, hi = len - 1;
for (; lo < hi; lo++, hi--) {
jsval tmp = *lo;
*lo = *hi;
*hi = tmp;
jsval tmp = obj->getDenseArrayElement(lo);
obj->setDenseArrayElement(lo, obj->getDenseArrayElement(hi));
obj->setDenseArrayElement(hi, tmp);
}
/*
@ -2357,13 +2347,13 @@ array_push1_dense(JSContext* cx, JSObject* obj, jsval v, jsval *rval)
return array_push_slowly(cx, obj, 1, &v, rval);
}
if (!EnsureCapacity(cx, obj, length + 1))
if (!obj->ensureDenseArrayElements(cx, length + 1))
return JS_FALSE;
obj->setArrayLength(length + 1);
obj->setDenseArrayLength(length + 1);
JS_ASSERT(obj->dslots[length] == JSVAL_HOLE);
obj->incArrayCountBy(1);
obj->dslots[length] = v;
JS_ASSERT(obj->getDenseArrayElement(length) == JSVAL_HOLE);
obj->incDenseArrayCountBy(1);
obj->setDenseArrayElement(length, v);
return IndexToValue(cx, obj->getArrayLength(), rval);
}
@ -2372,41 +2362,26 @@ js_ArrayCompPush(JSContext *cx, JSObject *obj, jsval v)
{
JS_ASSERT(obj->isDenseArray());
uint32_t length = obj->getArrayLength();
JS_ASSERT(length <= js_DenseArrayCapacity(obj));
JS_ASSERT(length <= obj->getDenseArrayCapacity());
if (length == js_DenseArrayCapacity(obj)) {
if (length == obj->getDenseArrayCapacity()) {
if (length > JS_ARGS_LENGTH_MAX) {
JS_ReportErrorNumberUC(cx, js_GetErrorMessage, NULL,
JSMSG_ARRAY_INIT_TOO_BIG);
return JS_FALSE;
}
if (!EnsureCapacity(cx, obj, length + 1))
if (!obj->ensureDenseArrayElements(cx, length + 1))
return JS_FALSE;
}
obj->setArrayLength(length + 1);
obj->incArrayCountBy(1);
obj->dslots[length] = v;
obj->setDenseArrayLength(length + 1);
obj->incDenseArrayCountBy(1);
obj->setDenseArrayElement(length, v);
return JS_TRUE;
}
JS_DEFINE_CALLINFO_3(extern, BOOL, js_ArrayCompPush, CONTEXT, OBJECT, JSVAL, 0,
nanojit::ACC_STORE_ANY)
#ifdef JS_TRACER
static jsval FASTCALL
Array_p_push1(JSContext* cx, JSObject* obj, jsval v)
{
AutoValueRooter tvr(cx, v);
if (obj->isDenseArray()
? array_push1_dense(cx, obj, v, tvr.addr())
: array_push_slowly(cx, obj, 1, tvr.addr(), tvr.addr())) {
return tvr.value();
}
SetBuiltinError(cx);
return JSVAL_VOID;
}
#endif
static JSBool
array_push(JSContext *cx, uintN argc, jsval *vp)
{
@ -2460,25 +2435,10 @@ array_pop_dense(JSContext *cx, JSObject* obj, jsval *vp)
return JS_FALSE;
if (!hole && !DeleteArrayElement(cx, obj, index))
return JS_FALSE;
obj->setArrayLength(index);
obj->setDenseArrayLength(index);
return JS_TRUE;
}
#ifdef JS_TRACER
static jsval FASTCALL
Array_p_pop(JSContext* cx, JSObject* obj)
{
AutoValueRooter tvr(cx);
if (obj->isDenseArray()
? array_pop_dense(cx, obj, tvr.addr())
: array_pop_slowly(cx, obj, tvr.addr())) {
return tvr.value();
}
SetBuiltinError(cx);
return JSVAL_VOID;
}
#endif
static JSBool
array_pop(JSContext *cx, uintN argc, jsval *vp)
{
@ -2509,15 +2469,16 @@ array_shift(JSContext *cx, uintN argc, jsval *vp)
length--;
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
length < js_DenseArrayCapacity(obj)) {
*vp = obj->dslots[0];
length < obj->getDenseArrayCapacity()) {
*vp = obj->getDenseArrayElement(0);
if (*vp == JSVAL_HOLE)
*vp = JSVAL_VOID;
else
obj->decArrayCountBy(1);
memmove(obj->dslots, obj->dslots + 1, length * sizeof(jsval));
obj->dslots[length] = JSVAL_HOLE;
obj->setArrayLength(length);
obj->decDenseArrayCountBy(1);
jsval *elems = obj->getDenseArrayElements();
memmove(elems, elems + 1, length * sizeof(jsval));
obj->setDenseArrayElement(length, JSVAL_HOLE);
obj->setDenseArrayLength(length);
return JS_TRUE;
}
@ -2562,11 +2523,12 @@ array_unshift(JSContext *cx, uintN argc, jsval *vp)
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
!INDEX_TOO_SPARSE(obj, unsigned(newlen + argc))) {
JS_ASSERT(newlen + argc == length + argc);
if (!EnsureCapacity(cx, obj, length + argc))
if (!obj->ensureDenseArrayElements(cx, length + argc))
return JS_FALSE;
memmove(obj->dslots + argc, obj->dslots, length * sizeof(jsval));
jsval *elems = obj->getDenseArrayElements();
memmove(elems + argc, elems, length * sizeof(jsval));
for (uint32 i = 0; i < argc; i++)
obj->dslots[i] = JSVAL_HOLE;
obj->setDenseArrayElement(i, JSVAL_HOLE);
} else {
last = length;
jsdouble upperIndex = last + argc;
@ -2664,9 +2626,9 @@ array_splice(JSContext *cx, uintN argc, jsval *vp)
if (count > 0) {
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
!js_PrototypeHasIndexedProperties(cx, obj2) &&
end <= js_DenseArrayCapacity(obj)) {
if (!InitArrayObject(cx, obj2, count, obj->dslots + begin,
obj->getArrayCount() != obj->getArrayLength())) {
end <= obj->getDenseArrayCapacity()) {
if (!InitArrayObject(cx, obj2, count, obj->getDenseArrayElements() + begin,
obj->getDenseArrayCount() != obj->getArrayLength())) {
return JS_FALSE;
}
} else {
@ -2691,19 +2653,19 @@ array_splice(JSContext *cx, uintN argc, jsval *vp)
delta = (jsuint)argc - count;
last = length;
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
length <= js_DenseArrayCapacity(obj) &&
(length == 0 || obj->dslots[length - 1] != JSVAL_HOLE)) {
if (!EnsureCapacity(cx, obj, length + delta))
length <= obj->getDenseArrayCapacity() &&
(length == 0 || obj->getDenseArrayElement(length - 1) != JSVAL_HOLE)) {
if (!obj->ensureDenseArrayElements(cx, length + delta))
return JS_FALSE;
/* (uint) end could be 0, so we can't use a vanilla >= test. */
while (last-- > end) {
jsval srcval = obj->dslots[last];
jsval* dest = &obj->dslots[last + delta];
if (*dest == JSVAL_HOLE && srcval != JSVAL_HOLE)
obj->incArrayCountBy(1);
*dest = srcval;
jsval srcval = obj->getDenseArrayElement(last);
jsval dest = obj->getDenseArrayElement(last + delta);
if (dest == JSVAL_HOLE && srcval != JSVAL_HOLE)
obj->incDenseArrayCountBy(1);
obj->setDenseArrayElement(last + delta, srcval);
}
obj->setArrayLength(obj->getArrayLength() + delta);
obj->setDenseArrayLength(obj->getArrayLength() + delta);
} else {
/* (uint) end could be 0, so we can't use a vanilla >= test. */
while (last-- > end) {
@ -2718,14 +2680,14 @@ array_splice(JSContext *cx, uintN argc, jsval *vp)
} else if (argc < count) {
delta = count - (jsuint)argc;
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
length <= js_DenseArrayCapacity(obj)) {
length <= obj->getDenseArrayCapacity()) {
/* (uint) end could be 0, so we can't use a vanilla >= test. */
for (last = end; last < length; last++) {
jsval srcval = obj->dslots[last];
jsval* dest = &obj->dslots[last - delta];
if (*dest == JSVAL_HOLE && srcval != JSVAL_HOLE)
obj->incArrayCountBy(1);
*dest = srcval;
jsval srcval = obj->getDenseArrayElement(last);
jsval dest = obj->getDenseArrayElement(last - delta);
if (dest == JSVAL_HOLE && srcval != JSVAL_HOLE)
obj->incDenseArrayCountBy(1);
obj->setDenseArrayElement(last - delta, srcval);
}
} else {
for (last = end; last < length; last++) {
@ -2776,12 +2738,12 @@ array_concat(JSContext *cx, uintN argc, jsval *vp)
* capacity.
*/
length = aobj->getArrayLength();
jsuint capacity = js_DenseArrayCapacity(aobj);
nobj = js_NewArrayObject(cx, JS_MIN(length, capacity), aobj->dslots,
aobj->getArrayCount() != length);
jsuint capacity = aobj->getDenseArrayCapacity();
nobj = js_NewArrayObject(cx, JS_MIN(length, capacity), aobj->getDenseArrayElements(),
aobj->getDenseArrayCount() != length);
if (!nobj)
return JS_FALSE;
nobj->setArrayLength(length);
nobj->setDenseArrayLength(length);
*vp = OBJECT_TO_JSVAL(nobj);
if (argc == 0)
return JS_TRUE;
@ -2890,10 +2852,10 @@ array_slice(JSContext *cx, uintN argc, jsval *vp)
if (begin > end)
begin = end;
if (obj->isDenseArray() && end <= js_DenseArrayCapacity(obj) &&
if (obj->isDenseArray() && end <= obj->getDenseArrayCapacity() &&
!js_PrototypeHasIndexedProperties(cx, obj)) {
nobj = js_NewArrayObject(cx, end - begin, obj->dslots + begin,
obj->getArrayCount() != obj->getArrayLength());
nobj = js_NewArrayObject(cx, end - begin, obj->getDenseArrayElements() + begin,
obj->getDenseArrayCount() != obj->getArrayLength());
if (!nobj)
return JS_FALSE;
*vp = OBJECT_TO_JSVAL(nobj);
@ -3257,28 +3219,19 @@ static JSPropertySpec array_props[] = {
{0,0,0,0,0}
};
JS_DEFINE_TRCINFO_1(array_toString,
(2, (static, STRING_FAIL, Array_p_toString, CONTEXT, THIS, 0, nanojit::ACC_STORE_ANY)))
JS_DEFINE_TRCINFO_1(array_join,
(3, (static, STRING_FAIL, Array_p_join, CONTEXT, THIS, STRING, 0, nanojit::ACC_STORE_ANY)))
JS_DEFINE_TRCINFO_1(array_push,
(3, (static, JSVAL_FAIL, Array_p_push1, CONTEXT, THIS, JSVAL, 0, nanojit::ACC_STORE_ANY)))
JS_DEFINE_TRCINFO_1(array_pop,
(2, (static, JSVAL_FAIL, Array_p_pop, CONTEXT, THIS, 0, nanojit::ACC_STORE_ANY)))
static JSFunctionSpec array_methods[] = {
#if JS_HAS_TOSOURCE
JS_FN(js_toSource_str, array_toSource, 0,0),
#endif
JS_TN(js_toString_str, array_toString, 0,0, &array_toString_trcinfo),
JS_FN(js_toString_str, array_toString, 0,0),
JS_FN(js_toLocaleString_str,array_toLocaleString,0,0),
/* Perl-ish methods. */
JS_TN("join", array_join, 1,JSFUN_GENERIC_NATIVE, &array_join_trcinfo),
JS_FN("join", array_join, 1,JSFUN_GENERIC_NATIVE),
JS_FN("reverse", array_reverse, 0,JSFUN_GENERIC_NATIVE),
JS_FN("sort", array_sort, 1,JSFUN_GENERIC_NATIVE),
JS_TN("push", array_push, 1,JSFUN_GENERIC_NATIVE, &array_push_trcinfo),
JS_TN("pop", array_pop, 0,JSFUN_GENERIC_NATIVE, &array_pop_trcinfo),
JS_FN("push", array_push, 1,JSFUN_GENERIC_NATIVE),
JS_FN("pop", array_pop, 0,JSFUN_GENERIC_NATIVE),
JS_FN("shift", array_shift, 0,JSFUN_GENERIC_NATIVE),
JS_FN("unshift", array_unshift, 1,JSFUN_GENERIC_NATIVE),
JS_FN("splice", array_splice, 2,JSFUN_GENERIC_NATIVE),
@ -3350,14 +3303,10 @@ js_NewEmptyArray(JSContext* cx, JSObject* proto)
/* Initialize all fields of JSObject. */
obj->map = const_cast<JSObjectMap *>(&SharedArrayMap);
obj->classword = jsuword(&js_ArrayClass);
obj->setProto(proto);
obj->setParent(proto->getParent());
obj->setArrayLength(0);
obj->setArrayCount(0);
obj->voidArrayUnused();
obj->dslots = NULL;
obj->init(&js_ArrayClass, proto, proto->getParent(), JSVAL_NULL);
obj->setDenseArrayLength(0);
obj->setDenseArrayCount(0);
return obj;
}
#ifdef JS_TRACER
@ -3372,7 +3321,7 @@ js_NewEmptyArrayWithLength(JSContext* cx, JSObject* proto, int32 len)
JSObject *obj = js_NewEmptyArray(cx, proto);
if (!obj)
return NULL;
obj->setArrayLength(len);
obj->setDenseArrayLength(len);
return obj;
}
#ifdef JS_TRACER
@ -3386,8 +3335,8 @@ js_NewArrayWithSlots(JSContext* cx, JSObject* proto, uint32 len)
JSObject* obj = js_NewEmptyArray(cx, proto);
if (!obj)
return NULL;
obj->setArrayLength(len);
if (!ResizeSlots(cx, obj, 0, JS_MAX(len, ARRAY_CAPACITY_MIN)))
obj->setDenseArrayLength(len);
if (!obj->resizeDenseArrayElements(cx, 0, JS_MAX(len, ARRAY_CAPACITY_MIN)))
return NULL;
return obj;
}
@ -3437,7 +3386,7 @@ js_NewSlowArrayObject(JSContext *cx)
{
JSObject *obj = NewObject(cx, &js_SlowArrayClass, NULL, NULL);
if (obj)
obj->setArrayLength(0);
obj->setSlowArrayLength(0);
return obj;
}
@ -3466,8 +3415,8 @@ js_ArrayInfo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
array->getArrayLength());
if (array->isDenseArray()) {
fprintf(stderr, ", count %lu, capacity %lu",
array->getArrayCount(),
js_DenseArrayCapacity(array));
array->getDenseArrayCount(),
array->getDenseArrayCapacity());
}
fputs(")\n", stderr);
cx->free(bytes);
@ -3491,7 +3440,7 @@ js_CoerceArrayToCanvasImageData(JSObject *obj, jsuint offset, jsuint count,
JSUint8 *dp = dest;
for (uintN i = offset; i < offset+count; i++) {
jsval v = obj->dslots[i];
jsval v = obj->getDenseArrayElement(i);
if (JSVAL_IS_INT(v)) {
jsint vi = JSVAL_TO_INT(v);
if (jsuint(vi) > 255)
@ -3542,7 +3491,7 @@ js_NewArrayObjectWithCapacity(JSContext *cx, jsuint capacity, jsval **vector)
return NULL;
AutoValueRooter tvr(cx, obj);
if (!EnsureCapacity(cx, obj, capacity, JS_FALSE))
if (!obj->ensureDenseArrayElements(cx, capacity, JS_FALSE))
obj = NULL;
/* Set/clear newborn root, in case we lost it. */
@ -3550,8 +3499,8 @@ js_NewArrayObjectWithCapacity(JSContext *cx, jsuint capacity, jsval **vector)
if (!obj)
return NULL;
obj->setArrayCount(capacity);
*vector = obj->dslots;
obj->setDenseArrayCount(capacity);
*vector = obj->getDenseArrayElements();
return obj;
}
@ -3592,7 +3541,7 @@ js_CloneDensePrimitiveArray(JSContext *cx, JSObject *obj, JSObject **clone)
* over-allocated. In the normal case where length is <= capacity the
* clone and original array will have the same capacity.
*/
jsuint jsvalCount = JS_MIN(js_DenseArrayCapacity(obj), length);
jsuint jsvalCount = JS_MIN(obj->getDenseArrayCapacity(), length);
js::AutoValueVector vector(cx);
if (!vector.reserve(jsvalCount))
@ -3629,8 +3578,8 @@ js_CloneDensePrimitiveArray(JSContext *cx, JSObject *obj, JSObject **clone)
AutoObjectRooter cloneRoot(cx, *clone);
memcpy(buffer, vector.buffer(), jsvalCount * sizeof (jsval));
(*clone)->setArrayLength(length);
(*clone)->setArrayCount(length - holeCount);
(*clone)->setDenseArrayLength(length);
(*clone)->setDenseArrayCount(length - holeCount);
return JS_TRUE;
}

View file

@ -121,21 +121,6 @@ js_NewSlowArrayObject(JSContext *cx);
extern JSBool
js_MakeArraySlow(JSContext *cx, JSObject *obj);
static JS_INLINE uint32
js_DenseArrayCapacity(JSObject *obj)
{
JS_ASSERT(obj->isDenseArray());
return obj->dslots ? (uint32) obj->dslots[-1] : 0;
}
static JS_INLINE void
js_SetDenseArrayCapacity(JSObject *obj, uint32 capacity)
{
JS_ASSERT(obj->isDenseArray());
JS_ASSERT(obj->dslots);
obj->dslots[-1] = (jsval) capacity;
}
extern JSBool
js_GetLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp);

View file

@ -102,10 +102,6 @@ js_imod(int32 a, int32 b)
}
JS_DEFINE_CALLINFO_2(extern, INT32, js_imod, INT32, INT32, 1, ACC_NONE)
/* The following boxing/unboxing primitives we can't emit inline because
they either interact with the GC or depend on SpiderMonkey's 32-bit
integer representation. */
jsval FASTCALL
js_BoxDouble(JSContext* cx, jsdouble d)
{
@ -115,7 +111,7 @@ js_BoxDouble(JSContext* cx, jsdouble d)
JS_ASSERT(JS_ON_TRACE(cx));
jsval v; /* not rooted but ok here because we know GC won't run */
if (!js_NewDoubleInRootedValue(cx, d, &v))
return JSVAL_ERROR_COOKIE;
return JSVAL_NULL;
return v;
}
JS_DEFINE_CALLINFO_2(extern, JSVAL, js_BoxDouble, CONTEXT, DOUBLE, 1, ACC_NONE)
@ -129,7 +125,7 @@ js_BoxInt32(JSContext* cx, int32 i)
jsval v; /* not rooted but ok here because we know GC won't run */
jsdouble d = (jsdouble)i;
if (!js_NewDoubleInRootedValue(cx, d, &v))
return JSVAL_ERROR_COOKIE;
return JSVAL_NULL;
return v;
}
JS_DEFINE_CALLINFO_2(extern, JSVAL, js_BoxInt32, CONTEXT, INT32, 1, ACC_NONE)
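
With JSVAL_ERROR_COOKIE gone, out-of-memory is signaled in-band: JSVAL_NULL can never be a legally boxed number, so callers can test for it. A caller-side sketch (an assumption; the tracer-side handling is not in this hunk):

jsval v = js_BoxDouble(cx, 3.14);
if (JSVAL_IS_NULL(v)) {
    /* js_NewDoubleInRootedValue failed; the trace must side-exit */
}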

View file

@ -48,7 +48,7 @@
#undef THIS
#endif
enum JSTNErrType { INFALLIBLE, FAIL_STATUS, FAIL_NULL, FAIL_NEG, FAIL_VOID, FAIL_COOKIE };
enum JSTNErrType { INFALLIBLE, FAIL_STATUS, FAIL_NULL, FAIL_NEG, FAIL_VOID };
enum { JSTN_ERRTYPE_MASK = 0x07, JSTN_UNBOX_AFTER = 0x08, JSTN_MORE = 0x10,
JSTN_CONSTRUCTOR = 0x20 };
@ -101,14 +101,6 @@ struct JSNativeTraceInfo {
JSSpecializedNative *specializations;
};
/*
* We use a magic boxed pointer value to represent error conditions that
* trigger a side exit. The address is so low that it should never be actually
* in use. If it is, a performance regression occurs, not an actual runtime
* error.
*/
#define JSVAL_ERROR_COOKIE OBJECT_TO_JSVAL((JSObject*)0x10)
/* Macros used by JS_DEFINE_CALLINFOn. */
#ifdef DEBUG
#define _JS_CI_NAME(op) ,#op
@ -160,7 +152,6 @@ struct ClosureVarInfo;
* INT32_RETRY: any negative value
* STRING_RETRY: NULL
* OBJECT_RETRY_NULL: NULL
* JSVAL_RETRY: JSVAL_ERROR_COOKIE
*
* _RETRY function calls are faster than _FAIL calls. Each _RETRY call
* saves two writes to cx->bailExit and a read from state->builtinStatus.
@ -194,7 +185,6 @@ struct ClosureVarInfo;
#define _JS_CTYPE_PC _JS_CTYPE(jsbytecode *, _JS_PTR,"P", "", INFALLIBLE)
#define _JS_CTYPE_JSVALPTR _JS_CTYPE(jsval *, _JS_PTR,"P", "", INFALLIBLE)
#define _JS_CTYPE_JSVAL _JS_JSVAL_CTYPE( _JS_PTR, "","v", INFALLIBLE)
#define _JS_CTYPE_JSVAL_RETRY _JS_JSVAL_CTYPE( _JS_PTR, --, --, FAIL_COOKIE)
#define _JS_CTYPE_JSVAL_FAIL _JS_JSVAL_CTYPE( _JS_PTR, --, --, FAIL_STATUS)
#define _JS_CTYPE_JSID _JS_CTYPE(jsid, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_BOOL _JS_CTYPE(JSBool, _JS_I32, "","i", INFALLIBLE)

View file

@ -180,6 +180,8 @@ JSThreadData::purge(JSContext *cx)
js_DestroyScriptsToGC(cx, this);
js_PurgeCachedNativeEnumerators(cx, this);
dtoaCache.s = NULL;
}
void

View file

@ -334,6 +334,8 @@ typedef HashMap<jsbytecode*,
DefaultHasher<jsbytecode*>,
SystemAllocPolicy> RecordAttemptMap;
class Oracle;
/*
* Trace monitor. Every JSThread (if JS_THREADSAFE) or JSRuntime (if not
* JS_THREADSAFE) has an associated trace monitor that keeps track of loop
@ -395,6 +397,7 @@ struct TraceMonitor {
nanojit::Assembler* assembler;
FrameInfoCache* frameCache;
Oracle* oracle;
TraceRecorder* recorder;
GlobalState globalStates[MONITOR_N_GLOBAL_STATES];
@ -553,6 +556,17 @@ struct JSThreadData {
/* State used by dtoa.c. */
DtoaState *dtoaState;
/*
* State used to cache some double-to-string conversions. A stupid
* optimization aimed directly at v8-splay.js, which stupidly converts
* many doubles multiple times in a row.
*/
struct {
jsdouble d;
jsint base;
JSString *s; // if s==NULL, d and base are not valid
} dtoaCache;
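// A sketch of the intended lookup, as comments since we are mid-struct here
// (assumption: the consumer lives in the number-to-string paths, not shown):
//
//   if (dtoaCache.s && dtoaCache.base == base && dtoaCache.d == d)
//       return dtoaCache.s;          // hit: same double, same base
//   JSString *str = /* convert d */;
//   dtoaCache.d = d;
//   dtoaCache.base = base;
//   dtoaCache.s = str;               // purge() resets s to NULL (jscntxt.cpp)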
/*
* Cache of reusable JSNativeEnumerators mapped by shape identifiers (as
* stored in scope->shape). This cache is nulled by the GC and protected
@ -611,11 +625,6 @@ struct JSThread {
*/
bool gcWaiting;
/*
* Deallocator task for this thread.
*/
JSFreePointerListTask *deallocatorTask;
/* Factored out of JSThread for !JS_THREADSAFE embedding in JSRuntime. */
JSThreadData data;
};
@ -811,6 +820,10 @@ struct JSRuntime {
size_t gcMarkLaterCount;
#endif
#ifdef JS_THREADSAFE
JSBackgroundThread gcHelperThread;
#endif
/*
* The trace operation and its data argument to trace embedding-specific
* GC roots.
@ -852,7 +865,7 @@ struct JSRuntime {
#ifdef JS_TRACER
/* True if any debug hooks not supported by the JIT are enabled. */
bool debuggerInhibitsJIT() const {
return (globalDebugHooks.interruptHandler ||
return (globalDebugHooks.interruptHook ||
globalDebugHooks.callHook ||
globalDebugHooks.objectHook);
}
@ -984,10 +997,6 @@ struct JSRuntime {
/* Literal table maintained by jsatom.c functions. */
JSAtomState atomState;
#ifdef JS_THREADSAFE
JSBackgroundThread *deallocatorThread;
#endif
JSEmptyScope *emptyArgumentsScope;
JSEmptyScope *emptyBlockScope;
@ -1180,9 +1189,27 @@ namespace js {
class AutoGCRooter;
}
struct JSRegExpStatics {
JSContext *cx;
JSString *input; /* input string to match (perl $_, GC root) */
JSBool multiline; /* whether input contains newlines (perl $*) */
JSSubString lastMatch; /* last string matched (perl $&) */
JSSubString lastParen; /* last paren matched (perl $+) */
JSSubString leftContext; /* input to left of last match (perl $`) */
JSSubString rightContext; /* input to right of last match (perl $') */
js::Vector<JSSubString> parens; /* last set of parens matched (perl $1, $2) */
JSRegExpStatics(JSContext *cx) : cx(cx), parens(cx) {}
bool copy(const JSRegExpStatics& other);
void clearRoots();
void clear();
};
struct JSContext
{
explicit JSContext(JSRuntime *rt) : runtime(rt), busyArrays(this) {}
explicit JSContext(JSRuntime *rt) :
runtime(rt), regExpStatics(this), busyArrays(this) {}
/*
* If this flag is set, we were asked to call back the operation callback
@ -1268,7 +1295,7 @@ struct JSContext
/* Storage to root recently allocated GC things and script result. */
JSWeakRoots weakRoots;
/* Regular expression class statics (XXX not shared globally). */
/* Regular expression class statics. */
JSRegExpStatics regExpStatics;
/* State for object and array toSource conversion. */
@ -1414,8 +1441,6 @@ struct JSContext
bool jitEnabled;
#endif
JSClassProtoCache classProtoCache;
/* Caller must be holding runtime->gcLock. */
void updateJITEnabled() {
#ifdef JS_TRACER
@ -1426,19 +1451,13 @@ struct JSContext
#endif
}
#ifdef JS_THREADSAFE
inline void createDeallocatorTask() {
JS_ASSERT(!thread->deallocatorTask);
if (runtime->deallocatorThread && !runtime->deallocatorThread->busy())
thread->deallocatorTask = new JSFreePointerListTask();
}
inline void submitDeallocatorTask() {
if (thread->deallocatorTask) {
runtime->deallocatorThread->schedule(thread->deallocatorTask);
thread->deallocatorTask = NULL;
}
}
JSClassProtoCache classProtoCache;
#ifdef JS_THREADSAFE
/*
* The sweep task for this context.
*/
js::BackgroundSweepTask *gcSweepTask;
#endif
ptrdiff_t &getMallocCounter() {
@ -1513,26 +1532,15 @@ struct JSContext
return p;
}
#ifdef JS_THREADSAFE
inline void free(void* p) {
if (!p)
return;
if (thread) {
JSFreePointerListTask* task = thread->deallocatorTask;
if (task) {
task->add(p);
return;
}
}
runtime->free(p);
}
#else
inline void free(void* p) {
if (!p)
return;
runtime->free(p);
}
#endif
inline void free(void* p) {
if (!p)
return;
#ifdef JS_THREADSAFE
if (gcSweepTask) {
gcSweepTask->freeLater(p);
return;
}
#endif
runtime->free(p);
}
/*
* In the common case that we'd like to allocate the memory for an object
@ -1677,17 +1685,17 @@ class AutoGCRooter {
void operator=(AutoGCRooter &ida);
};
class AutoSaveRestoreWeakRoots : private AutoGCRooter
class AutoPreserveWeakRoots : private AutoGCRooter
{
public:
explicit AutoSaveRestoreWeakRoots(JSContext *cx
JS_GUARD_OBJECT_NOTIFIER_PARAM)
explicit AutoPreserveWeakRoots(JSContext *cx
JS_GUARD_OBJECT_NOTIFIER_PARAM)
: AutoGCRooter(cx, WEAKROOTS), savedRoots(cx->weakRoots)
{
JS_GUARD_OBJECT_NOTIFIER_INIT;
}
~AutoSaveRestoreWeakRoots()
~AutoPreserveWeakRoots()
{
context->weakRoots = savedRoots;
}

View file

@ -78,7 +78,7 @@ AutoGCRooter::trace(JSTracer *trc)
return;
case WEAKROOTS:
static_cast<AutoSaveRestoreWeakRoots *>(this)->savedRoots.mark(trc);
static_cast<AutoPreserveWeakRoots *>(this)->savedRoots.mark(trc);
return;
case PARSER:

View file

@ -2199,26 +2199,6 @@ date_toString(JSContext *cx, uintN argc, jsval *vp)
return date_format(cx, utctime, FORMATSPEC_FULL, vp);
}
#ifdef JS_TRACER
static jsval FASTCALL
date_valueOf_tn(JSContext* cx, JSObject* obj, JSString* str)
{
JS_ASSERT(JS_InstanceOf(cx, obj, &js_DateClass, NULL));
jsdouble t = *JSVAL_TO_DOUBLE(obj->getDateUTCTime());
JSString* number_str = ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[JSTYPE_NUMBER]);
jsval v;
if (js_EqualStrings(str, number_str)) {
if (!js_NewNumberInRootedValue(cx, t, &v))
return JSVAL_ERROR_COOKIE;
} else {
if (!date_format(cx, t, FORMATSPEC_FULL, &v))
return JSVAL_ERROR_COOKIE;
}
return v;
}
#endif
static JSBool
date_valueOf(JSContext *cx, uintN argc, jsval *vp)
{
@ -2254,9 +2234,6 @@ static JSFunctionSpec date_static_methods[] = {
JS_FS_END
};
JS_DEFINE_TRCINFO_1(date_valueOf,
(3, (static, JSVAL_RETRY, date_valueOf_tn, CONTEXT, THIS, STRING, 0, nanojit::ACC_STORE_ANY)))
static JSFunctionSpec date_methods[] = {
JS_FN("getTime", date_getTime, 0,0),
JS_FN("getTimezoneOffset", date_getTimezoneOffset, 0,0),
@ -2302,12 +2279,11 @@ static JSFunctionSpec date_methods[] = {
JS_FN("toTimeString", date_toTimeString, 0,0),
JS_FN("toISOString", date_toISOString, 0,0),
JS_FN(js_toJSON_str, date_toISOString, 0,0),
#if JS_HAS_TOSOURCE
JS_FN(js_toSource_str, date_toSource, 0,0),
#endif
JS_FN(js_toString_str, date_toString, 0,0),
JS_TN(js_valueOf_str, date_valueOf, 0,0, &date_valueOf_trcinfo),
JS_FN(js_valueOf_str, date_valueOf, 0,0),
JS_FS_END
};

View file

@ -77,7 +77,7 @@ typedef struct JSTrap {
jsbytecode *pc;
JSOp op;
JSTrapHandler handler;
void *closure;
jsval closure;
} JSTrap;
#define DBG_LOCK(rt) JS_ACQUIRE_LOCK((rt)->debuggerLock)
@ -142,7 +142,7 @@ js_UntrapScriptCode(JSContext *cx, JSScript *script)
JS_PUBLIC_API(JSBool)
JS_SetTrap(JSContext *cx, JSScript *script, jsbytecode *pc,
JSTrapHandler handler, void *closure)
JSTrapHandler handler, jsval closure)
{
JSTrap *junk, *trap, *twin;
JSRuntime *rt;
@ -168,7 +168,7 @@ JS_SetTrap(JSContext *cx, JSScript *script, jsbytecode *pc,
trap = (JSTrap *) cx->malloc(sizeof *trap);
if (!trap)
return JS_FALSE;
trap->closure = NULL;
trap->closure = JSVAL_NULL;
DBG_LOCK(rt);
twin = (rt->debuggerMutations != sample)
? FindTrap(rt, script, pc)
@ -220,7 +220,7 @@ DestroyTrapAndUnlock(JSContext *cx, JSTrap *trap)
JS_PUBLIC_API(void)
JS_ClearTrap(JSContext *cx, JSScript *script, jsbytecode *pc,
JSTrapHandler *handlerp, void **closurep)
JSTrapHandler *handlerp, jsval *closurep)
{
JSTrap *trap;
@ -229,7 +229,7 @@ JS_ClearTrap(JSContext *cx, JSScript *script, jsbytecode *pc,
if (handlerp)
*handlerp = trap ? trap->handler : NULL;
if (closurep)
*closurep = trap ? trap->closure : NULL;
*closurep = trap ? trap->closure : JSVAL_NULL;
if (trap)
DestroyTrapAndUnlock(cx, trap);
else
@ -295,10 +295,8 @@ js_MarkTraps(JSTracer *trc)
for (JSTrap *trap = (JSTrap *) rt->trapList.next;
&trap->links != &rt->trapList;
trap = (JSTrap *) trap->links.next) {
if (trap->closure) {
JS_SET_TRACING_NAME(trc, "trap->closure");
js_CallValueTracerIfGCThing(trc, (jsval) trap->closure);
}
JS_SET_TRACING_NAME(trc, "trap->closure");
js_CallValueTracerIfGCThing(trc, trap->closure);
}
}
@ -375,15 +373,15 @@ LeaveTraceRT(JSRuntime *rt)
#endif
JS_PUBLIC_API(JSBool)
JS_SetInterrupt(JSRuntime *rt, JSTrapHandler handler, void *closure)
JS_SetInterrupt(JSRuntime *rt, JSInterruptHook hook, void *closure)
{
#ifdef JS_TRACER
{
AutoLockGC lock(rt);
bool wasInhibited = rt->debuggerInhibitsJIT();
#endif
rt->globalDebugHooks.interruptHandler = handler;
rt->globalDebugHooks.interruptHandlerData = closure;
rt->globalDebugHooks.interruptHook = hook;
rt->globalDebugHooks.interruptHookData = closure;
#ifdef JS_TRACER
JITInhibitingHookChange(rt, wasInhibited);
}
@ -393,18 +391,18 @@ JS_SetInterrupt(JSRuntime *rt, JSTrapHandler handler, void *closure)
}
JS_PUBLIC_API(JSBool)
JS_ClearInterrupt(JSRuntime *rt, JSTrapHandler *handlerp, void **closurep)
JS_ClearInterrupt(JSRuntime *rt, JSInterruptHook *handlerp, void **closurep)
{
#ifdef JS_TRACER
AutoLockGC lock(rt);
bool wasInhibited = rt->debuggerInhibitsJIT();
#endif
if (handlerp)
*handlerp = rt->globalDebugHooks.interruptHandler;
if (handlerp)
*handlerp = rt->globalDebugHooks.interruptHook;
if (closurep)
*closurep = rt->globalDebugHooks.interruptHandlerData;
rt->globalDebugHooks.interruptHandler = 0;
rt->globalDebugHooks.interruptHandlerData = 0;
*closurep = rt->globalDebugHooks.interruptHookData;
rt->globalDebugHooks.interruptHook = 0;
rt->globalDebugHooks.interruptHookData = 0;
#ifdef JS_TRACER
JITInhibitingHookChange(rt, wasInhibited);
#endif
@ -506,10 +504,8 @@ js_TraceWatchPoints(JSTracer *trc, JSObject *obj)
wp = (JSWatchPoint *)wp->links.next) {
if (wp->object == obj) {
wp->sprop->trace(trc);
if (wp->sprop->hasSetterValue() && wp->setter) {
JS_CALL_OBJECT_TRACER(trc, js_CastAsObject(wp->setter),
"wp->setter");
}
if (wp->sprop->hasSetterValue() && wp->setter)
JS_CALL_OBJECT_TRACER(trc, CastAsObject(wp->setter), "wp->setter");
JS_SET_TRACING_NAME(trc, "wp->closure");
js_CallValueTracerIfGCThing(trc, OBJECT_TO_JSVAL(wp->closure));
}
@ -735,7 +731,7 @@ js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
ok = !wp->setter ||
(sprop->hasSetterValue()
? js_InternalCall(cx, obj,
js_CastAsObjectJSVal(wp->setter),
CastAsObjectJSVal(wp->setter),
1, vp, vp)
: wp->setter(cx, obj, userid, vp));
if (injectFrame) {
@ -774,7 +770,7 @@ IsWatchedProperty(JSContext *cx, JSScopeProperty *sprop)
{
if (sprop->hasSetterValue()) {
JSObject *funobj = sprop->setterObject();
if (!funobj->isFunction())
if (!funobj || !funobj->isFunction())
return false;
JSFunction *fun = GET_FUNCTION_PRIVATE(cx, funobj);
@ -803,10 +799,10 @@ js_WrapWatchedSetter(JSContext *cx, jsid id, uintN attrs, JSPropertyOp setter)
}
wrapper = js_NewFunction(cx, NULL, js_watch_set_wrapper, 1, 0,
setter ? js_CastAsObject(setter)->getParent() : NULL, atom);
setter ? CastAsObject(setter)->getParent() : NULL, atom);
if (!wrapper)
return NULL;
return js_CastAsPropertyOp(FUN_OBJECT(wrapper));
return CastAsPropertyOp(FUN_OBJECT(wrapper));
}
JS_PUBLIC_API(JSBool)
@ -1563,7 +1559,7 @@ JS_PutPropertyDescArray(JSContext *cx, JSPropertyDescArray *pda)
/************************************************************************/
JS_PUBLIC_API(JSBool)
JS_SetDebuggerHandler(JSRuntime *rt, JSTrapHandler handler, void *closure)
JS_SetDebuggerHandler(JSRuntime *rt, JSDebuggerHandler handler, void *closure)
{
rt->globalDebugHooks.debuggerHandler = handler;
rt->globalDebugHooks.debuggerHandlerData = closure;
@ -1625,7 +1621,7 @@ JS_SetObjectHook(JSRuntime *rt, JSObjectHook hook, void *closure)
}
JS_PUBLIC_API(JSBool)
JS_SetThrowHook(JSRuntime *rt, JSTrapHandler hook, void *closure)
JS_SetThrowHook(JSRuntime *rt, JSThrowHook hook, void *closure)
{
rt->globalDebugHooks.throwHook = hook;
rt->globalDebugHooks.throwHookData = closure;

View file

@ -57,16 +57,17 @@ JS_BEGIN_EXTERN_C
extern jsbytecode *
js_UntrapScriptCode(JSContext *cx, JSScript *script);
/* The closure argument will be marked. */
extern JS_PUBLIC_API(JSBool)
JS_SetTrap(JSContext *cx, JSScript *script, jsbytecode *pc,
JSTrapHandler handler, void *closure);
JSTrapHandler handler, jsval closure);
extern JS_PUBLIC_API(JSOp)
JS_GetTrapOpcode(JSContext *cx, JSScript *script, jsbytecode *pc);
extern JS_PUBLIC_API(void)
JS_ClearTrap(JSContext *cx, JSScript *script, jsbytecode *pc,
JSTrapHandler *handlerp, void **closurep);
JSTrapHandler *handlerp, jsval *closurep);
extern JS_PUBLIC_API(void)
JS_ClearScriptTraps(JSContext *cx, JSScript *script);
@ -78,10 +79,10 @@ extern JS_PUBLIC_API(JSTrapStatus)
JS_HandleTrap(JSContext *cx, JSScript *script, jsbytecode *pc, jsval *rval);
extern JS_PUBLIC_API(JSBool)
JS_SetInterrupt(JSRuntime *rt, JSTrapHandler handler, void *closure);
JS_SetInterrupt(JSRuntime *rt, JSInterruptHook handler, void *closure);
extern JS_PUBLIC_API(JSBool)
JS_ClearInterrupt(JSRuntime *rt, JSTrapHandler *handlerp, void **closurep);
JS_ClearInterrupt(JSRuntime *rt, JSInterruptHook *handlerp, void **closurep);
/************************************************************************/
@ -337,7 +338,7 @@ JS_PutPropertyDescArray(JSContext *cx, JSPropertyDescArray *pda);
/************************************************************************/
extern JS_PUBLIC_API(JSBool)
JS_SetDebuggerHandler(JSRuntime *rt, JSTrapHandler handler, void *closure);
JS_SetDebuggerHandler(JSRuntime *rt, JSDebuggerHandler hook, void *closure);
extern JS_PUBLIC_API(JSBool)
JS_SetSourceHandler(JSRuntime *rt, JSSourceHandler handler, void *closure);
@ -352,7 +353,7 @@ extern JS_PUBLIC_API(JSBool)
JS_SetObjectHook(JSRuntime *rt, JSObjectHook hook, void *closure);
extern JS_PUBLIC_API(JSBool)
JS_SetThrowHook(JSRuntime *rt, JSTrapHandler hook, void *closure);
JS_SetThrowHook(JSRuntime *rt, JSThrowHook hook, void *closure);
extern JS_PUBLIC_API(JSBool)
JS_SetDebugErrorHook(JSRuntime *rt, JSDebugErrorHook hook, void *closure);
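Taken together, the declarations above switch the trap closure from void * to jsval, so the GC can mark it (see js_MarkTraps in the previous file). A minimal embedder-side sketch under the new signature follows; the handler, dataObj, and failure handling are illustrative, not from the patch:

/* Illustrative trap handler using the new jsval closure. */
static JSTrapStatus
MyTrapHandler(JSContext *cx, JSScript *script, jsbytecode *pc,
              jsval *rval, jsval closure)
{
    JSObject *data = JSVAL_TO_OBJECT(closure); /* kept alive via js_MarkTraps */
    (void) data; /* ... inspect debugger state, decide how to proceed ... */
    return JSTRAP_CONTINUE;
}

/* Installation: no PRIVATE_TO_JSVAL casting games needed any more. */
if (!JS_SetTrap(cx, script, pc, MyTrapHandler, OBJECT_TO_JSVAL(dataObj)))
    return JS_FALSE;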

View file

@ -52,7 +52,7 @@
static char dempty[] = "<null>";
static char *
jsdtrace_fun_classname(JSFunction *fun)
jsdtrace_fun_classname(const JSFunction *fun)
{
return (fun &&
!FUN_INTERPRETED(fun) &&
@ -71,7 +71,7 @@ jsdtrace_filename(JSStackFrame *fp)
}
static int
jsdtrace_fun_linenumber(JSContext *cx, JSFunction *fun)
jsdtrace_fun_linenumber(JSContext *cx, const JSFunction *fun)
{
if (fun && FUN_INTERPRETED(fun))
return (int) JS_GetScriptBaseLineNumber(cx, FUN_SCRIPT(fun));
@ -109,7 +109,7 @@ jsdtrace_frame_linenumber(JSContext *cx, JSStackFrame *fp)
* provide raw (unmasked) jsvals should type info be useful from D scripts.
*/
static void *
jsdtrace_jsvaltovoid(JSContext *cx, jsval argval)
jsdtrace_jsvaltovoid(JSContext *cx, const jsval argval)
{
JSType type = TYPEOF(cx, argval);
@ -136,7 +136,7 @@ jsdtrace_jsvaltovoid(JSContext *cx, jsval argval)
}
static char *
jsdtrace_fun_name(JSContext *cx, JSFunction *fun)
jsdtrace_fun_name(JSContext *cx, const JSFunction *fun)
{
JSAtom *atom;
char *name;
@ -166,7 +166,7 @@ jsdtrace_fun_name(JSContext *cx, JSFunction *fun)
* a number of usually unused lines of code would cause.
*/
void
jsdtrace_function_entry(JSContext *cx, JSStackFrame *fp, JSFunction *fun)
jsdtrace_function_entry(JSContext *cx, JSStackFrame *fp, const JSFunction *fun)
{
JAVASCRIPT_FUNCTION_ENTRY(
jsdtrace_filename(fp),
@ -190,7 +190,8 @@ jsdtrace_function_info(JSContext *cx, JSStackFrame *fp, JSStackFrame *dfp,
}
void
jsdtrace_function_args(JSContext *cx, JSStackFrame *fp, JSFunction *fun, jsuint argc, jsval *argv)
jsdtrace_function_args(JSContext *cx, JSStackFrame *fp, const JSFunction *fun,
jsuint argc, jsval *argv)
{
JAVASCRIPT_FUNCTION_ARGS(
jsdtrace_filename(fp),

View file

@ -33,7 +33,9 @@
*
* ***** END LICENSE BLOCK ***** */
#ifdef INCLUDE_MOZILLA_DTRACE
#include "javascript-trace.h"
#endif
#include "jspubtd.h"
#include "jsprvtd.h"
@ -43,39 +45,126 @@
JS_BEGIN_EXTERN_C
extern void
jsdtrace_function_entry(JSContext *cx, JSStackFrame *fp, JSFunction *fun);
jsdtrace_function_entry(JSContext *cx, JSStackFrame *fp, const JSFunction *fun);
extern void
jsdtrace_function_info(JSContext *cx, JSStackFrame *fp, JSStackFrame *dfp,
JSFunction *fun);
const JSFunction *fun);
extern void
jsdtrace_function_args(JSContext *cx, JSStackFrame *fp, JSFunction *fun, jsuint argc, jsval *argv);
jsdtrace_function_args(JSContext *cx, JSStackFrame *fp, const JSFunction *fun,
jsuint argc, jsval *argv);
extern void
jsdtrace_function_rval(JSContext *cx, JSStackFrame *fp, JSFunction *fun, jsval *rval);
jsdtrace_function_rval(JSContext *cx, JSStackFrame *fp, const JSFunction *fun,
jsval rval);
extern void
jsdtrace_function_return(JSContext *cx, JSStackFrame *fp, JSFunction *fun);
jsdtrace_function_return(JSContext *cx, JSStackFrame *fp, const JSFunction *fun);
extern void
jsdtrace_object_create_start(JSStackFrame *fp, JSClass *clasp);
jsdtrace_object_create_start(JSStackFrame *fp, const JSClass *clasp);
extern void
jsdtrace_object_create_done(JSStackFrame *fp, JSClass *clasp);
jsdtrace_object_create_done(JSStackFrame *fp, const JSClass *clasp);
extern void
jsdtrace_object_create(JSContext *cx, JSClass *clasp, JSObject *obj);
jsdtrace_object_create(JSContext *cx, const JSClass *clasp, const JSObject *obj);
extern void
jsdtrace_object_finalize(JSObject *obj);
jsdtrace_object_finalize(const JSObject *obj);
extern void
jsdtrace_execute_start(JSScript *script);
jsdtrace_execute_start(const JSScript *script);
extern void
jsdtrace_execute_done(JSScript *script);
jsdtrace_execute_done(const JSScript *script);
JS_END_EXTERN_C
namespace js {
class DTrace {
public:
/*
 * If |lval| is provided to the enter/exit methods, it is tested first: the
 * dtrace events are emitted only when |lval| is a function.
 */
static void enterJSFun(JSContext *cx, JSStackFrame *fp, const JSFunction *fun,
JSStackFrame *dfp, jsuint argc, jsval *argv, jsval *lval = NULL);
static void exitJSFun(JSContext *cx, JSStackFrame *fp, const JSFunction *fun,
jsval rval, jsval *lval = NULL);
static void finalizeObject(const JSObject *obj);
class ExecutionScope {
const JSScript *script;
void startExecution();
void endExecution();
public:
explicit ExecutionScope(const JSScript *script) : script(script) { startExecution(); }
~ExecutionScope() { endExecution(); }
};
};
inline void
DTrace::enterJSFun(JSContext *cx, JSStackFrame *fp, const JSFunction *fun,
JSStackFrame *dfp, jsuint argc, jsval *argv, jsval *lval)
{
#ifdef INCLUDE_MOZILLA_DTRACE
if (!lval || VALUE_IS_FUNCTION(cx, *lval)) {
if (JAVASCRIPT_FUNCTION_ENTRY_ENABLED())
jsdtrace_function_entry(cx, fp, fun);
if (JAVASCRIPT_FUNCTION_INFO_ENABLED())
jsdtrace_function_info(cx, fp, dfp, fun);
if (JAVASCRIPT_FUNCTION_ARGS_ENABLED())
jsdtrace_function_args(cx, fp, fun, argc, argv);
}
#endif
}
inline void
DTrace::exitJSFun(JSContext *cx, JSStackFrame *fp, const JSFunction *fun,
jsval rval, jsval *lval)
{
#ifdef INCLUDE_MOZILLA_DTRACE
if (!lval || VALUE_IS_FUNCTION(cx, *lval)) {
if (JAVASCRIPT_FUNCTION_RVAL_ENABLED())
jsdtrace_function_rval(cx, fp, fun, rval);
if (JAVASCRIPT_FUNCTION_RETURN_ENABLED())
jsdtrace_function_return(cx, fp, fun);
}
#endif
}
inline void
DTrace::finalizeObject(const JSObject *obj)
{
#ifdef INCLUDE_MOZILLA_DTRACE
if (JAVASCRIPT_OBJECT_FINALIZE_ENABLED())
jsdtrace_object_finalize(obj);
#endif
}
inline void
DTrace::ExecutionScope::startExecution()
{
#ifdef INCLUDE_MOZILLA_DTRACE
if (JAVASCRIPT_EXECUTE_START_ENABLED())
jsdtrace_execute_start(script);
#endif
}
inline void
DTrace::ExecutionScope::endExecution()
{
#ifdef INCLUDE_MOZILLA_DTRACE
if (JAVASCRIPT_EXECUTE_DONE_ENABLED())
jsdtrace_execute_done(script);
#endif
}
} /* namespace js */
#endif /* _JSDTRACE_H */
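The DTrace class above replaces scattered #ifdef INCLUDE_MOZILLA_DTRACE blocks at call sites with always-compilable helpers that are no-ops when DTrace support is absent. A usage sketch mirroring the jsinterp.cpp call sites later in this patch; RunScript is a hypothetical caller:

JSBool
RunScript(JSContext *cx, JSScript *script)
{
    /* Fires the execute-start probe now and execute-done when the scope
       unwinds, even on early returns. */
    js::DTrace::ExecutionScope executionScope(script);
    /* ... interpret the script ... */
    return JS_TRUE;
}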

View file

@ -96,7 +96,8 @@ JSCodeGenerator::JSCodeGenerator(Parser *parser,
spanDeps(NULL), jumpTargets(NULL), jtFreeList(NULL),
numSpanDeps(0), numJumpTargets(0), spanDepTodo(0),
arrayCompDepth(0),
emitLevel(0)
emitLevel(0),
constMap(parser->context)
{
flags = TCF_COMPILING;
memset(&prolog, 0, sizeof prolog);
@ -107,6 +108,11 @@ JSCodeGenerator::JSCodeGenerator(Parser *parser,
memset(&upvarMap, 0, sizeof upvarMap);
}
bool JSCodeGenerator::init()
{
return constMap.init();
}
JSCodeGenerator::~JSCodeGenerator()
{
JS_ARENA_RELEASE(codePool, codeMark);
@ -1257,7 +1263,7 @@ JSTreeContext::ensureSharpSlots()
}
JS_ASSERT(!(flags & TCF_HAS_SHARPS));
if (flags & TCF_IN_FUNCTION) {
if (inFunction()) {
JSContext *cx = parser->context;
JSAtom *sharpArrayAtom = js_Atomize(cx, "#array", 6, 0);
JSAtom *sharpDepthAtom = js_Atomize(cx, "#depth", 6, 0);
@ -1544,7 +1550,6 @@ js_DefineCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
jsint ival;
JSAtom *valueAtom;
jsval v;
JSAtomListElement *ale;
/* XXX just do numbers for now */
if (pn->pn_type == TOK_NUMBER) {
@ -1562,10 +1567,8 @@ js_DefineCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
return JS_FALSE;
v = ATOM_KEY(valueAtom);
}
ale = cg->constList.add(cg->parser, atom);
if (!ale)
if (!cg->constMap.put(atom, v))
return JS_FALSE;
ALE_SET_VALUE(ale, v);
}
return JS_TRUE;
}
@ -1626,7 +1629,6 @@ LookupCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
{
JSBool ok;
JSStmtInfo *stmt;
JSAtomListElement *ale;
JSObject *obj, *objbox;
JSProperty *prop;
uintN attrs;
@ -1638,16 +1640,15 @@ LookupCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
*/
*vp = JSVAL_HOLE;
do {
if (cg->flags & (TCF_IN_FUNCTION | TCF_COMPILE_N_GO)) {
if (cg->inFunction() && cg->compileAndGo()) {
/* XXX this will need revising if 'const' becomes block-scoped. */
stmt = js_LexicalLookup(cg, atom, NULL);
if (stmt)
return JS_TRUE;
ale = cg->constList.lookup(atom);
if (ale) {
JS_ASSERT(ALE_VALUE(ale) != JSVAL_HOLE);
*vp = ALE_VALUE(ale);
if (JSCodeGenerator::ConstMap::Ptr p = cg->constMap.lookup(atom)) {
JS_ASSERT(p->value != JSVAL_HOLE);
*vp = p->value;
return JS_TRUE;
}
@ -1658,11 +1659,11 @@ LookupCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
* with object or catch variable; nor can prop's value be changed,
* nor can prop be deleted.
*/
if (cg->flags & TCF_IN_FUNCTION) {
if (cg->inFunction()) {
if (js_LookupLocal(cx, cg->fun, atom, NULL) != JSLOCAL_NONE)
break;
} else {
JS_ASSERT(cg->flags & TCF_COMPILE_N_GO);
JS_ASSERT(cg->compileAndGo());
obj = cg->scopeChain;
ok = obj->lookupProperty(cx, ATOM_TO_JSID(atom), &objbox, &prop);
if (!ok)
@ -1834,7 +1835,7 @@ static jsint
AdjustBlockSlot(JSContext *cx, JSCodeGenerator *cg, jsint slot)
{
JS_ASSERT((jsuint) slot < cg->maxStackDepth);
if (cg->flags & TCF_IN_FUNCTION) {
if (cg->inFunction()) {
slot += cg->fun->u.i.nvars;
if ((uintN) slot >= SLOTNO_LIMIT) {
ReportCompileErrorNumber(cx, CG_TS(cg), NULL, JSREPORT_ERROR, JSMSG_TOO_MANY_LOCALS);
@ -1942,7 +1943,7 @@ MakeUpvarForEval(JSParseNode *pn, JSCodeGenerator *cg)
JSAtomListElement *ale = cg->upvarList.lookup(atom);
if (!ale) {
if ((cg->flags & TCF_IN_FUNCTION) &&
if (cg->inFunction() &&
!js_AddLocal(cx, cg->fun, atom, JSLOCAL_UPVAR)) {
return false;
}
@ -2064,7 +2065,7 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
case JSOP_DELNAME:
if (dn_kind != JSDefinition::UNKNOWN) {
if (cg->parser->callerFrame && !cg->funbox)
JS_ASSERT(cg->flags & TCF_COMPILE_N_GO);
JS_ASSERT(cg->compileAndGo());
else
pn->pn_op = JSOP_FALSE;
pn->pn_dflags |= PND_BOUND;
@ -2079,7 +2080,7 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
if (cookie == FREE_UPVAR_COOKIE) {
JSStackFrame *caller = cg->parser->callerFrame;
if (caller) {
JS_ASSERT(cg->flags & TCF_COMPILE_N_GO);
JS_ASSERT(cg->compileAndGo());
/*
* Don't generate upvars on the left side of a for loop. See
@ -2098,7 +2099,7 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
* created function objects have the top-level cg's scopeChain set
* as their parent by Parser::newFunction.
*/
JSObject *scopeobj = (cg->flags & TCF_IN_FUNCTION)
JSObject *scopeobj = cg->inFunction()
? FUN_OBJECT(cg->fun)->getParent()
: cg->scopeChain;
if (scopeobj != cg->parser->callerVarObj)
@ -2134,9 +2135,10 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
* If this is a global reference from within a function, leave pn_op as
* JSOP_NAME, etc. We could emit JSOP_*GVAR ops within function code if
* only we could depend on the global frame's slots being valid for all
* calls to the function.
* calls to the function, and if we could equate the atom index in the
* function's atom map for every global name with its frame slot.
*/
if (cg->flags & TCF_IN_FUNCTION)
if (cg->inFunction())
return JS_TRUE;
/*
@ -2192,10 +2194,10 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
JSTreeContext *tc = cg;
while (tc->staticLevel != level)
tc = tc->parent;
JS_ASSERT(tc->flags & TCF_COMPILING);
JS_ASSERT(tc->compiling());
JSCodeGenerator *evalcg = (JSCodeGenerator *) tc;
JS_ASSERT(evalcg->flags & TCF_COMPILE_N_GO);
JS_ASSERT(evalcg->compileAndGo());
JS_ASSERT(caller->fun && cg->parser->callerVarObj == evalcg->scopeChain);
/*
@ -2217,7 +2219,7 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
uintN skip = cg->staticLevel - level;
if (skip != 0) {
JS_ASSERT(cg->flags & TCF_IN_FUNCTION);
JS_ASSERT(cg->inFunction());
JS_ASSERT_IF(UPVAR_FRAME_SLOT(cookie) != CALLEE_UPVAR_SLOT,
cg->lexdeps.lookup(atom));
JS_ASSERT(JOF_OPTYPE(op) == JOF_ATOM);
@ -2291,7 +2293,7 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
do {
tc = tc->parent;
} while (tc->staticLevel != level);
if (tc->flags & TCF_IN_FUNCTION)
if (tc->inFunction())
slot += tc->fun->nargs;
}
@ -3647,7 +3649,7 @@ MaybeEmitVarDecl(JSContext *cx, JSCodeGenerator *cg, JSOp prologOp,
}
if (JOF_OPTYPE(pn->pn_op) == JOF_ATOM &&
(!(cg->flags & TCF_IN_FUNCTION) || (cg->flags & TCF_FUN_HEAVYWEIGHT))) {
(!cg->inFunction() || (cg->flags & TCF_FUN_HEAVYWEIGHT))) {
CG_SWITCH_TO_PROLOG(cg);
if (!UpdateLineNumberNotes(cx, cg, pn->pn_pos.begin.lineno))
return JS_FALSE;
@ -4370,7 +4372,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
* comments in the TOK_LC case.
*/
JS_ASSERT(pn->pn_op == JSOP_NOP);
JS_ASSERT(cg->flags & TCF_IN_FUNCTION);
JS_ASSERT(cg->inFunction());
if (!EmitFunctionDefNop(cx, cg, pn->pn_index))
return JS_FALSE;
break;
@ -4396,6 +4398,10 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
new (cg2space) JSCodeGenerator(cg->parser,
cg->codePool, cg->notePool,
pn->pn_pos.begin.lineno);
if (!cg2->init())
return JS_FALSE;
cg2->flags = pn->pn_funbox->tcflags | TCF_IN_FUNCTION;
#if JS_HAS_SHARP_VARS
if (cg2->flags & TCF_HAS_SHARPS) {
@ -4450,7 +4456,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
* invocation of the emitter and calls to js_EmitTree for function
* definitions can be scheduled before generating the rest of code.
*/
if (!(cg->flags & TCF_IN_FUNCTION)) {
if (!cg->inFunction()) {
JS_ASSERT(!cg->topStmt);
CG_SWITCH_TO_PROLOG(cg);
op = FUN_FLAT_CLOSURE(fun) ? JSOP_DEFFUN_FC : JSOP_DEFFUN;
@ -4514,7 +4520,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
stmtInfo.update = top;
if (!js_SetSrcNoteOffset(cx, cg, noteIndex, 0, jmp - beq))
return JS_FALSE;
if (!js_SetSrcNoteOffset(cx, cg, noteIndex, 1, top - jmp))
if (!js_SetSrcNoteOffset(cx, cg, noteIndex, 1, top - beq))
return JS_FALSE;
}
@ -5484,7 +5490,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
#if JS_HAS_GENERATORS
case TOK_YIELD:
if (!(cg->flags & TCF_IN_FUNCTION)) {
if (!cg->inFunction()) {
ReportCompileErrorNumber(cx, CG_TS(cg), pn, JSREPORT_ERROR,
JSMSG_BAD_RETURN_OR_YIELD,
js_yield_str);
@ -5541,7 +5547,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
* Currently this is used only for functions, as compile-as-we go
* mode for scripts does not allow separate emitter passes.
*/
JS_ASSERT(cg->flags & TCF_IN_FUNCTION);
JS_ASSERT(cg->inFunction());
if (pn->pn_xflags & PNX_DESTRUCT) {
/*
* Assign the destructuring arguments before defining any
@ -6006,7 +6012,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
case TOK_PLUS:
/* For TCF_IN_FUNCTION test, see TOK_RB concerning JSOP_NEWARRAY. */
if (pn->pn_arity == PN_LIST && pn->pn_count < JS_BIT(16) &&
(cg->flags & TCF_IN_FUNCTION)) {
cg->inFunction()) {
/* Emit up to the first string literal conventionally. */
for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
if (pn2->pn_type == TOK_STRING)
@ -6017,25 +6023,32 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
return JS_FALSE;
}
/* Emit remainder as a single JSOP_CONCATN. */
for (index = 0; pn2; pn2 = pn2->pn_next, index++) {
if (!pn2)
break;
/*
* Having seen a string literal, we know statically that the rest
* of the additions are string concatenation, so we emit them as a
* single concatn. First, do string conversion on the result of the
* preceding zero or more additions so that any side effects of
* string conversion occur before the next operand begins.
*/
if (pn2 == pn->pn_head) {
index = 0;
} else {
if (!js_Emit1(cx, cg, JSOP_OBJTOSTR))
return JS_FALSE;
index = 1;
}
for (; pn2; pn2 = pn2->pn_next, index++) {
if (!js_EmitTree(cx, cg, pn2))
return JS_FALSE;
if (!pn2->isLiteral() &&
js_Emit1(cx, cg, JSOP_OBJTOSTR) < 0) {
if (!pn2->isLiteral() && js_Emit1(cx, cg, JSOP_OBJTOSTR) < 0)
return JS_FALSE;
}
}
if (index != 0) {
EMIT_UINT16_IMM_OP(JSOP_CONCATN, index);
/* If we had a prefix, we need to be added to it now. */
if (pn->pn_head->pn_type != TOK_STRING &&
js_Emit1(cx, cg, JSOP_ADD) < 0) {
return JS_FALSE;
}
}
EMIT_UINT16_IMM_OP(JSOP_CONCATN, index);
break;
}
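        /*
         * Illustrative example (not part of the patch): for f() + "x" + g(),
         * the first loop emits f() conventionally, then the code above emits
         * JSOP_OBJTOSTR to stringify that prefix (index = 1), then "x" (a
         * literal, so no OBJTOSTR; index = 2), then g() plus OBJTOSTR
         * (index = 3), and finally a single JSOP_CONCATN with immediate 3.
         */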
case TOK_BITOR:
@ -6415,7 +6428,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
type = PN_TYPE(pn->expr());
if (type != TOK_CATCH && type != TOK_LET && type != TOK_FOR &&
(!(stmt = stmtInfo.down)
? !(cg->flags & TCF_IN_FUNCTION)
? !cg->inFunction()
: stmt->type == STMT_BLOCK)) {
#if defined DEBUG_brendan || defined DEBUG_mrbkap
/* There must be no source note already output for the next op. */
@ -6527,7 +6540,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
do_emit_array:
#endif
op = (JS_LIKELY(pn->pn_count < JS_BIT(16)) && (cg->flags & TCF_IN_FUNCTION))
op = (JS_LIKELY(pn->pn_count < JS_BIT(16)) && cg->inFunction())
? JSOP_NEWARRAY
: JSOP_NEWINIT;
@ -6745,7 +6758,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
* and lastIndex sharing, select JSOP_REGEXP.
*/
JS_ASSERT(pn->pn_op == JSOP_REGEXP);
bool singleton = !cg->fun && (cg->flags & TCF_COMPILE_N_GO);
bool singleton = !cg->fun && cg->compileAndGo();
if (singleton) {
for (JSStmtInfo *stmt = cg->topStmt; stmt; stmt = stmt->down) {
if (STMT_IS_LOOP(stmt)) {

View file

@ -163,8 +163,102 @@ struct JSStmtInfo {
# define JS_SCOPE_DEPTH_METERING_IF(code, x) ((void) 0)
#endif
#define TCF_COMPILING 0x01 /* JSTreeContext is JSCodeGenerator */
#define TCF_IN_FUNCTION 0x02 /* parsing inside function body */
#define TCF_RETURN_EXPR 0x04 /* function has 'return expr;' */
#define TCF_RETURN_VOID 0x08 /* function has 'return;' */
#define TCF_IN_FOR_INIT 0x10 /* parsing init expr of for; exclude 'in' */
#define TCF_FUN_SETS_OUTER_NAME 0x20 /* function set outer name (lexical or free) */
#define TCF_FUN_PARAM_ARGUMENTS 0x40 /* function has parameter named arguments */
#define TCF_FUN_USES_ARGUMENTS 0x80 /* function uses arguments except as a
parameter name */
#define TCF_FUN_HEAVYWEIGHT 0x100 /* function needs Call object per call */
#define TCF_FUN_IS_GENERATOR 0x200 /* parsed yield statement in function */
#define TCF_FUN_USES_OWN_NAME 0x400 /* named function expression that uses its
own name */
#define TCF_HAS_FUNCTION_STMT 0x800 /* block contains a function statement */
#define TCF_GENEXP_LAMBDA 0x1000 /* flag lambda from generator expression */
#define TCF_COMPILE_N_GO 0x2000 /* compile-and-go mode of script, can
optimize name references based on scope
chain */
#define TCF_NO_SCRIPT_RVAL 0x4000 /* API caller does not want result value
from global script */
#define TCF_HAS_SHARPS 0x8000 /* source contains sharp defs or uses */
/*
* Set when parsing a declaration-like destructuring pattern. This
* flag causes PrimaryExpr to create PN_NAME parse nodes for variable
* references which are not hooked into any definition's use chain,
* added to any tree context's AtomList, etc. etc. CheckDestructuring
* will do that work later.
*
* The comments atop CheckDestructuring explain the distinction
* between assignment-like and declaration-like destructuring
* patterns, and why they need to be treated differently.
*/
#define TCF_DECL_DESTRUCTURING 0x10000
/*
* A request flag passed to Compiler::compileScript and then down via
* JSCodeGenerator to js_NewScriptFromCG, from script_compile_sub and any
* kindred functions that need to make mutable scripts (even empty ones;
* i.e., they can't share the const JSScript::emptyScript() singleton).
*/
#define TCF_NEED_MUTABLE_SCRIPT 0x20000
/*
* This function/global/eval code body contained a Use Strict Directive. Treat
* certain strict warnings as errors, and forbid the use of 'with'. See also
* TSF_STRICT_MODE_CODE, JSScript::strictModeCode, and JSREPORT_STRICT_ERROR.
*/
#define TCF_STRICT_MODE_CODE 0x40000
/* Function has parameter named 'eval'. */
#define TCF_FUN_PARAM_EVAL 0x80000
/*
* Flag signifying that the current function seems to be a constructor that
* sets this.foo to define "methods", at least one of which can't be a null
* closure, so we should avoid over-specializing property cache entries and
* trace inlining guards to method function object identity, which will vary
* per instance.
*/
#define TCF_FUN_UNBRAND_THIS 0x100000
/*
* "Module pattern", i.e., a lambda that is immediately applied and the whole
* of an expression statement.
*/
#define TCF_FUN_MODULE_PATTERN 0x200000
/*
* Flag to prevent a non-escaping function from being optimized into a null
* closure (i.e., a closure that needs only its global object for free variable
* resolution, thanks to JSOP_{GET,CALL}UPVAR), because this function contains
* a closure that needs one or more scope objects surrounding it (i.e., Call
* object for a heavyweight outer function). See bug 560234.
*/
#define TCF_FUN_ENTRAINS_SCOPES 0x400000
/*
* Flags to check for return; vs. return expr; in a function.
*/
#define TCF_RETURN_FLAGS (TCF_RETURN_EXPR | TCF_RETURN_VOID)
/*
* Sticky deoptimization flags to propagate from FunctionBody.
*/
#define TCF_FUN_FLAGS (TCF_FUN_SETS_OUTER_NAME | \
TCF_FUN_USES_ARGUMENTS | \
TCF_FUN_PARAM_ARGUMENTS | \
TCF_FUN_HEAVYWEIGHT | \
TCF_FUN_IS_GENERATOR | \
TCF_FUN_USES_OWN_NAME | \
TCF_HAS_SHARPS | \
TCF_STRICT_MODE_CODE)
struct JSTreeContext { /* tree context for semantic checks */
uint32 flags; /* statement state flags, see below */
uint32 flags; /* statement state flags, see above */
uint16 ngvars; /* max. no. of global variables/regexps */
uint32 bodyid; /* block number of program/function body */
uint32 blockidGen; /* preincremented block number generator */
@ -243,93 +337,12 @@ struct JSTreeContext { /* tree context for semantic checks */
// (going upward) from this context's lexical scope. Always return true if
// this context is itself a generator.
bool skipSpansGenerator(unsigned skip);
bool compileAndGo() { return !!(flags & TCF_COMPILE_N_GO); }
bool inFunction() { return !!(flags & TCF_IN_FUNCTION); }
bool compiling() { return !!(flags & TCF_COMPILING); }
};
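The new predicates wrap raw flag tests and read better at call sites. An illustrative before/after, drawn from the LookupCompileTimeConstant change earlier in this patch (note the old bitmask form tested either flag, while the new form requires both):

/* Before: true if EITHER flag is set. */
if (cg->flags & (TCF_IN_FUNCTION | TCF_COMPILE_N_GO)) { /* ... */ }

/* After: intent is explicit, and both conditions are required. */
if (cg->inFunction() && cg->compileAndGo()) { /* ... */ }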
#define TCF_COMPILING 0x01 /* JSTreeContext is JSCodeGenerator */
#define TCF_IN_FUNCTION 0x02 /* parsing inside function body */
#define TCF_RETURN_EXPR 0x04 /* function has 'return expr;' */
#define TCF_RETURN_VOID 0x08 /* function has 'return;' */
#define TCF_IN_FOR_INIT 0x10 /* parsing init expr of for; exclude 'in' */
#define TCF_FUN_SETS_OUTER_NAME 0x20 /* function set outer name (lexical or free) */
#define TCF_FUN_PARAM_ARGUMENTS 0x40 /* function has parameter named arguments */
#define TCF_FUN_USES_ARGUMENTS 0x80 /* function uses arguments except as a
parameter name */
#define TCF_FUN_HEAVYWEIGHT 0x100 /* function needs Call object per call */
#define TCF_FUN_IS_GENERATOR 0x200 /* parsed yield statement in function */
#define TCF_FUN_USES_OWN_NAME 0x400 /* named function expression that uses its
own name */
#define TCF_HAS_FUNCTION_STMT 0x800 /* block contains a function statement */
#define TCF_GENEXP_LAMBDA 0x1000 /* flag lambda from generator expression */
#define TCF_COMPILE_N_GO 0x2000 /* compile-and-go mode of script, can
optimize name references based on scope
chain */
#define TCF_NO_SCRIPT_RVAL 0x4000 /* API caller does not want result value
from global script */
#define TCF_HAS_SHARPS 0x8000 /* source contains sharp defs or uses */
/*
* Set when parsing a declaration-like destructuring pattern. This
* flag causes PrimaryExpr to create PN_NAME parse nodes for variable
* references which are not hooked into any definition's use chain,
* added to any tree context's AtomList, etc. etc. CheckDestructuring
* will do that work later.
*
* The comments atop CheckDestructuring explain the distinction
* between assignment-like and declaration-like destructuring
* patterns, and why they need to be treated differently.
*/
#define TCF_DECL_DESTRUCTURING 0x10000
/*
* A request flag passed to Compiler::compileScript and then down via
* JSCodeGenerator to js_NewScriptFromCG, from script_compile_sub and any
* kindred functions that need to make mutable scripts (even empty ones;
* i.e., they can't share the const JSScript::emptyScript() singleton).
*/
#define TCF_NEED_MUTABLE_SCRIPT 0x20000
/*
* This function/global/eval code body contained a Use Strict Directive. Treat
* certain strict warnings as errors, and forbid the use of 'with'. See also
* TSF_STRICT_MODE_CODE, JSScript::strictModeCode, and JSREPORT_STRICT_ERROR.
*/
#define TCF_STRICT_MODE_CODE 0x40000
/* Function has parameter named 'eval'. */
#define TCF_FUN_PARAM_EVAL 0x80000
/*
* Flag signifying that the current function seems to be a constructor that
* sets this.foo to define "methods", at least one of which can't be a null
* closure, so we should avoid over-specializing property cache entries and
* trace inlining guards to method function object identity, which will vary
* per instance.
*/
#define TCF_FUN_UNBRAND_THIS 0x100000
/*
* "Module pattern", i.e., a lambda that is immediately applied and the whole
* of an expression statement.
*/
#define TCF_FUN_MODULE_PATTERN 0x200000
/*
* Flags to check for return; vs. return expr; in a function.
*/
#define TCF_RETURN_FLAGS (TCF_RETURN_EXPR | TCF_RETURN_VOID)
/*
* Sticky deoptimization flags to propagate from FunctionBody.
*/
#define TCF_FUN_FLAGS (TCF_FUN_SETS_OUTER_NAME | \
TCF_FUN_USES_ARGUMENTS | \
TCF_FUN_PARAM_ARGUMENTS | \
TCF_FUN_HEAVYWEIGHT | \
TCF_FUN_IS_GENERATOR | \
TCF_FUN_USES_OWN_NAME | \
TCF_HAS_SHARPS | \
TCF_STRICT_MODE_CODE)
/*
* Return true if we need to check for conditions that elicit
* JSOPTION_STRICT warnings or strict mode errors.
@ -455,7 +468,9 @@ struct JSCodeGenerator : public JSTreeContext
uintN arrayCompDepth; /* stack depth of array in comprehension */
uintN emitLevel; /* js_EmitTree recursion level */
JSAtomList constList; /* compile time constants */
typedef js::HashMap<JSAtom *, jsval> ConstMap;
ConstMap constMap; /* compile time constants */
JSCGObjectList objectList; /* list of emitted objects */
JSCGObjectList regexpList; /* list of emitted regexp that will be
@ -473,6 +488,8 @@ struct JSCodeGenerator : public JSTreeContext
JSArenaPool *codePool, JSArenaPool *notePool,
uintN lineno);
bool init();
/*
* Release cg->codePool, cg->notePool, and parser->context->tempPool to
* marks set by JSCodeGenerator's ctor. Note that cgs are magic: they own

View file

@ -137,7 +137,7 @@ js_GetArgsProperty(JSContext *cx, JSStackFrame *fp, jsid id, jsval *vp)
JSObject *argsobj = JSVAL_TO_OBJECT(fp->argsobj);
if (arg < fp->argc) {
if (argsobj) {
jsval v = GetArgsSlot(argsobj, arg);
jsval v = argsobj->getArgsElement(arg);
if (v == JSVAL_HOLE)
return argsobj->getProperty(cx, id, vp);
}
@ -197,9 +197,9 @@ PutArguments(JSContext *cx, JSObject *argsobj, jsval *args)
{
uint32 argc = argsobj->getArgsLength();
for (uint32 i = 0; i != argc; ++i) {
jsval v = argsobj->dslots[i];
jsval v = argsobj->getArgsElement(i);
if (v != JSVAL_HOLE)
argsobj->dslots[i] = args[i];
argsobj->setArgsElement(i, args[i]);
}
}
@ -301,7 +301,7 @@ args_delProperty(JSContext *cx, JSObject *obj, jsval idval, jsval *vp)
if (JSVAL_IS_INT(idval)) {
uintN arg = uintN(JSVAL_TO_INT(idval));
if (arg < obj->getArgsLength())
SetArgsSlot(obj, arg, JSVAL_HOLE);
obj->setArgsElement(arg, JSVAL_HOLE);
} else if (idval == ATOM_KEY(cx->runtime->atomState.lengthAtom)) {
obj->setArgsLengthOverridden();
} else if (idval == ATOM_KEY(cx->runtime->atomState.calleeAtom)) {
@ -508,7 +508,7 @@ ArgGetter(JSContext *cx, JSObject *obj, jsval idval, jsval *vp)
if (fp) {
*vp = fp->argv[arg];
} else {
jsval v = GetArgsSlot(obj, arg);
jsval v = obj->getArgsElement(arg);
if (v != JSVAL_HOLE)
*vp = v;
}
@ -595,7 +595,7 @@ args_resolve(JSContext *cx, JSObject *obj, jsval idval, uintN flags,
jsid id = 0;
if (JSVAL_IS_INT(idval)) {
uint32 arg = uint32(JSVAL_TO_INT(idval));
if (arg < obj->getArgsLength() && GetArgsSlot(obj, arg) != JSVAL_HOLE)
if (arg < obj->getArgsLength() && obj->getArgsElement(arg) != JSVAL_HOLE)
id = INT_JSVAL_TO_JSID(idval);
} else if (idval == ATOM_KEY(cx->runtime->atomState.lengthAtom)) {
if (!obj->isArgsLengthOverridden())
@ -2040,19 +2040,19 @@ js_fun_apply(JSContext *cx, uintN argc, jsval *vp)
/*
* Two cases, two loops: note how in the case of an active stack frame
* backing aobj, even though we copy from fp->argv, we still must check
* aobj->dslots[i] for a hole, to handle a delete on the corresponding
* arguments element. See args_delProperty.
* aobj->getArgsElement(i) for a hole, to handle a delete on the
* corresponding arguments element. See args_delProperty.
*/
JSStackFrame *fp = (JSStackFrame *) aobj->getPrivate();
if (fp) {
memcpy(sp, fp->argv, argc * sizeof(jsval));
for (i = 0; i < argc; i++) {
if (aobj->dslots[i] == JSVAL_HOLE) // suppress deleted element
if (aobj->getArgsElement(i) == JSVAL_HOLE) // suppress deleted element
sp[i] = JSVAL_VOID;
}
} else {
memcpy(sp, aobj->dslots, argc * sizeof(jsval));
for (i = 0; i < argc; i++) {
sp[i] = aobj->getArgsElement(i);
if (sp[i] == JSVAL_HOLE)
sp[i] = JSVAL_VOID;
}
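The getArgsElement/setArgsElement calls above replace direct dslots indexing. Judging from the GetArgsSlot/SetArgsSlot helpers this patch removes from jsfun.h in the next hunk, their shape is presumably along these lines (a sketch, not the actual definitions):

/* Sketch only: JSVAL_HOLE in a slot marks an arguments element that was
 * deleted via args_delProperty. */
jsval JSObject::getArgsElement(uint32 arg) const { return dslots[arg]; }
void  JSObject::setArgsElement(uint32 arg, jsval v) { dslots[arg] = v; }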

View file

@ -429,22 +429,6 @@ const uint32 JS_ARGS_LENGTH_MAX = JS_BIT(24) - 1;
JS_STATIC_ASSERT(JS_ARGS_LENGTH_MAX <= JS_BIT(30));
JS_STATIC_ASSERT(jsval((JS_ARGS_LENGTH_MAX << 1) | 1) <= JSVAL_INT_MAX);
namespace js {
inline jsval
GetArgsSlot(JSObject *argsobj, uint32 arg)
{
return argsobj->dslots[arg];
}
inline void
SetArgsSlot(JSObject *argsobj, uint32 arg, jsval v)
{
argsobj->dslots[arg] = v;
}
} /* namespace js */
extern JSBool
js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp);

View file

@ -84,10 +84,7 @@
#include "jsxml.h"
#endif
#ifdef INCLUDE_MOZILLA_DTRACE
#include "jsdtracef.h"
#endif
#include "jscntxtinlines.h"
#include "jsobjinlines.h"
@ -937,6 +934,11 @@ js_InitGC(JSRuntime *rt, uint32 maxbytes)
return false;
}
#ifdef JS_THREADSAFE
if (!rt->gcHelperThread.init())
return false;
#endif
/*
* Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
* for default backward API compatibility.
@ -1151,6 +1153,9 @@ js_FinishGC(JSRuntime *rt)
js_DumpGCStats(rt, stdout);
#endif
#ifdef JS_THREADSAFE
rt->gcHelperThread.cancel();
#endif
FinishGCArenaLists(rt);
if (rt->gcRootsHash.ops) {
@ -1408,7 +1413,7 @@ LastDitchGC(JSContext *cx)
JS_ASSERT(!JS_ON_TRACE(cx));
/* The last ditch GC preserves weak roots and all atoms. */
AutoSaveRestoreWeakRoots save(cx);
AutoPreserveWeakRoots save(cx);
AutoKeepAtoms keep(cx->runtime);
/*
@ -2512,10 +2517,7 @@ FinalizeObject(JSContext *cx, JSObject *obj, unsigned thingKind)
if (clasp->finalize)
clasp->finalize(cx, obj);
#ifdef INCLUDE_MOZILLA_DTRACE
if (JAVASCRIPT_OBJECT_FINALIZE_ENABLED())
jsdtrace_object_finalize(obj);
#endif
DTrace::finalizeObject(obj);
if (JS_LIKELY(obj->isNative())) {
JSScope *scope = obj->scope();
@ -2869,6 +2871,49 @@ SweepDoubles(JSRuntime *rt)
rt->gcDoubleArenaList.cursor = rt->gcDoubleArenaList.head;
}
#ifdef JS_THREADSAFE
namespace js {
JS_FRIEND_API(void)
BackgroundSweepTask::replenishAndFreeLater(void *ptr)
{
JS_ASSERT(freeCursor == freeCursorEnd);
do {
if (freeCursor && !freeVector.append(freeCursorEnd - FREE_ARRAY_LENGTH))
break;
freeCursor = (void **) js_malloc(FREE_ARRAY_SIZE);
if (!freeCursor) {
freeCursorEnd = NULL;
break;
}
freeCursorEnd = freeCursor + FREE_ARRAY_LENGTH;
*freeCursor++ = ptr;
return;
} while (false);
js_free(ptr);
}
void
BackgroundSweepTask::run()
{
if (freeCursor) {
void **array = freeCursorEnd - FREE_ARRAY_LENGTH;
freeElementsAndArray(array, freeCursor);
freeCursor = freeCursorEnd = NULL;
} else {
JS_ASSERT(!freeCursorEnd);
}
for (void ***iter = freeVector.begin(); iter != freeVector.end(); ++iter) {
void **array = *iter;
freeElementsAndArray(array, array + FREE_ARRAY_LENGTH);
}
}
}
#endif /* JS_THREADSAFE */
/*
* Common cache invalidation and so forth that must be done before GC. Even if
* GCUntilDone calls GC several times, this work only needs to be done once.
@ -2891,10 +2936,6 @@ PreGCCleanup(JSContext *cx, JSGCInvocationKind gckind)
}
#endif
#ifdef JS_TRACER
PurgeJITOracle();
#endif
/*
* Reset the property cache's type id generator so we can compress ids.
* Same for the protoHazardShape proxy-shape standing in for all object
@ -2978,7 +3019,9 @@ GC(JSContext *cx GCTIMER_PARAM)
rt->gcMarkingTracer = NULL;
#ifdef JS_THREADSAFE
cx->createDeallocatorTask();
JS_ASSERT(!cx->gcSweepTask);
if (!rt->gcHelperThread.busy())
cx->gcSweepTask = new js::BackgroundSweepTask();
#endif
/*
@ -3071,7 +3114,10 @@ GC(JSContext *cx GCTIMER_PARAM)
TIMESTAMP(sweepDestroyEnd);
#ifdef JS_THREADSAFE
cx->submitDeallocatorTask();
if (cx->gcSweepTask) {
rt->gcHelperThread.schedule(cx->gcSweepTask);
cx->gcSweepTask = NULL;
}
#endif
if (rt->gcCallback)
@ -3359,7 +3405,7 @@ FireGCEnd(JSContext *cx, JSGCInvocationKind gckind)
* interlock mechanism here.
*/
if (gckind != GC_SET_SLOT_REQUEST && callback) {
Conditionally<AutoUnlockGC> unlockIf(gckind & GC_LOCK_HELD, rt);
Conditionally<AutoUnlockGC> unlockIf(!!(gckind & GC_LOCK_HELD), rt);
(void) callback(cx, JSGC_END);

View file

@ -48,6 +48,7 @@
#include "jsbit.h"
#include "jsutil.h"
#include "jstask.h"
#include "jsvector.h"
#include "jsversion.h"
JS_BEGIN_EXTERN_C
@ -361,25 +362,52 @@ struct JSWeakRoots {
#define JS_CLEAR_WEAK_ROOTS(wr) (memset((wr), 0, sizeof(JSWeakRoots)))
#ifdef JS_THREADSAFE
class JSFreePointerListTask : public JSBackgroundTask {
void *head;
namespace js {
/*
* During finalization we do not free memory immediately. Instead we add the
* corresponding pointers to a buffer that we later release on the
* background thread.
*
* The buffer is implemented as a vector of 64K arrays of pointers, not as a
* simple vector, to avoid realloc calls during vector growth and to keep the
* inlined freeLater method from bloating the binary. Any OOM during buffer
* growth results in the pointer being freed immediately.
*/
class BackgroundSweepTask : public JSBackgroundTask {
static const size_t FREE_ARRAY_SIZE = size_t(1) << 16;
static const size_t FREE_ARRAY_LENGTH = FREE_ARRAY_SIZE / sizeof(void *);
Vector<void **, 16, js::SystemAllocPolicy> freeVector;
void **freeCursor;
void **freeCursorEnd;
JS_FRIEND_API(void)
replenishAndFreeLater(void *ptr);
static void freeElementsAndArray(void **array, void **end) {
JS_ASSERT(array <= end);
for (void **p = array; p != end; ++p)
js_free(*p);
js_free(array);
}
public:
JSFreePointerListTask() : head(NULL) {}
BackgroundSweepTask()
: freeCursor(NULL), freeCursorEnd(NULL) { }
void add(void* ptr) {
*(void**)ptr = head;
head = ptr;
void freeLater(void* ptr) {
if (freeCursor != freeCursorEnd)
*freeCursor++ = ptr;
else
replenishAndFreeLater(ptr);
}
void run() {
void *ptr = head;
while (ptr) {
void *next = *(void **)ptr;
js_free(ptr);
ptr = next;
}
}
virtual void run();
};
}
#endif
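A sketch of how sweeping code hands memory to this task, matching the cx->gcSweepTask plumbing added to jsgc.cpp earlier in this patch; the finalizer itself is hypothetical:

/* Hypothetical finalizer: batch the free when a sweep task is active. */
static void
FinalizePayload(JSContext *cx, void *payload)
{
    if (cx->gcSweepTask)
        cx->gcSweepTask->freeLater(payload); /* released off-thread later */
    else
        js_free(payload);                    /* no helper: free right away */
}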
extern void

View file

@ -203,7 +203,7 @@ MapAlignedPages(size_t size, size_t alignment)
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
void *p = mmap((void *) alignment, size, PROT_READ | PROT_WRITE,
void *p = mmap((caddr_t) alignment, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_NOSYNC | MAP_ALIGN | MAP_ANON, -1, 0);
if (p == MAP_FAILED)
return NULL;
@ -236,7 +236,7 @@ MapPages(void *addr, size_t size)
static void
UnmapPages(void *addr, size_t size)
{
JS_ALWAYS_TRUE(munmap(addr, size) == 0);
JS_ALWAYS_TRUE(munmap((caddr_t) addr, size) == 0);
}
#endif
@ -256,7 +256,7 @@ AllocGCChunk()
{
void *p;
#if JS_GC_HAS_MAP_ALIGN
#ifdef JS_GC_HAS_MAP_ALIGN
p = MapAlignedPages(GC_CHUNK_SIZE, GC_CHUNK_SIZE);
if (!p)
return NULL;

View file

@ -81,10 +81,7 @@
#include "jsscopeinlines.h"
#include "jsscriptinlines.h"
#include "jsstrinlines.h"
#ifdef INCLUDE_MOZILLA_DTRACE
#include "jsdtracef.h"
#endif
#if JS_HAS_XML_SUPPORT
#include "jsxml.h"
@ -814,15 +811,7 @@ js_Invoke(JSContext *cx, uintN argc, jsval *vp, uintN flags)
if (hook)
hookData = hook(cx, &frame, JS_TRUE, 0, cx->debugHooks->callHookData);
#ifdef INCLUDE_MOZILLA_DTRACE
/* DTrace function entry, non-inlines */
if (JAVASCRIPT_FUNCTION_ENTRY_ENABLED())
jsdtrace_function_entry(cx, &frame, fun);
if (JAVASCRIPT_FUNCTION_INFO_ENABLED())
jsdtrace_function_info(cx, &frame, frame.down, fun);
if (JAVASCRIPT_FUNCTION_ARGS_ENABLED())
jsdtrace_function_args(cx, &frame, fun, frame.argc, frame.argv);
#endif
DTrace::enterJSFun(cx, &frame, fun, frame.down, frame.argc, frame.argv);
/* Call the function, either a native method or an interpreted script. */
if (native) {
@ -842,13 +831,7 @@ js_Invoke(JSContext *cx, uintN argc, jsval *vp, uintN flags)
ok = js_Interpret(cx);
}
#ifdef INCLUDE_MOZILLA_DTRACE
/* DTrace function return, non-inlines */
if (JAVASCRIPT_FUNCTION_RVAL_ENABLED())
jsdtrace_function_rval(cx, &frame, fun, &frame.rval);
if (JAVASCRIPT_FUNCTION_RETURN_ENABLED())
jsdtrace_function_return(cx, &frame, fun);
#endif
DTrace::exitJSFun(cx, &frame, fun, frame.rval);
out:
if (hookData) {
@ -948,20 +931,7 @@ js_Execute(JSContext *cx, JSObject *chain, JSScript *script,
LeaveTrace(cx);
#ifdef INCLUDE_MOZILLA_DTRACE
struct JSDNotifyGuard {
JSScript *script;
JSDNotifyGuard(JSScript *s) : script(s) {
if (JAVASCRIPT_EXECUTE_START_ENABLED())
jsdtrace_execute_start(script);
}
~JSDNotifyGuard() {
if (JAVASCRIPT_EXECUTE_DONE_ENABLED())
jsdtrace_execute_done(script);
}
} jsdNotifyGuard(script);
#endif
DTrace::ExecutionScope executionScope(script);
JSInterpreterHook hook = cx->debugHooks->executeHook;
void *hookData = NULL;
@ -2464,19 +2434,21 @@ js_Interpret(JSContext *cx)
atoms = FrameAtomBase(cx, fp); \
currentVersion = (JSVersion) script->version; \
JS_ASSERT(fp->regs == &regs); \
if (cx->throwing) \
goto error; \
JS_END_MACRO
#define MONITOR_BRANCH(reason) \
JS_BEGIN_MACRO \
if (TRACING_ENABLED(cx)) { \
if (MonitorLoopEdge(cx, inlineCallCount, reason)) { \
MonitorResult r = MonitorLoopEdge(cx, inlineCallCount, reason); \
if (r == MONITOR_RECORDING) { \
JS_ASSERT(TRACE_RECORDER(cx)); \
MONITOR_BRANCH_TRACEVIS; \
ENABLE_INTERRUPTS(); \
} \
RESTORE_INTERP_VARS(); \
JS_ASSERT_IF(cx->throwing, r == MONITOR_ERROR); \
if (r == MONITOR_ERROR) \
goto error; \
} \
JS_END_MACRO
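MonitorLoopEdge now returns a tri-state MonitorResult instead of a bool. Only two of its values appear in this hunk; the third is presumed here, so treat this enum as a sketch:

enum MonitorResult {
    MONITOR_RECORDING,     /* a trace recorder started: enable interrupts */
    MONITOR_NOT_RECORDING, /* presumed: keep interpreting normally */
    MONITOR_ERROR          /* cx->throwing is set: goto error */
};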
@ -2547,7 +2519,7 @@ js_Interpret(JSContext *cx)
# define CHECK_INTERRUPT_HANDLER() \
JS_BEGIN_MACRO \
if (cx->debugHooks->interruptHandler) \
if (cx->debugHooks->interruptHook) \
ENABLE_INTERRUPTS(); \
JS_END_MACRO
@ -2707,7 +2679,7 @@ js_Interpret(JSContext *cx)
/* This is an error, not a catchable exception, quit the frame ASAP. */
ok = JS_FALSE;
} else {
JSTrapHandler handler;
JSThrowHook handler;
JSTryNote *tn, *tnlimit;
uint32 offset;

View file

@ -1413,13 +1413,14 @@ js_IsTitleLocked(JSContext *cx, JSTitle *title)
return JS_TRUE;
/*
* General case: the title is either exclusively owned (by cx), or it has
* a thin or fat lock to cope with shared (concurrent) ownership.
* General case: the title is either exclusively owned by some context, or
* it has a thin or fat lock to cope with shared (concurrent) ownership.
*
* js_LockTitle(cx, title) must set ownercx to cx when claiming the title
* from another context on the same thread.
*/
if (title->ownercx) {
JS_ASSERT(title->ownercx == cx || title->ownercx->thread == cx->thread);
return JS_TRUE;
}
if (title->ownercx)
return title->ownercx == cx;
return js_CurrentThreadId() ==
((JSThread *)Thin_RemoveWait(ReadWord(title->lock.owner)))->id;
}

View file

@ -885,12 +885,18 @@ js_NumberToStringWithBase(JSContext *cx, jsdouble d, jsint base)
return JSString::unitString(jschar('a' + i - 10));
}
}
JSThreadData *data = JS_THREAD_DATA(cx);
if (data->dtoaCache.s && data->dtoaCache.base == base && data->dtoaCache.d == d)
return data->dtoaCache.s;
numStr = NumberToCString(cx, d, base, buf, sizeof buf);
if (!numStr)
return NULL;
s = JS_NewStringCopyZ(cx, numStr);
if (!(numStr >= buf && numStr < buf + sizeof buf))
js_free(numStr);
data->dtoaCache.base = base;
data->dtoaCache.d = d;
data->dtoaCache.s = s;
return s;
}
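The hunk above adds a one-entry memo of the most recent conversion. The per-thread cache it reads and writes lives on JSThreadData; its presumed shape, reconstructed from the uses above:

struct DtoaCache {
    jsdouble  d;    /* last number converted */
    jsint     base; /* radix of that conversion */
    JSString *s;    /* cached result string, or null while cold */
};
/* Hit: s is non-null and (d, base) match the request; reuse s.
 * Miss: convert, then overwrite all three fields. */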

View file

@ -442,6 +442,127 @@ js_DoubleToECMAInt32(jsdouble d)
}
return int32(du.d);
#elif defined (__arm__) && defined (__GNUC__)
int32_t i;
uint32_t tmp0;
uint32_t tmp1;
uint32_t tmp2;
asm (
// We use a pure integer solution here. In the 'softfp' ABI, the argument
// will start in r0 and r1, and VFP can't do all of the necessary ECMA
// conversions by itself so some integer code will be required anyway. A
// hybrid solution is faster on A9, but this pure integer solution is
// notably faster for A8.
// %0 is the result register, and may alias either of the %[QR]4 registers.
// %Q4 holds the lower part of the mantissa.
// %R4 holds the sign, exponent, and the upper part of the mantissa.
// %1, %2 and %3 are used as temporary values.
// Extract the exponent.
" mov %1, %R4, LSR #20\n"
" bic %1, %1, #(1 << 11)\n" // Clear the sign.
// Set the implicit top bit of the mantissa. This clobbers a bit of the
// exponent, but we have already extracted that.
" orr %R4, %R4, #(1 << 20)\n"
// Special Cases
// We should return zero in the following special cases:
// - Exponent is 0x000 - 1023: +/-0 or subnormal.
// - Exponent is 0x7ff - 1023: +/-INFINITY or NaN
// - This case is implicitly handled by the standard code path anyway,
// as shifting the mantissa up by the exponent will result in '0'.
//
// The result is composed of the mantissa, prepended with '1' and
// bit-shifted left by the (decoded) exponent. Note that because the r1[20]
// is the bit with value '1', r1 is effectively already shifted (left) by
// 20 bits, and r0 is already shifted by 52 bits.
// Adjust the exponent to remove the encoding offset. If the decoded
// exponent is negative, quickly bail out with '0' as such values round to
// zero anyway. This also catches +/-0 and subnormals.
" sub %1, %1, #0xff\n"
" subs %1, %1, #0x300\n"
" bmi 8f\n"
// %1 = (decoded) exponent >= 0
// %R4 = upper mantissa and sign
// ---- Lower Mantissa ----
" subs %3, %1, #52\n" // Calculate exp-52
" bmi 1f\n"
// Shift r0 left by exp-52.
// Ensure that we don't overflow ARM's 8-bit shift operand range.
// We need to handle anything up to an 11-bit value here as we know that
// 52 <= exp <= 1024 (0x400). Any shift beyond 31 bits results in zero
// anyway, so as long as we don't touch the bottom 5 bits, we can use
// a logical OR to push long shifts into the 32 <= (exp&0xff) <= 255 range.
" bic %2, %3, #0xff\n"
" orr %3, %3, %2, LSR #3\n"
// We can now perform a straight shift, avoiding the need for any
// conditional instructions or extra branches.
" mov %Q4, %Q4, LSL %3\n"
" b 2f\n"
"1:\n" // Shift r0 right by 52-exp.
// We know that 0 <= exp < 52, and we can shift up to 255 bits so 52-exp
// will always be a valid shift and we can skip the range check for this case.
" rsb %3, %1, #52\n"
" mov %Q4, %Q4, LSR %3\n"
// %1 = (decoded) exponent
// %R4 = upper mantissa and sign
// %Q4 = partially-converted integer
"2:\n"
// ---- Upper Mantissa ----
// This is much the same as the lower mantissa, with a few different
// boundary checks and some masking to hide the exponent & sign bit in the
// upper word.
// Note that the upper mantissa is pre-shifted by 20 in %R4, but we shift
// it left more to remove the sign and exponent so it is effectively
// pre-shifted by 31 bits.
" subs %3, %1, #31\n" // Calculate exp-31
" mov %1, %R4, LSL #11\n" // Re-use %1 as a temporary register.
" bmi 3f\n"
// Shift %R4 left by exp-31.
// Avoid overflowing the 8-bit shift range, as before.
" bic %2, %3, #0xff\n"
" orr %3, %3, %2, LSR #3\n"
// Perform the shift.
" mov %2, %1, LSL %3\n"
" b 4f\n"
"3:\n" // Shift r1 right by 31-exp.
// We know that 0 <= exp < 31, and we can shift up to 255 bits so 31-exp
// will always be a valid shift and we can skip the range check for this case.
" rsb %3, %3, #0\n" // Calculate 31-exp from -(exp-31)
" mov %2, %1, LSR %3\n" // Thumb-2 can't do "LSR %3" in "orr".
// %Q4 = partially-converted integer (lower)
// %R4 = upper mantissa and sign
// %2 = partially-converted integer (upper)
"4:\n"
// Combine the converted parts.
" orr %Q4, %Q4, %2\n"
// Negate the result if we have to, and move it to %0 in the process. To
// avoid conditionals, we can do this by inverting on %R4[31], then adding
// %R4[31]>>31.
" eor %Q4, %Q4, %R4, ASR #31\n"
" add %0, %Q4, %R4, LSR #31\n"
" b 9f\n"
"8:\n"
// +/-INFINITY, +/-0, subnormals, NaNs, and anything else out-of-range that
// will result in a conversion of '0'.
" mov %0, #0\n"
"9:\n"
: "=r" (i), "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
: "r" (d)
: "cc"
);
return i;
#else
int32 i;
jsdouble two32, two31;
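For readers without ARM assembly, here is a portable C sketch of the pure-integer ECMA ToInt32 algorithm that the assembly above implements. It is an illustration of the technique, not code from the patch:

#include <stdint.h>
#include <string.h>

static int32_t
DoubleToECMAInt32Sketch(double d)
{
    uint64_t bits;
    memcpy(&bits, &d, sizeof bits);            /* reinterpret IEEE-754 bits */

    int32_t exp = (int32_t) ((bits >> 52) & 0x7ff) - 1023;   /* unbias */

    /* exp < 0 covers +/-0, subnormals, and |d| < 1, which all convert to 0.
     * exp > 83 means every mantissa bit shifts past bit 31; this also covers
     * +/-Infinity and NaN (biased exponent 0x7ff), which ECMA likewise maps
     * to 0. */
    if (exp < 0 || exp > 83)
        return 0;

    uint64_t mant = (bits & 0x000fffffffffffffULL) | (1ULL << 52);
    uint32_t lo = (exp <= 52)
                  ? (uint32_t) (mant >> (52 - exp))   /* truncate fraction */
                  : (uint32_t) (mant << (exp - 52));  /* low 32 bits mod 2^32 */

    /* Negate modulo 2^32 when the sign bit is set. */
    return (int32_t) ((bits >> 63) ? 0u - lo : lo);
}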

View file

@ -93,10 +93,7 @@
#include "jsxdrapi.h"
#endif
#ifdef INCLUDE_MOZILLA_DTRACE
#include "jsdtracef.h"
#endif
#include "jsatominlines.h"
#include "jsobjinlines.h"
#include "jsscriptinlines.h"
@ -1012,14 +1009,6 @@ obj_valueOf(JSContext *cx, uintN argc, jsval *vp)
return !JSVAL_IS_NULL(*vp);
}
#ifdef JS_TRACER
static jsval FASTCALL
Object_p_valueOf(JSContext* cx, JSObject* obj, JSString *hint)
{
return OBJECT_TO_JSVAL(obj);
}
#endif
/*
* Check if CSP allows new Function() or eval() to run in the current
* principals.
@ -1633,26 +1622,6 @@ js_HasOwnProperty(JSContext *cx, JSLookupPropOp lookup, JSObject *obj, jsid id,
return true;
}
#ifdef JS_TRACER
static JSBool FASTCALL
Object_p_hasOwnProperty(JSContext* cx, JSObject* obj, JSString *str)
{
jsid id;
JSObject *pobj;
JSProperty *prop;
if (!js_ValueToStringId(cx, STRING_TO_JSVAL(str), &id) ||
!js_HasOwnProperty(cx, obj->map->ops->lookupProperty, obj, id, &pobj, &prop)) {
SetBuiltinError(cx);
return JSVAL_TO_BOOLEAN(JSVAL_VOID);
}
if (prop)
pobj->dropProperty(cx, prop);
return !!prop;
}
#endif
/* Proposed ECMA 15.2.4.6. */
static JSBool
obj_isPrototypeOf(JSContext *cx, uintN argc, jsval *vp)
@ -1681,23 +1650,6 @@ obj_propertyIsEnumerable(JSContext *cx, uintN argc, jsval *vp)
return obj && js_PropertyIsEnumerable(cx, obj, id, vp);
}
#ifdef JS_TRACER
static JSBool FASTCALL
Object_p_propertyIsEnumerable(JSContext* cx, JSObject* obj, JSString *str)
{
jsid id = ATOM_TO_JSID(STRING_TO_JSVAL(str));
jsval v;
if (!js_PropertyIsEnumerable(cx, obj, id, &v)) {
SetBuiltinError(cx);
return JSVAL_TO_BOOLEAN(JSVAL_VOID);
}
JS_ASSERT(JSVAL_IS_BOOLEAN(v));
return JSVAL_TO_BOOLEAN(v);
}
#endif
JSBool
js_PropertyIsEnumerable(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
{
@ -1776,7 +1728,7 @@ js_obj_defineGetter(JSContext *cx, uintN argc, jsval *vp)
return JS_FALSE;
*vp = JSVAL_VOID;
return obj->defineProperty(cx, id, JSVAL_VOID,
js_CastAsPropertyOp(JSVAL_TO_OBJECT(fval)), JS_PropertyStub,
CastAsPropertyOp(JSVAL_TO_OBJECT(fval)), JS_PropertyStub,
JSPROP_ENUMERATE | JSPROP_GETTER | JSPROP_SHARED);
}
@ -1809,7 +1761,7 @@ js_obj_defineSetter(JSContext *cx, uintN argc, jsval *vp)
return JS_FALSE;
*vp = JSVAL_VOID;
return obj->defineProperty(cx, id, JSVAL_VOID,
JS_PropertyStub, js_CastAsPropertyOp(JSVAL_TO_OBJECT(fval)),
JS_PropertyStub, CastAsPropertyOp(JSVAL_TO_OBJECT(fval)),
JSPROP_ENUMERATE | JSPROP_SETTER | JSPROP_SHARED);
}
@ -2001,7 +1953,7 @@ obj_keys(JSContext *cx, uintN argc, jsval *vp)
jsval *slots = aobj->dslots;
size_t len = ida.length();
JS_ASSERT(js_DenseArrayCapacity(aobj) >= len);
JS_ASSERT(aobj->getDenseArrayCapacity() >= len);
for (size_t i = 0; i < len; i++) {
jsid id = ida[i];
if (JSID_IS_INT(id)) {
@ -2020,7 +1972,7 @@ obj_keys(JSContext *cx, uintN argc, jsval *vp)
}
JS_ASSERT(len <= UINT32_MAX);
aobj->setArrayCount(len);
aobj->setDenseArrayCount(len);
return JS_TRUE;
}
@ -2182,8 +2134,8 @@ Reject(JSContext *cx, JSObject *obj, JSProperty *prop, uintN errorNumber, bool t
}
static JSBool
DefinePropertyObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &desc,
bool throwError, bool *rval)
DefinePropertyOnObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &desc,
bool throwError, bool *rval)
{
/* 8.12.9 step 1. */
JSProperty *current;
@ -2221,9 +2173,7 @@ DefinePropertyObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &des
return JS_FALSE;
return js_DefineProperty(cx, obj, desc.id, JSVAL_VOID,
desc.getterObject() ? desc.getter() : JS_PropertyStub,
desc.setterObject() ? desc.setter() : JS_PropertyStub,
desc.attrs);
desc.getter(), desc.setter(), desc.attrs);
}
/* 8.12.9 steps 5-6 (note 5 is merely a special case of 6). */
@ -2247,13 +2197,15 @@ DefinePropertyObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &des
if (desc.hasGet &&
!js_SameValue(desc.getterValue(),
sprop->hasGetterValue() ? sprop->getterValue() : JSVAL_VOID, cx)) {
sprop->hasGetterValue() ? sprop->getterValue() : JSVAL_VOID,
cx)) {
break;
}
if (desc.hasSet &&
!js_SameValue(desc.setterValue(),
sprop->hasSetterValue() ? sprop->setterValue() : JSVAL_VOID, cx)) {
sprop->hasSetterValue() ? sprop->setterValue() : JSVAL_VOID,
cx)) {
break;
}
} else {
@ -2350,8 +2302,9 @@ DefinePropertyObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &des
return Reject(cx, obj2, current, JSMSG_CANT_REDEFINE_UNCONFIGURABLE_PROP,
throwError, desc.id, rval);
}
} else if (desc.isDataDescriptor() && sprop->isDataDescriptor()) {
} else if (desc.isDataDescriptor()) {
/* 8.12.9 step 10. */
JS_ASSERT(sprop->isDataDescriptor());
if (!sprop->configurable() && !sprop->writable()) {
if ((desc.hasWritable && desc.writable()) ||
(desc.hasValue && !js_SameValue(desc.value, v, cx))) {
@ -2369,7 +2322,8 @@ DefinePropertyObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &des
cx)) ||
(desc.hasGet &&
!js_SameValue(desc.getterValue(),
sprop->hasGetterValue() ? sprop->getterValue() : JSVAL_VOID, cx)))
sprop->hasGetterValue() ? sprop->getterValue() : JSVAL_VOID,
cx)))
{
return Reject(cx, obj2, current, JSMSG_CANT_REDEFINE_UNCONFIGURABLE_PROP,
throwError, desc.id, rval);
@ -2429,14 +2383,20 @@ DefinePropertyObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &des
changed |= JSPROP_SETTER | JSPROP_SHARED;
attrs = (desc.attrs & changed) | (sprop->attributes() & ~changed);
if (desc.hasGet)
getter = desc.getterObject() ? desc.getter() : JS_PropertyStub;
else
getter = sprop->hasDefaultGetter() ? JS_PropertyStub : sprop->getter();
if (desc.hasSet)
setter = desc.setterObject() ? desc.setter() : JS_PropertyStub;
else
setter = sprop->hasDefaultSetter() ? JS_PropertyStub : sprop->setter();
if (desc.hasGet) {
getter = desc.getter();
} else {
getter = (sprop->hasDefaultGetter() && !sprop->hasGetterValue())
? JS_PropertyStub
: sprop->getter();
}
if (desc.hasSet) {
setter = desc.setter();
} else {
setter = (sprop->hasDefaultSetter() && !sprop->hasSetterValue())
? JS_PropertyStub
: sprop->setter();
}
}
*rval = true;
@ -2445,8 +2405,8 @@ DefinePropertyObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &des
}
static JSBool
DefinePropertyArray(JSContext *cx, JSObject *obj, const PropertyDescriptor &desc,
bool throwError, bool *rval)
DefinePropertyOnArray(JSContext *cx, JSObject *obj, const PropertyDescriptor &desc,
bool throwError, bool *rval)
{
/*
* We probably should optimize dense array property definitions where
@ -2479,21 +2439,21 @@ DefinePropertyArray(JSContext *cx, JSObject *obj, const PropertyDescriptor &desc
if (index >= oldLen && lengthPropertyNotWritable())
return ThrowTypeError(cx, JSMSG_CANT_APPEND_PROPERTIES_TO_UNWRITABLE_LENGTH_ARRAY);
*/
if (!DefinePropertyObject(cx, obj, desc, false, rval))
if (!DefinePropertyOnObject(cx, obj, desc, false, rval))
return JS_FALSE;
if (!*rval)
return Reject(cx, JSMSG_CANT_DEFINE_ARRAY_INDEX, throwError, rval);
if (index >= oldLen) {
JS_ASSERT(index != UINT32_MAX);
obj->setArrayLength(index + 1);
obj->setSlowArrayLength(index + 1);
}
*rval = true;
return JS_TRUE;
}
return DefinePropertyObject(cx, obj, desc, throwError, rval);
return DefinePropertyOnObject(cx, obj, desc, throwError, rval);
}
static JSBool
@ -2501,12 +2461,12 @@ DefineProperty(JSContext *cx, JSObject *obj, const PropertyDescriptor &desc, boo
bool *rval)
{
if (obj->isArray())
return DefinePropertyArray(cx, obj, desc, throwError, rval);
return DefinePropertyOnArray(cx, obj, desc, throwError, rval);
if (!obj->isNative())
if (obj->map->ops->lookupProperty != js_LookupProperty)
return Reject(cx, JSMSG_OBJECT_NOT_EXTENSIBLE, throwError, rval);
return DefinePropertyObject(cx, obj, desc, throwError, rval);
return DefinePropertyOnObject(cx, obj, desc, throwError, rval);
}
JSBool
@ -2671,30 +2631,20 @@ const char js_hasOwnProperty_str[] = "hasOwnProperty";
const char js_isPrototypeOf_str[] = "isPrototypeOf";
const char js_propertyIsEnumerable_str[] = "propertyIsEnumerable";
JS_DEFINE_TRCINFO_1(obj_valueOf,
(3, (static, JSVAL, Object_p_valueOf, CONTEXT, THIS, STRING, 0,
nanojit::ACC_STORE_ANY)))
JS_DEFINE_TRCINFO_1(obj_hasOwnProperty,
(3, (static, BOOL_FAIL, Object_p_hasOwnProperty, CONTEXT, THIS, STRING, 0,
nanojit::ACC_STORE_ANY)))
JS_DEFINE_TRCINFO_1(obj_propertyIsEnumerable,
(3, (static, BOOL_FAIL, Object_p_propertyIsEnumerable, CONTEXT, THIS, STRING, 0,
nanojit::ACC_STORE_ANY)))
static JSFunctionSpec object_methods[] = {
#if JS_HAS_TOSOURCE
JS_FN(js_toSource_str, obj_toSource, 0,0),
#endif
JS_FN(js_toString_str, obj_toString, 0,0),
JS_FN(js_toLocaleString_str, obj_toLocaleString, 0,0),
JS_TN(js_valueOf_str, obj_valueOf, 0,0, &obj_valueOf_trcinfo),
JS_FN(js_valueOf_str, obj_valueOf, 0,0),
#if JS_HAS_OBJ_WATCHPOINT
JS_FN(js_watch_str, obj_watch, 2,0),
JS_FN(js_unwatch_str, obj_unwatch, 1,0),
#endif
JS_TN(js_hasOwnProperty_str, obj_hasOwnProperty, 1,0, &obj_hasOwnProperty_trcinfo),
JS_FN(js_hasOwnProperty_str, obj_hasOwnProperty, 1,0),
JS_FN(js_isPrototypeOf_str, obj_isPrototypeOf, 1,0),
JS_TN(js_propertyIsEnumerable_str, obj_propertyIsEnumerable, 1,0, &obj_propertyIsEnumerable_trcinfo),
JS_FN(js_propertyIsEnumerable_str, obj_propertyIsEnumerable, 1,0),
#if OLD_GETTER_SETTER_METHODS
JS_FN(js_defineGetter_str, js_obj_defineGetter, 2,0),
JS_FN(js_defineSetter_str, js_obj_defineSetter, 2,0),
@ -4250,9 +4200,9 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
/* Use the object's class getter and setter by default. */
clasp = obj->getClass();
if (!(defineHow & JSDNP_SET_METHOD)) {
if (!getter)
if (!getter && !(attrs & JSPROP_GETTER))
getter = clasp->getProperty;
if (!setter)
if (!setter && !(attrs & JSPROP_SETTER))
setter = clasp->setProperty;
}
@ -4273,15 +4223,25 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
JSObject *funobj = JSVAL_TO_OBJECT(value);
if (FUN_OBJECT(GET_FUNCTION_PRIVATE(cx, funobj)) == funobj) {
flags |= JSScopeProperty::METHOD;
getter = js_CastAsPropertyOp(funobj);
getter = CastAsPropertyOp(funobj);
}
}
added = !scope->hasProperty(id);
uint32 oldShape = scope->shape;
sprop = scope->putProperty(cx, id, getter, setter, SPROP_INVALID_SLOT,
attrs, flags, shortid);
if (!sprop)
goto error;
/*
* If sprop is a method, the above call to putProperty suffices to
* update the shape if necessary. But if scope->branded(), the shape
* may not have changed and we may be overwriting a function-valued
* property. See bug 560998.
*/
if (scope->shape == oldShape && scope->branded())
scope->methodWriteBarrier(cx, sprop->slot, value);
}
/* Store value before calling addProperty, in case the latter GC's. */
@ -4617,8 +4577,13 @@ js_FindIdentifierBase(JSContext *cx, JSObject *scopeChain, jsid id)
* property. We also stop when we reach the global object, skipping any
* further checks or lookups. For details see the JSOP_BINDNAME case of
* js_Interpret.
*
* The test order here matters because js_IsCacheableNonGlobalScope
* must not be passed a global object (i.e. one with null parent).
*/
for (int scopeIndex = 0; js_IsCacheableNonGlobalScope(obj); scopeIndex++) {
for (int scopeIndex = 0;
!obj->getParent() || js_IsCacheableNonGlobalScope(obj);
scopeIndex++) {
JSObject *pobj;
JSProperty *prop;
int protoIndex = js_LookupPropertyWithFlags(cx, obj, id,
@ -4628,7 +4593,8 @@ js_FindIdentifierBase(JSContext *cx, JSObject *scopeChain, jsid id)
return NULL;
if (prop) {
JS_ASSERT(pobj->isNative());
JS_ASSERT(pobj->getClass() == obj->getClass());
JS_ASSERT(!obj->getParent() ||
pobj->getClass() == obj->getClass());
#ifdef DEBUG
PropertyCacheEntry *entry =
#endif
@ -4639,10 +4605,10 @@ js_FindIdentifierBase(JSContext *cx, JSObject *scopeChain, jsid id)
return obj;
}
/* Call and other cacheable objects always have a parent. */
obj = obj->getParent();
if (!obj->getParent())
JSObject *parent = obj->getParent();
if (!parent)
return obj;
obj = parent;
}
/* Loop until we find a property or reach the global object. */
@ -4903,7 +4869,7 @@ js_GetMethod(JSContext *cx, JSObject *obj, jsid id, uintN getHow, jsval *vp)
}
JS_FRIEND_API(bool)
js_CheckUndeclaredVarAssignment(JSContext *cx)
js_CheckUndeclaredVarAssignment(JSContext *cx, jsval propname)
{
JSStackFrame *fp = js_GetTopStackFrame(cx);
if (!fp)
@ -4915,16 +4881,7 @@ js_CheckUndeclaredVarAssignment(JSContext *cx)
return true;
}
/* This check is only appropriate when executing JSOP_SETNAME. */
if (!fp->regs ||
js_GetOpcode(cx, fp->script, fp->regs->pc) != JSOP_SETNAME) {
return true;
}
JSAtom *atom;
GET_ATOM_FROM_BYTECODE(fp->script, fp->regs->pc, 0, atom);
const char *bytes = js_AtomToPrintableString(cx, atom);
const char *bytes = js_GetStringBytes(cx, JSVAL_TO_STRING(propname));
return bytes &&
JS_ReportErrorFlagsAndNumber(cx,
(JSREPORT_WARNING | JSREPORT_STRICT
@ -4965,7 +4922,8 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow,
JSPropertyOp getter, setter;
bool added;
JS_ASSERT((defineHow & ~(JSDNP_CACHE_RESULT | JSDNP_SET_METHOD)) == 0);
JS_ASSERT((defineHow &
~(JSDNP_CACHE_RESULT | JSDNP_SET_METHOD | JSDNP_UNQUALIFIED)) == 0);
if (defineHow & JSDNP_CACHE_RESULT)
JS_ASSERT_NOT_ON_TRACE(cx);
@ -4992,8 +4950,11 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow,
/* We should never add properties to lexical blocks. */
JS_ASSERT(obj->getClass() != &js_BlockClass);
if (!obj->getParent() && !js_CheckUndeclaredVarAssignment(cx))
if (!obj->getParent() &&
(defineHow & JSDNP_UNQUALIFIED) &&
!js_CheckUndeclaredVarAssignment(cx, ID_TO_VALUE(id))) {
return JS_FALSE;
}
}
sprop = (JSScopeProperty *) prop;
@ -5139,7 +5100,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow,
JSObject *funobj = JSVAL_TO_OBJECT(*vp);
if (FUN_OBJECT(GET_FUNCTION_PRIVATE(cx, funobj)) == funobj) {
flags |= JSScopeProperty::METHOD;
getter = js_CastAsPropertyOp(funobj);
getter = CastAsPropertyOp(funobj);
}
}
@ -5183,7 +5144,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow,
JSBool
js_SetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
{
return js_SetPropertyHelper(cx, obj, id, false, vp);
return js_SetPropertyHelper(cx, obj, id, 0, vp);
}
JSBool
@ -6463,11 +6424,12 @@ js_SetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, jsval v)
return true;
JSClass *clasp = obj->getClass();
uint32 limit = JSCLASS_RESERVED_SLOTS(clasp);
JS_LOCK_OBJ(cx, obj);
if (index >= limit && !ReservedSlotIndexOK(cx, obj, clasp, index, limit))
return false;
#ifdef DEBUG
uint32 limit = JSCLASS_RESERVED_SLOTS(clasp);
JS_ASSERT(index < limit || ReservedSlotIndexOK(cx, obj, clasp, index, limit));
#endif
uint32 slot = JSSLOT_START(clasp) + index;
if (slot >= JS_INITIAL_NSLOTS && !obj->dslots) {
@ -6711,11 +6673,11 @@ js_DumpObject(JSObject *obj)
fprintf(stderr, "class %p %s\n", (void *)clasp, clasp->name);
if (obj->isDenseArray()) {
slots = JS_MIN(obj->getArrayLength(), js_DenseArrayCapacity(obj));
slots = JS_MIN(obj->getArrayLength(), obj->getDenseArrayCapacity());
fprintf(stderr, "elements\n");
for (i = 0; i < slots; i++) {
fprintf(stderr, " %3d: ", i);
dumpValue(obj->dslots[i]);
dumpValue(obj->getDenseArrayElement(i));
fprintf(stderr, "\n");
fflush(stderr);
}

View file

@ -97,10 +97,10 @@ struct PropertyDescriptor {
}
JSObject* getterObject() const {
return get != JSVAL_VOID ? JSVAL_TO_OBJECT(get) : NULL;
return (get != JSVAL_VOID) ? JSVAL_TO_OBJECT(get) : NULL;
}
JSObject* setterObject() const {
return set != JSVAL_VOID ? JSVAL_TO_OBJECT(set) : NULL;
return (set != JSVAL_VOID) ? JSVAL_TO_OBJECT(set) : NULL;
}
jsval getterValue() const {
@ -111,10 +111,10 @@ struct PropertyDescriptor {
}
JSPropertyOp getter() const {
return js_CastAsPropertyOp(getterObject());
return js::CastAsPropertyOp(getterObject());
}
JSPropertyOp setter() const {
return js_CastAsPropertyOp(setterObject());
return js::CastAsPropertyOp(setterObject());
}
static void traceDescriptorArray(JSTracer* trc, JSObject* obj);
@ -292,7 +292,7 @@ struct JSObject {
classword |= jsuword(2);
}
uint32 numSlots(void) {
uint32 numSlots(void) const {
return dslots ? (uint32)dslots[-1] : (uint32)JS_INITIAL_NSLOTS;
}
@ -405,26 +405,47 @@ struct JSObject {
*/
private:
// Used by dense and slow arrays.
static const uint32 JSSLOT_ARRAY_LENGTH = JSSLOT_PRIVATE;
static const uint32 JSSLOT_ARRAY_COUNT = JSSLOT_PRIVATE + 1;
static const uint32 JSSLOT_ARRAY_UNUSED = JSSLOT_PRIVATE + 2;
// Used only by dense arrays.
static const uint32 JSSLOT_DENSE_ARRAY_COUNT = JSSLOT_PRIVATE + 1;
static const uint32 JSSLOT_DENSE_ARRAY_MINLENCAP = JSSLOT_PRIVATE + 2;
// This assertion must remain true; see comment in js_MakeArraySlow().
// (Nb: This method is never called; it just contains a static assertion.
// The static assertion isn't inline because that doesn't work on Mac.)
inline void staticAssertArrayLengthIsInPrivateSlot();
inline bool isDenseArrayMinLenCapOk() const;
inline uint32 uncheckedGetArrayLength() const;
inline uint32 uncheckedGetDenseArrayCapacity() const;
public:
inline uint32 getArrayLength() const;
inline void setArrayLength(uint32 length);
inline void setDenseArrayLength(uint32 length);
inline void setSlowArrayLength(uint32 length);
inline uint32 getArrayCount() const;
inline void voidDenseArrayCount();
inline void setArrayCount(uint32 count);
inline void incArrayCountBy(uint32 posDelta);
inline void decArrayCountBy(uint32 negDelta);
inline uint32 getDenseArrayCount() const;
inline void setDenseArrayCount(uint32 count);
inline void incDenseArrayCountBy(uint32 posDelta);
inline void decDenseArrayCountBy(uint32 negDelta);
inline void voidArrayUnused();
inline uint32 getDenseArrayCapacity() const;
inline void setDenseArrayCapacity(uint32 capacity); // XXX: bug 558263 will remove this
inline jsval getDenseArrayElement(uint32 i) const;
inline void setDenseArrayElement(uint32 i, jsval v);
inline jsval *getDenseArrayElements() const; // returns pointer to the Array's elements array
bool resizeDenseArrayElements(JSContext *cx, uint32 oldcap, uint32 newcap,
bool initializeAllSlots = true);
bool ensureDenseArrayElements(JSContext *cx, uint32 newcap,
bool initializeAllSlots = true);
inline void freeDenseArrayElements(JSContext *cx);
inline void voidDenseOnlyArraySlots(); // used when converting a dense array to a slow array
/*
* Arguments-specific getters and setters.
@ -440,8 +461,8 @@ struct JSObject {
* JSSLOT_ARGS_CALLEE - the arguments.callee value or JSVAL_HOLE if that
* was overwritten.
*
* Argument index i is stored in dslots[i]. But future-proof your code by
* using {Get,Set}ArgsSlot instead of naked dslots references.
* Argument index i is stored in dslots[i], accessible via
* {get,set}ArgsElement().
*/
private:
static const uint32 JSSLOT_ARGS_LENGTH = JSSLOT_PRIVATE + 1;
@ -454,11 +475,14 @@ struct JSObject {
inline uint32 getArgsLength() const;
inline void setArgsLength(uint32 argc);
inline void setArgsLengthOverridden();
inline bool isArgsLengthOverridden();
inline bool isArgsLengthOverridden() const;
inline jsval getArgsCallee() const;
inline void setArgsCallee(jsval callee);
inline jsval getArgsElement(uint32 i) const;
inline void setArgsElement(uint32 i, jsval v);
/*
* Date-specific getters and setters.
*/
@ -923,6 +947,9 @@ const uintN JSDNP_DONT_PURGE = 2; /* suppress js_PurgeScopeChain */
const uintN JSDNP_SET_METHOD = 4; /* js_{DefineNativeProperty,SetPropertyHelper}
must pass the JSScopeProperty::METHOD
flag on to js_AddScopeProperty */
const uintN JSDNP_UNQUALIFIED = 8; /* Unqualified property set. Only used in
the defineHow argument of
js_SetPropertyHelper. */
/*
* On error, return false. On success, if propp is non-null, return true with
@ -1045,11 +1072,13 @@ extern JSBool
js_GetMethod(JSContext *cx, JSObject *obj, jsid id, uintN getHow, jsval *vp);
/*
* Check whether it is OK to assign an undeclared property of the global
* object at the current script PC.
* Check whether it is OK to assign an undeclared property named
* propname to the global object in the current script on cx. Reports
* an error whenever one is required (in particular, in every case in
* which it returns false).
*/
extern JS_FRIEND_API(bool)
js_CheckUndeclaredVarAssignment(JSContext *cx);
js_CheckUndeclaredVarAssignment(JSContext *cx, jsval propname);
extern JSBool
js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow,

View file

@ -115,60 +115,138 @@ inline void JSObject::staticAssertArrayLengthIsInPrivateSlot()
JS_STATIC_ASSERT(JSSLOT_ARRAY_LENGTH == JSSLOT_PRIVATE);
}
inline bool JSObject::isDenseArrayMinLenCapOk() const
{
JS_ASSERT(isDenseArray());
uint32 length = uncheckedGetArrayLength();
uint32 capacity = uncheckedGetDenseArrayCapacity();
uint32 minLenCap = uint32(fslots[JSSLOT_DENSE_ARRAY_MINLENCAP]);
return minLenCap == JS_MIN(length, capacity);
}
inline uint32
JSObject::uncheckedGetArrayLength() const
{
return uint32(fslots[JSSLOT_ARRAY_LENGTH]);
}
inline uint32
JSObject::getArrayLength() const
{
JS_ASSERT(isArray());
return uint32(fslots[JSSLOT_ARRAY_LENGTH]);
JS_ASSERT_IF(isDenseArray(), isDenseArrayMinLenCapOk());
return uncheckedGetArrayLength();
}
inline void
JSObject::setArrayLength(uint32 length)
JSObject::setDenseArrayLength(uint32 length)
{
JS_ASSERT(isArray());
JS_ASSERT(isDenseArray());
fslots[JSSLOT_ARRAY_LENGTH] = length;
uint32 capacity = uncheckedGetDenseArrayCapacity();
fslots[JSSLOT_DENSE_ARRAY_MINLENCAP] = JS_MIN(length, capacity);
}
inline void
JSObject::setSlowArrayLength(uint32 length)
{
JS_ASSERT(isSlowArray());
fslots[JSSLOT_ARRAY_LENGTH] = length;
}
inline uint32
JSObject::getArrayCount() const
{
JS_ASSERT(isArray());
return uint32(fslots[JSSLOT_ARRAY_COUNT]);
}
inline void
JSObject::setArrayCount(uint32 count)
{
JS_ASSERT(isArray());
fslots[JSSLOT_ARRAY_COUNT] = count;
}
inline void
JSObject::voidDenseArrayCount()
JSObject::getDenseArrayCount() const
{
JS_ASSERT(isDenseArray());
fslots[JSSLOT_ARRAY_COUNT] = JSVAL_VOID;
return uint32(fslots[JSSLOT_DENSE_ARRAY_COUNT]);
}
inline void
JSObject::incArrayCountBy(uint32 posDelta)
JSObject::setDenseArrayCount(uint32 count)
{
JS_ASSERT(isArray());
fslots[JSSLOT_ARRAY_COUNT] += posDelta;
JS_ASSERT(isDenseArray());
fslots[JSSLOT_DENSE_ARRAY_COUNT] = count;
}
inline void
JSObject::decArrayCountBy(uint32 negDelta)
JSObject::incDenseArrayCountBy(uint32 posDelta)
{
JS_ASSERT(isArray());
fslots[JSSLOT_ARRAY_COUNT] -= negDelta;
JS_ASSERT(isDenseArray());
fslots[JSSLOT_DENSE_ARRAY_COUNT] += posDelta;
}
inline void
JSObject::decDenseArrayCountBy(uint32 negDelta)
{
JS_ASSERT(isDenseArray());
fslots[JSSLOT_DENSE_ARRAY_COUNT] -= negDelta;
}
inline uint32
JSObject::uncheckedGetDenseArrayCapacity() const
{
return dslots ? uint32(dslots[-1]) : 0;
}
inline uint32
JSObject::getDenseArrayCapacity() const
{
JS_ASSERT(isDenseArray());
JS_ASSERT(isDenseArrayMinLenCapOk());
return uncheckedGetDenseArrayCapacity();
}
inline void
JSObject::voidArrayUnused()
JSObject::setDenseArrayCapacity(uint32 capacity)
{
JS_ASSERT(isArray());
fslots[JSSLOT_ARRAY_UNUSED] = JSVAL_VOID;
JS_ASSERT(isDenseArray());
JS_ASSERT(dslots);
dslots[-1] = capacity;
uint32 length = uncheckedGetArrayLength();
fslots[JSSLOT_DENSE_ARRAY_MINLENCAP] = JS_MIN(length, capacity);
}
inline jsval
JSObject::getDenseArrayElement(uint32 i) const
{
JS_ASSERT(isDenseArray());
JS_ASSERT(i < getDenseArrayCapacity());
return dslots[i];
}
inline void
JSObject::setDenseArrayElement(uint32 i, jsval v)
{
JS_ASSERT(isDenseArray());
JS_ASSERT(i < getDenseArrayCapacity());
dslots[i] = v;
}
inline jsval *
JSObject::getDenseArrayElements() const
{
JS_ASSERT(isDenseArray());
return dslots;
}
inline void
JSObject::freeDenseArrayElements(JSContext *cx)
{
JS_ASSERT(isDenseArray());
if (dslots) {
cx->free(dslots - 1);
dslots = NULL;
}
fslots[JSSLOT_DENSE_ARRAY_MINLENCAP] = 0;
JS_ASSERT(isDenseArrayMinLenCapOk());
}
inline void
JSObject::voidDenseOnlyArraySlots()
{
JS_ASSERT(isDenseArray());
fslots[JSSLOT_DENSE_ARRAY_COUNT] = JSVAL_VOID;
fslots[JSSLOT_DENSE_ARRAY_MINLENCAP] = JSVAL_VOID;
}
inline void
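
The new JSSLOT_DENSE_ARRAY_MINLENCAP slot caches JS_MIN(length, capacity), so "is this a readable dense element?" collapses to a single unsigned comparison. A sketch of the check it enables (assumption: private access control on fslots is ignored here; the real fast paths live in the interpreter and tracer):

// Hypothetical helper; relies on the invariant isDenseArrayMinLenCapOk() verifies.
static inline bool
DenseIndexInBounds(JSObject *obj, uint32 idx)
{
    return idx < uint32(obj->fslots[JSSLOT_DENSE_ARRAY_MINLENCAP]);
}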
@ -200,7 +278,7 @@ JSObject::setArgsLengthOverridden()
}
inline bool
JSObject::isArgsLengthOverridden()
JSObject::isArgsLengthOverridden() const
{
JS_ASSERT(isArguments());
jsval v = fslots[JSSLOT_ARGS_LENGTH];
@ -221,6 +299,22 @@ JSObject::setArgsCallee(jsval callee)
fslots[JSSLOT_ARGS_CALLEE] = callee;
}
inline jsval
JSObject::getArgsElement(uint32 i) const
{
JS_ASSERT(isArguments());
JS_ASSERT(i < numSlots() - JS_INITIAL_NSLOTS);
return dslots[i];
}
inline void
JSObject::setArgsElement(uint32 i, jsval v)
{
JS_ASSERT(isArguments());
JS_ASSERT(i < numSlots() - JS_INITIAL_NSLOTS);
dslots[i] = v;
}
inline jsval
JSObject::getDateLocalTime() const
{

View file

@ -3271,6 +3271,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
*/
cond = js_GetSrcNoteOffset(sn, 1);
if (cond != 0) {
cond -= tail;
DECOMPILE_CODE(pc + oplen, cond - oplen);
pc += cond;
elseif = JS_TRUE;

View file

@ -48,14 +48,14 @@
#endif /* !JS_THREADED_INTERP */
{
bool moreInterrupts = false;
JSTrapHandler handler = cx->debugHooks->interruptHandler;
if (handler) {
JSInterruptHook hook = cx->debugHooks->interruptHook;
if (hook) {
#ifdef JS_TRACER
if (TRACE_RECORDER(cx))
AbortRecording(cx, "interrupt handler");
AbortRecording(cx, "interrupt hook");
#endif
switch (handler(cx, script, regs.pc, &rval,
cx->debugHooks->interruptHandlerData)) {
switch (hook(cx, script, regs.pc, &rval,
cx->debugHooks->interruptHookData)) {
case JSTRAP_ERROR:
goto error;
case JSTRAP_CONTINUE:
@ -76,6 +76,7 @@
#ifdef JS_TRACER
if (TraceRecorder* tr = TRACE_RECORDER(cx)) {
AbortableRecordingStatus status = tr->monitorRecording(op);
JS_ASSERT_IF(cx->throwing, status == ARECORD_ERROR);
switch (status) {
case ARECORD_CONTINUE:
moreInterrupts = true;
@ -257,13 +258,7 @@ BEGIN_CASE(JSOP_STOP)
*/
fp->putActivationObjects(cx);
#ifdef INCLUDE_MOZILLA_DTRACE
/* DTrace function return, inlines */
if (JAVASCRIPT_FUNCTION_RVAL_ENABLED())
jsdtrace_function_rval(cx, fp, fp->fun, &fp->rval);
if (JAVASCRIPT_FUNCTION_RETURN_ENABLED())
jsdtrace_function_return(cx, fp, fp->fun);
#endif
DTrace::exitJSFun(cx, fp, fp->fun, fp->rval);
/* Restore context version only if callee hasn't set version. */
if (JS_LIKELY(cx->version == currentVersion)) {
@ -1816,9 +1811,13 @@ BEGIN_CASE(JSOP_SETMETHOD)
LOAD_ATOM(0);
id = ATOM_TO_JSID(atom);
if (entry && JS_LIKELY(obj->map->ops->setProperty == js_SetProperty)) {
uintN defineHow = (op == JSOP_SETMETHOD)
? JSDNP_CACHE_RESULT | JSDNP_SET_METHOD
: JSDNP_CACHE_RESULT;
uintN defineHow;
if (op == JSOP_SETMETHOD)
defineHow = JSDNP_CACHE_RESULT | JSDNP_SET_METHOD;
else if (op == JSOP_SETNAME)
defineHow = JSDNP_CACHE_RESULT | JSDNP_UNQUALIFIED;
else
defineHow = JSDNP_CACHE_RESULT;
if (!js_SetPropertyHelper(cx, obj, id, defineHow, &rval))
goto error;
} else {
@ -1851,8 +1850,8 @@ BEGIN_CASE(JSOP_GETELEM)
jsuint idx = jsuint(JSVAL_TO_INT(rval));
if (idx < obj->getArrayLength() &&
idx < js_DenseArrayCapacity(obj)) {
rval = obj->dslots[idx];
idx < obj->getDenseArrayCapacity()) {
rval = obj->getDenseArrayElement(idx);
if (rval != JSVAL_HOLE)
goto end_getelem;
@ -1873,7 +1872,7 @@ BEGIN_CASE(JSOP_GETELEM)
goto end_getelem;
}
rval = GetArgsSlot(obj, arg);
rval = obj->getArgsElement(arg);
if (rval != JSVAL_HOLE)
goto end_getelem;
rval = FETCH_OPND(-1);
@ -1916,17 +1915,17 @@ BEGIN_CASE(JSOP_SETELEM)
if (obj->isDenseArray() && JSID_IS_INT(id)) {
jsuint length;
length = js_DenseArrayCapacity(obj);
length = obj->getDenseArrayCapacity();
i = JSID_TO_INT(id);
if ((jsuint)i < length) {
if (obj->dslots[i] == JSVAL_HOLE) {
if (obj->getDenseArrayElement(i) == JSVAL_HOLE) {
if (js_PrototypeHasIndexedProperties(cx, obj))
break;
if ((jsuint)i >= obj->getArrayLength())
obj->setArrayLength(i + 1);
obj->incArrayCountBy(1);
obj->setDenseArrayLength(i + 1);
obj->incDenseArrayCountBy(1);
}
obj->dslots[i] = rval;
obj->setDenseArrayElement(i, rval);
goto end_setelem;
}
}
@ -2158,20 +2157,20 @@ BEGIN_CASE(JSOP_APPLY)
inlineCallCount++;
JS_RUNTIME_METER(rt, inlineCalls);
#ifdef INCLUDE_MOZILLA_DTRACE
/* DTrace function entry, inlines */
if (JAVASCRIPT_FUNCTION_ENTRY_ENABLED())
jsdtrace_function_entry(cx, fp, fun);
if (JAVASCRIPT_FUNCTION_INFO_ENABLED())
jsdtrace_function_info(cx, fp, fp->down, fun);
if (JAVASCRIPT_FUNCTION_ARGS_ENABLED())
jsdtrace_function_args(cx, fp, fun, fp->argc, fp->argv);
#endif
DTrace::enterJSFun(cx, fp, fun, fp->down, fp->argc, fp->argv);
#ifdef JS_TRACER
if (TRACE_RECORDER(cx)) {
TRACE_1(EnterFrame, inlineCallCount);
if (TraceRecorder *tr = TRACE_RECORDER(cx)) {
AbortableRecordingStatus status = tr->record_EnterFrame(inlineCallCount);
RESTORE_INTERP_VARS();
if (StatusAbortsRecorderIfActive(status)) {
if (TRACE_RECORDER(cx)) {
JS_ASSERT(TRACE_RECORDER(cx) == tr);
AbortRecording(cx, "record_EnterFrame failed");
}
if (status == ARECORD_ERROR)
goto error;
}
} else if (fp->script == fp->down->script &&
*fp->down->regs->pc == JSOP_CALL &&
*fp->regs->pc == JSOP_TRACE) {
@ -2192,30 +2191,13 @@ BEGIN_CASE(JSOP_APPLY)
}
if (fun->flags & JSFUN_FAST_NATIVE) {
#ifdef INCLUDE_MOZILLA_DTRACE
/* DTrace function entry, non-inlines */
if (VALUE_IS_FUNCTION(cx, lval)) {
if (JAVASCRIPT_FUNCTION_ENTRY_ENABLED())
jsdtrace_function_entry(cx, NULL, fun);
if (JAVASCRIPT_FUNCTION_INFO_ENABLED())
jsdtrace_function_info(cx, NULL, fp, fun);
if (JAVASCRIPT_FUNCTION_ARGS_ENABLED())
jsdtrace_function_args(cx, fp, fun, argc, vp+2);
}
#endif
DTrace::enterJSFun(cx, NULL, fun, fp, argc, vp + 2, &lval);
JS_ASSERT(fun->u.n.extra == 0);
JS_ASSERT(JSVAL_IS_OBJECT(vp[1]) ||
PRIMITIVE_THIS_TEST(fun, vp[1]));
ok = ((JSFastNative) fun->u.n.native)(cx, argc, vp);
#ifdef INCLUDE_MOZILLA_DTRACE
if (VALUE_IS_FUNCTION(cx, lval)) {
if (JAVASCRIPT_FUNCTION_RVAL_ENABLED())
jsdtrace_function_rval(cx, NULL, fun, vp);
if (JAVASCRIPT_FUNCTION_RETURN_ENABLED())
jsdtrace_function_return(cx, NULL, fun);
}
#endif
DTrace::exitJSFun(cx, NULL, fun, *vp, &lval);
regs.sp = vp + 1;
if (!ok) {
/*
@ -2920,9 +2902,9 @@ BEGIN_CASE(JSOP_DEFFUN)
attrs |= flags | JSPROP_SHARED;
rval = JSVAL_VOID;
if (flags == JSPROP_GETTER)
getter = js_CastAsPropertyOp(obj);
getter = CastAsPropertyOp(obj);
else
setter = js_CastAsPropertyOp(obj);
setter = CastAsPropertyOp(obj);
}
/*
@ -3020,10 +3002,10 @@ BEGIN_CASE(JSOP_DEFFUN_DBGFC)
ok = parent->defineProperty(cx, id, rval,
(flags & JSPROP_GETTER)
? js_CastAsPropertyOp(obj)
? CastAsPropertyOp(obj)
: JS_PropertyStub,
(flags & JSPROP_SETTER)
? js_CastAsPropertyOp(obj)
? CastAsPropertyOp(obj)
: JS_PropertyStub,
attrs);
}
@ -3246,12 +3228,12 @@ BEGIN_CASE(JSOP_SETTER)
goto error;
if (op == JSOP_GETTER) {
getter = js_CastAsPropertyOp(JSVAL_TO_OBJECT(rval));
getter = CastAsPropertyOp(JSVAL_TO_OBJECT(rval));
setter = JS_PropertyStub;
attrs = JSPROP_GETTER;
} else {
getter = JS_PropertyStub;
setter = js_CastAsPropertyOp(JSVAL_TO_OBJECT(rval));
setter = CastAsPropertyOp(JSVAL_TO_OBJECT(rval));
attrs = JSPROP_SETTER;
}
attrs |= JSPROP_ENUMERATE | JSPROP_SHARED;
@ -3665,7 +3647,7 @@ END_CASE(JSOP_INSTANCEOF)
#if JS_HAS_DEBUGGER_KEYWORD
BEGIN_CASE(JSOP_DEBUGGER)
{
JSTrapHandler handler = cx->debugHooks->debuggerHandler;
JSDebuggerHandler handler = cx->debugHooks->debuggerHandler;
if (handler) {
switch (handler(cx, script, regs.pc, &rval, cx->debugHooks->debuggerHandlerData)) {
case JSTRAP_ERROR:

View file

@ -722,7 +722,6 @@ Compiler::compileScript(JSContext *cx, JSObject *scopeChain, JSStackFrame *calle
JSString *source /* = NULL */,
unsigned staticLevel /* = 0 */)
{
Compiler compiler(cx, principals, callerFrame);
JSArenaPool codePool, notePool;
TokenKind tt;
JSParseNode *pn;
@ -742,6 +741,7 @@ Compiler::compileScript(JSContext *cx, JSObject *scopeChain, JSStackFrame *calle
JS_ASSERT_IF(callerFrame, tcflags & TCF_COMPILE_N_GO);
JS_ASSERT_IF(staticLevel != 0, callerFrame);
Compiler compiler(cx, principals, callerFrame);
if (!compiler.init(chars, length, file, filename, lineno))
return NULL;
@ -754,6 +754,8 @@ Compiler::compileScript(JSContext *cx, JSObject *scopeChain, JSStackFrame *calle
TokenStream &tokenStream = parser.tokenStream;
JSCodeGenerator cg(&parser, &codePool, &notePool, tokenStream.getLineno());
if (!cg.init())
return NULL;
MUST_FLOW_THROUGH("out");
@ -1102,7 +1104,7 @@ ReportBadReturn(JSContext *cx, JSTreeContext *tc, uintN flags, uintN errnum,
{
const char *name;
JS_ASSERT(tc->flags & TCF_IN_FUNCTION);
JS_ASSERT(tc->inFunction());
if (tc->fun->atom) {
name = js_AtomToPrintableString(cx, tc->fun->atom);
} else {
@ -1115,7 +1117,7 @@ ReportBadReturn(JSContext *cx, JSTreeContext *tc, uintN flags, uintN errnum,
static JSBool
CheckFinalReturn(JSContext *cx, JSTreeContext *tc, JSParseNode *pn)
{
JS_ASSERT(tc->flags & TCF_IN_FUNCTION);
JS_ASSERT(tc->inFunction());
return HasFinalReturn(pn) == ENDS_IN_RETURN ||
ReportBadReturn(cx, tc, JSREPORT_WARNING | JSREPORT_STRICT,
JSMSG_NO_RETURN_VALUE, JSMSG_ANON_NO_RETURN_VALUE);
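
Throughout the parser this merge replaces raw TCF_* flag tests with JSTreeContext predicates. Their presumed shape, inferred from the call sites in this diff (the real definitions live in jsparse.h and are not shown here):

// Sketch of the accessors this patch switches to (assumed thin inline wrappers):
bool inFunction() const   { return flags & TCF_IN_FUNCTION; }
bool compileAndGo() const { return flags & TCF_COMPILE_N_GO; }
bool compiling() const    { return flags & TCF_COMPILING; }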
@ -1229,7 +1231,7 @@ Parser::functionBody()
uintN oldflags, firstLine;
JSParseNode *pn;
JS_ASSERT(tc->flags & TCF_IN_FUNCTION);
JS_ASSERT(tc->inFunction());
js_PushStatement(tc, &stmtInfo, STMT_BLOCK, -1);
stmtInfo.flags = SIF_BODY_BLOCK;
@ -1527,6 +1529,9 @@ Compiler::compileFunctionBody(JSContext *cx, JSFunction *fun, JSPrincipals *prin
TokenStream &tokenStream = parser.tokenStream;
JSCodeGenerator funcg(&parser, &codePool, &notePool, tokenStream.getLineno());
if (!funcg.init())
return NULL;
funcg.flags |= TCF_IN_FUNCTION;
funcg.fun = fun;
if (!GenerateBlockId(&funcg, funcg.bodyid))
@ -1655,7 +1660,7 @@ BindDestructuringArg(JSContext *cx, BindData *data, JSAtom *atom,
if (atom == tc->parser->context->runtime->atomState.evalAtom)
tc->flags |= TCF_FUN_PARAM_EVAL;
JS_ASSERT(tc->flags & TCF_IN_FUNCTION);
JS_ASSERT(tc->inFunction());
JSLocalKind localKind = js_LookupLocal(cx, tc->fun, atom, NULL);
if (localKind != JSLOCAL_NONE) {
@ -1695,12 +1700,12 @@ Parser::newFunction(JSTreeContext *tc, JSAtom *atom, uintN lambda)
*/
while (tc->parent)
tc = tc->parent;
parent = (tc->flags & TCF_IN_FUNCTION) ? NULL : tc->scopeChain;
parent = tc->inFunction() ? NULL : tc->scopeChain;
fun = js_NewFunction(context, NULL, NULL, 0, JSFUN_INTERPRETED | lambda,
parent, atom);
if (fun && !(tc->flags & TCF_COMPILE_N_GO)) {
if (fun && !tc->compileAndGo()) {
FUN_OBJECT(fun)->clearParent();
FUN_OBJECT(fun)->clearProto();
}
@ -1991,6 +1996,14 @@ CanFlattenUpvar(JSDefinition *dn, JSFunctionBox *funbox, uint32 tcflags)
*/
if (!afunbox || afunbox->node->isFunArg())
return false;
/*
* Reaching up for dn across a generator also means we can't flatten,
* since the generator iterator does not run until later, in general.
* See bug 563034.
*/
if (afunbox->tcflags & TCF_FUN_IS_GENERATOR)
return false;
}
/*
@ -2081,23 +2094,23 @@ CanFlattenUpvar(JSDefinition *dn, JSFunctionBox *funbox, uint32 tcflags)
static void
FlagHeavyweights(JSDefinition *dn, JSFunctionBox *funbox, uint32& tcflags)
{
JSFunctionBox *afunbox = funbox->parent;
uintN dnLevel = dn->frameLevel();
while (afunbox) {
while ((funbox = funbox->parent) != NULL) {
/*
* Notice that afunbox->level is the static level of the definition or
* expression of the function parsed into afunbox, not the static level
* Notice that funbox->level is the static level of the definition or
* expression of the function parsed into funbox, not the static level
* of its body. Therefore we must add 1 to match dn's level to find the
* afunbox whose body contains the dn definition.
* funbox whose body contains the dn definition.
*/
if (afunbox->level + 1U == dnLevel || (dnLevel == 0 && dn->isLet())) {
afunbox->tcflags |= TCF_FUN_HEAVYWEIGHT;
if (funbox->level + 1U == dnLevel || (dnLevel == 0 && dn->isLet())) {
funbox->tcflags |= TCF_FUN_HEAVYWEIGHT;
break;
}
afunbox = afunbox->parent;
funbox->tcflags |= TCF_FUN_ENTRAINS_SCOPES;
}
if (!afunbox && (tcflags & TCF_IN_FUNCTION))
if (!funbox && (tcflags & TCF_IN_FUNCTION))
tcflags |= TCF_FUN_HEAVYWEIGHT;
}
@ -2172,10 +2185,11 @@ Parser::setFunctionKinds(JSFunctionBox *funbox, uint32& tcflags)
JSFunction *fun = (JSFunction *) funbox->object;
JS_ASSERT(FUN_KIND(fun) == JSFUN_INTERPRETED);
FUN_METER(allfun);
if (funbox->tcflags & TCF_FUN_HEAVYWEIGHT) {
FUN_METER(heavy);
JS_ASSERT(FUN_KIND(fun) == JSFUN_INTERPRETED);
} else if (pn->pn_type != TOK_UPVARS) {
/*
* No lexical dependencies => null closure, for best performance.
@ -2247,7 +2261,8 @@ Parser::setFunctionKinds(JSFunctionBox *funbox, uint32& tcflags)
if (nupvars == 0) {
FUN_METER(onlyfreevar);
FUN_SET_KIND(fun, JSFUN_NULL_CLOSURE);
} else if (!mutation && !(funbox->tcflags & TCF_FUN_IS_GENERATOR)) {
} else if (!mutation &&
!(funbox->tcflags & (TCF_FUN_IS_GENERATOR | TCF_FUN_ENTRAINS_SCOPES))) {
/*
* Algol-like functions can read upvars using the dynamic
* link (cx->fp/fp->down), optimized using the cx->display
@ -2647,7 +2662,7 @@ Parser::functionDef(uintN lambda, bool namePermitted)
if (topLevel) {
pn->pn_dflags |= PND_TOPLEVEL;
if (tc->flags & TCF_IN_FUNCTION) {
if (tc->inFunction()) {
JSLocalKind localKind;
uintN index;
@ -3226,20 +3241,69 @@ OuterLet(JSTreeContext *tc, JSStmtInfo *stmt, JSAtom *atom)
return false;
}
/*
* If we are generating global or eval-called-from-global code, bind a "gvar"
* here, as soon as possible. The JSOP_GETGVAR, etc., ops speed up interpreted
* global variable access by memoizing name-to-slot mappings during execution
* of the script prolog (via JSOP_DEFVAR/JSOP_DEFCONST). If the memoization
* can't be done due to a pre-existing property of the same name as the var or
* const but incompatible attributes/getter/setter/etc, these ops devolve to
* JSOP_NAME, etc.
*
* For now, don't try to look up eval frame variables at compile time. This is
* sub-optimal: we could handle eval-called-from-global-code gvars since eval
* gets its own script and frame. The eval-from-function-code case is harder,
* since functions do not atomize gvars and then reserve their atom indexes as
* stack frame slots.
*/
static bool
BindGvar(JSParseNode *pn, JSTreeContext *tc, bool inWith = false)
{
JS_ASSERT(pn->pn_op == JSOP_NAME);
JS_ASSERT(!tc->inFunction());
if (tc->compiling() && !tc->parser->callerFrame) {
JSCodeGenerator *cg = (JSCodeGenerator *) tc;
/* Index pn->pn_atom so we can map fast global number to name. */
JSAtomListElement *ale = cg->atomList.add(tc->parser, pn->pn_atom);
if (!ale)
return false;
/* Defend against cg->ngvars 16-bit overflow. */
uintN slot = ALE_INDEX(ale);
if ((slot + 1) >> 16)
return true;
if ((uint16)(slot + 1) > cg->ngvars)
cg->ngvars = (uint16)(slot + 1);
if (!inWith) {
pn->pn_op = JSOP_GETGVAR;
pn->pn_cookie = MAKE_UPVAR_COOKIE(tc->staticLevel, slot);
pn->pn_dflags |= PND_BOUND | PND_GVAR;
}
}
return true;
}
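
The slot arithmetic in BindGvar defends cg->ngvars, a uint16, against overflow. Stated with concrete values (illustrative compile-time checks, not part of the patch):

JS_STATIC_ASSERT(((65534U + 1) >> 16) == 0);  // slot 65534 still fits; ngvars becomes 65535
JS_STATIC_ASSERT(((65535U + 1) >> 16) == 1);  // slot 65535 would overflow; keep JSOP_NAME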
static JSBool
BindVarOrConst(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc)
{
JSParseNode *pn = data->pn;
/* Default best op for pn is JSOP_NAME; we'll try to improve below. */
pn->pn_op = JSOP_NAME;
if (!CheckStrictBinding(cx, tc, atom, pn))
return false;
JSStmtInfo *stmt = js_LexicalLookup(tc, atom, NULL);
if (stmt && stmt->type == STMT_WITH) {
pn->pn_op = JSOP_NAME;
data->fresh = false;
return JS_TRUE;
return tc->inFunction() || BindGvar(pn, tc, true);
}
JSAtomListElement *ale = tc->decls.lookup(atom);
@ -3374,43 +3438,8 @@ BindVarOrConst(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc)
if (data->op == JSOP_DEFCONST)
pn->pn_dflags |= PND_CONST;
if (!(tc->flags & TCF_IN_FUNCTION)) {
/*
* If we are generating global or eval-called-from-global code, bind a
* "gvar" here, as soon as possible. The JSOP_GETGVAR, etc., ops speed
* up global variable access by memoizing name-to-slot mappings in the
* script prolog (via JSOP_DEFVAR/JSOP_DEFCONST). If the memoization
* can't be done due to a pre-existing property of the same name as the
* var or const but incompatible attributes/getter/setter/etc, these
* ops devolve to JSOP_NAME, etc.
*
* For now, don't try to lookup eval frame variables at compile time.
* Seems sub-optimal: why couldn't we find eval-called-from-a-function
* upvars early and possibly simplify jsemit.cpp:BindNameToSlot?
*/
pn->pn_op = JSOP_NAME;
if ((tc->flags & TCF_COMPILING) && !tc->parser->callerFrame) {
JSCodeGenerator *cg = (JSCodeGenerator *) tc;
/* Index atom so we can map fast global number to name. */
ale = cg->atomList.add(tc->parser, atom);
if (!ale)
return JS_FALSE;
/* Defend against cg->ngvars 16-bit overflow. */
uintN slot = ALE_INDEX(ale);
if ((slot + 1) >> 16)
return JS_TRUE;
if ((uint16)(slot + 1) > cg->ngvars)
cg->ngvars = (uint16)(slot + 1);
pn->pn_op = JSOP_GETGVAR;
pn->pn_cookie = MAKE_UPVAR_COOKIE(tc->staticLevel, slot);
pn->pn_dflags |= PND_BOUND | PND_GVAR;
}
return JS_TRUE;
}
if (!tc->inFunction())
return BindGvar(pn, tc);
if (atom == cx->runtime->atomState.argumentsAtom) {
pn->pn_op = JSOP_ARGUMENTS;
@ -3446,7 +3475,6 @@ BindVarOrConst(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc)
/* Not an argument, must be a redeclared local var. */
JS_ASSERT(localKind == JSLOCAL_VAR || localKind == JSLOCAL_CONST);
}
pn->pn_op = JSOP_NAME;
return JS_TRUE;
}
@ -4175,7 +4203,7 @@ Parser::returnOrYield(bool useAssignExpr)
JSParseNode *pn, *pn2;
tt = tokenStream.currentToken().type;
if (tt == TOK_RETURN && !(tc->flags & TCF_IN_FUNCTION)) {
if (tt == TOK_RETURN && !tc->inFunction()) {
ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR,
JSMSG_BAD_RETURN_OR_YIELD, js_return_str);
return NULL;
@ -5635,7 +5663,7 @@ Parser::statement()
static void
NoteArgumentsUse(JSTreeContext *tc)
{
JS_ASSERT(tc->flags & TCF_IN_FUNCTION);
JS_ASSERT(tc->inFunction());
tc->flags |= TCF_FUN_USES_ARGUMENTS;
if (tc->funbox)
tc->funbox->node->pn_dflags |= PND_FUNARG;
@ -5805,7 +5833,7 @@ Parser::variables(bool inLetHead)
/* The declarator's position must include the initializer. */
pn2->pn_pos.end = init->pn_pos.end;
if ((tc->flags & TCF_IN_FUNCTION) &&
if (tc->inFunction() &&
atom == context->runtime->atomState.argumentsAtom) {
NoteArgumentsUse(tc);
if (!let)
@ -8359,7 +8387,7 @@ Parser::primaryExpr(TokenKind tt, JSBool afterDot)
tokenStream.currentToken().t_reflags);
if (!obj)
return NULL;
if (!(tc->flags & TCF_COMPILE_N_GO)) {
if (!tc->compileAndGo()) {
obj->clearParent();
obj->clearProto();
}

View file

@ -91,10 +91,12 @@ typedef uint32 jsatomid;
#ifdef __cplusplus
/* Class and struct forward declarations in namespace js. */
extern "C++" {
namespace js {
struct Parser;
struct Compiler;
}
}
#endif
@ -184,17 +186,18 @@ class DeflatedStringCache;
class PropertyCache;
struct PropertyCacheEntry;
static inline JSPropertyOp
CastAsPropertyOp(JSObject *object)
{
return JS_DATA_TO_FUNC_PTR(JSPropertyOp, object);
}
} /* namespace js */
/* Common instantiations. */
typedef js::Vector<jschar, 32> JSCharBuffer;
static inline JSPropertyOp
js_CastAsPropertyOp(JSObject *object)
{
return JS_DATA_TO_FUNC_PTR(JSPropertyOp, object);
}
} /* export "C++" */
#endif /* __cplusplus */
@ -209,7 +212,19 @@ typedef enum JSTrapStatus {
typedef JSTrapStatus
(* JSTrapHandler)(JSContext *cx, JSScript *script, jsbytecode *pc, jsval *rval,
void *closure);
jsval closure);
typedef JSTrapStatus
(* JSInterruptHook)(JSContext *cx, JSScript *script, jsbytecode *pc, jsval *rval,
void *closure);
typedef JSTrapStatus
(* JSDebuggerHandler)(JSContext *cx, JSScript *script, jsbytecode *pc, jsval *rval,
void *closure);
typedef JSTrapStatus
(* JSThrowHook)(JSContext *cx, JSScript *script, jsbytecode *pc, jsval *rval,
void *closure);
typedef JSBool
(* JSWatchPointHandler)(JSContext *cx, JSObject *obj, jsval id, jsval old,
@ -271,13 +286,13 @@ typedef JSBool
void *closure);
typedef struct JSDebugHooks {
JSTrapHandler interruptHandler;
void *interruptHandlerData;
JSInterruptHook interruptHook;
void *interruptHookData;
JSNewScriptHook newScriptHook;
void *newScriptHookData;
JSDestroyScriptHook destroyScriptHook;
void *destroyScriptHookData;
JSTrapHandler debuggerHandler;
JSDebuggerHandler debuggerHandler;
void *debuggerHandlerData;
JSSourceHandler sourceHandler;
void *sourceHandlerData;
@ -287,7 +302,7 @@ typedef struct JSDebugHooks {
void *callHookData;
JSObjectHook objectHook;
void *objectHookData;
JSTrapHandler throwHook;
JSThrowHook throwHook;
void *throwHookData;
JSDebugErrorHook debugErrorHook;
void *debugErrorHookData;
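
With JSTrapHandler split into purpose-specific typedefs, only trap handlers take a jsval closure; the interrupt, debugger, and throw hooks keep void*. A hypothetical embedder hook matching the new JSInterruptHook signature (the function name and counter closure are illustrative; installation goes through the jsdbgapi entry points):

static JSTrapStatus
CountingInterruptHook(JSContext *cx, JSScript *script, jsbytecode *pc,
                      jsval *rval, void *closure)
{
    uint32 *opsSeen = static_cast<uint32 *>(closure);
    ++*opsSeen;               // e.g. meter interpreted opcodes
    return JSTRAP_CONTINUE;   // resume execution unchanged
}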

View file

@ -121,8 +121,8 @@ class UpRecursiveSlotMap : public RecursiveSlotMap
};
#if defined DEBUG
static JS_REQUIRES_STACK void
AssertDownFrameIsConsistent(JSContext* cx, VMSideExit* anchor, FrameInfo* fi)
JS_REQUIRES_STACK void
TraceRecorder::assertDownFrameIsConsistent(VMSideExit* anchor, FrameInfo* fi)
{
JS_ASSERT(anchor->recursive_down);
JS_ASSERT(anchor->recursive_down->callerHeight == fi->callerHeight);
@ -130,7 +130,7 @@ AssertDownFrameIsConsistent(JSContext* cx, VMSideExit* anchor, FrameInfo* fi)
unsigned downPostSlots = fi->callerHeight;
TraceType* typeMap = fi->get_typemap();
CaptureStackTypes(cx, 1, typeMap);
captureStackTypes(1, typeMap);
const TraceType* m1 = anchor->recursive_down->get_typemap();
for (unsigned i = 0; i < downPostSlots; i++) {
if (m1[i] == typeMap[i])
@ -258,7 +258,7 @@ TraceRecorder::upRecursion()
* recursive functions.
*/
#if defined DEBUG
AssertDownFrameIsConsistent(cx, anchor, fi);
assertDownFrameIsConsistent(anchor, fi);
#endif
fi = anchor->recursive_down;
} else if (recursive_pc != fragment->root->ip) {
@ -266,7 +266,7 @@ TraceRecorder::upRecursion()
* Case 1: Guess that down-recursion has started to back out; infer types
* from the down frame.
*/
CaptureStackTypes(cx, 1, fi->get_typemap());
captureStackTypes(1, fi->get_typemap());
} else {
/* Case 2: Guess that up-recursion is backing out, infer types from our Tree. */
JS_ASSERT(tree->nStackTypes == downPostSlots + 1);
@ -491,7 +491,7 @@ TraceRecorder::slurpDownFrames(jsbytecode* return_pc)
TraceType* typeMap = exit->stackTypeMap();
jsbytecode* oldpc = cx->fp->regs->pc;
cx->fp->regs->pc = exit->pc;
CaptureStackTypes(cx, frameDepth, typeMap);
captureStackTypes(frameDepth, typeMap);
cx->fp->regs->pc = oldpc;
if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) {
JS_ASSERT_IF(*cx->fp->regs->pc != JSOP_RETURN, *cx->fp->regs->pc == JSOP_STOP);

View file

@ -353,6 +353,38 @@ typedef struct REGlobalData {
size_t backTrackLimit; /* upper limit on backtrack states */
} REGlobalData;
void
JSRegExpStatics::clearRoots()
{
input = NULL;
cx->runtime->gcPoke = JS_TRUE;
}
bool
JSRegExpStatics::copy(const JSRegExpStatics& other)
{
clearRoots();
input = other.input;
multiline = other.multiline;
lastMatch = other.lastMatch;
lastParen = other.lastParen;
leftContext = other.leftContext;
rightContext = other.rightContext;
if (!parens.resize(other.parens.length()))
return false;
memcpy(parens.begin(), other.parens.begin(), sizeof(JSSubString) * parens.length());
return true;
}
void
JSRegExpStatics::clear()
{
clearRoots();
multiline = false;
lastMatch = lastParen = leftContext = rightContext = js_EmptySubString;
parens.clear();
}
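
JSRegExpStatics::copy clones the paren vector with resize-plus-memcpy, which is valid only because JSSubString is plain-old-data. The pattern in isolation (a sketch; the exact js::Vector template parameters of the parens member are an assumption):

static bool
CopyParens(js::Vector<JSSubString, 0, js::SystemAllocPolicy> &dst,
           const js::Vector<JSSubString, 0, js::SystemAllocPolicy> &src)
{
    if (!dst.resize(src.length()))
        return false;                    // propagate OOM to the caller
    memcpy(dst.begin(), src.begin(),     // safe only for POD element types
           sizeof(JSSubString) * src.length());
    return true;
}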
/*
* 1. If IgnoreCase is false, return ch.
* 2. Let u be ch converted to upper case as if by calling
@ -4865,11 +4897,10 @@ js_ExecuteRegExp(JSContext *cx, JSRegExp *re, JSString *str, size_t *indexp,
const jschar *cp, *ep;
size_t i, length, start;
JSSubString *morepar;
JSBool ok;
JSRegExpStatics *res;
ptrdiff_t matchlen;
uintN num, morenum;
uintN num;
JSString *parstr, *matchstr;
JSObject *obj;
@ -4973,45 +5004,22 @@ js_ExecuteRegExp(JSContext *cx, JSRegExp *re, JSString *str, size_t *indexp,
res = &cx->regExpStatics;
res->input = str;
res->parenCount = uint16(re->parenCount);
if (!res->parens.resize(re->parenCount)) {
ok = JS_FALSE;
goto out;
}
if (re->parenCount == 0) {
res->lastParen = js_EmptySubString;
} else {
for (num = 0; num < re->parenCount; num++) {
JSSubString *sub = &res->parens[num];
parsub = &result->parens[num];
if (num < 9) {
if (parsub->index == -1) {
res->parens[num].chars = NULL;
res->parens[num].length = 0;
} else {
res->parens[num].chars = gData.cpbegin + parsub->index;
res->parens[num].length = parsub->length;
}
if (parsub->index == -1) {
sub->chars = NULL;
sub->length = 0;
} else {
morenum = num - 9;
morepar = res->moreParens;
if (!morepar) {
res->moreLength = 10;
morepar = (JSSubString*)
cx->malloc(10 * sizeof(JSSubString));
} else if (morenum >= res->moreLength) {
res->moreLength += 10;
morepar = (JSSubString*)
cx->realloc(morepar,
res->moreLength * sizeof(JSSubString));
}
if (!morepar) {
ok = JS_FALSE;
goto out;
}
res->moreParens = morepar;
if (parsub->index == -1) {
morepar[morenum].chars = NULL;
morepar[morenum].length = 0;
} else {
morepar[morenum].chars = gData.cpbegin + parsub->index;
morepar[morenum].length = parsub->length;
}
sub->chars = gData.cpbegin + parsub->index;
sub->length = parsub->length;
}
if (test)
continue;
@ -5209,14 +5217,9 @@ JS_FRIEND_API(void)
js_SaveAndClearRegExpStatics(JSContext *cx, JSRegExpStatics *statics,
AutoValueRooter *tvr)
{
*statics = cx->regExpStatics;
statics->copy(cx->regExpStatics);
if (statics->input)
tvr->setString(statics->input);
/*
* Prevent JS_ClearRegExpStatics from freeing moreParens, since we've only
* moved it elsewhere (into statics->moreParens).
*/
cx->regExpStatics.moreParens = NULL;
JS_ClearRegExpStatics(cx);
}
@ -5225,8 +5228,7 @@ js_RestoreRegExpStatics(JSContext *cx, JSRegExpStatics *statics,
AutoValueRooter *tvr)
{
/* Clear/free any new JSRegExpStatics data before clobbering. */
JS_ClearRegExpStatics(cx);
cx->regExpStatics = *statics;
cx->regExpStatics.copy(*statics);
}
void
@ -5278,7 +5280,7 @@ regexp_static_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
sub = &res->rightContext;
break;
default:
sub = REGEXP_PAREN_SUBSTRING(res, slot);
sub = (size_t(slot) < res->parens.length()) ? &res->parens[slot] : &js_EmptySubString;
break;
}
str = js_NewStringCopyN(cx, sub->chars, sub->length);

View file

@ -52,19 +52,6 @@
JS_BEGIN_EXTERN_C
struct JSRegExpStatics {
JSString *input; /* input string to match (perl $_, GC root) */
JSBool multiline; /* whether input contains newlines (perl $*) */
uint16 parenCount; /* number of valid elements in parens[] */
uint16 moreLength; /* number of allocated elements in moreParens */
JSSubString parens[9]; /* last set of parens matched (perl $1, $2) */
JSSubString *moreParens; /* null or realloc'd vector for $10, etc. */
JSSubString lastMatch; /* last string matched (perl $&) */
JSSubString lastParen; /* last paren matched (perl $+) */
JSSubString leftContext; /* input to left of last match (perl $`) */
JSSubString rightContext; /* input to right of last match (perl $') */
};
namespace js { class AutoValueRooter; }
extern JS_FRIEND_API(void)
@ -96,17 +83,6 @@ typedef struct RECharSet {
} u;
} RECharSet;
/*
* This macro is safe because moreParens is guaranteed to be allocated and big
* enough to hold parenCount, or else be null when parenCount is 0.
*/
#define REGEXP_PAREN_SUBSTRING(res, num) \
(((jsuint)(num) < (jsuint)(res)->parenCount) \
? ((jsuint)(num) < 9) \
? &(res)->parens[num] \
: &(res)->moreParens[(num) - 9] \
: &js_EmptySubString)
typedef struct RENode RENode;
struct JSRegExp {

View file

@ -768,16 +768,20 @@ NormalizeGetterAndSetter(JSContext *cx, JSScope *scope,
JSPropertyOp &getter,
JSPropertyOp &setter)
{
if (setter == JS_PropertyStub)
if (setter == JS_PropertyStub) {
JS_ASSERT(!(attrs & JSPROP_SETTER));
setter = NULL;
}
if (flags & JSScopeProperty::METHOD) {
/* Here, getter is the method, a function object reference. */
JS_ASSERT(getter);
JS_ASSERT(!setter || setter == js_watch_set);
JS_ASSERT(!(attrs & (JSPROP_GETTER | JSPROP_SETTER)));
} else {
if (getter == JS_PropertyStub)
if (getter == JS_PropertyStub) {
JS_ASSERT(!(attrs & JSPROP_GETTER));
getter = NULL;
}
}
/*
@ -1158,7 +1162,7 @@ JSScope::deletingShapeChange(JSContext *cx, JSScopeProperty *sprop)
}
bool
JSScope::methodShapeChange(JSContext *cx, JSScopeProperty *sprop, jsval toval)
JSScope::methodShapeChange(JSContext *cx, JSScopeProperty *sprop)
{
JS_ASSERT(!JSVAL_IS_NULL(sprop->id));
if (sprop->isMethod()) {
@ -1189,7 +1193,7 @@ JSScope::methodShapeChange(JSContext *cx, JSScopeProperty *sprop, jsval toval)
}
bool
JSScope::methodShapeChange(JSContext *cx, uint32 slot, jsval toval)
JSScope::methodShapeChange(JSContext *cx, uint32 slot)
{
if (!hasMethodBarrier()) {
generateOwnShape(cx);
@ -1197,7 +1201,7 @@ JSScope::methodShapeChange(JSContext *cx, uint32 slot, jsval toval)
for (JSScopeProperty *sprop = lastProp; sprop; sprop = sprop->parent) {
JS_ASSERT(!JSVAL_IS_NULL(sprop->id));
if (sprop->slot == slot)
return methodShapeChange(cx, sprop, toval);
return methodShapeChange(cx, sprop);
}
}
return true;

View file

@ -373,8 +373,8 @@ struct JSScope : public JSObjectMap
void trace(JSTracer *trc);
void deletingShapeChange(JSContext *cx, JSScopeProperty *sprop);
bool methodShapeChange(JSContext *cx, JSScopeProperty *sprop, jsval toval);
bool methodShapeChange(JSContext *cx, uint32 slot, jsval toval);
bool methodShapeChange(JSContext *cx, JSScopeProperty *sprop);
bool methodShapeChange(JSContext *cx, uint32 slot);
void protoShapeChange(JSContext *cx);
void shadowingShapeChange(JSContext *cx, JSScopeProperty *sprop);
bool globalObjectOwnShapeChange(JSContext *cx);
@ -575,21 +575,23 @@ JSObject::lockedSetSlot(uintN slot, jsval value)
* Helpers for reinterpreting JSPropertyOp as JSObject* for scripted getters
* and setters.
*/
namespace js {
inline JSObject *
js_CastAsObject(JSPropertyOp op)
CastAsObject(JSPropertyOp op)
{
return JS_FUNC_TO_DATA_PTR(JSObject *, op);
}
inline jsval
js_CastAsObjectJSVal(JSPropertyOp op)
CastAsObjectJSVal(JSPropertyOp op)
{
return OBJECT_TO_JSVAL(JS_FUNC_TO_DATA_PTR(JSObject *, op));
}
namespace js {
class PropertyTree;
}
} /* namespace js */
struct JSScopeProperty {
friend struct JSScope;
@ -602,13 +604,17 @@ struct JSScopeProperty {
private:
union {
JSPropertyOp rawGetter; /* getter and setter hooks or objects */
JSPropertyOp rawGetter; /* getter and setter hooks or objects */
JSObject *getterObj; /* user-defined callable "get" object or
null if sprop->hasGetterValue() */
JSScopeProperty *next; /* next node in freelist */
};
union {
JSPropertyOp rawSetter; /* getter is JSObject* and setter is 0
JSPropertyOp rawSetter; /* getter is JSObject* and setter is 0
if sprop->isMethod() */
JSObject *setterObj; /* user-defined callable "set" object or
null if sprop->hasSetterValue() */
JSScopeProperty **prevp; /* pointer to previous node's next, or
pointer to head of freelist */
};
@ -673,10 +679,8 @@ struct JSScopeProperty {
: id(id), rawGetter(getter), rawSetter(setter), slot(slot), attrs(uint8(attrs)),
flags(uint8(flags)), shortid(int16(shortid))
{
JS_ASSERT_IF(getter && (attrs & JSPROP_GETTER),
JSVAL_TO_OBJECT(getterValue())->isCallable());
JS_ASSERT_IF(setter && (attrs & JSPROP_SETTER),
JSVAL_TO_OBJECT(setterValue())->isCallable());
JS_ASSERT_IF(getter && (attrs & JSPROP_GETTER), getterObj->isCallable());
JS_ASSERT_IF(setter && (attrs & JSPROP_SETTER), setterObj->isCallable());
}
bool marked() const { return (flags & MARK) != 0; }
@ -698,48 +702,34 @@ struct JSScopeProperty {
PUBLIC_FLAGS = ALIAS | HAS_SHORTID | METHOD
};
uintN getFlags() const { return flags & PUBLIC_FLAGS; }
bool isAlias() const { return (flags & ALIAS) != 0; }
uintN getFlags() const { return flags & PUBLIC_FLAGS; }
bool isAlias() const { return (flags & ALIAS) != 0; }
bool hasShortID() const { return (flags & HAS_SHORTID) != 0; }
bool isMethod() const { return (flags & METHOD) != 0; }
bool isMethod() const { return (flags & METHOD) != 0; }
JSObject *methodObject() const {
JS_ASSERT(isMethod());
return js_CastAsObject(rawGetter);
}
jsval methodValue() const {
JS_ASSERT(isMethod());
return js_CastAsObjectJSVal(rawGetter);
}
JSObject *methodObject() const { JS_ASSERT(isMethod()); return getterObj; }
jsval methodValue() const { return OBJECT_TO_JSVAL(methodObject()); }
JSPropertyOp getter() const { return rawGetter; }
bool hasDefaultGetter() const { return !rawGetter; }
JSPropertyOp getterOp() const {
JS_ASSERT(!hasGetterValue());
return rawGetter;
}
JSObject *getterObject() const {
JS_ASSERT(hasGetterValue());
return js_CastAsObject(rawGetter);
}
JSPropertyOp getter() const { return rawGetter; }
bool hasDefaultGetter() const { return !rawGetter; }
JSPropertyOp getterOp() const { JS_ASSERT(!hasGetterValue()); return rawGetter; }
JSObject *getterObject() const { JS_ASSERT(hasGetterValue()); return getterObj; }
// Per ES5, decode null getterObj as the undefined value, which encodes as null.
jsval getterValue() const {
JS_ASSERT(hasGetterValue());
return rawGetter ? js_CastAsObjectJSVal(rawGetter) : JSVAL_VOID;
return getterObj ? OBJECT_TO_JSVAL(getterObj) : JSVAL_VOID;
}
JSPropertyOp setter() const { return rawSetter; }
bool hasDefaultSetter() const { return !rawSetter; }
JSPropertyOp setterOp() const {
JS_ASSERT(!hasSetterValue());
return rawSetter;
}
JSObject *setterObject() const {
JS_ASSERT(hasSetterValue() && rawSetter);
return js_CastAsObject(rawSetter);
}
JSPropertyOp setter() const { return rawSetter; }
bool hasDefaultSetter() const { return !rawSetter; }
JSPropertyOp setterOp() const { JS_ASSERT(!hasSetterValue()); return rawSetter; }
JSObject *setterObject() const { JS_ASSERT(hasSetterValue()); return setterObj; }
// Per ES5, decode null setterObj as the undefined value, which encodes as null.
jsval setterValue() const {
JS_ASSERT(hasSetterValue());
return rawSetter ? js_CastAsObjectJSVal(rawSetter) : JSVAL_VOID;
return setterObj ? OBJECT_TO_JSVAL(setterObj) : JSVAL_VOID;
}
inline JSDHashNumber hash() const;

View file

@ -140,7 +140,7 @@ JSScope::methodWriteBarrier(JSContext *cx, JSScopeProperty *sprop, jsval v)
jsval prev = object->lockedGetSlot(sprop->slot);
if (prev != v && VALUE_IS_FUNCTION(cx, prev))
return methodShapeChange(cx, sprop, v);
return methodShapeChange(cx, sprop);
}
return true;
}
@ -152,7 +152,7 @@ JSScope::methodWriteBarrier(JSContext *cx, uint32 slot, jsval v)
jsval prev = object->lockedGetSlot(slot);
if (prev != v && VALUE_IS_FUNCTION(cx, prev))
return methodShapeChange(cx, slot, v);
return methodShapeChange(cx, slot);
}
return true;
}

View file

@ -1123,7 +1123,7 @@ struct ManualCmp {
template <class InnerMatch>
static jsint
Duff(const jschar *text, jsuint textlen, const jschar *pat, jsuint patlen)
UnrolledMatch(const jschar *text, jsuint textlen, const jschar *pat, jsuint patlen)
{
JS_ASSERT(patlen > 0 && textlen > 0);
const jschar *textend = text + textlen - (patlen - 1);
@ -1134,26 +1134,35 @@ Duff(const jschar *text, jsuint textlen, const jschar *pat, jsuint patlen)
const jschar *t = text;
switch ((textend - t) & 7) {
case 0: if (*t++ == p0) { fixup = 8; goto match; }
case 7: if (*t++ == p0) { fixup = 7; goto match; }
case 6: if (*t++ == p0) { fixup = 6; goto match; }
case 5: if (*t++ == p0) { fixup = 5; goto match; }
case 4: if (*t++ == p0) { fixup = 4; goto match; }
case 3: if (*t++ == p0) { fixup = 3; goto match; }
case 2: if (*t++ == p0) { fixup = 2; goto match; }
case 1: if (*t++ == p0) { fixup = 1; goto match; }
}
while (t != textend) {
if (t[0] == p0) { t += 1; fixup = 8; goto match; }
if (t[1] == p0) { t += 2; fixup = 7; goto match; }
if (t[2] == p0) { t += 3; fixup = 6; goto match; }
if (t[3] == p0) { t += 4; fixup = 5; goto match; }
if (t[4] == p0) { t += 5; fixup = 4; goto match; }
if (t[5] == p0) { t += 6; fixup = 3; goto match; }
if (t[6] == p0) { t += 7; fixup = 2; goto match; }
if (t[7] == p0) { t += 8; fixup = 1; goto match; }
t += 8;
continue;
do {
case 0: if (*t++ == p0) { fixup = 8; goto match; }
case 7: if (*t++ == p0) { fixup = 7; goto match; }
case 6: if (*t++ == p0) { fixup = 6; goto match; }
case 5: if (*t++ == p0) { fixup = 5; goto match; }
case 4: if (*t++ == p0) { fixup = 4; goto match; }
case 3: if (*t++ == p0) { fixup = 3; goto match; }
case 2: if (*t++ == p0) { fixup = 2; goto match; }
case 1: if (*t++ == p0) { fixup = 1; goto match; }
continue;
do {
if (*t++ == p0) {
match:
if (!InnerMatch::match(patNext, t, extent))
goto failed_match;
return t - text - 1;
}
failed_match:;
} while (--fixup > 0);
} while(t != textend);
if (*t++ == p0) {
match:
if (!InnerMatch::match(patNext, t, extent))
goto failed_match;
return t - text - 1;
}
failed_match:;
} while (--fixup > 0);
}
return -1;
}
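
UnrolledMatch keeps Duff's-device-style 8-way unrolling for the first-character scan; the fixup counter records how far into an unrolled block the hit occurred so a failed full-pattern compare can resume correctly. The core idea in a reduced 2-way form (illustrative only, without the fixup/rematch machinery):

// Hypothetical 2-way-unrolled scan for the pattern's first character.
// jschar is SpiderMonkey's 16-bit character type.
static const jschar *
FindFirstChar(const jschar *t, const jschar *textend, jschar p0)
{
    if ((textend - t) & 1) {        // handle the odd leftover up front
        if (*t == p0)
            return t;
        ++t;
    }
    while (t != textend) {          // body now tests two chars per iteration
        if (t[0] == p0)
            return t;
        if (t[1] == p0)
            return t + 1;
        t += 2;
    }
    return NULL;
}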
@ -1209,10 +1218,10 @@ StringMatch(const jschar *text, jsuint textlen,
*/
return
#if !defined(__linux__)
patlen > 128 ? Duff<MemCmp>(text, textlen, pat, patlen)
patlen > 128 ? UnrolledMatch<MemCmp>(text, textlen, pat, patlen)
:
#endif
Duff<ManualCmp>(text, textlen, pat, patlen);
UnrolledMatch<ManualCmp>(text, textlen, pat, patlen);
}
static JSBool
@ -1719,13 +1728,13 @@ InterpretDollar(JSContext *cx, jschar *dp, jschar *ep, ReplaceData &rdata,
if (JS7_ISDEC(dc)) {
/* ECMA-262 Edition 3: 1-9 or 01-99 */
num = JS7_UNDEC(dc);
if (num > res->parenCount)
if (num > res->parens.length())
return NULL;
cp = dp + 2;
if (cp < ep && (dc = *cp, JS7_ISDEC(dc))) {
tmp = 10 * num + JS7_UNDEC(dc);
if (tmp <= res->parenCount) {
if (tmp <= res->parens.length()) {
cp++;
num = tmp;
}
@ -1736,7 +1745,7 @@ InterpretDollar(JSContext *cx, jschar *dp, jschar *ep, ReplaceData &rdata,
/* Adjust num from 1 $n-origin to 0 array-index-origin. */
num--;
*skip = cp - dp;
return REGEXP_PAREN_SUBSTRING(res, num);
return (num < res->parens.length()) ? &res->parens[num] : &js_EmptySubString;
}
*skip = 2;
@ -1769,6 +1778,20 @@ PushRegExpSubstr(JSContext *cx, const JSSubString &sub, jsval *&sp)
return true;
}
class PreserveRegExpStatics {
JSContext *cx;
JSRegExpStatics save;
public:
PreserveRegExpStatics(JSContext *cx) : cx(cx), save(cx) {
save.copy(cx->regExpStatics);
}
~PreserveRegExpStatics() {
cx->regExpStatics.copy(save);
}
};
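
Usage sketch for the RAII helper above: any path that may run arbitrary JS, and thus clobber cx->regExpStatics, brackets the call so the statics are restored on every exit path (the wrapper function is hypothetical):

static bool
InvokePreservingStatics(JSContext *cx, uintN argc, jsval *invokevp)
{
    PreserveRegExpStatics save(cx);          // copies cx->regExpStatics now...
    return js_Invoke(cx, argc, invokevp, 0);
}                                            // ...destructor copies it back here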
static bool
FindReplaceLength(JSContext *cx, ReplaceData &rdata, size_t *sizep)
{
@ -1780,8 +1803,6 @@ FindReplaceLength(JSContext *cx, ReplaceData &rdata, size_t *sizep)
lambda = rdata.lambda;
if (lambda) {
uintN i, m, n;
LeaveTrace(cx);
/*
@ -1802,17 +1823,7 @@ FindReplaceLength(JSContext *cx, ReplaceData &rdata, size_t *sizep)
}
jsval* invokevp = rdata.invokevp;
MUST_FLOW_THROUGH("lambda_out");
bool ok = false;
bool freeMoreParens = false;
/*
* Save the regExpStatics from the current regexp, since they may be
* clobbered by a RegExp usage in the lambda function. Note that all
* members of JSRegExpStatics are JSSubStrings, so not GC roots, save
* input, which is rooted otherwise via vp[1] in str_replace.
*/
JSRegExpStatics save = cx->regExpStatics;
PreserveRegExpStatics save(cx);
/* Push lambda and its 'this' parameter. */
jsval *sp = invokevp;
@ -1821,27 +1832,13 @@ FindReplaceLength(JSContext *cx, ReplaceData &rdata, size_t *sizep)
/* Push $&, $1, $2, ... */
if (!PushRegExpSubstr(cx, cx->regExpStatics.lastMatch, sp))
goto lambda_out;
return false;
i = 0;
m = cx->regExpStatics.parenCount;
n = JS_MIN(m, 9);
for (uintN j = 0; i < n; i++, j++) {
if (!PushRegExpSubstr(cx, cx->regExpStatics.parens[j], sp))
goto lambda_out;
uintN i = 0;
for (uintN n = cx->regExpStatics.parens.length(); i < n; i++) {
if (!PushRegExpSubstr(cx, cx->regExpStatics.parens[i], sp))
return false;
}
for (uintN j = 0; i < m; i++, j++) {
if (!PushRegExpSubstr(cx, cx->regExpStatics.moreParens[j], sp))
goto lambda_out;
}
/*
* We need to clear moreParens in the top-of-stack cx->regExpStatics
* so it won't be possibly realloc'ed, leaving the bottom-of-stack
* moreParens pointing to freed memory.
*/
cx->regExpStatics.moreParens = NULL;
freeMoreParens = true;
/* Make sure to push undefined for any unmatched parens. */
for (; i < p; i++)
@ -1852,7 +1849,7 @@ FindReplaceLength(JSContext *cx, ReplaceData &rdata, size_t *sizep)
*sp++ = STRING_TO_JSVAL(rdata.str);
if (!js_Invoke(cx, argc, invokevp, 0))
goto lambda_out;
return false;
/*
* NB: we count on the newborn string root to hold any string
@ -1861,18 +1858,12 @@ FindReplaceLength(JSContext *cx, ReplaceData &rdata, size_t *sizep)
*/
repstr = js_ValueToString(cx, *invokevp);
if (!repstr)
goto lambda_out;
return false;
rdata.repstr = repstr;
*sizep = repstr->length();
ok = true;
lambda_out:
if (freeMoreParens)
cx->free(cx->regExpStatics.moreParens);
cx->regExpStatics = save;
return ok;
return true;
}
repstr = rdata.repstr;
@ -2212,10 +2203,11 @@ str_split(JSContext *cx, uintN argc, jsval *vp)
* substring that was delimited.
*/
if (re && sep->chars) {
for (uintN num = 0; num < cx->regExpStatics.parenCount; num++) {
JSRegExpStatics *res = &cx->regExpStatics;
for (uintN num = 0; num < res->parens.length(); num++) {
if (limited && len >= limit)
break;
JSSubString *parsub = REGEXP_PAREN_SUBSTRING(&cx->regExpStatics, num);
JSSubString *parsub = &res->parens[num];
sub = js_NewStringCopyN(cx, parsub->chars, parsub->length);
if (!sub || !splits.push(sub))
return false;

View file

@ -72,6 +72,10 @@ JSBackgroundThread::init()
void
JSBackgroundThread::cancel()
{
/* Calling the cancel method after a failed init is allowed. */
if (!thread)
return;
PR_Lock(lock);
if (shutdown) {
PR_Unlock(lock);

View file

@ -43,6 +43,7 @@ class JSBackgroundTask {
friend class JSBackgroundThread;
JSBackgroundTask* next;
public:
virtual ~JSBackgroundTask() {}
virtual void run() = 0;
};
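The added virtual destructor matters because finished tasks are deleted through the base-class pointer. A short sketch (DerivedTask is hypothetical):
    class DerivedTask : public JSBackgroundTask {
        char* buf;
      public:
        DerivedTask() : buf(new char[64]) {}
        virtual ~DerivedTask() { delete[] buf; }
        virtual void run() { /* ... */ }
    };
    void example() {
        JSBackgroundTask* t = new DerivedTask();
        delete t;  // without virtual ~JSBackgroundTask, ~DerivedTask (and its
                   // delete[] buf) would never run
    }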

File diff suppressed because it is too large. Load diff

View file

@ -78,7 +78,7 @@ public:
memcpy(tmp, _data, _len * sizeof(T));
_data = tmp;
} else {
_data = (T*)realloc(_data, _max * sizeof(T));
_data = (T*)js_realloc(_data, _max * sizeof(T));
}
#if defined(DEBUG)
memset(&_data[_len], 0xcd, _max - _len);
@ -95,7 +95,7 @@ public:
~Queue() {
if (!alloc)
free(_data);
js_free(_data);
}
bool contains(T a) {
@ -829,7 +829,7 @@ InjectStatus(AbortableRecordingStatus ars)
}
static inline bool
StatusAbortsRecording(AbortableRecordingStatus ars)
StatusAbortsRecorderIfActive(AbortableRecordingStatus ars)
{
return ars == ARECORD_ERROR || ars == ARECORD_STOP;
}
@ -854,23 +854,28 @@ StatusAbortsRecording(AbortableRecordingStatus ars)
*/
enum RecordingStatus {
RECORD_ERROR = 0, // Error; propagate to interpreter.
RECORD_STOP = 1, // Recording should be aborted at the top-level
RECORD_STOP = 0, // Recording should be aborted at the top-level
// call to the recorder.
// (value reserved for ARECORD_ABORTED)
RECORD_CONTINUE = 3, // Continue recording.
RECORD_IMACRO = 4 // Entered imacro; continue recording.
RECORD_ERROR = 1, // Recording should be aborted at the top-level
// call to the recorder and the interpreter should
// goto error
RECORD_CONTINUE = 2, // Continue recording.
RECORD_IMACRO = 3 // Entered imacro; continue recording.
// Only JSOP_IS_IMACOP opcodes may return this.
};
enum AbortableRecordingStatus {
ARECORD_ERROR = 0,
ARECORD_STOP = 1,
ARECORD_ABORTED = 2, // Recording has already been aborted; the recorder
// has been deleted.
ARECORD_CONTINUE = 3,
ARECORD_IMACRO = 4,
ARECORD_COMPLETED = 5 // Recording of the current trace recorder completed
ARECORD_STOP = 0, // see RECORD_STOP
ARECORD_ERROR = 1, // Recording may or may not have been aborted.
// Recording should be aborted at the top-level
// if it has not already been and the interpreter
// should goto error
ARECORD_CONTINUE = 2, // see RECORD_CONTINUE
ARECORD_IMACRO = 3, // see RECORD_IMACRO
ARECORD_ABORTED = 4, // Recording has already been aborted; the
// interpreter should continue executing
ARECORD_COMPLETED = 5 // Recording completed successfully, the
// trace recorder has been deleted
};
static JS_ALWAYS_INLINE AbortableRecordingStatus
@ -887,13 +892,14 @@ InjectStatus(AbortableRecordingStatus ars)
/*
* Return whether the recording status requires the current recording session
* to be deleted. ABORTED and COMPLETED indicate the recording session is
* to be deleted. ERROR means the recording session should be deleted if it
* hasn't already. ABORTED and COMPLETED indicate the recording session is
* already deleted, so they return 'false'.
*/
static JS_ALWAYS_INLINE bool
StatusAbortsRecording(AbortableRecordingStatus ars)
StatusAbortsRecorderIfActive(AbortableRecordingStatus ars)
{
return ars <= ARECORD_STOP;
return ars <= ARECORD_ERROR;
}
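The renumbering above is what lets this predicate collapse to a single comparison: both "abort" statuses now occupy the smallest values, and the RECORD_*/ARECORD_* values line up, so InjectStatus can presumably remain a plain cast. The assumed invariants, written as illustrative compile-time checks:
    JS_STATIC_ASSERT(int(RECORD_STOP) == int(ARECORD_STOP));
    JS_STATIC_ASSERT(int(RECORD_ERROR) == int(ARECORD_ERROR));
    JS_STATIC_ASSERT(int(RECORD_CONTINUE) == int(ARECORD_CONTINUE));
    JS_STATIC_ASSERT(int(RECORD_IMACRO) == int(ARECORD_IMACRO));
    // Hence ars <= ARECORD_ERROR covers exactly ARECORD_STOP (0) and
    // ARECORD_ERROR (1), matching the comment above.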
#endif
@ -908,6 +914,12 @@ enum TypeConsensus
TypeConsensus_Bad /* Typemaps are not compatible */
};
enum MonitorResult {
MONITOR_RECORDING,
MONITOR_NOT_RECORDING,
MONITOR_ERROR
};
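MonitorLoopEdge now reports a tri-state result instead of a bool, so the interpreter can distinguish "recording" from "error". A sketch of the caller pattern at a loop edge (control flow assumed; compare the TRACE_ARGS macro below, which treats ARECORD_ERROR the same way):
    switch (MonitorLoopEdge(cx, inlineCallCount, reason)) {
      case MONITOR_RECORDING:     break;       // recorder active; keep feeding it
      case MONITOR_NOT_RECORDING: break;       // no recorder; interpret normally
      case MONITOR_ERROR:         goto error;  // propagate the error path
    }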
typedef HashMap<nanojit::LIns*, JSObject*> GuardedShapeTable;
#ifdef DEBUG
@ -926,6 +938,9 @@ class TraceRecorder
/* Cached value of JS_TRACE_MONITOR(cx). */
TraceMonitor* const traceMonitor;
/* Cached oracle keeps track of hit counts for program counter locations */
Oracle* oracle;
/* The Fragment being recorded by this recording session. */
VMFragment* const fragment;
@ -1065,6 +1080,17 @@ class TraceRecorder
*/
JS_REQUIRES_STACK nanojit::GuardRecord* createGuardRecord(VMSideExit* exit);
JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot);
JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot, const void* pc);
JS_REQUIRES_STACK unsigned findUndemotesInTypemaps(const TypeMap& typeMap, LinkableFragment* f,
Queue<unsigned>& undemotes);
JS_REQUIRES_STACK void assertDownFrameIsConsistent(VMSideExit* anchor, FrameInfo* fi);
JS_REQUIRES_STACK void captureStackTypes(unsigned callDepth, TraceType* typeMap);
bool isGlobal(jsval* p) const;
ptrdiff_t nativeGlobalSlot(jsval *p) const;
ptrdiff_t nativeGlobalOffset(jsval* p) const;
@ -1349,8 +1375,8 @@ class TraceRecorder
JS_REQUIRES_STACK void fuseIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
JS_REQUIRES_STACK AbortableRecordingStatus checkTraceEnd(jsbytecode* pc);
RecordingStatus hasMethod(JSObject* obj, jsid id, bool& found);
JS_REQUIRES_STACK RecordingStatus hasIteratorMethod(JSObject* obj, bool& found);
AbortableRecordingStatus hasMethod(JSObject* obj, jsid id, bool& found);
JS_REQUIRES_STACK AbortableRecordingStatus hasIteratorMethod(JSObject* obj, bool& found);
JS_REQUIRES_STACK jsatomid getFullIndex(ptrdiff_t pcoff = 0);
@ -1375,8 +1401,8 @@ class TraceRecorder
JS_REQUIRES_STACK AbortableRecordingStatus attemptTreeCall(TreeFragment* inner,
uintN& inlineCallCount);
static JS_REQUIRES_STACK bool recordLoopEdge(JSContext* cx, TraceRecorder* r,
uintN& inlineCallCount);
static JS_REQUIRES_STACK MonitorResult recordLoopEdge(JSContext* cx, TraceRecorder* r,
uintN& inlineCallCount);
/* Allocators associated with this recording session. */
VMAllocator& tempAlloc() const { return *traceMonitor->tempAlloc; }
@ -1389,8 +1415,8 @@ class TraceRecorder
# include "jsopcode.tbl"
#undef OPDEF
inline void* operator new(size_t size) { return calloc(1, size); }
inline void operator delete(void *p) { free(p); }
inline void* operator new(size_t size) { return js_calloc(size); }
inline void operator delete(void *p) { js_free(p); }
JS_REQUIRES_STACK
TraceRecorder(JSContext* cx, VMSideExit*, VMFragment*,
@ -1415,7 +1441,7 @@ class TraceRecorder
friend class DetermineTypesVisitor;
friend class RecursiveSlotMap;
friend class UpRecursiveSlotMap;
friend bool MonitorLoopEdge(JSContext*, uintN&, RecordReason);
friend MonitorResult MonitorLoopEdge(JSContext*, uintN&, RecordReason);
friend void AbortRecording(JSContext*, const char*);
public:
@ -1475,8 +1501,11 @@ public:
JS_BEGIN_MACRO \
if (TraceRecorder* tr_ = TRACE_RECORDER(cx)) { \
AbortableRecordingStatus status = tr_->record_##x args; \
if (StatusAbortsRecording(status)) { \
AbortRecording(cx, #x); \
if (StatusAbortsRecorderIfActive(status)) { \
if (TRACE_RECORDER(cx)) { \
JS_ASSERT(TRACE_RECORDER(cx) == tr_); \
AbortRecording(cx, #x); \
} \
if (status == ARECORD_ERROR) \
goto error; \
} \
@ -1489,7 +1518,7 @@ public:
#define TRACE_1(x,a) TRACE_ARGS(x, (a))
#define TRACE_2(x,a,b) TRACE_ARGS(x, (a, b))
extern JS_REQUIRES_STACK bool
extern JS_REQUIRES_STACK MonitorResult
MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount, RecordReason reason);
extern JS_REQUIRES_STACK void
@ -1510,9 +1539,6 @@ OverfullJITCache(TraceMonitor* tm);
extern void
FlushJITCache(JSContext* cx);
extern void
PurgeJITOracle();
extern JSObject *
GetBuiltinFunction(JSContext *cx, uintN index);

View file

@ -1035,10 +1035,10 @@ class TypedArrayTemplate
{
NativeType *dest = static_cast<NativeType*>(data);
if (ar->isDenseArray() && js_DenseArrayCapacity(ar) >= len) {
if (ar->isDenseArray() && ar->getDenseArrayCapacity() >= len) {
JS_ASSERT(ar->getArrayLength() == len);
jsval *src = ar->dslots;
jsval *src = ar->getDenseArrayElements();
for (uintN i = 0; i < len; ++i) {
jsval v = *src++;

View file

@ -65,6 +65,7 @@ JS_STATIC_ASSERT(sizeof(void *) == sizeof(void (*)()));
JS_PUBLIC_API(void) JS_Assert(const char *s, const char *file, JSIntn ln)
{
fprintf(stderr, "Assertion failure: %s, at %s:%d\n", s, file, ln);
fflush(stderr);
#if defined(WIN32)
DebugBreak();
exit(3);

View file

@ -190,20 +190,14 @@ JS_DumpBacktrace(JSCallsite *trace);
#else
static JS_INLINE void* js_malloc(size_t bytes) {
if (bytes < sizeof(void*)) /* for asyncFree */
bytes = sizeof(void*);
return malloc(bytes);
}
static JS_INLINE void* js_calloc(size_t bytes) {
if (bytes < sizeof(void*)) /* for asyncFree */
bytes = sizeof(void*);
return calloc(bytes, 1);
}
static JS_INLINE void* js_realloc(void* p, size_t bytes) {
if (bytes < sizeof(void*)) /* for asyncFree */
bytes = sizeof(void*);
return realloc(p, bytes);
}

View file

@ -5185,9 +5185,8 @@ js_TestXMLEquality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
HasSimpleContent(xml))) {
ok = js_EnterLocalRootScope(cx);
if (ok) {
str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
vstr = js_ValueToString(cx, v);
ok = str && vstr;
ok = (str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj))) &&
(vstr = js_ValueToString(cx, v));
if (ok)
*bp = js_EqualStrings(str, vstr);
js_LeaveLocalRootScope(cx);
@ -5200,9 +5199,8 @@ js_TestXMLEquality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
ok = js_EnterLocalRootScope(cx);
if (ok) {
if (HasSimpleContent(xml)) {
str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
vstr = js_ValueToString(cx, v);
ok = str && vstr;
ok = (str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj))) &&
(vstr = js_ValueToString(cx, v));
if (ok)
*bp = js_EqualStrings(str, vstr);
} else if (JSVAL_IS_STRING(v) || JSVAL_IS_NUMBER(v)) {
@ -7203,19 +7201,6 @@ js_GetXMLObject(JSContext *cx, JSXML *xml)
return obj;
}
/*
* A JSXML cannot be shared among threads unless it has an object.
* A JSXML cannot be given an object unless:
* (a) it has no parent; or
* (b) its parent has no object (therefore is thread-private); or
* (c) its parent's object is locked.
*
* Once given an object, a JSXML is immutable.
*/
JS_ASSERT(!xml->parent ||
!xml->parent->object ||
JS_IS_OBJ_LOCKED(cx, xml->parent->object));
obj = NewXMLObject(cx, xml);
if (!obj)
return NULL;

View file

@ -1 +1 @@
234af92683dad306d03911975b0e9afc57936cf5
b49dd62330e017769dfdea9c87a74698179c7f45

View file

@ -1280,12 +1280,6 @@ namespace nanojit
{
NanoAssert(_thisfrag->nStaticExits == 0);
// The trace must end with one of these opcodes.
NanoAssert(reader->finalIns()->isop(LIR_x) ||
reader->finalIns()->isop(LIR_xtbl) ||
reader->finalIns()->isRet() ||
reader->finalIns()->isLive());
InsList pending_lives(alloc);
NanoAssert(!error());
@ -1294,22 +1288,13 @@ namespace nanojit
// the buffer, working strictly backwards in buffer-order, and
// generating machine instructions for them as we go.
//
// For each LIns, we first determine whether it's actually necessary,
// and if not skip it. Otherwise we generate code for it. There are
// two kinds of "necessary" instructions:
//
// - "Statement" instructions, which have side effects. Anything that
// could change control flow or the state of memory.
//
// - "Value" or "expression" instructions, which compute a value based
// only on the operands to the instruction (and, in the case of
// loads, the state of memory). Because we visit instructions in
// reverse order, if some previously visited instruction uses the
// value computed by this instruction, then this instruction will
// already have a register assigned to hold that value. Hence we
// can consult the instruction to detect whether its value is in
// fact used (i.e. not dead).
//
// For each LIns, we first check if it's live. If so we mark its
// operands as also live, and then generate code for it *if
// necessary*. It may not be necessary if the instruction is an
// expression and code has already been generated for all its uses in
// combination with previously handled instructions (ins->isExtant()
// will return false if this is so).
// Note that the backwards code traversal can make register allocation
// confusing. (For example, we restore a value before we spill it!)
// In particular, words like "before" and "after" must be used very
@ -1335,12 +1320,18 @@ namespace nanojit
// generated code forwards, we would expect to both spill and restore
// registers as late (at run-time) as possible; this might be better
// for reducing register pressure.
//
// Another thing to note: we provide N_LOOKAHEAD instructions' worth
// of lookahead because it's useful for backends. This is nice and
// easy because once read() gets to the LIR_start at the beginning of
// the buffer it'll just keep regetting it.
// The trace must end with one of these opcodes. Mark it as live.
NanoAssert(reader->finalIns()->isop(LIR_x) ||
reader->finalIns()->isop(LIR_xtbl) ||
reader->finalIns()->isRet() ||
isLiveOpcode(reader->finalIns()->opcode()));
for (int32_t i = 0; i < N_LOOKAHEAD; i++)
lookahead[i] = reader->read();
@ -1349,9 +1340,10 @@ namespace nanojit
LInsp ins = lookahead[0]; // give it a shorter name for local use
LOpcode op = ins->opcode();
bool required = ins->isStmt() || ins->isUsed();
if (!required)
goto end_of_loop;
if (!ins->isLive()) {
NanoAssert(!ins->isExtant());
goto ins_is_dead;
}
#ifdef NJ_VERBOSE
// Output the post-regstate (registers and/or activation).
@ -1375,16 +1367,20 @@ namespace nanojit
break;
case LIR_livei:
case LIR_lived:
CASE64(LIR_liveq:) {
CASE64(LIR_liveq:)
case LIR_lived: {
countlir_live();
LInsp op1 = ins->oprnd1();
// allocp's are meant to live until the point of the LIR_livep instruction, marking
// other expressions as live ensures that they remain so at loop bottoms.
// allocp areas require special treatment because they are accessed indirectly and
// the indirect accesses are invisible to the assembler, other than via LIR_livep.
// other expression results are only accessed directly in ways that are visible to
// the assembler, so extending those expression's lifetimes past the last loop edge
op1->setResultLive();
// LIR_allocp's are meant to live until the point of the
// LIR_livep instruction, marking other expressions as
// live ensures that they remain so at loop bottoms.
// LIR_allocp areas require special treatment because they
// are accessed indirectly and the indirect accesses are
// invisible to the assembler, other than via LIR_livep.
// Other expression results are only accessed directly in
// ways that are visible to the assembler, so extending
// those expressions' lifetimes past the last loop edge
// isn't necessary.
if (op1->isop(LIR_allocp)) {
findMemFor(op1);
@ -1395,110 +1391,133 @@ namespace nanojit
}
case LIR_reti:
CASE64(LIR_retq:)
case LIR_retd:
CASE64(LIR_retq:) {
countlir_ret();
ins->oprnd1()->setResultLive();
asm_ret(ins);
break;
}
// Allocate some stack space. The value of this instruction
// is the address of the stack space.
case LIR_allocp: {
case LIR_allocp:
countlir_alloc();
NanoAssert(ins->isInAr());
if (ins->isInReg())
evict(ins);
freeResourcesOf(ins);
if (ins->isExtant()) {
NanoAssert(ins->isInAr());
if (ins->isInReg())
evict(ins);
freeResourcesOf(ins);
}
break;
}
case LIR_immi:
{
countlir_imm();
asm_immi(ins);
if (ins->isExtant()) {
asm_immi(ins);
}
break;
}
#ifdef NANOJIT_64BIT
case LIR_immq:
{
countlir_imm();
asm_immq(ins);
if (ins->isExtant()) {
asm_immq(ins);
}
break;
}
#endif
case LIR_immd:
{
countlir_imm();
asm_immf(ins);
if (ins->isExtant()) {
asm_immf(ins);
}
break;
}
case LIR_paramp:
{
countlir_param();
asm_param(ins);
if (ins->isExtant()) {
asm_param(ins);
}
break;
}
#if NJ_SOFTFLOAT_SUPPORTED
case LIR_hcalli:
{
// return result of quad-call in register
deprecated_prepResultReg(ins, rmask(retRegs[1]));
// if hi half was used, we must use the call to ensure it happens
findSpecificRegFor(ins->oprnd1(), retRegs[0]);
case LIR_hcalli: {
LInsp op1 = ins->oprnd1();
op1->setResultLive();
if (ins->isExtant()) {
// Return result of quad-call in register.
deprecated_prepResultReg(ins, rmask(retRegs[1]));
// If hi half was used, we must use the call to ensure it happens.
findSpecificRegFor(op1, retRegs[0]);
}
break;
}
case LIR_dlo2i:
{
countlir_qlo();
asm_qlo(ins);
ins->oprnd1()->setResultLive();
if (ins->isExtant()) {
asm_qlo(ins);
}
break;
}
case LIR_dhi2i:
{
countlir_qhi();
asm_qhi(ins);
ins->oprnd1()->setResultLive();
if (ins->isExtant()) {
asm_qhi(ins);
}
break;
}
case LIR_ii2d:
{
countlir_qjoin();
asm_qjoin(ins);
ins->oprnd1()->setResultLive();
ins->oprnd2()->setResultLive();
if (ins->isExtant()) {
asm_qjoin(ins);
}
break;
}
#endif
CASE64(LIR_cmovq:)
case LIR_cmovi:
{
CASE64(LIR_cmovq:)
countlir_cmov();
asm_cmov(ins);
ins->oprnd1()->setResultLive();
ins->oprnd2()->setResultLive();
ins->oprnd3()->setResultLive();
if (ins->isExtant()) {
asm_cmov(ins);
}
break;
}
case LIR_lduc2ui:
case LIR_ldus2ui:
case LIR_ldc2i:
case LIR_lds2i:
case LIR_ldi:
{
countlir_ld();
asm_load32(ins);
ins->oprnd1()->setResultLive();
if (ins->isExtant()) {
asm_load32(ins);
}
break;
}
case LIR_ldf2d:
case LIR_ldd:
CASE64(LIR_ldq:)
{
case LIR_ldd:
case LIR_ldf2d:
countlir_ldq();
asm_load64(ins);
ins->oprnd1()->setResultLive();
if (ins->isExtant()) {
asm_load64(ins);
}
break;
}
case LIR_negi:
case LIR_noti:
{
countlir_alu();
asm_neg_not(ins);
ins->oprnd1()->setResultLive();
if (ins->isExtant()) {
asm_neg_not(ins);
}
break;
}
#if defined NANOJIT_64BIT
case LIR_addq:
@ -1507,11 +1526,14 @@ namespace nanojit
case LIR_rshuq:
case LIR_rshq:
case LIR_orq:
case LIR_qxor:
{
asm_qbinop(ins);
case LIR_xorq:
countlir_alu();
ins->oprnd1()->setResultLive();
ins->oprnd2()->setResultLive();
if (ins->isExtant()) {
asm_qbinop(ins);
}
break;
}
#endif
case LIR_addi:
@ -1524,73 +1546,101 @@ namespace nanojit
case LIR_rshi:
case LIR_rshui:
CASE86(LIR_divi:)
CASE86(LIR_modi:)
{
countlir_alu();
asm_arith(ins);
ins->oprnd1()->setResultLive();
ins->oprnd2()->setResultLive();
if (ins->isExtant()) {
asm_arith(ins);
}
break;
}
#if defined NANOJIT_IA32 || defined NANOJIT_X64
CASE86(LIR_modi:)
countlir_alu();
ins->oprnd1()->setResultLive();
if (ins->isExtant()) {
asm_arith(ins);
}
break;
#endif
case LIR_negd:
{
countlir_fpu();
asm_fneg(ins);
ins->oprnd1()->setResultLive();
if (ins->isExtant()) {
asm_fneg(ins);
}
break;
}
case LIR_addd:
case LIR_subd:
case LIR_muld:
case LIR_divd:
{
countlir_fpu();
asm_fop(ins);
ins->oprnd1()->setResultLive();
ins->oprnd2()->setResultLive();
if (ins->isExtant()) {
asm_fop(ins);
}
break;
}
case LIR_i2d:
{
countlir_fpu();
asm_i2f(ins);
ins->oprnd1()->setResultLive();
if (ins->isExtant()) {
asm_i2f(ins);
}
break;
}
case LIR_ui2d:
{
countlir_fpu();
asm_u2f(ins);
ins->oprnd1()->setResultLive();
if (ins->isExtant()) {
asm_u2f(ins);
}
break;
}
case LIR_d2i:
{
countlir_fpu();
asm_f2i(ins);
ins->oprnd1()->setResultLive();
if (ins->isExtant()) {
asm_f2i(ins);
}
break;
}
#ifdef NANOJIT_64BIT
case LIR_i2q:
case LIR_ui2uq:
{
countlir_alu();
asm_promote(ins);
ins->oprnd1()->setResultLive();
if (ins->isExtant()) {
asm_promote(ins);
}
break;
}
case LIR_q2i:
{
countlir_alu();
asm_q2i(ins);
ins->oprnd1()->setResultLive();
if (ins->isExtant()) {
asm_q2i(ins);
}
break;
}
#endif
case LIR_sti2c:
case LIR_sti2s:
case LIR_sti:
{
countlir_st();
ins->oprnd1()->setResultLive();
ins->oprnd2()->setResultLive();
asm_store32(op, ins->oprnd1(), ins->disp(), ins->oprnd2());
break;
}
case LIR_std2f:
case LIR_std:
CASE64(LIR_stq:)
{
case LIR_std:
case LIR_std2f: {
countlir_stq();
ins->oprnd1()->setResultLive();
ins->oprnd2()->setResultLive();
LIns* value = ins->oprnd1();
LIns* base = ins->oprnd2();
int dr = ins->disp();
@ -1615,13 +1665,14 @@ namespace nanojit
case LIR_jt:
case LIR_jf:
ins->oprnd1()->setResultLive();
asm_jcc(ins, pending_lives);
break;
#if NJ_JTBL_SUPPORTED
case LIR_jtbl:
{
case LIR_jtbl: {
countlir_jtbl();
ins->oprnd1()->setResultLive();
// Multiway jump can contain both forward and backward jumps.
// Out of range indices aren't allowed or checked.
// Code after this jtbl instruction is unreachable.
@ -1677,8 +1728,7 @@ namespace nanojit
}
#endif
case LIR_label:
{
case LIR_label: {
countlir_label();
LabelState *label = _labels.get(ins);
// add profiling inc, if necessary.
@ -1704,22 +1754,24 @@ namespace nanojit
})
break;
}
case LIR_xbarrier: {
case LIR_xbarrier:
break;
}
#ifdef NANOJIT_IA32
case LIR_xtbl: {
ins->oprnd1()->setResultLive();
#ifdef NANOJIT_IA32
NIns* exit = asm_exit(ins); // does intersectRegisterState()
asm_switch(ins, exit);
#else
NanoAssertMsg(0, "Not supported for this architecture");
#endif
break;
}
#else
case LIR_xtbl:
NanoAssertMsg(0, "Not supported for this architecture");
break;
#endif
case LIR_xt:
case LIR_xf:
ins->oprnd1()->setResultLive();
asm_xcc(ins);
break;
@ -1729,14 +1781,17 @@ namespace nanojit
case LIR_addxovi:
case LIR_subxovi:
case LIR_mulxovi:
{
case LIR_mulxovi: {
verbose_only( _thisfrag->nStaticExits++; )
countlir_xcc();
countlir_alu();
NIns* exit = asm_exit(ins); // does intersectRegisterState()
asm_branch_xov(op, exit);
asm_arith(ins);
ins->oprnd1()->setResultLive();
ins->oprnd2()->setResultLive();
if (ins->isExtant()) {
NIns* exit = asm_exit(ins); // does intersectRegisterState()
asm_branch_xov(op, exit);
asm_arith(ins);
}
break;
}
@ -1745,11 +1800,14 @@ namespace nanojit
case LIR_ltd:
case LIR_gtd:
case LIR_ged:
{
countlir_fpu();
asm_fcond(ins);
ins->oprnd1()->setResultLive();
ins->oprnd2()->setResultLive();
if (ins->isExtant()) {
asm_fcond(ins);
}
break;
}
case LIR_eqi:
case LIR_lei:
case LIR_lti:
@ -1759,48 +1817,52 @@ namespace nanojit
case LIR_leui:
case LIR_gtui:
case LIR_geui:
#ifdef NANOJIT_64BIT
case LIR_eqq:
case LIR_leq:
case LIR_ltq:
case LIR_gtq:
case LIR_geq:
case LIR_ltuq:
case LIR_leuq:
case LIR_gtuq:
case LIR_geuq:
#endif
{
CASE64(LIR_eqq:)
CASE64(LIR_leq:)
CASE64(LIR_ltq:)
CASE64(LIR_gtq:)
CASE64(LIR_geq:)
CASE64(LIR_ltuq:)
CASE64(LIR_leuq:)
CASE64(LIR_gtuq:)
CASE64(LIR_geuq:)
countlir_alu();
asm_cond(ins);
ins->oprnd1()->setResultLive();
ins->oprnd2()->setResultLive();
if (ins->isExtant()) {
asm_cond(ins);
}
break;
}
case LIR_calld:
#ifdef NANOJIT_64BIT
case LIR_callq:
#endif
case LIR_calli:
{
CASE64(LIR_callq:)
case LIR_calld:
countlir_call();
for (int i = 0, argc = ins->argc(); i < argc; i++)
ins->arg(i)->setResultLive();
// It must be impure or pure-and-extant -- it couldn't be
// pure-and-not-extant, because there's no way the codegen
// for a call can be folded into the codegen of another
// LIR instruction.
NanoAssert(!ins->callInfo()->_isPure || ins->isExtant());
asm_call(ins);
break;
}
#ifdef VTUNE
case LIR_file:
{
case LIR_file: {
// we traverse backwards so we are now hitting the file
// that is associated with a bunch of LIR_lines we already have seen
ins->oprnd1()->setResultLive();
uintptr_t currentFile = ins->oprnd1()->immI();
cgen->jitFilenameUpdate(currentFile);
break;
}
case LIR_line:
{
case LIR_line: {
// add a new table entry; we don't yet know which file it belongs
// to, so we need to add it to the update table too
// note the alloc; the actual act is delayed (see above)
ins->oprnd1()->setResultLive();
uint32_t currentLine = (uint32_t) ins->oprnd1()->immI();
cgen->jitLineNumUpdate(currentLine);
cgen->jitAddRecord((uintptr_t)_nIns, 0, currentLine, true);
@ -1810,47 +1872,16 @@ namespace nanojit
}
#ifdef NJ_VERBOSE
// We have to do final LIR printing inside this loop. If we do it
// before this loop, we end up printing a lot of dead LIR
// instructions.
//
// We print the LIns after generating the code. This ensures that
// the LIns will appear in debug output *before* the generated
// code, because Assembler::outputf() prints everything in reverse.
//
// Note that some live LIR instructions won't be printed. Eg. an
// immediate won't be printed unless it is explicitly loaded into
// a register (as opposed to being incorporated into an immediate
// field in another machine instruction).
// We do final LIR printing inside this loop to avoid printing
// dead LIR instructions. We print the LIns after generating the
// code. This ensures that the LIns will appear in debug output
// *before* the generated code, because Assembler::outputf()
// prints everything in reverse.
//
if (_logc->lcbits & LC_Assembly) {
InsBuf b;
LInsPrinter* printer = _thisfrag->lirbuf->printer;
outputf(" %s", printer->formatIns(&b, ins));
if (ins->isGuard() && ins->oprnd1() && ins->oprnd1()->isCmp()) {
// Special case: code is generated for guard conditions at
// the same time that code is generated for the guard
// itself. If the condition is only used by the guard, we
// must print it now otherwise it won't get printed. So
// we do print it now, with an explanatory comment. If
// the condition *is* used again we'll end up printing it
// twice, but that's ok.
outputf(" %s # codegen'd with the %s",
printer->formatIns(&b, ins->oprnd1()), lirNames[op]);
} else if (ins->isCmov()) {
// Likewise for cmov conditions.
outputf(" %s # codegen'd with the %s",
printer->formatIns(&b, ins->oprnd1()), lirNames[op]);
}
#if defined NANOJIT_IA32 || defined NANOJIT_X64
else if (ins->isop(LIR_modi)) {
// There's a similar case when a div feeds into a mod.
outputf(" %s # codegen'd with the mod",
printer->formatIns(&b, ins->oprnd1()));
}
#endif
}
#endif
@ -1865,7 +1896,7 @@ namespace nanojit
debug_only( pageValidate(); )
debug_only( resourceConsistencyCheck(); )
end_of_loop:
ins_is_dead:
for (int32_t i = 1; i < N_LOOKAHEAD; i++)
lookahead[i-1] = lookahead[i];
lookahead[N_LOOKAHEAD-1] = reader->read();
@ -1921,7 +1952,7 @@ namespace nanojit
reserveSavedRegs();
for (Seq<LIns*> *p = pending_lives.get(); p != NULL; p = p->tail) {
LIns *ins = p->head;
NanoAssert(ins->isLive());
NanoAssert(isLiveOpcode(ins->opcode()));
LIns *op1 = ins->oprnd1();
// Must findMemFor even if we're going to findRegFor; loop-carried
// operands may spill on another edge, and we need them to always

View file

@ -349,6 +349,13 @@ namespace nanojit
// These instructions don't have to be saved & reloaded to spill,
// they can just be recalculated cheaply.
//
// WARNING: this function must match asm_restore() -- it should return
// true for the instructions that are handled explicitly without a spill
// in asm_restore(), and false otherwise.
//
// If it doesn't match asm_restore(), the register allocator's decisions
// about which values to evict will be suboptimal.
static bool canRemat(LIns*);
bool deprecated_isKnownReg(Register r) {
@ -415,7 +422,12 @@ namespace nanojit
NIns* asm_leave_trace(LInsp guard);
void asm_store32(LOpcode op, LIns *val, int d, LIns *base);
void asm_store64(LOpcode op, LIns *val, int d, LIns *base);
// WARNING: the implementation of asm_restore() should emit fast code
// to rematerialize instructions where canRemat() returns true.
// Otherwise, register allocation decisions will be suboptimal.
void asm_restore(LInsp, Register);
void asm_maybe_spill(LInsp ins, bool pop);
void asm_spill(Register rr, int d, bool pop, bool quad);
void asm_load64(LInsp ins);

View file

@ -115,7 +115,7 @@ namespace nanojit
/** Cached value of VMPI_getVMPageSize */
const size_t bytesPerPage;
/** Number of bytes to request from VMPI layer, always a multiple of the page size */
const size_t bytesPerAlloc;

File diff suppressed because it is too large. Load diff

View file

@ -560,6 +560,13 @@ namespace nanojit
NanoAssert(isCses[op] != -1); // see LIRopcode.tbl to understand this
return isCses[op] == 1;
}
inline bool isLiveOpcode(LOpcode op) {
return
#if defined NANOJIT_64BIT
op == LIR_liveq ||
#endif
op == LIR_livei || op == LIR_lived;
}
inline bool isRetOpcode(LOpcode op) {
return
#if defined NANOJIT_64BIT
@ -769,20 +776,27 @@ namespace nanojit
class LIns
{
private:
// SharedFields: fields shared by all LIns kinds. The .inReg, .reg,
// .inAr and .arIndex fields form a "reservation" that is used
// temporarily during assembly to record information relating to
// register allocation. See class RegAlloc for more details.
// SharedFields: fields shared by all LIns kinds.
//
// The .inReg, .reg, .inAr and .arIndex fields form a "reservation"
// that is used temporarily during assembly to record information
// relating to register allocation. See class RegAlloc for more
// details. Note: all combinations of .inReg/.inAr are possible, ie.
// 0/0, 0/1, 1/0, 1/1.
//
// The .isResultLive field is only used for instructions that return
// results. It indicates if the result is live. It's set (if
// appropriate) and used only during the codegen pass.
//
// Note: all combinations of .inReg/.inAr are possible, ie. 0/0, 0/1,
// 1/0, 1/1.
struct SharedFields {
uint32_t inReg:1; // if 1, 'reg' is active
uint32_t inReg:1; // if 1, 'reg' is active
Register reg:7;
uint32_t inAr:1; // if 1, 'arIndex' is active
uint32_t arIndex:15; // index into stack frame; displ is -4*arIndex
uint32_t inAr:1; // if 1, 'arIndex' is active
uint32_t isResultLive:1; // if 1, the instruction's result is live
uint32_t arIndex:14; // index into stack frame; displ is -4*arIndex
LOpcode opcode:8; // instruction's opcode
LOpcode opcode:8; // instruction's opcode
};
union {
@ -795,8 +809,8 @@ namespace nanojit
inline void initSharedFields(LOpcode opcode)
{
// We must zero .inReg and .inAR, but zeroing the whole word is
// easier. Then we set the opcode.
// We must zero .inReg, .inAR and .isResultLive, but zeroing the
// whole word is easier. Then we set the opcode.
wholeWord = 0;
sharedFields.opcode = opcode;
}
@ -836,6 +850,20 @@ namespace nanojit
LOpcode opcode() const { return sharedFields.opcode; }
// Generally, void instructions (statements) are always live and
// non-void instructions (expressions) are live if used by another
// live instruction. But there are some trickier cases.
bool isLive() const {
return isV() ||
sharedFields.isResultLive ||
(isCall() && !callInfo()->_isPure) || // impure calls are always live
isop(LIR_paramp); // LIR_paramp is always live
}
void setResultLive() {
NanoAssert(!isV());
sharedFields.isResultLive = 1;
}
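Taken together, isLive(), setResultLive() and isExtant() support the assembler's single backwards pass. A condensed sketch of the per-instruction protocol (simplified from the Assembler.cpp loop earlier in this diff; asm_foo stands in for the opcode-specific generator):
    // scanning backwards over the buffer:
    if (!ins->isLive())
        continue;                       // dead expression: skip it entirely
    ins->oprnd1()->setResultLive();     // a live ins keeps its operands live
    if (ins->isExtant())                // result not folded away, still wanted
        asm_foo(ins);                   // only then emit code for it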
// XXX: old reservation manipulating functions. See bug 538924.
// Replacement strategy:
// - deprecated_markAsClear() --> clearReg() and/or clearArIndex()
@ -847,20 +875,25 @@ namespace nanojit
sharedFields.inAr = 0;
}
bool deprecated_hasKnownReg() {
NanoAssert(isUsed());
NanoAssert(isExtant());
return isInReg();
}
Register deprecated_getReg() {
NanoAssert(isUsed());
NanoAssert(isExtant());
return ( isInReg() ? sharedFields.reg : deprecated_UnknownReg );
}
uint32_t deprecated_getArIndex() {
NanoAssert(isUsed());
NanoAssert(isExtant());
return ( isInAr() ? sharedFields.arIndex : 0 );
}
// Reservation manipulation.
bool isUsed() {
//
// "Extant" mean "in existence, still existing, surviving". In other
// words, has the value been computed explicitly (not folded into
// something else) and is it still available (in a register or spill
// slot) for use?
bool isExtant() {
return isInReg() || isInAr();
}
bool isInReg() {
@ -1004,13 +1037,6 @@ namespace nanojit
bool isRet() const {
return isRetOpcode(opcode());
}
bool isLive() const {
return isop(LIR_livei) ||
#if defined NANOJIT_64BIT
isop(LIR_liveq) ||
#endif
isop(LIR_lived);
}
bool isCmp() const {
LOpcode op = opcode();
return isCmpIOpcode(op) ||
@ -1117,22 +1143,6 @@ namespace nanojit
#endif
}
// Return true if removal of 'ins' from a LIR fragment could
// possibly change the behaviour of that fragment, even if any
// value computed by 'ins' is not used later in the fragment.
// In other words, can 'ins' possibly alter control flow or memory?
// Note, this assumes that loads will never fault and hence cannot
// affect the control flow.
bool isStmt() {
NanoAssert(!isop(LIR_skip));
// All instructions with Void retType are statements, as are calls
// to impure functions.
if (isCall())
return !callInfo()->_isPure;
else
return isV();
}
inline void* immP() const
{
#ifdef NANOJIT_64BIT
@ -1805,8 +1815,9 @@ namespace nanojit
private:
Allocator& alloc;
void formatImm(RefBuf* buf, int32_t c);
void formatImmq(RefBuf* buf, uint64_t c);
char *formatImmI(RefBuf* buf, int32_t c);
char *formatImmQ(RefBuf* buf, uint64_t c);
char *formatImmD(RefBuf* buf, double c);
void formatGuard(InsBuf* buf, LInsp ins);
void formatGuardXov(InsBuf* buf, LInsp ins);
@ -1819,7 +1830,7 @@ namespace nanojit
}
char *formatAddr(RefBuf* buf, void* p);
char *formatRef(RefBuf* buf, LInsp ref);
char *formatRef(RefBuf* buf, LInsp ref, bool showImmValue = true);
char *formatIns(InsBuf* buf, LInsp ins);
char *formatAccSet(RefBuf* buf, AccSet accSet);
@ -1946,58 +1957,64 @@ namespace nanojit
LIns* insLoad(LOpcode op, LInsp base, int32_t off, AccSet accSet);
};
enum LInsHashKind {
// We divide instruction kinds into groups for the use of LInsHashSet.
// LIns0 isn't present because we don't need to record any 0-ary
// instructions.
LInsImmI = 0,
LInsImmQ = 1, // only occurs on 64-bit platforms
LInsImmD = 2,
LIns1 = 3,
LIns2 = 4,
LIns3 = 5,
LInsCall = 6,
// Loads are special. We group them by access region: one table for
// each region, and then a catch-all table for any loads marked with
// multiple regions. This arrangement makes the removal of
// invalidated loads fast -- eg. we can invalidate all STACK loads by
// just clearing the LInsLoadStack table. The disadvantage is that
// loads marked with multiple regions must be invalidated
// conservatively, eg. if any intervening stores occur. But loads
// marked with multiple regions should be rare.
LInsLoadReadOnly = 7,
LInsLoadStack = 8,
LInsLoadRStack = 9,
LInsLoadOther = 10,
LInsLoadMultiple = 11,
LInsFirst = 0,
LInsLast = 11,
// need a value after "last" to outsmart compilers that will insist last+1 is impossible
LInsInvalid = 12
};
#define nextKind(kind) LInsHashKind(kind+1)
class LInsHashSet
class CseFilter: public LirWriter
{
// Must be a power of 2.
// Don't start too small, or we'll waste time growing and rehashing.
// Don't start too large, will waste memory.
static const uint32_t kInitialCap[LInsLast + 1];
enum LInsHashKind {
// We divide instruction kinds into groups. LIns0 isn't present
// because we don't need to record any 0-ary instructions.
LInsImmI = 0,
LInsImmQ = 1, // only occurs on 64-bit platforms
LInsImmD = 2,
LIns1 = 3,
LIns2 = 4,
LIns3 = 5,
LInsCall = 6,
// Loads are special. We group them by access region: one table for
// each region, and then a catch-all table for any loads marked with
// multiple regions. This arrangement makes the removal of
// invalidated loads fast -- eg. we can invalidate all STACK loads by
// just clearing the LInsLoadStack table. The disadvantage is that
// loads marked with multiple regions must be invalidated
// conservatively, eg. if any intervening stores occur. But loads
// marked with multiple regions should be rare.
LInsLoadReadOnly = 7,
LInsLoadStack = 8,
LInsLoadRStack = 9,
LInsLoadOther = 10,
LInsLoadMultiple = 11,
LInsFirst = 0,
LInsLast = 11,
// Need a value after "last" to outsmart compilers that insist last+1 is impossible.
LInsInvalid = 12
};
#define nextKind(kind) LInsHashKind(kind+1)
// There is one list for each instruction kind. This lets us size the
// lists appropriately (some instructions are more common than others).
// It also lets us have kind-specific find/add/grow functions, which
// are faster than generic versions.
LInsp *m_list[LInsLast + 1];
uint32_t m_cap[LInsLast + 1];
uint32_t m_used[LInsLast + 1];
typedef uint32_t (LInsHashSet::*find_t)(LInsp);
find_t m_find[LInsLast + 1];
//
// Nb: Size must be a power of 2.
// Don't start too small, or we'll waste time growing and rehashing.
// Don't start too large, will waste memory.
//
LInsp* m_list[LInsLast + 1];
uint32_t m_cap[LInsLast + 1];
uint32_t m_used[LInsLast + 1];
typedef uint32_t (CseFilter::*find_t)(LInsp);
find_t m_find[LInsLast + 1];
AccSet storesSinceLastLoad; // regions stored to since the last load
Allocator& alloc;
static uint32_t hash8(uint32_t hash, const uint8_t data);
static uint32_t hash32(uint32_t hash, const uint32_t data);
static uint32_t hashptr(uint32_t hash, const void* data);
static uint32_t hashfinish(uint32_t hash);
static uint32_t hashImmI(int32_t);
static uint32_t hashImmQorD(uint64_t); // not NANOJIT_64BIT-only -- used by findImmD()
static uint32_t hash1(LOpcode op, LInsp);
@ -2006,8 +2023,22 @@ namespace nanojit
static uint32_t hashLoad(LOpcode op, LInsp, int32_t, AccSet);
static uint32_t hashCall(const CallInfo *call, uint32_t argc, LInsp args[]);
// These private versions are used after an LIns has been created;
// they are used for rehashing after growing.
// These versions are used before an LIns has been created.
LInsp findImmI(int32_t a, uint32_t &k);
#ifdef NANOJIT_64BIT
LInsp findImmQ(uint64_t a, uint32_t &k);
#endif
LInsp findImmD(uint64_t d, uint32_t &k);
LInsp find1(LOpcode v, LInsp a, uint32_t &k);
LInsp find2(LOpcode v, LInsp a, LInsp b, uint32_t &k);
LInsp find3(LOpcode v, LInsp a, LInsp b, LInsp c, uint32_t &k);
LInsp findLoad(LOpcode v, LInsp a, int32_t b, AccSet accSet, LInsHashKind kind,
uint32_t &k);
LInsp findCall(const CallInfo *call, uint32_t argc, LInsp args[], uint32_t &k);
// These versions are used after an LIns has been created; they are
// used for rehashing after growing. They just call through to the
// multi-arg versions above.
uint32_t findImmI(LInsp ins);
#ifdef NANOJIT_64BIT
uint32_t findImmQ(LInsp ins);
@ -2025,35 +2056,11 @@ namespace nanojit
void grow(LInsHashKind kind);
public:
// kInitialCaps[i] holds the initial size for m_list[i].
LInsHashSet(Allocator&, uint32_t kInitialCaps[]);
// These public versions are used before an LIns has been created.
LInsp findImmI(int32_t a, uint32_t &k);
#ifdef NANOJIT_64BIT
LInsp findImmQ(uint64_t a, uint32_t &k);
#endif
LInsp findImmD(uint64_t d, uint32_t &k);
LInsp find1(LOpcode v, LInsp a, uint32_t &k);
LInsp find2(LOpcode v, LInsp a, LInsp b, uint32_t &k);
LInsp find3(LOpcode v, LInsp a, LInsp b, LInsp c, uint32_t &k);
LInsp findLoad(LOpcode v, LInsp a, int32_t b, AccSet accSet, LInsHashKind kind,
uint32_t &k);
LInsp findCall(const CallInfo *call, uint32_t argc, LInsp args[], uint32_t &k);
// 'k' is the index found by findXYZ().
void add(LInsHashKind kind, LInsp ins, uint32_t k);
void clear(); // clears all tables
void clear(LInsHashKind); // clears one table
};
class CseFilter: public LirWriter
{
private:
LInsHashSet* exprs;
AccSet storesSinceLastLoad; // regions stored to since the last load
public:
CseFilter(LirWriter *out, Allocator&);
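The load tables and storesSinceLastLoad folded into CseFilter implement the region-based invalidation described in the comment above. A hedged sketch of the store-side rule (noteStore is a hypothetical helper; the ACC_* region constants are assumed from nanojit's AccSet):
    void noteStore(AccSet stores) {     // hypothetical; illustration only
        storesSinceLastLoad |= stores;
        if (stores & ACC_STACK)
            clear(LInsLoadStack);       // drops every cached STACK load at once
        clear(LInsLoadMultiple);        // multi-region loads go conservatively
    }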

View file

@ -729,7 +729,7 @@ Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
if (p->isImmI()) {
asm_ld_imm(r, p->immI());
} else {
if (p->isUsed()) {
if (p->isExtant()) {
if (!p->deprecated_hasKnownReg()) {
// load it into the arg reg
int d = findMemFor(p);
@ -765,7 +765,7 @@ Assembler::asm_stkarg(LInsp arg, int stkd)
bool isF64 = arg->isD();
Register rr;
if (arg->isUsed() && (rr = arg->deprecated_getReg(), deprecated_isKnownReg(rr))) {
if (arg->isExtant() && (rr = arg->deprecated_getReg(), deprecated_isKnownReg(rr))) {
// The argument resides somewhere in registers, so we simply need to
// push it onto the stack.
if (!_config.arm_vfp || !isF64) {
@ -861,7 +861,7 @@ Assembler::asm_call(LInsp ins)
// R0/R1. We need to either place it in the result fp reg, or store it.
// See comments above for more details as to why this is necessary here
// for floating point calls, but not for integer calls.
if (_config.arm_vfp && ins->isUsed()) {
if (_config.arm_vfp && ins->isExtant()) {
// If the result size is a floating-point value, treat the result
// specially, as described previously.
if (ci->returnType() == ARGTYPE_D) {
@ -1230,10 +1230,28 @@ Assembler::asm_store32(LOpcode op, LIns *value, int dr, LIns *base)
}
}
bool
canRematALU(LIns *ins)
{
// Return true if we can generate code for this instruction that neither
// sets CCs nor clobbers an input register nor requires allocating a register.
switch (ins->opcode()) {
case LIR_addi:
case LIR_subi:
case LIR_andi:
case LIR_ori:
case LIR_xori:
return ins->oprnd1()->isInReg() && ins->oprnd2()->isImmI();
default:
;
}
return false;
}
bool
Assembler::canRemat(LIns* ins)
{
return ins->isImmAny() || ins->isop(LIR_alloc);
return ins->isImmI() || ins->isop(LIR_alloc) || canRematALU(ins);
}
void
@ -1243,8 +1261,18 @@ Assembler::asm_restore(LInsp i, Register r)
asm_add_imm(r, FP, deprecated_disp(i));
} else if (i->isImmI()) {
asm_ld_imm(r, i->immI());
}
else {
} else if (canRematALU(i)) {
Register rn = i->oprnd1()->getReg();
int32_t imm = i->oprnd2()->immI();
switch (i->opcode()) {
case LIR_addi: asm_add_imm(r, rn, imm, /*stat=*/ 0); break;
case LIR_subi: asm_sub_imm(r, rn, imm, /*stat=*/ 0); break;
case LIR_andi: asm_and_imm(r, rn, imm, /*stat=*/ 0); break;
case LIR_ori: asm_orr_imm(r, rn, imm, /*stat=*/ 0); break;
case LIR_xori: asm_eor_imm(r, rn, imm, /*stat=*/ 0); break;
default: NanoAssert(0); break;
}
} else {
// We can't easily load immediate values directly into FP registers, so
// ensure that memory is allocated for the constant and load it from
// memory.

View file

@ -397,7 +397,7 @@ namespace nanojit
if (p->isImmI())
asm_li(r, p->immI());
else {
if (p->isUsed()) {
if (p->isExtant()) {
if (!p->deprecated_hasKnownReg()) {
// load it into the arg reg
int d = findMemFor(p);
@ -427,7 +427,7 @@ namespace nanojit
{
bool isF64 = arg->isD();
Register rr;
if (arg->isUsed() && (rr = arg->deprecated_getReg(), deprecated_isKnownReg(rr))) {
if (arg->isExtant() && (rr = arg->deprecated_getReg(), deprecated_isKnownReg(rr))) {
// The argument resides somewhere in registers, so we simply need to
// push it onto the stack.
if (!cpu_has_fpu || !isF64) {
@ -1136,7 +1136,7 @@ namespace nanojit
bool Assembler::canRemat(LIns* ins)
{
return ins->isImmAny() || ins->isop(LIR_alloc);
return ins->isImmI() || ins->isop(LIR_alloc);
}
void Assembler::asm_restore(LIns *i, Register r)

View file

@ -629,7 +629,7 @@ namespace nanojit
bool Assembler::canRemat(LIns* ins)
{
return ins->isImmAny() || ins->isop(LIR_alloc);
return ins->isImmI() || ins->isop(LIR_alloc);
}
void Assembler::asm_restore(LIns *i, Register r) {
@ -641,8 +641,6 @@ namespace nanojit
else if (i->isImmI()) {
asm_li(r, i->immI());
}
// XXX: should really rematerializable isImmD() and isImmQ() cases
// here; canRemat() assumes they will be rematerialized.
else {
d = findMemFor(i);
if (IsFpReg(r)) {
@ -779,7 +777,7 @@ namespace nanojit
if (p->isImmI()) {
asm_li(r, p->immI());
} else {
if (p->isUsed()) {
if (p->isExtant()) {
if (!p->deprecated_hasKnownReg()) {
// load it into the arg reg
int d = findMemFor(p);
@ -804,7 +802,7 @@ namespace nanojit
}
}
else {
if (p->isUsed()) {
if (p->isExtant()) {
Register rp = p->deprecated_getReg();
if (!deprecated_isKnownReg(rp) || !IsFpReg(rp)) {
// load it into the arg reg
@ -1209,12 +1207,12 @@ namespace nanojit
#endif
}
void Assembler::asm_cmov(LIns *ins) {
LIns* cond = ins->oprnd1();
void Assembler::asm_cmov(LInsp ins)
{
LIns* condval = ins->oprnd1();
LIns* iftrue = ins->oprnd2();
LIns* iffalse = ins->oprnd3();
NanoAssert(cond->isCmp());
#ifdef NANOJIT_64BIT
NanoAssert((ins->opcode() == LIR_cmov && iftrue->isI() && iffalse->isI()) ||
(ins->opcode() == LIR_qcmov && iftrue->isQ() && iffalse->isQ()));
@ -1222,14 +1220,30 @@ namespace nanojit
NanoAssert((ins->opcode() == LIR_cmov && iftrue->isI() && iffalse->isI()));
#endif
// fixme: we could handle fpu registers here, too, since we're just branching
Register rr = deprecated_prepResultReg(ins, GpRegs);
findSpecificRegFor(iftrue, rr);
Register rr = prepareResultReg(ins, GpRegs);
Register rf = findRegFor(iffalse, GpRegs & ~rmask(rr));
// If 'iftrue' isn't in a register, it can be clobbered by 'ins'.
Register rt = iftrue->isInReg() ? iftrue->getReg() : rr;
underrunProtect(16); // make sure branch target and branch are on same page and thus near
NIns *after = _nIns;
verbose_only(if (_logc->lcbits & LC_Assembly) outputf("%p:",after);)
MR(rr, rf);
asm_branch(false, cond, after);
MR(rr,rf);
NanoAssert(isS24(after - (_nIns-1)));
asm_branch_near(false, condval, after);
if (rr != rt)
MR(rr, rt);
freeResourcesOf(ins);
if (!iftrue->isInReg()) {
NanoAssert(rt == rr);
findSpecificRegForUnallocated(iftrue, rr);
}
asm_cmp(condval->opcode(), condval->oprnd1(), condval->oprnd2(), CR7);
}
RegisterMask Assembler::hint(LIns* ins) {

View file

@ -391,14 +391,14 @@ namespace nanojit
#define SRD(rd,rs,rb) BITALU2(srd, rd, rs, rb, 0)
#define SRAD(rd,rs,rb) BITALU2(srad, rd, rs, rb, 0)
#define FADD(rd,ra,rb) FPUAB(addd, rd, ra, rb, 0)
#define FADD_(rd,ra,rb) FPUAB(addd, rd, ra, rb, 1)
#define FDIV(rd,ra,rb) FPUAB(divd, rd, ra, rb, 0)
#define FDIV_(rd,ra,rb) FPUAB(divd, rd, ra, rb, 1)
#define FMUL(rd,ra,rb) FPUAC(muld, rd, ra, rb, 0)
#define FMUL_(rd,ra,rb) FPUAC(muld, rd, ra, rb, 1)
#define FSUB(rd,ra,rb) FPUAB(subd, rd, ra, rb, 0)
#define FSUB_(rd,ra,rb) FPUAB(subd, rd, ra, rb, 1)
#define FADD(rd,ra,rb) FPUAB(fadd, rd, ra, rb, 0)
#define FADD_(rd,ra,rb) FPUAB(fadd, rd, ra, rb, 1)
#define FDIV(rd,ra,rb) FPUAB(fdiv, rd, ra, rb, 0)
#define FDIV_(rd,ra,rb) FPUAB(fdiv, rd, ra, rb, 1)
#define FMUL(rd,ra,rb) FPUAC(fmul, rd, ra, rb, 0)
#define FMUL_(rd,ra,rb) FPUAC(fmul, rd, ra, rb, 1)
#define FSUB(rd,ra,rb) FPUAB(fsub, rd, ra, rb, 0)
#define FSUB_(rd,ra,rb) FPUAB(fsub, rd, ra, rb, 1)
#define MULLI(rd,ra,simm) EMIT1(PPC_mulli | GPR(rd)<<21 | GPR(ra)<<16 | uint16_t(simm),\
"mulli %s,%s,%d", gpn(rd), gpn(ra), int16_t(simm))

View file

@ -252,7 +252,7 @@ namespace nanojit
bool Assembler::canRemat(LIns* ins)
{
return ins->isImmAny() || ins->isop(LIR_alloc);
return ins->isImmI() || ins->isop(LIR_alloc);
}
void Assembler::asm_restore(LInsp i, Register r)

View file

@ -58,7 +58,7 @@ better code
- spill gp values to xmm registers?
- prefer xmm registers for copies since gprs are in higher demand?
- stack arg doubles
- stack based LIR_param
- stack based LIR_paramp
tracing
- nFragExit
@ -503,6 +503,7 @@ namespace nanojit
void Assembler::LEARIP(R r, I32 d) { emitrm(X64_learip,r,d,(Register)0); asm_output("lea %s, %d(rip)",RQ(r),d); }
void Assembler::LEALRM(R r, I d, R b) { emitrm(X64_lealrm,r,d,b); asm_output("leal %s, %d(%s)",RL(r),d,RL(b)); }
void Assembler::LEAQRM(R r, I d, R b) { emitrm(X64_leaqrm,r,d,b); asm_output("leaq %s, %d(%s)",RQ(r),d,RQ(b)); }
void Assembler::MOVLRM(R r, I d, R b) { emitrm(X64_movlrm,r,d,b); asm_output("movl %s, %d(%s)",RL(r),d,RQ(b)); }
void Assembler::MOVQRM(R r, I d, R b) { emitrm(X64_movqrm,r,d,b); asm_output("movq %s, %d(%s)",RQ(r),d,RQ(b)); }
@ -653,12 +654,12 @@ namespace nanojit
switch (ins->opcode()) {
default:
TODO(asm_shift);
case LIR_qursh: SHRQ(rr); break;
case LIR_qirsh: SARQ(rr); break;
case LIR_qilsh: SHLQ(rr); break;
case LIR_ush: SHR( rr); break;
case LIR_rsh: SAR( rr); break;
case LIR_lsh: SHL( rr); break;
case LIR_rshuq: SHRQ(rr); break;
case LIR_rshq: SARQ(rr); break;
case LIR_lshq: SHLQ(rr); break;
case LIR_rshui: SHR( rr); break;
case LIR_rshi: SAR( rr); break;
case LIR_lshi: SHL( rr); break;
}
if (rr != ra)
MR(rr, ra);
@ -673,12 +674,12 @@ namespace nanojit
int shift = ins->oprnd2()->immI() & 63;
switch (ins->opcode()) {
default: TODO(shiftimm);
case LIR_qursh: SHRQI(rr, shift); break;
case LIR_qirsh: SARQI(rr, shift); break;
case LIR_qilsh: SHLQI(rr, shift); break;
case LIR_ush: SHRI( rr, shift); break;
case LIR_rsh: SARI( rr, shift); break;
case LIR_lsh: SHLI( rr, shift); break;
case LIR_rshuq: SHRQI(rr, shift); break;
case LIR_rshq: SARQI(rr, shift); break;
case LIR_lshq: SHLQI(rr, shift); break;
case LIR_rshui: SHRI( rr, shift); break;
case LIR_rshi: SARI( rr, shift); break;
case LIR_lshi: SHLI( rr, shift); break;
}
if (rr != ra)
MR(rr, ra);
@ -699,7 +700,7 @@ namespace nanojit
int32_t imm = getImm32(b);
LOpcode op = ins->opcode();
Register rr, ra;
if (op == LIR_mul || op == LIR_mulxov) {
if (op == LIR_muli || op == LIR_mulxovi) {
// Special case: imul-by-imm has true 3-addr form. So we don't
// need the MR(rr, ra) after the IMULI.
beginOp1Regs(ins, GpRegs, rr, ra);
@ -712,32 +713,32 @@ namespace nanojit
if (isS8(imm)) {
switch (ins->opcode()) {
default: TODO(arith_imm8);
case LIR_add:
case LIR_addxov: ADDLR8(rr, imm); break; // XXX: bug 547125: could use LEA for LIR_add
case LIR_and: ANDLR8(rr, imm); break;
case LIR_or: ORLR8( rr, imm); break;
case LIR_sub:
case LIR_subxov: SUBLR8(rr, imm); break;
case LIR_xor: XORLR8(rr, imm); break;
case LIR_qiadd: ADDQR8(rr, imm); break;
case LIR_qiand: ANDQR8(rr, imm); break;
case LIR_qior: ORQR8( rr, imm); break;
case LIR_qxor: XORQR8(rr, imm); break;
case LIR_addi:
case LIR_addxovi: ADDLR8(rr, imm); break; // XXX: bug 547125: could use LEA for LIR_addi
case LIR_andi: ANDLR8(rr, imm); break;
case LIR_ori: ORLR8( rr, imm); break;
case LIR_subi:
case LIR_subxovi: SUBLR8(rr, imm); break;
case LIR_xori: XORLR8(rr, imm); break;
case LIR_addq: ADDQR8(rr, imm); break;
case LIR_andq: ANDQR8(rr, imm); break;
case LIR_orq: ORQR8( rr, imm); break;
case LIR_xorq: XORQR8(rr, imm); break;
}
} else {
switch (ins->opcode()) {
default: TODO(arith_imm);
case LIR_add:
case LIR_addxov: ADDLRI(rr, imm); break; // XXX: bug 547125: could use LEA for LIR_add
case LIR_and: ANDLRI(rr, imm); break;
case LIR_or: ORLRI( rr, imm); break;
case LIR_sub:
case LIR_subxov: SUBLRI(rr, imm); break;
case LIR_xor: XORLRI(rr, imm); break;
case LIR_qiadd: ADDQRI(rr, imm); break;
case LIR_qiand: ANDQRI(rr, imm); break;
case LIR_qior: ORQRI( rr, imm); break;
case LIR_qxor: XORQRI(rr, imm); break;
case LIR_addi:
case LIR_addxovi: ADDLRI(rr, imm); break; // XXX: bug 547125: could use LEA for LIR_addi
case LIR_andi: ANDLRI(rr, imm); break;
case LIR_ori: ORLRI( rr, imm); break;
case LIR_subi:
case LIR_subxovi: SUBLRI(rr, imm); break;
case LIR_xori: XORLRI(rr, imm); break;
case LIR_addq: ADDQRI(rr, imm); break;
case LIR_andq: ANDQRI(rr, imm); break;
case LIR_orq: ORQRI( rr, imm); break;
case LIR_xorq: XORQRI(rr, imm); break;
}
}
if (rr != ra)
@ -746,9 +747,9 @@ namespace nanojit
endOpRegs(ins, rr, ra);
}
// Generates code for a LIR_div that doesn't have a subsequent LIR_mod.
// Generates code for a LIR_divi that doesn't have a subsequent LIR_modi.
void Assembler::asm_div(LIns *div) {
NanoAssert(div->isop(LIR_div));
NanoAssert(div->isop(LIR_divi));
LIns *a = div->oprnd1();
LIns *b = div->oprnd2();
@ -771,12 +772,12 @@ namespace nanojit
}
}
// Generates code for a LIR_mod(LIR_div(divL, divR)) sequence.
// Generates code for a LIR_modi(LIR_divi(divL, divR)) sequence.
void Assembler::asm_div_mod(LIns *mod) {
LIns *div = mod->oprnd1();
NanoAssert(mod->isop(LIR_mod));
NanoAssert(div->isop(LIR_div));
NanoAssert(mod->isop(LIR_modi));
NanoAssert(div->isop(LIR_divi));
LIns *divL = div->oprnd1();
LIns *divR = div->oprnd2();
@ -806,15 +807,15 @@ namespace nanojit
Register rr, ra, rb = UnspecifiedReg; // init to shut GCC up
switch (ins->opcode()) {
case LIR_lsh: case LIR_qilsh:
case LIR_rsh: case LIR_qirsh:
case LIR_ush: case LIR_qursh:
case LIR_lshi: case LIR_lshq:
case LIR_rshi: case LIR_rshq:
case LIR_rshui: case LIR_rshuq:
asm_shift(ins);
return;
case LIR_mod:
case LIR_modi:
asm_div_mod(ins);
return;
case LIR_div:
case LIR_divi:
// Nb: if the div feeds into a mod it will be handled by
// asm_div_mod() rather than here.
asm_div(ins);
@ -830,20 +831,20 @@ namespace nanojit
}
beginOp2Regs(ins, GpRegs, rr, ra, rb);
switch (ins->opcode()) {
default: TODO(asm_arith);
case LIR_or: ORLRR(rr, rb); break;
case LIR_sub:
case LIR_subxov: SUBRR(rr, rb); break;
case LIR_add:
case LIR_addxov: ADDRR(rr, rb); break; // XXX: bug 547125: could use LEA for LIR_add
case LIR_and: ANDRR(rr, rb); break;
case LIR_xor: XORRR(rr, rb); break;
case LIR_mul:
case LIR_mulxov: IMUL(rr, rb); break;
case LIR_qxor: XORQRR(rr, rb); break;
case LIR_qior: ORQRR(rr, rb); break;
case LIR_qiand: ANDQRR(rr, rb); break;
case LIR_qiadd: ADDQRR(rr, rb); break;
default: TODO(asm_arith);
case LIR_ori: ORLRR(rr, rb); break;
case LIR_subi:
case LIR_subxovi: SUBRR(rr, rb); break;
case LIR_addi:
case LIR_addxovi: ADDRR(rr, rb); break; // XXX: bug 547125: could use LEA for LIR_addi
case LIR_andi: ANDRR(rr, rb); break;
case LIR_xori: XORRR(rr, rb); break;
case LIR_muli:
case LIR_mulxovi: IMUL(rr, rb); break;
case LIR_xorq: XORQRR(rr, rb); break;
case LIR_orq: ORQRR(rr, rb); break;
case LIR_andq: ANDQRR(rr, rb); break;
case LIR_addq: ADDQRR(rr, rb); break;
}
if (rr != ra)
MR(rr, ra);
@ -857,10 +858,10 @@ namespace nanojit
beginOp2Regs(ins, FpRegs, rr, ra, rb);
switch (ins->opcode()) {
default: TODO(asm_fop);
case LIR_fdiv: DIVSD(rr, rb); break;
case LIR_fmul: MULSD(rr, rb); break;
case LIR_fadd: ADDSD(rr, rb); break;
case LIR_fsub: SUBSD(rr, rb); break;
case LIR_divd: DIVSD(rr, rb); break;
case LIR_muld: MULSD(rr, rb); break;
case LIR_addd: ADDSD(rr, rb); break;
case LIR_subd: SUBSD(rr, rb); break;
}
if (rr != ra) {
asm_nongp_copy(rr, ra);
@ -873,7 +874,7 @@ namespace nanojit
Register rr, ra;
beginOp1Regs(ins, GpRegs, rr, ra);
if (ins->isop(LIR_not))
if (ins->isop(LIR_noti))
NOT(rr);
else
NEG(rr);
@ -884,7 +885,7 @@ namespace nanojit
}
void Assembler::asm_call(LIns *ins) {
Register rr = ( ins->isop(LIR_fcall) ? XMM0 : retRegs[0] );
Register rr = ( ins->isop(LIR_calld) ? XMM0 : retRegs[0] );
prepareResultReg(ins, rmask(rr));
evictScratchRegsExcept(rmask(rr));
@ -1024,7 +1025,7 @@ namespace nanojit
Register rr, ra;
beginOp1Regs(ins, GpRegs, rr, ra);
NanoAssert(IsGpReg(ra));
if (ins->isop(LIR_u2q)) {
if (ins->isop(LIR_ui2uq)) {
MOVLR(rr, ra); // 32bit mov zeros the upper 32bits of the target
} else {
NanoAssert(ins->isop(LIR_i2q));
@ -1076,8 +1077,8 @@ namespace nanojit
LIns* iftrue = ins->oprnd2();
LIns* iffalse = ins->oprnd3();
NanoAssert(cond->isCmp());
NanoAssert((ins->isop(LIR_cmov) && iftrue->isI() && iffalse->isI()) ||
(ins->isop(LIR_qcmov) && iftrue->isQ() && iffalse->isQ()));
NanoAssert((ins->isop(LIR_cmovi) && iftrue->isI() && iffalse->isI()) ||
(ins->isop(LIR_cmovq) && iftrue->isQ() && iffalse->isQ()));
Register rr = prepareResultReg(ins, GpRegs);
@ -1090,30 +1091,30 @@ namespace nanojit
// codes between the MRcc generation here and the asm_cmp() call
// below. See asm_cmp() for more details.
LOpcode condop = cond->opcode();
if (ins->opcode() == LIR_cmov) {
if (ins->opcode() == LIR_cmovi) {
switch (condop) {
case LIR_eq: case LIR_qeq: CMOVNE( rr, rf); break;
case LIR_lt: case LIR_qlt: CMOVNL( rr, rf); break;
case LIR_gt: case LIR_qgt: CMOVNG( rr, rf); break;
case LIR_le: case LIR_qle: CMOVNLE(rr, rf); break;
case LIR_ge: case LIR_qge: CMOVNGE(rr, rf); break;
case LIR_ult: case LIR_qult: CMOVNB( rr, rf); break;
case LIR_ugt: case LIR_qugt: CMOVNA( rr, rf); break;
case LIR_ule: case LIR_qule: CMOVNBE(rr, rf); break;
case LIR_uge: case LIR_quge: CMOVNAE(rr, rf); break;
case LIR_eqi: case LIR_eqq: CMOVNE( rr, rf); break;
case LIR_lti: case LIR_ltq: CMOVNL( rr, rf); break;
case LIR_gti: case LIR_gtq: CMOVNG( rr, rf); break;
case LIR_lei: case LIR_leq: CMOVNLE(rr, rf); break;
case LIR_gei: case LIR_geq: CMOVNGE(rr, rf); break;
case LIR_ltui: case LIR_ltuq: CMOVNB( rr, rf); break;
case LIR_gtui: case LIR_gtuq: CMOVNA( rr, rf); break;
case LIR_leui: case LIR_leuq: CMOVNBE(rr, rf); break;
case LIR_geui: case LIR_geuq: CMOVNAE(rr, rf); break;
default: NanoAssert(0); break;
}
} else {
switch (condop) {
case LIR_eq: case LIR_qeq: CMOVQNE( rr, rf); break;
case LIR_lt: case LIR_qlt: CMOVQNL( rr, rf); break;
case LIR_gt: case LIR_qgt: CMOVQNG( rr, rf); break;
case LIR_le: case LIR_qle: CMOVQNLE(rr, rf); break;
case LIR_ge: case LIR_qge: CMOVQNGE(rr, rf); break;
case LIR_ult: case LIR_qult: CMOVQNB( rr, rf); break;
case LIR_ugt: case LIR_qugt: CMOVQNA( rr, rf); break;
case LIR_ule: case LIR_qule: CMOVQNBE(rr, rf); break;
case LIR_uge: case LIR_quge: CMOVQNAE(rr, rf); break;
case LIR_eqi: case LIR_eqq: CMOVQNE( rr, rf); break;
case LIR_lti: case LIR_ltq: CMOVQNL( rr, rf); break;
case LIR_gti: case LIR_gtq: CMOVQNG( rr, rf); break;
case LIR_lei: case LIR_leq: CMOVQNLE(rr, rf); break;
case LIR_gei: case LIR_geq: CMOVQNGE(rr, rf); break;
case LIR_ltui: case LIR_ltuq: CMOVQNB( rr, rf); break;
case LIR_gtui: case LIR_gtuq: CMOVQNA( rr, rf); break;
case LIR_leui: case LIR_leuq: CMOVQNBE(rr, rf); break;
case LIR_geui: case LIR_geuq: CMOVQNAE(rr, rf); break;
default: NanoAssert(0); break;
}
}
@ -1144,56 +1145,56 @@ namespace nanojit
if (target && isTargetWithinS8(target)) {
if (onFalse) {
switch (condop) {
case LIR_eq: case LIR_qeq: JNE8( 8, target); break;
case LIR_lt: case LIR_qlt: JNL8( 8, target); break;
case LIR_gt: case LIR_qgt: JNG8( 8, target); break;
case LIR_le: case LIR_qle: JNLE8(8, target); break;
case LIR_ge: case LIR_qge: JNGE8(8, target); break;
case LIR_ult: case LIR_qult: JNB8( 8, target); break;
case LIR_ugt: case LIR_qugt: JNA8( 8, target); break;
case LIR_ule: case LIR_qule: JNBE8(8, target); break;
case LIR_uge: case LIR_quge: JNAE8(8, target); break;
case LIR_eqi: case LIR_eqq: JNE8( 8, target); break;
case LIR_lti: case LIR_ltq: JNL8( 8, target); break;
case LIR_gti: case LIR_gtq: JNG8( 8, target); break;
case LIR_lei: case LIR_leq: JNLE8(8, target); break;
case LIR_gei: case LIR_geq: JNGE8(8, target); break;
case LIR_ltui: case LIR_ltuq: JNB8( 8, target); break;
case LIR_gtui: case LIR_gtuq: JNA8( 8, target); break;
case LIR_leui: case LIR_leuq: JNBE8(8, target); break;
case LIR_geui: case LIR_geuq: JNAE8(8, target); break;
default: NanoAssert(0); break;
}
} else {
switch (condop) {
case LIR_eq: case LIR_qeq: JE8( 8, target); break;
case LIR_lt: case LIR_qlt: JL8( 8, target); break;
case LIR_gt: case LIR_qgt: JG8( 8, target); break;
case LIR_le: case LIR_qle: JLE8(8, target); break;
case LIR_ge: case LIR_qge: JGE8(8, target); break;
case LIR_ult: case LIR_qult: JB8( 8, target); break;
case LIR_ugt: case LIR_qugt: JA8( 8, target); break;
case LIR_ule: case LIR_qule: JBE8(8, target); break;
case LIR_uge: case LIR_quge: JAE8(8, target); break;
case LIR_eqi: case LIR_eqq: JE8( 8, target); break;
case LIR_lti: case LIR_ltq: JL8( 8, target); break;
case LIR_gti: case LIR_gtq: JG8( 8, target); break;
case LIR_lei: case LIR_leq: JLE8(8, target); break;
case LIR_gei: case LIR_geq: JGE8(8, target); break;
case LIR_ltui: case LIR_ltuq: JB8( 8, target); break;
case LIR_gtui: case LIR_gtuq: JA8( 8, target); break;
case LIR_leui: case LIR_leuq: JBE8(8, target); break;
case LIR_geui: case LIR_geuq: JAE8(8, target); break;
default: NanoAssert(0); break;
}
}
} else {
if (onFalse) {
switch (condop) {
case LIR_eq: case LIR_qeq: JNE( 8, target); break;
case LIR_lt: case LIR_qlt: JNL( 8, target); break;
case LIR_gt: case LIR_qgt: JNG( 8, target); break;
case LIR_le: case LIR_qle: JNLE(8, target); break;
case LIR_ge: case LIR_qge: JNGE(8, target); break;
case LIR_ult: case LIR_qult: JNB( 8, target); break;
case LIR_ugt: case LIR_qugt: JNA( 8, target); break;
case LIR_ule: case LIR_qule: JNBE(8, target); break;
case LIR_uge: case LIR_quge: JNAE(8, target); break;
case LIR_eqi: case LIR_eqq: JNE( 8, target); break;
case LIR_lti: case LIR_ltq: JNL( 8, target); break;
case LIR_gti: case LIR_gtq: JNG( 8, target); break;
case LIR_lei: case LIR_leq: JNLE(8, target); break;
case LIR_gei: case LIR_geq: JNGE(8, target); break;
case LIR_ltui: case LIR_ltuq: JNB( 8, target); break;
case LIR_gtui: case LIR_gtuq: JNA( 8, target); break;
case LIR_leui: case LIR_leuq: JNBE(8, target); break;
case LIR_geui: case LIR_geuq: JNAE(8, target); break;
default: NanoAssert(0); break;
}
} else {
switch (condop) {
case LIR_eq: case LIR_qeq: JE( 8, target); break;
case LIR_lt: case LIR_qlt: JL( 8, target); break;
case LIR_gt: case LIR_qgt: JG( 8, target); break;
case LIR_le: case LIR_qle: JLE(8, target); break;
case LIR_ge: case LIR_qge: JGE(8, target); break;
case LIR_ult: case LIR_qult: JB( 8, target); break;
case LIR_ugt: case LIR_qugt: JA( 8, target); break;
case LIR_ule: case LIR_qule: JBE(8, target); break;
case LIR_uge: case LIR_quge: JAE(8, target); break;
case LIR_eqi: case LIR_eqq: JE( 8, target); break;
case LIR_lti: case LIR_ltq: JL( 8, target); break;
case LIR_gti: case LIR_gtq: JG( 8, target); break;
case LIR_lei: case LIR_leq: JLE(8, target); break;
case LIR_gei: case LIR_geq: JGE(8, target); break;
case LIR_ltui: case LIR_ltuq: JB( 8, target); break;
case LIR_gtui: case LIR_gtuq: JA( 8, target); break;
case LIR_leui: case LIR_leuq: JBE(8, target); break;
case LIR_geui: case LIR_geuq: JAE(8, target); break;
default: NanoAssert(0); break;
}
}
@@ -1286,7 +1287,7 @@ namespace nanojit
NIns *patch;
LIns *a = cond->oprnd1();
LIns *b = cond->oprnd2();
if (condop == LIR_feq) {
if (condop == LIR_eqd) {
if (onFalse) {
// branch if unordered or !=
JP(16, target); // underrun of 12 needed, round up for overhang --> 16
@@ -1304,19 +1305,19 @@ namespace nanojit
}
}
else {
if (condop == LIR_flt) {
condop = LIR_fgt;
if (condop == LIR_ltd) {
condop = LIR_gtd;
LIns *t = a; a = b; b = t;
} else if (condop == LIR_fle) {
condop = LIR_fge;
} else if (condop == LIR_led) {
condop = LIR_ged;
LIns *t = a; a = b; b = t;
}
if (condop == LIR_fgt) {
if (condop == LIR_gtd) {
if (onFalse)
JBE(8, target);
else
JA(8, target);
} else { // LIR_fge
} else { // LIR_ged
if (onFalse)
JB(8, target);
else
@@ -1332,7 +1333,7 @@ namespace nanojit
LOpcode op = ins->opcode();
LIns *a = ins->oprnd1();
LIns *b = ins->oprnd2();
if (op == LIR_feq) {
if (op == LIR_eqd) {
// result = ZF & !PF, must do logic on flags
// r = al|bl|cl|dl, can only use rh without rex prefix
Register r = prepareResultReg(ins, 1<<RAX|1<<RCX|1<<RDX|1<<RBX);
@@ -1341,16 +1342,16 @@ namespace nanojit
X86_SETNP(r); // setnp rh rh = !PF
X86_SETE(r); // sete rl rl = ZF
} else {
if (op == LIR_flt) {
op = LIR_fgt;
if (op == LIR_ltd) {
op = LIR_gtd;
LIns *t = a; a = b; b = t;
} else if (op == LIR_fle) {
op = LIR_fge;
} else if (op == LIR_led) {
op = LIR_ged;
LIns *t = a; a = b; b = t;
}
Register r = prepareResultReg(ins, GpRegs); // x64 can use any GPR as setcc target
MOVZX8(r, r);
if (op == LIR_fgt)
if (op == LIR_gtd)
SETA(r);
else
SETAE(r);
@@ -1370,14 +1371,38 @@ namespace nanojit
UCOMISD(ra, rb);
}
// Return true if we can generate code for this instruction that neither
// sets CCs nor clobbers any input register.
// LEA is the only native instruction that fits those requirements.
bool canRematLEA(LIns* ins)
{
switch (ins->opcode()) {
case LIR_addi:
return ins->oprnd1()->isInRegMask(BaseRegs) && ins->oprnd2()->isImmI();
case LIR_addq: {
LIns* rhs;
return ins->oprnd1()->isInRegMask(BaseRegs) &&
(rhs = ins->oprnd2())->isImmQ() &&
isS32(rhs->immQ());
}
// Subtract and some left-shifts could be rematerialized using LEA,
// but it hasn't been shown to help in real code yet. Noting them anyway:
// maybe sub? R = subl/q rL, const => leal/q R, [rL + -const]
// maybe lsh? R = lshl/q rL, 1/2/3 => leal/q R, [rL * 2/4/8]
default:
;
}
return false;
}
bool Assembler::canRemat(LIns* ins) {
return ins->isImmAny() || ins->isop(LIR_alloc);
return ins->isImmAny() || ins->isop(LIR_allocp) || canRematLEA(ins);
}
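As an aside, the rule these two functions encode is simple: an integer add of an in-register operand and a small immediate can be recomputed later as lea r, [base + imm], which neither writes the condition codes nor clobbers the base register. A minimal, self-contained model of the predicate (the Ins struct and fitsInS32 helper are invented for this sketch, not nanojit API):

    #include <cassert>
    #include <cstdint>

    struct Ins { enum Op { AddI, AddQ, Other } op; bool lhsInReg; int64_t imm; };

    // LEA's displacement field is 32 bits wide, so a 64-bit immediate must be
    // representable as a sign-extended int32 for the addq form to qualify.
    static bool fitsInS32(int64_t v) { return v == (int64_t)(int32_t)v; }

    static bool canRematLEAModel(const Ins& ins) {
        switch (ins.op) {
        case Ins::AddI: return ins.lhsInReg;                       // lea r, [b + imm32]
        case Ins::AddQ: return ins.lhsInReg && fitsInS32(ins.imm); // 64-bit form
        default:        return false;
        }
    }

    int main() {
        assert( canRematLEAModel({Ins::AddQ, true,  0x7fffffff}));
        assert(!canRematLEAModel({Ins::AddQ, true,  int64_t(1) << 40})); // disp32 overflow
        assert(!canRematLEAModel({Ins::AddI, false, 4}));                // lhs not in a register
        return 0;
    }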
// WARNING: the code generated by this function must not affect the
// condition codes. See asm_cmp() for details.
void Assembler::asm_restore(LIns *ins, Register r) {
if (ins->isop(LIR_alloc)) {
if (ins->isop(LIR_allocp)) {
int d = arDisp(ins);
LEAQRM(r, d, FP);
}
@@ -1390,6 +1415,13 @@ namespace nanojit
else if (ins->isImmD()) {
asm_immf(r, ins->immQ(), /*canClobberCCs*/false);
}
else if (canRematLEA(ins)) {
Register lhsReg = ins->oprnd1()->getReg();
if (ins->isop(LIR_addq))
LEAQRM(r, (int32_t)ins->oprnd2()->immQ(), lhsReg);
else // LIR_addi
LEALRM(r, ins->oprnd2()->immI(), lhsReg);
}
else {
int d = findMemFor(ins);
if (ins->isD()) {
@@ -1416,24 +1448,24 @@ namespace nanojit
switch (op) {
default:
TODO(cond);
case LIR_qeq:
case LIR_eq: SETE(r); break;
case LIR_qlt:
case LIR_lt: SETL(r); break;
case LIR_qle:
case LIR_le: SETLE(r); break;
case LIR_qgt:
case LIR_gt: SETG(r); break;
case LIR_qge:
case LIR_ge: SETGE(r); break;
case LIR_qult:
case LIR_ult: SETB(r); break;
case LIR_qule:
case LIR_ule: SETBE(r); break;
case LIR_qugt:
case LIR_ugt: SETA(r); break;
case LIR_quge:
case LIR_uge: SETAE(r); break;
case LIR_eqq:
case LIR_eqi: SETE(r); break;
case LIR_ltq:
case LIR_lti: SETL(r); break;
case LIR_leq:
case LIR_lei: SETLE(r); break;
case LIR_gtq:
case LIR_gti: SETG(r); break;
case LIR_geq:
case LIR_gei: SETGE(r); break;
case LIR_ltuq:
case LIR_ltui: SETB(r); break;
case LIR_leuq:
case LIR_leui: SETBE(r); break;
case LIR_gtuq:
case LIR_gtui: SETA(r); break;
case LIR_geuq:
case LIR_geui: SETAE(r); break;
}
freeResourcesOf(ins);
@@ -1449,7 +1481,7 @@ namespace nanojit
releaseRegisters();
assignSavedRegs();
LIns *value = ins->oprnd1();
Register r = ins->isop(LIR_fret) ? XMM0 : RAX;
Register r = ins->isop(LIR_retd) ? XMM0 : RAX;
findSpecificRegFor(value, r);
}
@@ -1489,12 +1521,12 @@ namespace nanojit
NanoAssert(IsGpReg(rr));
MOVQRM(rr, dr, rb); // general 64bit load, 32bit const displacement
break;
case LIR_ldf:
case LIR_ldd:
beginLoadRegs(ins, FpRegs, rr, dr, rb);
NanoAssert(IsFpReg(rr));
MOVSDRM(rr, dr, rb); // load 64bits into XMM
break;
case LIR_ld32f:
case LIR_ldf2d:
beginLoadRegs(ins, FpRegs, rr, dr, rb);
NanoAssert(IsFpReg(rr));
CVTSS2SD(rr, rr);
@@ -1514,19 +1546,19 @@ namespace nanojit
beginLoadRegs(ins, GpRegs, r, d, b);
LOpcode op = ins->opcode();
switch (op) {
case LIR_ldzb:
case LIR_lduc2ui:
MOVZX8M( r, d, b);
break;
case LIR_ldzs:
case LIR_ldus2ui:
MOVZX16M(r, d, b);
break;
case LIR_ld:
case LIR_ldi:
MOVLRM( r, d, b);
break;
case LIR_ldsb:
case LIR_ldc2i:
MOVSX8M( r, d, b);
break;
case LIR_ldss:
case LIR_lds2i:
MOVSX16M( r, d, b);
break;
default:
@@ -1540,19 +1572,19 @@ namespace nanojit
NanoAssert(value->isQorD());
switch (op) {
case LIR_stqi: {
case LIR_stq: {
Register r, b;
getBaseReg2(GpRegs, value, r, BaseRegs, base, b, d);
MOVQMR(r, d, b); // gpr store
break;
}
case LIR_stfi: {
case LIR_std: {
Register b = getBaseReg(base, d, BaseRegs);
Register r = findRegFor(value, FpRegs);
MOVSDMR(r, d, b); // xmm store
break;
}
case LIR_st32f: {
case LIR_std2f: {
Register b = getBaseReg(base, d, BaseRegs);
Register r = findRegFor(value, FpRegs);
Register t = registerAllocTmp(FpRegs & ~rmask(r));
@@ -1572,17 +1604,17 @@ namespace nanojit
// Quirk of x86-64: reg cannot appear to be ah/bh/ch/dh for
// single-byte stores with REX prefix.
const RegisterMask SrcRegs = (op == LIR_stb) ? SingleByteStoreRegs : GpRegs;
const RegisterMask SrcRegs = (op == LIR_sti2c) ? SingleByteStoreRegs : GpRegs;
NanoAssert(value->isI());
Register b = getBaseReg(base, d, BaseRegs);
Register r = findRegFor(value, SrcRegs & ~rmask(b));
switch (op) {
case LIR_stb:
case LIR_sti2c:
MOVBMR(r, d, b);
break;
case LIR_sts:
case LIR_sti2s:
MOVSMR(r, d, b);
break;
case LIR_sti:

View file

@@ -237,7 +237,7 @@ namespace nanojit
X64_jnp8 = 0x007B000000000002LL, // jump near if not parity (PF == 0)
X64_jneg8 = 0x0001000000000000LL, // xor with this mask to negate the condition
X64_leaqrm = 0x00000000808D4807LL, // 64bit load effective addr reg <- disp32+base
X64_learm = 0x00000000808D4007LL, // 32bit load effective addr reg <- disp32+base
X64_lealrm = 0x00000000808D4007LL, // 32bit load effective addr reg <- disp32+base
X64_learip = 0x00000000058D4807LL, // 64bit RIP-relative lea. reg <- disp32+rip (modrm = 00rrr101 = 05)
X64_movlr = 0xC08B400000000003LL, // 32bit mov r <- b
X64_movbmr = 0x0000000080884007LL, // 8bit store r -> [b+d32]
@@ -521,6 +521,7 @@ namespace nanojit
void IMULI(Register l, Register r, int32_t i32);\
void MOVQI(Register r, uint64_t u64);\
void LEARIP(Register r, int32_t d);\
void LEALRM(Register r, int d, Register b);\
void LEAQRM(Register r, int d, Register b);\
void MOVLRM(Register r, int d, Register b);\
void MOVQRM(Register r, int d, Register b);\

View file

@@ -957,7 +957,7 @@ namespace nanojit
void Assembler::asm_call(LInsp ins)
{
Register rr = ( ins->isop(LIR_fcall) ? FST0 : retRegs[0] );
Register rr = ( ins->isop(LIR_calld) ? FST0 : retRegs[0] );
prepareResultReg(ins, rmask(rr));
evictScratchRegsExcept(rmask(rr));
@@ -1012,7 +1012,7 @@ namespace nanojit
}
}
NanoAssert(ins->isop(LIR_pcall) || ins->isop(LIR_fcall));
NanoAssert(ins->isop(LIR_callp) || ins->isop(LIR_calld));
if (!indirect) {
CALL(call);
}
@@ -1122,13 +1122,13 @@ namespace nanojit
uint32_t op = ins->opcode();
int prefer = 0;
if (op == LIR_icall) {
if (op == LIR_calli) {
prefer = rmask(retRegs[0]);
}
else if (op == LIR_fcall) {
else if (op == LIR_calld) {
prefer = rmask(FST0);
}
else if (op == LIR_param) {
else if (op == LIR_paramp) {
uint8_t arg = ins->paramArg();
if (ins->paramKind() == 0) {
uint32_t max_regs = max_abi_regs[_thisfrag->lirbuf->abi];
@@ -1148,10 +1148,24 @@ namespace nanojit
return prefer;
}
// Return true if we can generate code for this instruction that neither
// sets CCs nor clobbers any input register.
// LEA is the only native instruction that fits those requirements.
bool canRematLEA(LIns* ins)
{
if (ins->isop(LIR_addi))
return ins->oprnd1()->isInReg() && ins->oprnd2()->isImmI();
// Subtract and some left-shifts could be rematerialized using LEA,
// but it hasn't been shown to help in real code yet. Noting them anyway:
// maybe sub? R = subl rL, const => leal R, [rL + -const]
// maybe lsh? R = lshl rL, 1/2/3 => leal R, [rL * 2/4/8]
return false;
}
bool Assembler::canRemat(LIns* ins)
{
return ins->isImmAny() || ins->isop(LIR_alloc);
return ins->isImmAny() || ins->isop(LIR_allocp) || canRematLEA(ins);
}
// WARNING: the code generated by this function must not affect the
@@ -1162,8 +1176,8 @@ namespace nanojit
uint32_t arg;
uint32_t abi_regcount;
if (ins->isop(LIR_alloc)) {
// The value of a LIR_alloc instruction is the address of the
if (ins->isop(LIR_allocp)) {
// The value of a LIR_allocp instruction is the address of the
// stack allocation. We can rematerialize that from the record we
// have of where the allocation lies in the stack.
NanoAssert(ins->isInAr()); // must have stack slots allocated
@@ -1175,10 +1189,16 @@ namespace nanojit
} else if (ins->isImmD()) {
asm_immf(r, ins->immQ(), ins->immD(), /*canClobberCCs*/false);
} else if (ins->isop(LIR_param) && ins->paramKind() == 0 &&
} else if (ins->isop(LIR_paramp) && ins->paramKind() == 0 &&
(arg = ins->paramArg()) >= (abi_regcount = max_abi_regs[_thisfrag->lirbuf->abi])) {
// Incoming arg is on stack, can restore it from there instead of spilling.
// this case is intentionally not detected in canRemat(), because we still
// emit a load instead of a fast ALU operation. We don't want parameter
// spills to have precedence over immediates & ALU ops, but if one does
// spill, we want to load it directly from its stack area, saving a store
// in the prolog.
// Compute position of argument relative to ebp. Higher argument
// numbers are at higher positive offsets. The first abi_regcount
// arguments are in registers, rest on stack. +8 accommodates the
@@ -1190,6 +1210,9 @@ namespace nanojit
int d = (arg - abi_regcount) * sizeof(intptr_t) + 8;
LD(r, d, FP);
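// (Illustrative note, not from the original source: with abi_regcount == 0
// on a 32-bit frame, arg 0 is loaded from [ebp+8] and arg 1 from [ebp+12];
// the +8 skips the saved EBP and the return address pushed by CALL.)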
} else if (canRematLEA(ins)) {
LEA(r, ins->oprnd2()->immI(), ins->oprnd1()->getReg());
} else {
int d = findMemFor(ins);
if (ins->isI()) {
@@ -1213,10 +1236,10 @@ namespace nanojit
Register rb = getBaseReg(base, dr, GpRegs);
int c = value->immI();
switch (op) {
case LIR_stb:
case LIR_sti2c:
ST8i(rb, dr, c);
break;
case LIR_sts:
case LIR_sti2s:
ST16i(rb, dr, c);
break;
case LIR_sti:
@@ -1230,7 +1253,7 @@ namespace nanojit
else
{
// Quirk of x86-32: reg must be a/b/c/d for single-byte stores.
const RegisterMask SrcRegs = (op == LIR_stb) ?
const RegisterMask SrcRegs = (op == LIR_sti2c) ?
(1<<EAX | 1<<ECX | 1<<EDX | 1<<EBX) :
GpRegs;
@@ -1244,10 +1267,10 @@ namespace nanojit
getBaseReg2(SrcRegs, value, ra, GpRegs, base, rb, dr);
}
switch (op) {
case LIR_stb:
case LIR_sti2c:
ST8(rb, dr, ra);
break;
case LIR_sts:
case LIR_sti2s:
ST16(rb, dr, ra);
break;
case LIR_sti:
@@ -1294,7 +1317,7 @@ namespace nanojit
Register rr = ins->getReg();
asm_maybe_spill(ins, false); // if also in memory in post-state, spill it now
switch (ins->opcode()) {
case LIR_ldf:
case LIR_ldd:
if (rmask(rr) & XmmRegs) {
SSE_LDQ(rr, db, rb);
} else {
@@ -1303,7 +1326,7 @@ namespace nanojit
}
break;
case LIR_ld32f:
case LIR_ldf2d:
if (rmask(rr) & XmmRegs) {
SSE_CVTSS2SD(rr, rr);
SSE_LDSS(rr, db, rb);
@@ -1324,12 +1347,12 @@ namespace nanojit
int dr = arDisp(ins);
switch (ins->opcode()) {
case LIR_ldf:
case LIR_ldd:
// Don't use an fpu reg to simply load & store the value.
asm_mmq(FP, dr, rb, db);
break;
case LIR_ld32f:
case LIR_ldf2d:
// Need to use fpu to expand 32->64.
FSTPQ(dr, FP);
FLD32(db, rb);
@@ -1348,7 +1371,7 @@ namespace nanojit
{
Register rb = getBaseReg(base, dr, GpRegs);
if (op == LIR_st32f) {
if (op == LIR_std2f) {
bool pop = !value->isInReg();
Register rv = ( pop
? findRegFor(value, _config.i386_sse2 ? XmmRegs : FpRegs)
@@ -1371,7 +1394,7 @@ namespace nanojit
STi(rb, dr+4, value->immQorDhi());
STi(rb, dr, value->immQorDlo());
} else if (value->isop(LIR_ldf)) {
} else if (value->isop(LIR_ldd)) {
// value is 64bit struct or int64_t, or maybe a double.
// It may be live in an FPU reg. Either way, don't put it in an
// FPU reg just to load & store it.
@@ -1438,29 +1461,29 @@ namespace nanojit
if (branchOnFalse) {
// op == LIR_xf/LIR_jf
switch (condop) {
case LIR_eq: JNE(targ); break;
case LIR_lt: JNL(targ); break;
case LIR_le: JNLE(targ); break;
case LIR_gt: JNG(targ); break;
case LIR_ge: JNGE(targ); break;
case LIR_ult: JNB(targ); break;
case LIR_ule: JNBE(targ); break;
case LIR_ugt: JNA(targ); break;
case LIR_uge: JNAE(targ); break;
case LIR_eqi: JNE(targ); break;
case LIR_lti: JNL(targ); break;
case LIR_lei: JNLE(targ); break;
case LIR_gti: JNG(targ); break;
case LIR_gei: JNGE(targ); break;
case LIR_ltui: JNB(targ); break;
case LIR_leui: JNBE(targ); break;
case LIR_gtui: JNA(targ); break;
case LIR_geui: JNAE(targ); break;
default: NanoAssert(0); break;
}
} else {
// op == LIR_xt/LIR_jt
switch (condop) {
case LIR_eq: JE(targ); break;
case LIR_lt: JL(targ); break;
case LIR_le: JLE(targ); break;
case LIR_gt: JG(targ); break;
case LIR_ge: JGE(targ); break;
case LIR_ult: JB(targ); break;
case LIR_ule: JBE(targ); break;
case LIR_ugt: JA(targ); break;
case LIR_uge: JAE(targ); break;
case LIR_eqi: JE(targ); break;
case LIR_lti: JL(targ); break;
case LIR_lei: JLE(targ); break;
case LIR_gti: JG(targ); break;
case LIR_gei: JGE(targ); break;
case LIR_ltui: JB(targ); break;
case LIR_leui: JBE(targ); break;
case LIR_gtui: JA(targ); break;
case LIR_geui: JAE(targ); break;
default: NanoAssert(0); break;
}
}
@@ -1539,9 +1562,9 @@ namespace nanojit
// findRegFor() can call asm_restore() -- asm_restore() better not
// disturb the CCs!
Register r = findRegFor(lhs, GpRegs);
if (c == 0 && cond->isop(LIR_eq)) {
if (c == 0 && cond->isop(LIR_eqi)) {
NanoAssert(N_LOOKAHEAD >= 3);
if ((lhs->isop(LIR_and) || lhs->isop(LIR_or)) &&
if ((lhs->isop(LIR_andi) || lhs->isop(LIR_ori)) &&
cond == lookahead[1] && lhs == lookahead[2])
{
// Do nothing. At run-time, 'lhs' will have just computed
@@ -1569,15 +1592,15 @@ namespace nanojit
MOVZX8(r,r);
if (_config.i386_sse2) {
// LIR_flt and LIR_fgt are handled by the same case because
// asm_fcmp() converts LIR_flt(a,b) to LIR_fgt(b,a). Likewise
// for LIR_fle/LIR_fge.
// LIR_ltd and LIR_gtd are handled by the same case because
// asm_fcmp() converts LIR_ltd(a,b) to LIR_gtd(b,a). Likewise
// for LIR_led/LIR_ged.
switch (opcode) {
case LIR_feq: SETNP(r); break;
case LIR_flt:
case LIR_fgt: SETA(r); break;
case LIR_fle:
case LIR_fge: SETAE(r); break;
case LIR_eqd: SETNP(r); break;
case LIR_ltd:
case LIR_gtd: SETA(r); break;
case LIR_led:
case LIR_ged: SETAE(r); break;
default: NanoAssert(0); break;
}
} else {
@@ -1598,15 +1621,15 @@ namespace nanojit
// SETcc only sets low 8 bits, so extend
MOVZX8(r,r);
switch (op) {
case LIR_eq: SETE(r); break;
case LIR_lt: SETL(r); break;
case LIR_le: SETLE(r); break;
case LIR_gt: SETG(r); break;
case LIR_ge: SETGE(r); break;
case LIR_ult: SETB(r); break;
case LIR_ule: SETBE(r); break;
case LIR_ugt: SETA(r); break;
case LIR_uge: SETAE(r); break;
case LIR_eqi: SETE(r); break;
case LIR_lti: SETL(r); break;
case LIR_lei: SETLE(r); break;
case LIR_gti: SETG(r); break;
case LIR_gei: SETGE(r); break;
case LIR_ltui: SETB(r); break;
case LIR_leui: SETBE(r); break;
case LIR_gtui: SETA(r); break;
case LIR_geui: SETAE(r); break;
default: NanoAssert(0); break;
}
@@ -1638,7 +1661,7 @@ namespace nanojit
LOpcode op = ins->opcode();
// First special case.
if (op == LIR_mod) {
if (op == LIR_modi) {
asm_div_mod(ins);
return;
}
@@ -1647,9 +1670,9 @@ namespace nanojit
LInsp rhs = ins->oprnd2();
// Second special case.
// XXX: bug 547125: don't need this once LEA is used for LIR_add in all cases below
if (op == LIR_add && lhs->isop(LIR_alloc) && rhs->isImmI()) {
// LIR_add(LIR_alloc, LIR_int) -- use lea.
// XXX: bug 547125: don't need this once LEA is used for LIR_addi in all cases below
if (op == LIR_addi && lhs->isop(LIR_allocp) && rhs->isImmI()) {
// LIR_addi(LIR_allocp, LIR_immi) -- use lea.
Register rr = prepareResultReg(ins, GpRegs);
int d = findMemFor(lhs) + rhs->immI();
@@ -1665,7 +1688,7 @@ namespace nanojit
Register rb = UnspecifiedReg;
switch (op) {
case LIR_div:
case LIR_divi:
// Nb: if the div feeds into a mod it will be handled by
// asm_div_mod() rather than here.
isConstRhs = false;
@@ -1673,17 +1696,17 @@ namespace nanojit
allow = rmask(EAX);
evictIfActive(EDX);
break;
case LIR_mul:
case LIR_mulxov:
case LIR_muli:
case LIR_mulxovi:
isConstRhs = false;
if (lhs != rhs) {
rb = findRegFor(rhs, allow);
allow &= ~rmask(rb);
}
break;
case LIR_lsh:
case LIR_rsh:
case LIR_ush:
case LIR_lshi:
case LIR_rshi:
case LIR_rshui:
isConstRhs = rhs->isImmI();
if (!isConstRhs) {
rb = findSpecificRegFor(rhs, ECX);
@@ -1710,19 +1733,19 @@ namespace nanojit
rb = ra;
switch (op) {
case LIR_add:
case LIR_addxov: ADD(rr, rb); break; // XXX: bug 547125: could use LEA for LIR_add
case LIR_sub:
case LIR_subxov: SUB(rr, rb); break;
case LIR_mul:
case LIR_mulxov: MUL(rr, rb); break;
case LIR_and: AND(rr, rb); break;
case LIR_or: OR( rr, rb); break;
case LIR_xor: XOR(rr, rb); break;
case LIR_lsh: SHL(rr, rb); break;
case LIR_rsh: SAR(rr, rb); break;
case LIR_ush: SHR(rr, rb); break;
case LIR_div:
case LIR_addi:
case LIR_addxovi: ADD(rr, rb); break; // XXX: bug 547125: could use LEA for LIR_addi
case LIR_subi:
case LIR_subxovi: SUB(rr, rb); break;
case LIR_muli:
case LIR_mulxovi: MUL(rr, rb); break;
case LIR_andi: AND(rr, rb); break;
case LIR_ori: OR( rr, rb); break;
case LIR_xori: XOR(rr, rb); break;
case LIR_lshi: SHL(rr, rb); break;
case LIR_rshi: SAR(rr, rb); break;
case LIR_rshui: SHR(rr, rb); break;
case LIR_divi:
DIV(rb);
CDQ(); // sign-extend EAX into EDX:EAX
break;
@@ -1732,20 +1755,20 @@ namespace nanojit
} else {
int c = rhs->immI();
switch (op) {
case LIR_add:
case LIR_addi:
// this doesn't set cc's, only use it when cc's not required.
LEA(rr, c, ra);
ra = rr; // suppress mov
break;
case LIR_addxov: ADDi(rr, c); break;
case LIR_sub:
case LIR_subxov: SUBi(rr, c); break;
case LIR_and: ANDi(rr, c); break;
case LIR_or: ORi( rr, c); break;
case LIR_xor: XORi(rr, c); break;
case LIR_lsh: SHLi(rr, c); break;
case LIR_rsh: SARi(rr, c); break;
case LIR_ush: SHRi(rr, c); break;
case LIR_addxovi: ADDi(rr, c); break;
case LIR_subi:
case LIR_subxovi: SUBi(rr, c); break;
case LIR_andi: ANDi(rr, c); break;
case LIR_ori: ORi( rr, c); break;
case LIR_xori: XORi(rr, c); break;
case LIR_lshi: SHLi(rr, c); break;
case LIR_rshi: SARi(rr, c); break;
case LIR_rshui: SHRi(rr, c); break;
default: NanoAssert(0); break;
}
}
@@ -1760,14 +1783,14 @@ namespace nanojit
}
}
// Generates code for a LIR_mod(LIR_div(divL, divR)) sequence.
// Generates code for a LIR_modi(LIR_divi(divL, divR)) sequence.
void Assembler::asm_div_mod(LInsp mod)
{
LInsp div = mod->oprnd1();
// LIR_mod expects the LIR_div to be near (no interference from the register allocator).
NanoAssert(mod->isop(LIR_mod));
NanoAssert(div->isop(LIR_div));
// LIR_modi expects the LIR_divi to be near (no interference from the register allocator).
NanoAssert(mod->isop(LIR_modi));
NanoAssert(div->isop(LIR_divi));
LInsp divL = div->oprnd1();
LInsp divR = div->oprnd2();
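For reference, a tiny self-contained model of the semantics this pairing relies on (hypothetical names; C++11's / and % are defined to truncate toward zero, exactly like x86 IDIV): after CDQ sign-extends EAX into EDX:EAX, a single IDIV leaves the quotient in EAX and the remainder in EDX, so div and mod share one instruction.

    #include <cassert>

    struct DivMod { int quot, rem; };

    // Models "mov eax, a ; cdq ; idiv b": quotient -> EAX, remainder -> EDX.
    static DivMod x86_idiv(int a, int b) {
        return { a / b, a % b }; // C++11 division truncates toward zero, as IDIV does
    }

    int main() {
        DivMod r = x86_idiv(-7, 2);
        assert(r.quot == -3 && r.rem == -1); // truncation toward zero
        assert(r.quot * 2 + r.rem == -7);    // invariant: a == quot*b + rem
        return 0;
    }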
@@ -1817,10 +1840,10 @@ namespace nanojit
// If 'lhs' isn't in a register, it can be clobbered by 'ins'.
Register ra = lhs->isInReg() ? lhs->getReg() : rr;
if (ins->isop(LIR_not)) {
if (ins->isop(LIR_noti)) {
NOT(rr);
} else {
NanoAssert(ins->isop(LIR_neg));
NanoAssert(ins->isop(LIR_negi));
NEG(rr);
}
if (rr != ra)
@@ -1845,19 +1868,19 @@ namespace nanojit
intptr_t addr = base->immI();
addr += d;
switch (op) {
case LIR_ldzb:
case LIR_lduc2ui:
LD8Zdm(rr, addr);
break;
case LIR_ldsb:
case LIR_ldc2i:
LD8Sdm(rr, addr);
break;
case LIR_ldzs:
case LIR_ldus2ui:
LD16Zdm(rr, addr);
break;
case LIR_ldss:
case LIR_lds2i:
LD16Sdm(rr, addr);
break;
case LIR_ld:
case LIR_ldi:
LDdm(rr, addr);
break;
default:
@@ -1867,7 +1890,7 @@ namespace nanojit
freeResourcesOf(ins);
} else if (base->opcode() == LIR_piadd) {
} else if (base->opcode() == LIR_addp) {
// Search for add(X,Y).
LIns *lhs = base->oprnd1();
LIns *rhs = base->oprnd2();
@@ -1889,7 +1912,7 @@ namespace nanojit
// W = ld (add(X, shl(Y, 0)))[d]
//
int scale;
if (rhs->opcode() == LIR_pilsh && rhs->oprnd2()->isImmI()) {
if (rhs->opcode() == LIR_lshp && rhs->oprnd2()->isImmI()) {
scale = rhs->oprnd2()->immI();
if (scale >= 1 && scale <= 3)
rhs = rhs->oprnd1();
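A small model of the folding described above (illustrative only): when the load's base is add(X, lsh(Y, k)) with k in 1..3, the whole address computation collapses into one x86 SIB operand [X + Y*2^k + d], so no separate shift or add instruction remains.

    #include <cassert>
    #include <cstdint>

    // One instruction on x86: mov al, [X + Y*(1<<scale) + d]
    static uint8_t loadFolded(const uint8_t* X, intptr_t Y, int scale, int d) {
        return *(X + (Y << scale) + d);
    }

    int main() {
        uint8_t buf[64];
        for (int i = 0; i < 64; i++) buf[i] = uint8_t(i);
        assert(loadFolded(buf, 3, 2, 1) == buf[3 * 4 + 1]); // scale 2 => Y*4
        return 0;
    }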
@@ -1914,19 +1937,19 @@ namespace nanojit
}
switch (op) {
case LIR_ldzb:
case LIR_lduc2ui:
LD8Zsib(rr, d, ra, rb, scale);
break;
case LIR_ldsb:
case LIR_ldc2i:
LD8Ssib(rr, d, ra, rb, scale);
break;
case LIR_ldzs:
case LIR_ldus2ui:
LD16Zsib(rr, d, ra, rb, scale);
break;
case LIR_ldss:
case LIR_lds2i:
LD16Ssib(rr, d, ra, rb, scale);
break;
case LIR_ld:
case LIR_ldi:
LDsib(rr, d, ra, rb, scale);
break;
default:
@@ -1947,19 +1970,19 @@ namespace nanojit
Register ra = getBaseReg(base, d, GpRegs);
switch (op) {
case LIR_ldzb:
case LIR_lduc2ui:
LD8Z(rr, d, ra);
break;
case LIR_ldsb:
case LIR_ldc2i:
LD8S(rr, d, ra);
break;
case LIR_ldzs:
case LIR_ldus2ui:
LD16Z(rr, d, ra);
break;
case LIR_ldss:
case LIR_lds2i:
LD16S(rr, d, ra);
break;
case LIR_ld:
case LIR_ldi:
LD(rr, d, ra);
break;
default:
@@ -1968,7 +1991,7 @@ namespace nanojit
}
freeResourcesOf(ins);
if (!base->isop(LIR_alloc) && !base->isInReg()) {
if (!base->isop(LIR_allocp) && !base->isInReg()) {
NanoAssert(ra == rr);
findSpecificRegForUnallocated(base, ra);
}
@@ -1982,7 +2005,7 @@ namespace nanojit
LIns* iffalse = ins->oprnd3();
NanoAssert(condval->isCmp());
NanoAssert(ins->isop(LIR_cmov) && iftrue->isI() && iffalse->isI());
NanoAssert(ins->isop(LIR_cmovi) && iftrue->isI() && iffalse->isI());
Register rr = prepareResultReg(ins, GpRegs);
@@ -1996,15 +2019,15 @@ namespace nanojit
// below. See asm_cmp() for more details.
switch (condval->opcode()) {
// Note that these are all opposites...
case LIR_eq: MRNE(rr, rf); break;
case LIR_lt: MRGE(rr, rf); break;
case LIR_le: MRG( rr, rf); break;
case LIR_gt: MRLE(rr, rf); break;
case LIR_ge: MRL( rr, rf); break;
case LIR_ult: MRAE(rr, rf); break;
case LIR_ule: MRA( rr, rf); break;
case LIR_ugt: MRBE(rr, rf); break;
case LIR_uge: MRB( rr, rf); break;
case LIR_eqi: MRNE(rr, rf); break;
case LIR_lti: MRGE(rr, rf); break;
case LIR_lei: MRG( rr, rf); break;
case LIR_gti: MRLE(rr, rf); break;
case LIR_gei: MRL( rr, rf); break;
case LIR_ltui: MRAE(rr, rf); break;
case LIR_leui: MRA( rr, rf); break;
case LIR_gtui: MRBE(rr, rf); break;
case LIR_geui: MRB( rr, rf); break;
default: NanoAssert(0); break;
}
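The inversion is worth spelling out (sketch with invented names, not nanojit code): the result register is first loaded with the "true" operand, and the conditional move guarded by the opposite condition overwrites it with the "false" operand only when the test fails.

    #include <cassert>

    // e.g. for LIR_eqi the emitted move is MRNE: "move rf into rr if not equal".
    static int cmovModel(bool cond, int iftrue, int iffalse) {
        int rr = iftrue;          // rr provisionally holds the true arm
        if (!cond) rr = iffalse;  // CMOV<inverted-cc> rr, rf
        return rr;
    }

    int main() {
        assert(cmovModel(true,  1, 2) == 1);
        assert(cmovModel(false, 1, 2) == 2);
        return 0;
    }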
@@ -2200,7 +2223,7 @@ namespace nanojit
} else if (ins->isInAr()) {
int d = arDisp(ins);
NanoAssert(d != 0);
if (ins->isop(LIR_alloc)) {
if (ins->isop(LIR_allocp)) {
LEA(r, d, FP);
} else {
LD(r, d, FP);
@@ -2228,11 +2251,11 @@ namespace nanojit
void Assembler::asm_pusharg(LInsp ins)
{
// arg goes on stack
if (!ins->isUsed() && ins->isImmI())
if (!ins->isExtant() && ins->isImmI())
{
PUSHi(ins->immI()); // small const we push directly
}
else if (!ins->isUsed() || ins->isop(LIR_alloc))
else if (!ins->isExtant() || ins->isop(LIR_allocp))
{
Register ra = findRegFor(ins, GpRegs);
PUSHr(ra);
@@ -2251,14 +2274,14 @@ namespace nanojit
void Assembler::asm_stkarg(LInsp ins, int32_t& stkd)
{
// arg goes on stack
if (!ins->isUsed() && ins->isImmI())
if (!ins->isExtant() && ins->isImmI())
{
// small const we push directly
STi(SP, stkd, ins->immI());
}
else {
Register ra;
if (!ins->isInReg() || ins->isop(LIR_alloc))
if (!ins->isInReg() || ins->isop(LIR_allocp))
ra = findRegFor(ins, GpRegs & (~SavedRegs));
else
ra = ins->getReg();
@@ -2336,10 +2359,10 @@ namespace nanojit
rb = ra;
switch (op) {
case LIR_fadd: SSE_ADDSD(rr, rb); break;
case LIR_fsub: SSE_SUBSD(rr, rb); break;
case LIR_fmul: SSE_MULSD(rr, rb); break;
case LIR_fdiv: SSE_DIVSD(rr, rb); break;
case LIR_addd: SSE_ADDSD(rr, rb); break;
case LIR_subd: SSE_SUBSD(rr, rb); break;
case LIR_muld: SSE_MULSD(rr, rb); break;
case LIR_divd: SSE_DIVSD(rr, rb); break;
default: NanoAssert(0);
}
@@ -2366,10 +2389,10 @@ namespace nanojit
const uint64_t* p = findImmDFromPool(rhs->immQ());
switch (op) {
case LIR_fadd: FADDdm( (const double*)p); break;
case LIR_fsub: FSUBRdm((const double*)p); break;
case LIR_fmul: FMULdm( (const double*)p); break;
case LIR_fdiv: FDIVRdm((const double*)p); break;
case LIR_addd: FADDdm( (const double*)p); break;
case LIR_subd: FSUBRdm((const double*)p); break;
case LIR_muld: FMULdm( (const double*)p); break;
case LIR_divd: FDIVRdm((const double*)p); break;
default: NanoAssert(0);
}
@@ -2377,10 +2400,10 @@ namespace nanojit
int db = findMemFor(rhs);
switch (op) {
case LIR_fadd: FADD( db, FP); break;
case LIR_fsub: FSUBR(db, FP); break;
case LIR_fmul: FMUL( db, FP); break;
case LIR_fdiv: FDIVR(db, FP); break;
case LIR_addd: FADD( db, FP); break;
case LIR_subd: FSUBR(db, FP); break;
case LIR_muld: FMUL( db, FP); break;
case LIR_divd: FDIVR(db, FP); break;
default: NanoAssert(0);
}
}
@@ -2502,27 +2525,27 @@ namespace nanojit
LOpcode opcode = cond->opcode();
if (_config.i386_sse2) {
// LIR_flt and LIR_fgt are handled by the same case because
// asm_fcmp() converts LIR_flt(a,b) to LIR_fgt(b,a). Likewise
// for LIR_fle/LIR_fge.
// LIR_ltd and LIR_gtd are handled by the same case because
// asm_fcmp() converts LIR_ltd(a,b) to LIR_gtd(b,a). Likewise
// for LIR_led/LIR_ged.
if (branchOnFalse) {
// op == LIR_xf
switch (opcode) {
case LIR_feq: JP(targ); break;
case LIR_flt:
case LIR_fgt: JNA(targ); break;
case LIR_fle:
case LIR_fge: JNAE(targ); break;
case LIR_eqd: JP(targ); break;
case LIR_ltd:
case LIR_gtd: JNA(targ); break;
case LIR_led:
case LIR_ged: JNAE(targ); break;
default: NanoAssert(0); break;
}
} else {
// op == LIR_xt
switch (opcode) {
case LIR_feq: JNP(targ); break;
case LIR_flt:
case LIR_fgt: JA(targ); break;
case LIR_fle:
case LIR_fge: JAE(targ); break;
case LIR_eqd: JNP(targ); break;
case LIR_ltd:
case LIR_gtd: JA(targ); break;
case LIR_led:
case LIR_ged: JAE(targ); break;
default: NanoAssert(0); break;
}
}
@@ -2552,17 +2575,17 @@ namespace nanojit
if (_config.i386_sse2) {
// First, we convert (a < b) into (b > a), and (a <= b) into (b >= a).
if (condop == LIR_flt) {
condop = LIR_fgt;
if (condop == LIR_ltd) {
condop = LIR_gtd;
LIns* t = lhs; lhs = rhs; rhs = t;
} else if (condop == LIR_fle) {
condop = LIR_fge;
} else if (condop == LIR_led) {
condop = LIR_ged;
LIns* t = lhs; lhs = rhs; rhs = t;
}
if (condop == LIR_feq) {
if (condop == LIR_eqd) {
if (lhs == rhs) {
// We can generate better code for LIR_feq when lhs==rhs (NaN test).
// We can generate better code for LIR_eqd when lhs==rhs (NaN test).
// ucomisd ZPC outcome (SETNP/JNP succeeds if P==0)
// ------- --- -------
@@ -2574,7 +2597,7 @@ namespace nanojit
} else {
// LAHF puts the flags into AH like so: SF:ZF:0:AF:0:PF:1:CF (aka. SZ0A_0P1C).
// We then mask out the bits as follows.
// - LIR_feq: mask == 0x44 == 0100_0100b, which extracts 0Z00_0P00 from AH.
// - LIR_eqd: mask == 0x44 == 0100_0100b, which extracts 0Z00_0P00 from AH.
int mask = 0x44;
// ucomisd ZPC lahf/test(0x44) SZP outcome
@@ -2593,7 +2616,7 @@ namespace nanojit
SSE_UCOMISD(ra, rb);
}
} else {
// LIR_fgt:
// LIR_gtd:
// ucomisd ZPC outcome (SETA/JA succeeds if CZ==00)
// ------- --- -------
// UNORDERED 111 SETA/JA fails
@@ -2601,7 +2624,7 @@ namespace nanojit
// GREATER_THAN 000 SETA/JA succeeds
// LESS_THAN 001 SETA/JA fails
//
// LIR_fge:
// LIR_ged:
// ucomisd ZPC outcome (SETAE/JAE succeeds if C==0)
// ------- --- -------
// UNORDERED 111 SETAE/JAE fails
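A compact model of these two tables (the Flags struct and ucomisdFlags helper are invented for illustration): UCOMISD maps unordered to ZF=PF=CF=1, greater to 000, less to CF=1, and equal to ZF=1, so SETA/JA (CF==0 && ZF==0) realizes gtd and SETAE/JAE (CF==0) realizes ged, both correctly failing on NaN.

    #include <cassert>
    #include <cmath>

    struct Flags { bool zf, pf, cf; };

    static Flags ucomisdFlags(double a, double b) {
        if (std::isnan(a) || std::isnan(b)) return {true, true, true}; // unordered
        if (a == b) return {true, false, false};
        if (a <  b) return {false, false, true};
        return {false, false, false};                                  // greater
    }

    static bool seta(Flags f)  { return !f.cf && !f.zf; } // gtd
    static bool setae(Flags f) { return !f.cf; }          // ged

    int main() {
        double nan = std::nan("");
        assert( seta(ucomisdFlags(2.0, 1.0)) && !seta(ucomisdFlags(1.0, 1.0)));
        assert(!seta(ucomisdFlags(nan, 1.0)));   // unordered fails >
        assert( setae(ucomisdFlags(1.0, 1.0)));
        assert(!setae(ucomisdFlags(nan, 1.0)));  // unordered fails >=
        return 0;
    }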
@@ -2617,11 +2640,11 @@ namespace nanojit
} else {
// First, we convert (a > b) into (b < a), and (a >= b) into (b <= a).
// Note that this is the opposite of the sse2 conversion above.
if (condop == LIR_fgt) {
condop = LIR_flt;
if (condop == LIR_gtd) {
condop = LIR_ltd;
LIns* t = lhs; lhs = rhs; rhs = t;
} else if (condop == LIR_fge) {
condop = LIR_fle;
} else if (condop == LIR_ged) {
condop = LIR_led;
LIns* t = lhs; lhs = rhs; rhs = t;
}
@@ -2637,11 +2660,11 @@ namespace nanojit
// x86 machines.
//
// The masks are as follows:
// - LIR_feq: mask == 0x44 == 0100_0100b, which extracts 0Z00_0P00 from AH.
// - LIR_flt: mask == 0x05 == 0000_0101b, which extracts 0000_0P0C from AH.
// - LIR_fle: mask == 0x41 == 0100_0001b, which extracts 0Z00_000C from AH.
// - LIR_eqd: mask == 0x44 == 0100_0100b, which extracts 0Z00_0P00 from AH.
// - LIR_ltd: mask == 0x05 == 0000_0101b, which extracts 0000_0P0C from AH.
// - LIR_led: mask == 0x41 == 0100_0001b, which extracts 0Z00_000C from AH.
//
// LIR_feq (very similar to the sse2 case above):
// LIR_eqd (very similar to the sse2 case above):
// ucomisd C3:C2:C0 lahf/test(0x44) SZP outcome
// ------- -------- --------- --- -------
// UNORDERED 111 0100_0100 001 SETNP fails
@@ -2649,7 +2672,7 @@ namespace nanojit
// GREATER_THAN 000 0000_0000 011 SETNP fails
// LESS_THAN 001 0000_0000 011 SETNP fails
//
// LIR_flt:
// LIR_ltd:
// fcom C3:C2:C0 lahf/test(0x05) SZP outcome
// ------- -------- --------- --- -------
// UNORDERED 111 0000_0101 001 SETNP fails
@@ -2657,7 +2680,7 @@ namespace nanojit
// GREATER_THAN 000 0000_0000 011 SETNP fails
// LESS_THAN 001 0000_0001 000 SETNP succeeds
//
// LIR_fle:
// LIR_led:
// fcom C3:C2:C0 lahf/test(0x41) SZP outcome
// ------- --- --------- --- -------
// UNORDERED 111 0100_0001 001 SETNP fails
@@ -2667,9 +2690,9 @@ namespace nanojit
int mask = 0; // init to avoid MSVC compile warnings
switch (condop) {
case LIR_feq: mask = 0x44; break;
case LIR_flt: mask = 0x05; break;
case LIR_fle: mask = 0x41; break;
case LIR_eqd: mask = 0x44; break;
case LIR_ltd: mask = 0x05; break;
case LIR_led: mask = 0x41; break;
default: NanoAssert(0); break;
}
@@ -2745,10 +2768,10 @@ namespace nanojit
assignSavedRegs();
LIns *val = ins->oprnd1();
if (ins->isop(LIR_ret)) {
if (ins->isop(LIR_reti)) {
findSpecificRegFor(val, retRegs[0]);
} else {
NanoAssert(ins->isop(LIR_fret));
NanoAssert(ins->isop(LIR_retd));
findSpecificRegFor(val, FST0);
fpu_pop();
}

View file

@@ -77,6 +77,12 @@
#include <os2.h>
#endif
#if defined(__SUNPRO_CC)
#define __asm__ asm
#define __volatile__ volatile
#define __inline__ inline
#endif
#if defined(DEBUG) || defined(NJ_NO_VARIADIC_MACROS)
#if !defined _DEBUG
#define _DEBUG
@@ -105,26 +111,15 @@ __declspec(naked) static inline __int64 rdtsc()
}
}
#elif defined(SOLARIS)
# define AVMPLUS_HAS_RDTSC 1
static inline unsigned long long rdtsc(void)
{
unsigned long long int x;
asm volatile (".byte 0x0f, 0x31" : "=A" (x));
return x;
}
#elif defined(__i386__)
#elif defined(__i386__) || defined(__i386)
# define AVMPLUS_HAS_RDTSC 1
static __inline__ unsigned long long rdtsc(void)
{
unsigned long long int x;
__asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));
return x;
__asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));
return x;
}
#endif /* compilers */
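Usage is straightforward; a hedged sketch (valid only for 32-bit x86 builds, matching the #elif above, and assuming a constant-rate TSC) that times a loop in raw cycles:

    #include <cstdio>

    static inline unsigned long long rdtsc_sketch(void) {
        unsigned long long x;
        __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x)); // RDTSC: result in EDX:EAX
        return x;
    }

    int main() {
        unsigned long long t0 = rdtsc_sketch();
        volatile int sink = 0;
        for (int i = 0; i < 1000; i++) sink += i;
        printf("elapsed cycles: %llu\n", rdtsc_sketch() - t0);
        return 0;
    }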

View file

@@ -233,7 +233,7 @@ namespace nanojit {
public:
// All Nanojit and jstracer printing should be routed through
// this function.
virtual ~LogControl() {}
virtual ~LogControl() {}
#ifdef NJ_VERBOSE
virtual void printf( const char* format, ... ) PRINTF_CHECK(2,3);
#endif

View file

@@ -712,7 +712,7 @@ ProcessArgs(JSContext *cx, JSObject *obj, char **argv, int argc)
case 'Z':
if (++i == argc)
return usage();
JS_SetGCZeal(cx, atoi(argv[i]));
JS_SetGCZeal(cx, !!(atoi(argv[i])));
break;
#endif
@@ -1457,12 +1457,12 @@ GetTrapArgs(JSContext *cx, uintN argc, jsval *argv, JSScript **scriptp,
static JSTrapStatus
TrapHandler(JSContext *cx, JSScript *script, jsbytecode *pc, jsval *rval,
void *closure)
jsval closure)
{
JSString *str;
JSStackFrame *caller;
str = (JSString *) closure;
str = JSVAL_TO_STRING(closure);
caller = JS_GetScriptedCaller(cx, NULL);
if (!JS_EvaluateUCInStackFrame(cx, caller,
JS_GetStringChars(str), JS_GetStringLength(str),
@@ -1493,7 +1493,7 @@ Trap(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
argv[argc] = STRING_TO_JSVAL(str);
if (!GetTrapArgs(cx, argc, argv, &script, &i))
return JS_FALSE;
return JS_SetTrap(cx, script, script->code + i, TrapHandler, str);
return JS_SetTrap(cx, script, script->code + i, TrapHandler, STRING_TO_JSVAL(str));
}
static JSBool
@@ -3056,7 +3056,7 @@ EvalInFrame(JSContext *cx, uintN argc, jsval *vp)
JSString *str = JSVAL_TO_STRING(argv[1]);
bool saveCurrent = (argc >= 3 && JSVAL_IS_BOOLEAN(argv[2]))
? (bool)JSVAL_TO_SPECIAL(argv[2])
? !!(JSVAL_TO_SPECIAL(argv[2]))
: false;
JS_ASSERT(cx->fp);
@@ -3749,7 +3749,8 @@ Parse(JSContext *cx, uintN argc, jsval *vp)
js::Parser parser(cx);
parser.init(JS_GetStringCharsZ(cx, scriptContents), JS_GetStringLength(scriptContents),
NULL, "<string>", 0);
parser.parse(NULL);
if (!parser.parse(NULL))
return JS_FALSE;
JS_SET_RVAL(cx, vp, JSVAL_VOID);
return JS_TRUE;
}

View file

@@ -243,7 +243,7 @@ skip-if(xulRuntime.OS=="WINNT"&&isDebugBuild) script regress-418540.js # slow
script regress-419018.js
script regress-419803.js
script regress-420919.js
fails-if(xulRuntime.OS=="Linux"&&xulRuntime.XPCOMABI.match(/x86_64/)) script regress-422348.js # No test results
skip-if(xulRuntime.OS=="Linux"&&xulRuntime.XPCOMABI.match(/x86_64/)) script regress-422348.js # On 64-bit Linux, takes forever rather than throwing
script regress-424311.js
skip-if(xulRuntime.OS=="WINNT"&&isDebugBuild) script regress-425360.js # slow
script regress-426827.js

View file

@@ -178,7 +178,7 @@ script regress-427196-01.js
script regress-427196-02.js
script regress-427196-03.js
script regress-429264.js
skip script regress-429739.js # Bug 520778 - wontfix
script regress-429739.js
script regress-431428.js
skip script regress-432075.js # obsolete test
script regress-434837-01.js

View file

@@ -37,7 +37,7 @@
var gTestfile = 'regress-369696-01.js';
//-----------------------------------------------------------------------------
var BUGNUMBER = 396696;
var BUGNUMBER = 369696;
var summary = 'Do not assert: map->depth > 0" in js_LeaveSharpObject';
var actual = '';
var expect = '';

View file

@@ -37,7 +37,7 @@
var gTestfile = 'regress-369696-02.js';
//-----------------------------------------------------------------------------
var BUGNUMBER = 396696;
var BUGNUMBER = 369696;
var summary = 'Do not assert: map->depth > 0" in js_LeaveSharpObject';
var actual = '';
var expect = '';

View file

@@ -37,7 +37,7 @@
var gTestfile = 'regress-369696-03.js';
//-----------------------------------------------------------------------------
var BUGNUMBER = 396696;
var BUGNUMBER = 369696;
var summary = 'Do not assert: map->depth > 0" in js_LeaveSharpObject';
var actual = '';
var expect = '';

View file

@@ -53,7 +53,7 @@ function test()
printBugNumber(BUGNUMBER);
printStatus (summary);
expect = 'function anonymous(y) {}';
expect = 'TypeError: o.y is not a constructor';
try
{
@@ -65,7 +65,7 @@ function test()
actual = ex + '';
}
compareSource(expect, actual, summary);
reportCompare(expect, actual, summary);
exitFunc ('test');
}

View file

@@ -7,7 +7,7 @@ fails-if(!xulRuntime.shell) script regress-455464-01.js # bug - NS_ERROR_DOM_NOT
fails-if(!xulRuntime.shell) script regress-455464-02.js # bug - NS_ERROR_DOM_NOT_SUPPORTED_ERR line 49
fails-if(!xulRuntime.shell) script regress-455464-03.js # bug - NS_ERROR_DOM_NOT_SUPPORTED_ERR line 1
fails-if(!xulRuntime.shell&&!isDebugBuild) skip-if(!xulRuntime.shell&&isDebugBuild) script regress-455464-04.js # bug xxx - hangs reftests in debug, ### bug xxx - NS_ERROR_DOM_NOT_SUPPORTED_ERR in opt
fails-if(xulRuntime.shell) skip-if(!xulRuntime.shell) script regress-456826.js # bug 504632
skip-if(!xulRuntime.shell) script regress-456826.js # bug 504632
script regress-457521.js
script regress-465443.js
script regress-470310.js

View file

@@ -58,6 +58,7 @@ function test()
if (typeof gcparam != 'undefined')
{
gcparam("maxBytes", 22000);
expectExitCode(5);
}
const numRows = 600;

View file

@@ -11,3 +11,6 @@ script regress-546615.js
script regress-555246-0.js
fails script regress-555246-1.js
script regress-559438.js
script regress-560101.js
script regress-560998-1.js
script regress-560998-2.js

View file

@@ -0,0 +1,4 @@
try {
Object.defineProperty(<x/>, "p", {}); // don't assert
} catch (exc) {}
reportCompare(0, 0, "ok");

View file

@@ -0,0 +1,10 @@
// Any copyright is dedicated to the Public Domain.
// http://creativecommons.org/licenses/publicdomain/
// Contributor: Jesse Ruderman
for (let j = 0; j < 4; ++j) {
function g() { j; }
g();
}
reportCompare(0, 0, "ok");

View file

@@ -0,0 +1,10 @@
// Any copyright is dedicated to the Public Domain.
// http://creativecommons.org/licenses/publicdomain/
// Contributor: Jason Orendorff <jorendorff@mozilla.com>
var x = {};
for (var i = 0; i < 2; i++) {
Object.defineProperty(x, "y", {configurable: true, value: function () {}});
x.y();
}
reportCompare(0, 0, "ok");

View file

@@ -125,10 +125,10 @@ class ResultsSink:
# key is (result, expect, random)
# value is (tinderbox label, dev test category)
LABELS = {
(TestResult.CRASH, False, False): ('TEST-KNOWN-FAIL', ''),
(TestResult.CRASH, False, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),
(TestResult.CRASH, False, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
(TestResult.CRASH, False, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
(TestResult.CRASH, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
(TestResult.CRASH, True, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),
(TestResult.CRASH, True, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
(TestResult.FAIL, False, False): ('TEST-KNOWN-FAIL', ''),
(TestResult.FAIL, False, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),
@@ -201,7 +201,7 @@ if __name__ == '__main__':
op.add_option('-t', '--timeout', dest='timeout', type=float, default=60.0,
help='set test timeout in seconds')
op.add_option('-d', '--exclude-random', dest='random', action='store_false',
help='exclude tests marked random')
help='exclude tests marked random', default=True)
op.add_option('--run-skipped', dest='run_skipped', action='store_true',
help='run skipped tests')
op.add_option('--run-only-skipped', dest='run_only_skipped', action='store_true',

View file

@@ -131,6 +131,8 @@ class TestResult:
passes = 0
expected_rcs = []
if test.path.endswith('-n.js'):
expected_rcs.append(3)
for line in out.split('\n'):
if line.startswith(' FAILED!'):
@@ -146,13 +148,13 @@ class TestResult:
if m:
expected_rcs.append(int(m.group(1)))
if rc:
if (test.path.endswith('-n.js') and rc == 3) or rc in expected_rcs:
result = cls.PASS
if rc and not rc in expected_rcs:
if rc == 3:
result = cls.FAIL
else:
result = cls.CRASH
else:
if passes > 0 and failures == 0:
if (rc or passes > 0) and failures == 0:
result = cls.PASS
else:
result = cls.FAIL

View file

@@ -0,0 +1,10 @@
function test() {
var a = ['x', '', '', '', '', '', '', 'x'];
var b = '';
for (var i = 0; i < a.length; i++) {
(function() {
a[i].replace(/x/, function() { return b; });
}());
}
}
test(); // should NOT get a ReferenceError for b on trace

View file

@@ -0,0 +1,12 @@
function f(a) {
function g() {
yield function () a;
}
if (a == 8)
return g();
a = 3;
}
var x;
for (var i = 0; i < 9; i++)
x = f(i);
x.next()(); // ReferenceError: a is not defined.

View file

@@ -0,0 +1,11 @@
function f(a, b) { return a + " " + b; }
for (var i = 0; i < 10; ++i) {
var s = '';
var a = {toString: function () { s += 'a'; return 'a'; }};
var b = {toString: function () { s += 'b'; return 'b'; }};
f(a, b);
assertEq(s, 'ab');
}
checkStats({ traceTriggered:1 });

View file

@@ -0,0 +1,10 @@
var obj = {p: 100};
var name = "p";
var a = [];
for (var i = 0; i < 10; i++)
a[i] = --obj[name];
assertEq(a.join(','), '99,98,97,96,95,94,93,92,91,90');
assertEq(obj.p, 90);
checkStats({recorderStarted: 1, recorderAborted: 0, traceCompleted: 1, traceTriggered: 1});

View file

@@ -0,0 +1,11 @@
var obj = {s: ""};
var name = "s";
var a = [];
for (var i = 0; i <= RECORDLOOP + 5; i++) {
a[i] = 'x';
if (i > RECORDLOOP)
a[i] = --obj[name]; // first recording changes obj.s from string to number
}
assertEq(a.join(','), Array(RECORDLOOP + 2).join('x,') + '-1,-2,-3,-4,-5');
assertEq(obj.s, -5);

View file

@@ -0,0 +1,20 @@
var o = {};
var arr = [o,o,o,o,o,o,o,o,o,o,o,o,o];
var out = [];
const OUTER = 10;
for (var i = 0; i < 10; ++i) {
for (var j = 0; j < arr.length; ++j) {
out.push(String.prototype.indexOf.call(arr[i], 'object'));
}
}
assertEq(out.length, 10 * arr.length);
for (var i = 0; i < out.length; ++i)
assertEq(out[i], 1);
checkStats({
traceCompleted:2,
recorderAborted:1
});

View file

@@ -0,0 +1,10 @@
var obj = {p: 100};
var name = "p";
var a = [];
for (var i = 0; i < 10; i++)
a[i] = obj[name]--;
assertEq(a.join(','), '100,99,98,97,96,95,94,93,92,91');
assertEq(obj.p, 90);
checkStats({recorderStarted: 1, recorderAborted: 0, traceCompleted: 1, traceTriggered: 1});

View file

@@ -0,0 +1,7 @@
var obj = {s: ""};
var name = "s";
for (var i = 0; i <= RECORDLOOP + 5; i++)
if (i > RECORDLOOP)
obj[name]--; // first recording changes obj.s from string to number
assertEq(obj.s, -5);

View file

@@ -0,0 +1,10 @@
var obj = {p: 100};
var name = "p";
var a = [];
for (var i = 0; i < 10; i++)
a[i] = obj[name]++;
assertEq(a.join(','), '100,101,102,103,104,105,106,107,108,109');
assertEq(obj.p, 110);
checkStats({recorderStarted: 1, recorderAborted: 0, traceCompleted: 1, traceTriggered: 1});

View file

@@ -0,0 +1,7 @@
var obj = {s: ""};
var name = "s";
for (var i = 0; i <= RECORDLOOP + 5; i++)
if (i > RECORDLOOP)
obj[name]++; // first recording changes obj.s from string to number
assertEq(obj.s, 5);

View file

@@ -0,0 +1,10 @@
var obj = {p: 100};
var name = "p";
var a = [];
for (var i = 0; i < 10; i++)
a[i] = ++obj[name];
assertEq(a.join(','), '101,102,103,104,105,106,107,108,109,110');
assertEq(obj.p, 110);
checkStats({recorderStarted: 1, recorderAborted: 0, traceCompleted: 1, traceTriggered: 1});

View file

@@ -0,0 +1,11 @@
var obj = {s: ""};
var name = "s";
var a = [];
for (var i = 0; i <= RECORDLOOP + 5; i++) {
a[i] = 'x';
if (i > RECORDLOOP)
a[i] = ++obj[name]; // first recording changes obj.s from string to number
}
assertEq(a.join(','), Array(RECORDLOOP + 2).join('x,') + '1,2,3,4,5');
assertEq(obj.s, 5);

View file

@@ -8,7 +8,4 @@ for (var i = 0; i < 9; i++)
g += a.p();
assertEq(g, 'qqqqqqqqq');
if (this['jitstats'])
print(uneval(this['jitstats']));
checkStats({recorderStarted: 1, recorderAborted: 0, traceCompleted: 1, traceTriggered: 1});

Some files were not shown because too many files changed in this diff.