Mirror of https://github.com/mozilla/gecko-dev.git
Merge tracemonkey to mozilla-central.
Commit a85a64b09a
@@ -374,17 +374,13 @@ private:
void *mPtr;
};

class AutoFreeJSStack {
class nsAutoPoolRelease {
public:
AutoFreeJSStack(JSContext *ctx, void *aPtr) : mContext(ctx), mStack(aPtr) {
}
JS_REQUIRES_STACK ~AutoFreeJSStack() {
if (mContext && mStack)
js_FreeStack(mContext, mStack);
}
nsAutoPoolRelease(JSArenaPool *p, void *m) : mPool(p), mMark(m) {}
~nsAutoPoolRelease() { JS_ARENA_RELEASE(mPool, mMark); }
private:
JSContext *mContext;
void *mStack;
JSArenaPool *mPool;
void *mMark;
};

// A utility function for script languages to call. Although it looks small,
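The hunk above swaps a js_AllocStack/js_FreeStack guard for an arena mark/release guard. As a rough, self-contained illustration of the idiom nsAutoPoolRelease implements — "Arena" here is a hypothetical stand-in, not the real JSArenaPool machinery:

#include <cstddef>

struct Arena {
    char buf[4096];
    std::size_t used;
    Arena() : used(0) {}
    void *mark() { return buf + used; }        // remember the current top
    void *alloc(std::size_t n) {               // bump allocation; no bounds check
        void *p = buf + used;
        used += n;
        return p;
    }
    void release(void *m) {                    // pop everything above the mark
        used = static_cast<char *>(m) - buf;
    }
};

class AutoRelease {                            // plays the role of nsAutoPoolRelease
    Arena *pool;
    void *mark;
  public:
    AutoRelease(Arena *p, void *m) : pool(p), mark(m) {}
    ~AutoRelease() { pool->release(mark); }    // runs on every exit path
};

void UseScratch(Arena &a)
{
    AutoRelease guard(&a, a.mark());           // take the mark before allocating
    int *scratch = static_cast<int *>(a.alloc(16 * sizeof(int)));
    scratch[0] = 0;                            // ... use the scratch space ...
}                                              // guard releases back to the mark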
@@ -2143,24 +2139,24 @@ nsJSContext::CallEventHandler(nsISupports* aTarget, void *aScope, void *aHandler

if (NS_SUCCEEDED(rv)) {
// Convert args to jsvals.
void *mark;
PRUint32 argc = 0;
jsval *argv = nsnull;

js::LazilyConstructed<nsAutoPoolRelease> poolRelease;
js::LazilyConstructed<JSAutoTempValueRooter> tvr;

// Use |target| as the scope for wrapping the arguments, since aScope is
// the safe scope in many cases, which isn't very useful. Wrapping aTarget
// was OK because those typically have PreCreate methods that give them the
// right scope anyway, and we want to make sure that the arguments end up
// in the same scope as aTarget.
rv = ConvertSupportsTojsvals(aargv, target, &argc,
reinterpret_cast<void **>(&argv), &mark);
&argv, poolRelease, tvr);
if (NS_FAILED(rv)) {
stack->Pop(nsnull);
return rv;
}

AutoFreeJSStack stackGuard(mContext, mark); // ensure always freed.

jsval funval = OBJECT_TO_JSVAL(static_cast<JSObject *>(aHandler));
JSAutoRequest ar(mContext);
++mExecuteDepth;

@@ -2655,15 +2651,16 @@ nsJSContext::SetProperty(void *aTarget, const char *aPropName, nsISupports *aArg
{
PRUint32 argc;
jsval *argv = nsnull;
void *mark;

JSAutoRequest ar(mContext);

js::LazilyConstructed<nsAutoPoolRelease> poolRelease;
js::LazilyConstructed<JSAutoTempValueRooter> tvr;

nsresult rv;
rv = ConvertSupportsTojsvals(aArgs, GetNativeGlobal(), &argc,
reinterpret_cast<void **>(&argv), &mark);
&argv, poolRelease, tvr);
NS_ENSURE_SUCCESS(rv, rv);
AutoFreeJSStack stackGuard(mContext, mark); // ensure always freed.

jsval vargs;

@@ -2690,26 +2687,24 @@ nsJSContext::SetProperty(void *aTarget, const char *aPropName, nsISupports *aArg
nsresult
nsJSContext::ConvertSupportsTojsvals(nsISupports *aArgs,
void *aScope,
PRUint32 *aArgc, void **aArgv,
void **aMarkp)
PRUint32 *aArgc,
jsval **aArgv,
js::LazilyConstructed<nsAutoPoolRelease> &aPoolRelease,
js::LazilyConstructed<JSAutoTempValueRooter> &aRooter)
{
nsresult rv = NS_OK;

js_LeaveTrace(mContext);

// If the array implements nsIJSArgArray, just grab the values directly.
nsCOMPtr<nsIJSArgArray> fastArray = do_QueryInterface(aArgs);
if (fastArray != nsnull) {
*aMarkp = nsnull;
return fastArray->GetArgs(aArgc, aArgv);
}
if (fastArray != nsnull)
return fastArray->GetArgs(aArgc, reinterpret_cast<void **>(aArgv));

// Take the slower path converting each item.
// Handle only nsIArray and nsIVariant. nsIArray is only needed for
// SetProperty('arguments', ...);

*aArgv = nsnull;
*aArgc = 0;
*aMarkp = nsnull;

nsIXPConnect *xpc = nsContentUtils::XPConnect();
NS_ENSURE_TRUE(xpc, NS_ERROR_UNEXPECTED);

@@ -2730,9 +2725,16 @@ nsJSContext::ConvertSupportsTojsvals(nsISupports *aArgs,
argCount = 1; // the nsISupports which is not an array
}

jsval *argv = js_AllocStack(mContext, argCount, aMarkp);
void *mark = JS_ARENA_MARK(&mContext->tempPool);
jsval *argv;
JS_ARENA_ALLOCATE_CAST(argv, jsval *, &mContext->tempPool,
argCount * sizeof(jsval));
NS_ENSURE_TRUE(argv, NS_ERROR_OUT_OF_MEMORY);

/* Use the caller's auto guards to release and unroot. */
aPoolRelease.construct(&mContext->tempPool, mark);
aRooter.construct(mContext, argCount, argv);

if (argsArray) {
for (argCtr = 0; argCtr < argCount && NS_SUCCEEDED(rv); argCtr++) {
nsCOMPtr<nsISupports> arg;
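The guards above are constructed lazily so that release and unroot happen in the caller's scope, after the converted jsvals have been consumed. A minimal sketch of the deferred-construction idea behind js::LazilyConstructed — the real type differs in detail; this only shows placement-new into caller-owned storage:

#include <new>

template <class T>
class LazilyConstructed {
    union { char storage[sizeof(T)]; long double align; }; // crude alignment
    bool constructed;
  public:
    LazilyConstructed() : constructed(false) {}
    template <class A, class B>
    void construct(A a, B b) {
        new (storage) T(a, b);          // build T in place, in the caller's frame
        constructed = true;
    }
    ~LazilyConstructed() {              // destroy only if construct() ever ran
        if (constructed)
            reinterpret_cast<T *>(storage)->~T();
    }
};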
@@ -2781,10 +2783,8 @@ nsJSContext::ConvertSupportsTojsvals(nsISupports *aArgs,
rv = NS_ERROR_UNEXPECTED;
}
}
if (NS_FAILED(rv)) {
js_FreeStack(mContext, *aMarkp);
if (NS_FAILED(rv))
return rv;
}
*aArgv = argv;
*aArgc = argCount;
return NS_OK;
@@ -48,6 +48,9 @@
#include "nsScriptNameSpaceManager.h"

class nsIXPConnectJSObjectHolder;
class nsAutoPoolRelease;
class JSAutoTempValueRooter;
namespace js { template <class> class LazilyConstructed; }

class nsJSContext : public nsIScriptContext,
public nsIXPCScriptNotify

@@ -207,11 +210,12 @@ protected:
nsresult InitializeExternalClasses();

// Helper to convert xpcom datatypes to jsvals.
JS_FORCES_STACK nsresult ConvertSupportsTojsvals(nsISupports *aArgs,
void *aScope,
PRUint32 *aArgc,
void **aArgv,
void **aMarkp);
nsresult ConvertSupportsTojsvals(nsISupports *aArgs,
void *aScope,
PRUint32 *aArgc,
jsval **aArgv,
js::LazilyConstructed<nsAutoPoolRelease> &aPoolRelease,
js::LazilyConstructed<JSAutoTempValueRooter> &aRooter);

nsresult AddSupportsPrimitiveTojsvals(nsISupports *aArg, jsval *aArgv);

@@ -997,8 +997,7 @@ nsDOMThreadService::CreateJSContext()
};
JS_SetContextSecurityCallbacks(cx, &securityCallbacks);

static JSDebugHooks debugHooks;
JS_SetContextDebugHooks(cx, &debugHooks);
JS_ClearContextDebugHooks(cx);

nsresult rv = nsContentUtils::XPConnect()->
SetSecurityManagerForJSContext(cx, gWorkerSecurityManager, 0);

@@ -95,8 +95,6 @@
#include "nsIPrefBranch.h"
#include "nsIPrefService.h"

#include "jsinterp.h" // for js_AllocStack() and js_FreeStack()

#ifdef USEWEAKREFS
#include "nsIWeakReference.h"
#endif
@@ -19,6 +19,7 @@ from mercurial import ui, hg
from hgext.convert.filemap import filemapper
from optparse import OptionParser

import sys

parser = OptionParser()

@@ -63,3 +64,6 @@ while len(revs) != 0:
u.write("%s %s\n" % (child.hex(), dst_tip))
exit(0);
revs.extend(child.children())

sys.stderr.write("No candidate child found in source repository\n")
exit(1)
@@ -916,6 +916,7 @@ uint8 js_opcode2extra[JSOP_LIMIT] = {
3, /* JSOP_CONCATN */
0, /* JSOP_SETMETHOD */
0, /* JSOP_INITMETHOD */
0, /* JSOP_UNBRAND */
0, /* JSOP_SHARPINIT */
};
#define JSOP_IS_IMACOP(x) (0 \
@@ -98,6 +98,8 @@
#include "jsxml.h"
#endif

using namespace js;

#ifdef HAVE_VA_LIST_AS_ARRAY
#define JS_ADDRESSOF_VA_LIST(ap) ((va_list *)(ap))
#else

@@ -334,7 +336,7 @@ JS_PushArgumentsVA(JSContext *cx, void **markp, const char *format, va_list ap)
continue;
argc++;
}
js_LeaveTrace(cx);
LeaveTrace(cx);
sp = js_AllocStack(cx, argc, markp);
if (!sp)
return NULL;

@@ -969,7 +971,7 @@ JS_EndRequest(JSContext *cx)
JS_ASSERT(cx->requestDepth > 0);
JS_ASSERT(cx->outstandingRequests > 0);
if (cx->requestDepth == 1) {
js_LeaveTrace(cx); /* for GC safety */
LeaveTrace(cx); /* for GC safety */

/* Lock before clearing to interlock with ClaimScope, in jslock.c. */
rt = cx->runtime;

@@ -2046,7 +2048,7 @@ JS_TraceRuntime(JSTracer *trc)
{
JSBool allAtoms = trc->context->runtime->gcKeepAtoms != 0;

js_LeaveTrace(trc->context);
LeaveTrace(trc->context);
js_TraceRuntime(trc, allAtoms);
}

@@ -2452,7 +2454,7 @@ JS_IsGCMarkingTracer(JSTracer *trc)
JS_PUBLIC_API(void)
JS_GC(JSContext *cx)
{
js_LeaveTrace(cx);
LeaveTrace(cx);

/* Don't nuke active arenas if executing or compiling. */
if (cx->stackPool.current == &cx->stackPool.first)

@@ -2603,7 +2605,7 @@ JS_SetGCParameterForThread(JSContext *cx, JSGCParamKey key, uint32 value)
{
JS_ASSERT(key == JSGC_MAX_CODE_CACHE_BYTES);
#ifdef JS_TRACER
js_SetMaxCodeCacheBytes(cx, value);
SetMaxCodeCacheBytes(cx, value);
#endif
}

@@ -2622,7 +2624,7 @@ JS_PUBLIC_API(void)
JS_FlushCaches(JSContext *cx)
{
#ifdef JS_TRACER
js_FlushJITCache(cx);
FlushJITCache(cx);
#endif
}

@@ -5173,44 +5175,21 @@ JS_IsRunning(JSContext *cx)
return cx->fp != NULL;
}

JS_PUBLIC_API(JSBool)
JS_IsConstructing(JSContext *cx)
{
#ifdef JS_TRACER
if (JS_ON_TRACE(cx)) {
JS_ASSERT(cx->bailExit);
return *cx->bailExit->pc == JSOP_NEW;
}
#endif

JSStackFrame *fp = js_GetTopStackFrame(cx);
return fp && (fp->flags & JSFRAME_CONSTRUCTING);
}

JS_FRIEND_API(JSBool)
JS_IsAssigning(JSContext *cx)
{
JSStackFrame *fp;

fp = js_GetScriptedCaller(cx, NULL);
if (!fp || !fp->regs)
return JS_FALSE;
return (js_CodeSpec[*fp->regs->pc].format & JOF_ASSIGNING) != 0;
return cx->isConstructing();
}

JS_PUBLIC_API(JSStackFrame *)
JS_SaveFrameChain(JSContext *cx)
{
JSStackFrame *fp;

fp = js_GetTopStackFrame(cx);
JSStackFrame *fp = js_GetTopStackFrame(cx);
if (!fp)
return fp;

JS_ASSERT(!fp->dormantNext);
fp->dormantNext = cx->dormantFrameChain;
cx->dormantFrameChain = fp;
cx->fp = NULL;
return NULL;
cx->saveActiveCallStack();
return fp;
}

@@ -5221,11 +5200,7 @@ JS_RestoreFrameChain(JSContext *cx, JSStackFrame *fp)
JS_ASSERT(!cx->fp);
if (!fp)
return;

JS_ASSERT(fp == cx->dormantFrameChain);
cx->fp = fp;
cx->dormantFrameChain = fp->dormantNext;
fp->dormantNext = NULL;
cx->restoreCallStack();
}

/************************************************************************/
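JS_SaveFrameChain and JS_RestoreFrameChain keep their public shape; only the implementation now delegates to the callstack bookkeeping. A hedged sketch of typical embedder usage — CallIntoFreshChain is illustrative, not part of this commit, and headers and error handling are elided:

static JSBool
CallIntoFreshChain(JSContext *cx, JSObject *global, const char *src)
{
    /* Suspend the active callstack; fp names the saved chain. */
    JSStackFrame *fp = JS_SaveFrameChain(cx);

    jsval rval;
    JSBool ok = JS_EvaluateScript(cx, global, src, (uintN) strlen(src),
                                  "fresh-chain", 1, &rval);

    /* Reactivate the saved callstack, whether or not evaluation succeeded. */
    JS_RestoreFrameChain(cx, fp);
    return ok;
}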
@@ -2365,14 +2365,6 @@ JS_IsRunning(JSContext *cx);
extern JS_PUBLIC_API(JSBool)
JS_IsConstructing(JSContext *cx);

/*
* Returns true if a script is executing and its current bytecode is a set
* (assignment) operation, even if there are native (no script) stack frames
* between the script and the caller to JS_IsAssigning.
*/
extern JS_FRIEND_API(JSBool)
JS_IsAssigning(JSContext *cx);

/*
* Saving and restoring frame chains.
*
@@ -104,6 +104,8 @@

#include "jsatominlines.h"

using namespace js;

/* 2^32 - 1 as a number and a string */
#define MAXINDEX 4294967295u
#define MAXSTR "4294967295"

@@ -970,7 +972,7 @@ static JSBool
array_defineProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
JSPropertyOp getter, JSPropertyOp setter, uintN attrs)
{
uint32 i;
uint32 i = 0; // init to shut GCC up
JSBool isIndex;

if (id == ATOM_TO_JSID(cx->runtime->atomState.lengthAtom))

@@ -1343,7 +1345,7 @@ js_MakeArraySlow(JSContext *cx, JSObject *obj)
return JS_TRUE;

out_bad:
JSScope::destroy(cx, scope);
scope->destroy(cx);
return JS_FALSE;
}

@@ -1765,7 +1767,7 @@ Array_p_join(JSContext* cx, JSObject* obj, JSString *str)
{
JSAutoTempValueRooter tvr(cx);
if (!array_toString_sub(cx, obj, JS_FALSE, str, tvr.addr())) {
js_SetBuiltinError(cx);
SetBuiltinError(cx);
return NULL;
}
return JSVAL_TO_STRING(tvr.value());

@@ -1776,7 +1778,7 @@ Array_p_toString(JSContext* cx, JSObject* obj)
{
JSAutoTempValueRooter tvr(cx);
if (!array_toString_sub(cx, obj, JS_FALSE, NULL, tvr.addr())) {
js_SetBuiltinError(cx);
SetBuiltinError(cx);
return NULL;
}
return JSVAL_TO_STRING(tvr.value());

@@ -2305,7 +2307,7 @@ array_sort(JSContext *cx, uintN argc, jsval *vp)
} else {
void *mark;

js_LeaveTrace(cx);
LeaveTrace(cx);

ca.context = cx;
ca.fval = fval;

@@ -2435,7 +2437,7 @@ Array_p_push1(JSContext* cx, JSObject* obj, jsval v)
: array_push_slowly(cx, obj, 1, tvr.addr(), tvr.addr())) {
return tvr.value();
}
js_SetBuiltinError(cx);
SetBuiltinError(cx);
return JSVAL_VOID;
}
#endif

@@ -2507,7 +2509,7 @@ Array_p_pop(JSContext* cx, JSObject* obj)
: array_pop_slowly(cx, obj, tvr.addr())) {
return tvr.value();
}
js_SetBuiltinError(cx);
SetBuiltinError(cx);
return JSVAL_VOID;
}
#endif

@@ -3160,7 +3162,7 @@ array_extra(JSContext *cx, ArrayExtraMode mode, uintN argc, jsval *vp)
* For all but REDUCE, we call with 3 args (value, index, array). REDUCE
* requires 4 args (accum, value, index, array).
*/
js_LeaveTrace(cx);
LeaveTrace(cx);
argc = 3 + REDUCE_MODE(mode);
elemroot = js_AllocStack(cx, 1 + 2 + argc, &mark);
if (!elemroot)
@@ -66,11 +66,12 @@

using namespace avmplus;
using namespace nanojit;
using namespace js;

JS_FRIEND_API(void)
js_SetTraceableNativeFailed(JSContext *cx)
{
js_SetBuiltinError(cx);
SetBuiltinError(cx);
}

/*

@@ -239,12 +240,12 @@ js_AddProperty(JSContext* cx, JSObject* obj, JSScopeProperty* sprop)
goto exit_trace;
JS_ASSERT(sprop->parent == scope->lastProperty());

if (scope->owned()) {
JS_ASSERT(!scope->hasProperty(sprop));
} else {
if (scope->isSharedEmpty()) {
scope = js_GetMutableScope(cx, obj);
if (!scope)
goto exit_trace;
} else {
JS_ASSERT(!scope->hasProperty(sprop));
}

if (!scope->table) {
@@ -225,7 +225,7 @@ struct ClosureVarInfo;
#define _JS_CTYPE_CLASS _JS_CTYPE(JSClass *, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_DOUBLEPTR _JS_CTYPE(double *, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_CHARPTR _JS_CTYPE(char *, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_APNPTR _JS_CTYPE(js_ArgsPrivateNative *, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_APNPTR _JS_CTYPE(ArgsPrivateNative *, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_CVIPTR _JS_CTYPE(const ClosureVarInfo *, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_FRAMEINFO _JS_CTYPE(FrameInfo *, _JS_PTR, --, --, INFALLIBLE)

@@ -441,7 +441,7 @@ js_StringToNumber(JSContext* cx, JSString* str);
jsdouble FASTCALL
js_BooleanOrUndefinedToNumber(JSContext* cx, int32 unboxed);

/* Extern version of js_SetBuiltinError. */
/* Extern version of SetBuiltinError. */
extern JS_FRIEND_API(void)
js_SetTraceableNativeFailed(JSContext *cx);

@@ -464,11 +464,6 @@ js_dmod(jsdouble a, jsdouble b);

#endif /* !JS_TRACER */

/* Defined in jsobj.cpp. */
JS_DECLARE_CALLINFO(js_Object_tn)
JS_DECLARE_CALLINFO(js_NewInstance)
JS_DECLARE_CALLINFO(js_NonEmptyObject)

/* Defined in jsarray.cpp. */
JS_DECLARE_CALLINFO(js_Array_dense_setelem)
JS_DECLARE_CALLINFO(js_Array_dense_setelem_int)

@@ -478,17 +473,50 @@ JS_DECLARE_CALLINFO(js_NewEmptyArrayWithLength)
JS_DECLARE_CALLINFO(js_NewArrayWithSlots)
JS_DECLARE_CALLINFO(js_ArrayCompPush)

/* Defined in jsbuiltins.cpp. */
JS_DECLARE_CALLINFO(js_BoxDouble)
JS_DECLARE_CALLINFO(js_BoxInt32)
JS_DECLARE_CALLINFO(js_UnboxDouble)
JS_DECLARE_CALLINFO(js_UnboxInt32)
JS_DECLARE_CALLINFO(js_TryUnboxInt32)
JS_DECLARE_CALLINFO(js_dmod)
JS_DECLARE_CALLINFO(js_imod)
JS_DECLARE_CALLINFO(js_DoubleToInt32)
JS_DECLARE_CALLINFO(js_DoubleToUint32)
JS_DECLARE_CALLINFO(js_StringToNumber)
JS_DECLARE_CALLINFO(js_StringToInt32)
JS_DECLARE_CALLINFO(js_AddProperty)
JS_DECLARE_CALLINFO(js_HasNamedProperty)
JS_DECLARE_CALLINFO(js_HasNamedPropertyInt32)
JS_DECLARE_CALLINFO(js_TypeOfObject)
JS_DECLARE_CALLINFO(js_TypeOfBoolean)
JS_DECLARE_CALLINFO(js_BooleanOrUndefinedToNumber)
JS_DECLARE_CALLINFO(js_BooleanOrUndefinedToString)
JS_DECLARE_CALLINFO(js_NewNullClosure)
JS_DECLARE_CALLINFO(js_PopInterpFrame)
JS_DECLARE_CALLINFO(js_ConcatN)

/* Defined in jsfun.cpp. */
JS_DECLARE_CALLINFO(js_AllocFlatClosure)
JS_DECLARE_CALLINFO(js_PutArguments)

/* Defined in jsfun.cpp. */
JS_DECLARE_CALLINFO(js_PutCallObjectOnTrace)
JS_DECLARE_CALLINFO(js_SetCallVar)
JS_DECLARE_CALLINFO(js_SetCallArg)
JS_DECLARE_CALLINFO(js_CloneFunctionObject)
JS_DECLARE_CALLINFO(js_CreateCallObjectOnTrace)
JS_DECLARE_CALLINFO(js_Arguments)

/* Defined in jsiter.cpp. */
JS_DECLARE_CALLINFO(js_CloseIterator)

/* Defined in jsnum.cpp. */
JS_DECLARE_CALLINFO(js_NumberToString)

/* Defined in jsobj.cpp. */
JS_DECLARE_CALLINFO(js_Object_tn)
JS_DECLARE_CALLINFO(js_NewInstance)
JS_DECLARE_CALLINFO(js_NonEmptyObject)

/* Defined in jsstr.cpp. */
JS_DECLARE_CALLINFO(js_String_tn)
JS_DECLARE_CALLINFO(js_CompareStrings)

@@ -501,31 +529,4 @@ JS_DECLARE_CALLINFO(js_String_p_charCodeAt0_int)
JS_DECLARE_CALLINFO(js_String_p_charCodeAt_double_int)
JS_DECLARE_CALLINFO(js_String_p_charCodeAt_int_int)

/* Defined in jsbuiltins.cpp. */
JS_DECLARE_CALLINFO(js_BoxDouble)
JS_DECLARE_CALLINFO(js_BoxInt32)
JS_DECLARE_CALLINFO(js_UnboxDouble)
JS_DECLARE_CALLINFO(js_UnboxInt32)
JS_DECLARE_CALLINFO(js_TryUnboxInt32)
JS_DECLARE_CALLINFO(js_dmod)
JS_DECLARE_CALLINFO(js_imod)
JS_DECLARE_CALLINFO(js_DoubleToInt32)
JS_DECLARE_CALLINFO(js_DoubleToUint32)

JS_DECLARE_CALLINFO(js_StringToNumber)
JS_DECLARE_CALLINFO(js_StringToInt32)
JS_DECLARE_CALLINFO(js_CloseIterator)
JS_DECLARE_CALLINFO(js_CallTree)
JS_DECLARE_CALLINFO(js_AddProperty)
JS_DECLARE_CALLINFO(js_HasNamedProperty)
JS_DECLARE_CALLINFO(js_HasNamedPropertyInt32)
JS_DECLARE_CALLINFO(js_TypeOfObject)
JS_DECLARE_CALLINFO(js_TypeOfBoolean)
JS_DECLARE_CALLINFO(js_BooleanOrUndefinedToNumber)
JS_DECLARE_CALLINFO(js_BooleanOrUndefinedToString)
JS_DECLARE_CALLINFO(js_Arguments)
JS_DECLARE_CALLINFO(js_NewNullClosure)
JS_DECLARE_CALLINFO(js_ConcatN)
JS_DECLARE_CALLINFO(js_PopInterpFrame)

#endif /* jsbuiltins_h___ */
@@ -71,12 +71,35 @@
#include "jsstr.h"
#include "jstracer.h"

using namespace js;

static void
FreeContext(JSContext *cx);

static void
MarkLocalRoots(JSTracer *trc, JSLocalRootStack *lrs);

#ifdef DEBUG
bool
CallStack::contains(JSStackFrame *fp)
{
JSStackFrame *start;
JSStackFrame *stop;
if (isSuspended()) {
start = suspendedFrame;
stop = initialFrame->down;
} else {
start = cx->fp;
stop = cx->activeCallStack()->initialFrame->down;
}
for (JSStackFrame *f = start; f != stop; f = f->down) {
if (f == fp)
return true;
}
return false;
}
#endif

void
JSThreadData::init()
{

@@ -86,7 +109,7 @@ JSThreadData::init()
JS_ASSERT(reinterpret_cast<uint8*>(this)[i] == 0);
#endif
#ifdef JS_TRACER
js_InitJIT(&traceMonitor);
InitJIT(&traceMonitor);
#endif
js_InitRandom(this);
}

@@ -107,7 +130,7 @@ JSThreadData::finish()
js_FinishGSNCache(&gsnCache);
js_FinishPropertyCache(&propertyCache);
#if defined JS_TRACER
js_FinishJIT(&traceMonitor);
FinishJIT(&traceMonitor);
#endif
}

@@ -529,6 +552,9 @@ js_NewContext(JSRuntime *rt, size_t stackChunkSize)
ok = js_InitRuntimeNumberState(cx);
if (ok)
ok = js_InitRuntimeStringState(cx);
if (ok)
ok = JSScope::initRuntimeState(cx);

#ifdef JS_THREADSAFE
JS_EndRequest(cx);
#endif

@@ -731,7 +757,7 @@ js_DestroyContext(JSContext *cx, JSDestroyContextMode mode)
JS_BeginRequest(cx);
#endif

/* Unlock and clear GC things held by runtime pointers. */
JSScope::finishRuntimeState(cx);
js_FinishRuntimeNumberState(cx);
js_FinishRuntimeStringState(cx);

@@ -908,44 +934,6 @@ js_WaitForGC(JSRuntime *rt)
}
}

uint32
js_DiscountRequestsForGC(JSContext *cx)
{
uint32 requestDebit;

JS_ASSERT(cx->thread);
JS_ASSERT(cx->runtime->gcThread != cx->thread);

#ifdef JS_TRACER
if (JS_ON_TRACE(cx)) {
JS_UNLOCK_GC(cx->runtime);
js_LeaveTrace(cx);
JS_LOCK_GC(cx->runtime);
}
#endif

requestDebit = js_CountThreadRequests(cx);
if (requestDebit != 0) {
JSRuntime *rt = cx->runtime;
JS_ASSERT(requestDebit <= rt->requestCount);
rt->requestCount -= requestDebit;
if (rt->requestCount == 0)
JS_NOTIFY_REQUEST_DONE(rt);
}
return requestDebit;
}

void
js_RecountRequestsAfterGC(JSRuntime *rt, uint32 requestDebit)
{
while (rt->gcLevel > 0) {
JS_ASSERT(rt->gcThread);
JS_AWAIT_GC_DONE(rt);
}
if (requestDebit != 0)
rt->requestCount += requestDebit;
}

#endif

static JSDHashNumber

@@ -1951,3 +1939,17 @@ JSContext::checkMallocGCPressure(void *p)
}
JS_UNLOCK_GC(runtime);
}

bool
JSContext::isConstructing()
{
#ifdef JS_TRACER
if (JS_ON_TRACE(this)) {
JS_ASSERT(bailExit);
return *bailExit->pc == JSOP_NEW;
}
#endif
JSStackFrame *fp = js_GetTopStackFrame(this);
return fp && (fp->flags & JSFRAME_CONSTRUCTING);
}
js/src/jscntxt.h (297 changed lines)
@@ -90,15 +90,18 @@ js_PurgeGSNCache(JSGSNCache *cache);
#define JS_METER_GSN_CACHE(cx,cnt) GSN_CACHE_METER(&JS_GSN_CACHE(cx), cnt)

/* Forward declarations of nanojit types. */
namespace nanojit
{
class Assembler;
class CodeAlloc;
class Fragment;
template<typename K> struct DefaultHash;
template<typename K, typename V, typename H> class HashMap;
template<typename T> class Seq;
}
namespace nanojit {

class Assembler;
class CodeAlloc;
class Fragment;
template<typename K> struct DefaultHash;
template<typename K, typename V, typename H> class HashMap;
template<typename T> class Seq;

} /* namespace nanojit */

namespace js {

/* Tracer constants. */
static const size_t MONITOR_N_GLOBAL_STATES = 4;

@@ -110,7 +113,6 @@ static const size_t GLOBAL_SLOTS_BUFFER_SIZE = MAX_GLOBAL_SLOTS + 1;

/* Forward declarations of tracer types. */
class VMAllocator;
class TraceRecorder;
class FrameInfoCache;
struct REHashFn;
struct REHashKey;

@@ -120,6 +122,7 @@ struct TreeFragment;
struct InterpState;
template<typename T> class Queue;
typedef Queue<uint16> SlotList;
struct TypeMap;
struct REFragment;
typedef nanojit::HashMap<REHashKey, REFragment*, REHashFn> REHashMap;

@@ -163,7 +166,7 @@ struct InterpState
uintN nativeVpLen;
jsval* nativeVp;

InterpState(JSContext *cx, JSTraceMonitor *tm, TreeFragment *ti,
InterpState(JSContext *cx, TraceMonitor *tm, TreeFragment *ti,
uintN &inlineCallCountp, VMSideExit** innermostNestedGuardp);
~InterpState();
};

@@ -191,12 +194,113 @@ struct GlobalState {
SlotList* globalSlots;
};

/*
* A callstack contains a set of stack frames linked by fp->down. A callstack
* is a member of a JSContext and all of a JSContext's callstacks are kept in a
* list starting at cx->currentCallStack. A callstack may be active or
* suspended. There are zero or one active callstacks for a context and any
* number of suspended contexts. If there is an active context, it is the first
* in the currentCallStack list, |cx->fp != NULL| and the callstack's newest
* (top) stack frame is |cx->fp|. For all other (suspended) callstacks, the
* newest frame is pointed to by suspendedFrame.
*
* While all frames in a callstack are down-linked, not all down-linked frames
* are in the same callstack (e.g., calling js_Execute with |down != cx->fp|
* will create a new frame in a new active callstack).
*/
class CallStack
{
#ifdef DEBUG
/* The context to which this callstack belongs. */
JSContext *cx;
#endif

/* If this callstack is suspended, the top of the callstack. */
JSStackFrame *suspendedFrame;

/* This callstack was suspended by JS_SaveFrameChain. */
bool saved;

/* Links members of the JSContext::currentCallStack list. */
CallStack *previous;

/* The varobj on entry to initialFrame. */
JSObject *initialVarObj;

/* The first frame executed in this callstack. */
JSStackFrame *initialFrame;

public:
CallStack(JSContext *cx)
:
#ifdef DEBUG
cx(cx),
#endif
suspendedFrame(NULL), saved(false), previous(NULL),
initialVarObj(NULL), initialFrame(NULL)
{}

#ifdef DEBUG
bool contains(JSStackFrame *fp);
#endif

void suspend(JSStackFrame *fp) {
JS_ASSERT(fp && !isSuspended() && contains(fp));
suspendedFrame = fp;
}

void resume() {
JS_ASSERT(suspendedFrame);
suspendedFrame = NULL;
}

JSStackFrame *getSuspendedFrame() const {
JS_ASSERT(suspendedFrame);
return suspendedFrame;
}

bool isSuspended() const { return suspendedFrame; }

void setPrevious(CallStack *cs) { previous = cs; }
CallStack *getPrevious() const { return previous; }

void setInitialVarObj(JSObject *o) { initialVarObj = o; }
JSObject *getInitialVarObj() const { return initialVarObj; }

void setInitialFrame(JSStackFrame *f) { initialFrame = f; }
JSStackFrame *getInitialFrame() const { return initialFrame; }

/*
* Saving and restoring is a special case of suspending and resuming
* whereby the active callstack becomes suspended without pushing a new
* active callstack. This means that if a callstack c1 is pushed on top of a
* saved callstack c2, when c1 is popped, c2 must not be made active. In
* the normal case, where c2 is not saved, when c1 is popped, c2 is made
* active. This distinction is indicated by the |saved| flag.
*/

void save(JSStackFrame *fp) {
suspend(fp);
saved = true;
}

void restore() {
saved = false;
resume();
}

bool isSaved() const {
JS_ASSERT_IF(saved, isSuspended());
return saved;
}
};

/*
* Trace monitor. Every JSThread (if JS_THREADSAFE) or JSRuntime (if not
* JS_THREADSAFE) has an associated trace monitor that keeps track of loop
* frequencies for all JavaScript code loaded into that runtime.
*/
struct JSTraceMonitor {
struct TraceMonitor {
/*
* The context currently executing JIT-compiled code on this thread, or
* NULL if none. Among other things, this can in certain cases prevent
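A hypothetical sketch — not code from this commit — of how an internal caller in the style of js_Execute might bracket execution in a new callstack using the class above; RunInNewCallStack, newfp, and varobj are illustrative names, and the frame setup and error paths are omitted:

static void
RunInNewCallStack(JSContext *cx, JSStackFrame *newfp, JSObject *varobj)
{
    js::CallStack cs(cx);
    cs.setInitialFrame(newfp);
    cs.setInitialVarObj(varobj);

    cx->pushCallStack(&cs);   /* suspends the previously active callstack */
    cx->fp = newfp;

    /* ... interpret frames linked from newfp ... */

    cx->fp = newfp->down;     /* unwind to the frame below this callstack */
    cx->popCallStack();       /* resumes the previous callstack, if any */
}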
@@ -254,8 +358,8 @@ struct JSTraceMonitor {

TraceRecorder* recorder;

struct GlobalState globalStates[MONITOR_N_GLOBAL_STATES];
struct TreeFragment* vmfragments[FRAGMENT_TABLE_SIZE];
GlobalState globalStates[MONITOR_N_GLOBAL_STATES];
TreeFragment* vmfragments[FRAGMENT_TABLE_SIZE];
JSDHashTable recordAttempts;

/*

@@ -283,6 +387,11 @@ struct JSTraceMonitor {
*/
REHashMap* reFragments;

// Cached temporary typemap to avoid realloc'ing every time we create one.
// This must be used in only one place at a given time. It must be cleared
// before use.
TypeMap* cachedTempTypeMap;

#ifdef DEBUG
/* Fields needed for fragment/guard profiling. */
nanojit::Seq<nanojit::Fragment*>* branches;

@@ -304,7 +413,7 @@ struct JSTraceMonitor {
bool outOfMemory() const;
};

typedef struct InterpStruct InterpStruct;
} /* namespace js */

/*
* N.B. JS_ON_TRACE(cx) is true if JIT code is on the stack in the current

@@ -402,7 +511,7 @@ struct JSThreadData {

#ifdef JS_TRACER
/* Trace-tree JIT recorder/interpreter state. */
JSTraceMonitor traceMonitor;
js::TraceMonitor traceMonitor;
#endif

/* Lock-free hashed lists of scripts created by eval to garbage-collect. */

@@ -455,6 +564,15 @@ struct JSThread {
*/
ptrdiff_t gcThreadMallocBytes;

/*
* This thread is inside js_GC, either waiting until it can start GC, or
* waiting for GC to finish on another thread. This thread holds no locks;
* other threads may steal titles from it.
*
* Protected by rt->gcLock.
*/
bool gcWaiting;

/*
* Deallocator task for this thread.
*/

@@ -607,7 +725,7 @@ struct JSRuntime {
ptrdiff_t gcMallocBytes;

/* See comments before DelayMarkingChildren in jsgc.cpp. */
JSGCArenaInfo *gcUnmarkedArenaStackTop;
JSGCArena *gcUnmarkedArenaStackTop;
#ifdef DEBUG
size_t gcMarkLaterCount;
#endif

@@ -796,6 +914,8 @@ struct JSRuntime {
JSBackgroundThread *deallocatorThread;
#endif

JSEmptyScope *emptyBlockScope;

/*
* Various metering fields are defined at the end of JSRuntime. In this
* way there is no need to recompile all the code that refers to other

@@ -1066,6 +1186,18 @@ typedef struct JSResolvingEntry {

#define JSRESOLVE_INFER 0xffff /* infer bits from current bytecode */

extern const JSDebugHooks js_NullDebugHooks; /* defined in jsdbgapi.cpp */

/*
* Wraps a stack frame which has been temporarily popped from its call stack
* and needs to be GC-reachable. See JSContext::{push,pop}GCReachableFrame.
*/
struct JSGCReachableFrame
{
JSGCReachableFrame *next;
JSStackFrame *frame;
};

struct JSContext {
/*
* If this flag is set, we were asked to call back the operation callback
@@ -1183,8 +1315,67 @@ struct JSContext {
void *data;
void *data2;

/* GC and thread-safe state. */
JSStackFrame *dormantFrameChain; /* dormant stack frame to scan */
/* Linked list of frames temporarily popped from their chain. */
JSGCReachableFrame *reachableFrames;

void pushGCReachableFrame(JSGCReachableFrame &gcrf, JSStackFrame *f) {
gcrf.next = reachableFrames;
gcrf.frame = f;
reachableFrames = &gcrf;
}

void popGCReachableFrame() {
reachableFrames = reachableFrames->next;
}

private:
friend void js_TraceContext(JSTracer *, JSContext *);

/* Linked list of callstacks. See CallStack. */
js::CallStack *currentCallStack;

public:
/* Assuming there is an active callstack, return it. */
js::CallStack *activeCallStack() const {
JS_ASSERT(currentCallStack && !currentCallStack->isSaved());
return currentCallStack;
}

/* Add the given callstack to the list as the new active callstack. */
void pushCallStack(js::CallStack *newcs) {
if (fp)
currentCallStack->suspend(fp);
else
JS_ASSERT_IF(currentCallStack, currentCallStack->isSaved());
newcs->setPrevious(currentCallStack);
currentCallStack = newcs;
JS_ASSERT(!newcs->isSuspended() && !newcs->isSaved());
}

/* Remove the active callstack and make the next callstack active. */
void popCallStack() {
JS_ASSERT(!currentCallStack->isSuspended() && !currentCallStack->isSaved());
currentCallStack = currentCallStack->getPrevious();
if (currentCallStack && !currentCallStack->isSaved()) {
JS_ASSERT(fp);
currentCallStack->resume();
}
}

/* Mark the top callstack as suspended, without pushing a new one. */
void saveActiveCallStack() {
JS_ASSERT(fp && currentCallStack && !currentCallStack->isSuspended());
currentCallStack->save(fp);
fp = NULL;
}

/* Undoes calls to suspendTopCallStack. */
void restoreCallStack() {
JS_ASSERT(!fp && currentCallStack && currentCallStack->isSuspended());
fp = currentCallStack->getSuspendedFrame();
currentCallStack->restore();
}

#ifdef JS_THREADSAFE
JSThread *thread;
jsrefcount requestDepth;
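This commit calls the push/pop pair above manually (see JS_GetFrameThis later in the diff); a hypothetical RAII wrapper over the same API would look like this:

class AutoGCReachableFrame {
    JSContext *cx;
    JSGCReachableFrame entry;
  public:
    AutoGCReachableFrame(JSContext *cx, JSStackFrame *fp) : cx(cx) {
        cx->pushGCReachableFrame(entry, fp);  /* link entry onto cx->reachableFrames */
    }
    ~AutoGCReachableFrame() {
        cx->popGCReachableFrame();            /* unlink; entry lives on our C stack */
    }
};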
@@ -1222,8 +1413,8 @@ struct JSContext {
* called back into native code via a _FAIL builtin and has not yet bailed,
* else garbage (NULL in debug builds).
*/
InterpState *interpState;
VMSideExit *bailExit;
js::InterpState *interpState;
js::VMSideExit *bailExit;

/*
* True if traces may be executed. Invariant: The value of jitEnabled is

@@ -1242,8 +1433,9 @@ struct JSContext {
void updateJITEnabled() {
#ifdef JS_TRACER
jitEnabled = ((options & JSOPTION_JIT) &&
!runtime->debuggerInhibitsJIT() &&
debugHooks == &runtime->globalDebugHooks);
(debugHooks == &js_NullDebugHooks ||
(debugHooks == &runtime->globalDebugHooks &&
!runtime->debuggerInhibitsJIT())));
#endif
}

@@ -1393,6 +1585,8 @@ struct JSContext {
this->free(p);
}

bool isConstructing();

private:

/*

@@ -1404,6 +1598,20 @@ private:
void checkMallocGCPressure(void *p);
};

JS_ALWAYS_INLINE JSObject *
JSStackFrame::varobj(js::CallStack *cs)
{
JS_ASSERT(cs->contains(this));
return fun ? callobj : cs->getInitialVarObj();
}

JS_ALWAYS_INLINE JSObject *
JSStackFrame::varobj(JSContext *cx)
{
JS_ASSERT(cx->activeCallStack()->contains(this));
return fun ? callobj : cx->activeCallStack()->getInitialVarObj();
}

#ifdef JS_THREADSAFE
# define JS_THREAD_ID(cx) ((cx)->thread ? (cx)->thread->id : 0)
#endif

@@ -1464,11 +1672,6 @@ class JSAutoTempValueRooter
JSContext *mContext;

private:
#ifndef AIX
static void *operator new(size_t);
static void operator delete(void *, size_t);
#endif

JSTempValueRooter mTvr;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};

@@ -1702,7 +1905,7 @@ js_NextActiveContext(JSRuntime *, JSContext *);
/*
* Count the number of contexts entered requests on the current thread.
*/
uint32
extern uint32
js_CountThreadRequests(JSContext *cx);

/*

@@ -1714,24 +1917,6 @@ js_CountThreadRequests(JSContext *cx);
extern void
js_WaitForGC(JSRuntime *rt);

/*
* If we're in one or more requests (possibly on more than one context)
* running on the current thread, indicate, temporarily, that all these
* requests are inactive so a possible GC can proceed on another thread.
* This function returns the number of discounted requests. The number must
* be passed later to js_ActivateRequestAfterGC to reactivate the requests.
*
* This function must be called with the GC lock held.
*/
uint32
js_DiscountRequestsForGC(JSContext *cx);

/*
* This function must be called with the GC lock held.
*/
void
js_RecountRequestsAfterGC(JSRuntime *rt, uint32 requestDebit);

#else /* !JS_THREADSAFE */

# define js_WaitForGC(rt) ((void) 0)

@@ -1917,6 +2102,8 @@ js_GetCurrentBytecodePC(JSContext* cx);
extern bool
js_CurrentPCIsInImacro(JSContext *cx);

namespace js {

#ifdef JS_TRACER
/*
* Reconstruct the JS stack and clear cx->tracecx. We must be currently in a

@@ -1926,27 +2113,27 @@ js_CurrentPCIsInImacro(JSContext *cx);
* Implemented in jstracer.cpp.
*/
JS_FORCES_STACK JS_FRIEND_API(void)
js_DeepBail(JSContext *cx);
DeepBail(JSContext *cx);
#endif

static JS_FORCES_STACK JS_INLINE void
js_LeaveTrace(JSContext *cx)
LeaveTrace(JSContext *cx)
{
#ifdef JS_TRACER
if (JS_ON_TRACE(cx))
js_DeepBail(cx);
DeepBail(cx);
#endif
}

static JS_INLINE void
js_LeaveTraceIfGlobalObject(JSContext *cx, JSObject *obj)
LeaveTraceIfGlobalObject(JSContext *cx, JSObject *obj)
{
if (!obj->fslots[JSSLOT_PARENT])
js_LeaveTrace(cx);
LeaveTrace(cx);
}

static JS_INLINE JSBool
js_CanLeaveTrace(JSContext *cx)
CanLeaveTrace(JSContext *cx)
{
JS_ASSERT(JS_ON_TRACE(cx));
#ifdef JS_TRACER

@@ -1956,6 +2143,8 @@ js_CanLeaveTrace(JSContext *cx)
#endif
}

} /* namespace js */

/*
* Get the current cx->fp, first lazily instantiating stack frames if needed.
* (Do not access cx->fp directly except in JS_REQUIRES_STACK code.)

@@ -1965,7 +2154,7 @@ js_CanLeaveTrace(JSContext *cx)
static JS_FORCES_STACK JS_INLINE JSStackFrame *
js_GetTopStackFrame(JSContext *cx)
{
js_LeaveTrace(cx);
js::LeaveTrace(cx);
return cx->fp;
}
@@ -68,6 +68,8 @@

#include "jsautooplen.h"

using namespace js;

typedef struct JSTrap {
JSCList links;
JSScript *script;

@@ -367,7 +369,7 @@ LeaveTraceRT(JSRuntime *rt)
JS_UNLOCK_GC(rt);

if (cx)
js_LeaveTrace(cx);
LeaveTrace(cx);
}
#endif

@@ -1219,18 +1221,16 @@ JS_GetFrameCallObject(JSContext *cx, JSStackFrame *fp)
JS_PUBLIC_API(JSObject *)
JS_GetFrameThis(JSContext *cx, JSStackFrame *fp)
{
JSStackFrame *afp;

if (fp->flags & JSFRAME_COMPUTED_THIS)
return JSVAL_TO_OBJECT(fp->thisv); /* JSVAL_COMPUTED_THIS invariant */

/* js_ComputeThis gets confused if fp != cx->fp, so set it aside. */
if (js_GetTopStackFrame(cx) != fp) {
afp = cx->fp;
JSStackFrame *afp = js_GetTopStackFrame(cx);
JSGCReachableFrame reachable;
if (afp != fp) {
if (afp) {
afp->dormantNext = cx->dormantFrameChain;
cx->dormantFrameChain = afp;
cx->fp = fp;
cx->pushGCReachableFrame(reachable, afp);
}
} else {
afp = NULL;

@@ -1241,8 +1241,7 @@ JS_GetFrameThis(JSContext *cx, JSStackFrame *fp)

if (afp) {
cx->fp = afp;
cx->dormantFrameChain = afp->dormantNext;
afp->dormantNext = NULL;
cx->popGCReachableFrame();
}

return JSVAL_TO_OBJECT(fp->thisv);

@@ -1672,7 +1671,7 @@ JS_GetObjectTotalSize(JSContext *cx, JSObject *obj)
}
if (OBJ_IS_NATIVE(obj)) {
scope = OBJ_SCOPE(obj);
if (scope->owned()) {
if (!scope->isSharedEmpty()) {
nbytes += sizeof *scope;
nbytes += SCOPE_CAPACITY(scope) * sizeof(JSScopeProperty *);
}

@@ -1828,12 +1827,14 @@ JS_GetGlobalDebugHooks(JSRuntime *rt)
return &rt->globalDebugHooks;
}

const JSDebugHooks js_NullDebugHooks = {};

JS_PUBLIC_API(JSDebugHooks *)
JS_SetContextDebugHooks(JSContext *cx, const JSDebugHooks *hooks)
{
JS_ASSERT(hooks);
if (hooks != &cx->runtime->globalDebugHooks)
js_LeaveTrace(cx);
if (hooks != &cx->runtime->globalDebugHooks && hooks != &js_NullDebugHooks)
LeaveTrace(cx);

#ifdef JS_TRACER
JS_LOCK_GC(cx->runtime);

@@ -1847,6 +1848,12 @@ JS_SetContextDebugHooks(JSContext *cx, const JSDebugHooks *hooks)
return old;
}

JS_PUBLIC_API(JSDebugHooks *)
JS_ClearContextDebugHooks(JSContext *cx)
{
return JS_SetContextDebugHooks(cx, &js_NullDebugHooks);
}

#ifdef MOZ_SHARK

#include <CHUD/CHUD.h>

@@ -427,6 +427,10 @@ JS_GetGlobalDebugHooks(JSRuntime *rt);
extern JS_PUBLIC_API(JSDebugHooks *)
JS_SetContextDebugHooks(JSContext *cx, const JSDebugHooks *hooks);

/* Disable debug hooks for this context. */
extern JS_PUBLIC_API(JSDebugHooks *)
JS_ClearContextDebugHooks(JSContext *cx);

#ifdef MOZ_SHARK

extern JS_PUBLIC_API(JSBool)
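A sketch of how an embedder might use the new entry point when creating a context that should start with no debug hooks, as nsDOMThreadService::CreateJSContext does earlier in this commit; the stack-chunk size and error handling here are illustrative:

static JSContext *
NewHookFreeContext(JSRuntime *rt)
{
    JSContext *cx = JS_NewContext(rt, 8192);
    if (!cx)
        return NULL;

    /* Point cx at js_NullDebugHooks so no debugger hooks fire and the
       tracing JIT does not need to account for them. */
    JS_ClearContextDebugHooks(cx);
    return cx;
}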
@@ -1280,6 +1280,19 @@ JSTreeContext::ensureSharpSlots()
return true;
}

bool
JSTreeContext::skipSpansGenerator(unsigned skip)
{
JSTreeContext *tc = this;
for (unsigned i = 0; i < skip; ++i, tc = tc->parent) {
if (!tc)
return false;
if (tc->flags & TCF_FUN_IS_GENERATOR)
return true;
}
return false;
}

void
js_PushStatement(JSTreeContext *tc, JSStmtInfo *stmt, JSStmtType type,
ptrdiff_t top)

@@ -2086,7 +2099,7 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
JSObject *scopeobj = (cg->flags & TCF_IN_FUNCTION)
? STOBJ_GET_PARENT(FUN_OBJECT(cg->fun))
: cg->scopeChain;
if (scopeobj != caller->varobj)
if (scopeobj != caller->varobj(cx))
return JS_TRUE;

/*

@@ -2103,7 +2116,10 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
* defeats the display optimization to static link searching used
* by JSOP_{GET,CALL}UPVAR.
*/
if (cg->flags & TCF_FUN_IS_GENERATOR)
JSFunction *fun = cg->compiler->callerFrame->fun;
JS_ASSERT(cg->staticLevel >= fun->u.i.script->staticLevel);
unsigned skip = cg->staticLevel - fun->u.i.script->staticLevel;
if (cg->skipSpansGenerator(skip))
return JS_TRUE;

return MakeUpvarForEval(pn, cg);

@@ -2178,7 +2194,7 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)

JSCodeGenerator *evalcg = (JSCodeGenerator *) tc;
JS_ASSERT(evalcg->flags & TCF_COMPILE_N_GO);
JS_ASSERT(caller->fun && caller->varobj == evalcg->scopeChain);
JS_ASSERT(caller->fun && caller->varobj(cx) == evalcg->scopeChain);

/*
* Don't generate upvars on the left side of a for loop. See

@@ -2235,7 +2251,7 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
* defeats the display optimization to static link searching used
* by JSOP_{GET,CALL}UPVAR.
*/
if (cg->flags & TCF_FUN_IS_GENERATOR)
if (cg->skipSpansGenerator(skip))
return JS_TRUE;

op = JSOP_GETUPVAR;

@@ -3156,6 +3172,10 @@ EmitSwitch(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn,
pn3->pn_val = JSVAL_FALSE;
break;
}
if (pn4->pn_op == JSOP_NULL) {
pn3->pn_val = JSVAL_NULL;
break;
}
/* FALL THROUGH */
default:
switchOp = JSOP_CONDSWITCH;

@@ -3550,7 +3570,7 @@ js_EmitFunctionScript(JSContext *cx, JSCodeGenerator *cg, JSParseNode *body)
CG_SWITCH_TO_PROLOG(cg);
JS_ASSERT(CG_NEXT(cg) == CG_BASE(cg));
if (js_Emit1(cx, cg, JSOP_GENERATOR) < 0)
return JS_FALSE;
return false;
CG_SWITCH_TO_MAIN(cg);
} else {
/*

@@ -3558,7 +3578,18 @@ js_EmitFunctionScript(JSContext *cx, JSCodeGenerator *cg, JSParseNode *body)
* are not yet traced and both want to be the first instruction.
*/
if (js_Emit1(cx, cg, JSOP_TRACE) < 0)
return JS_FALSE;
return false;
}

if (cg->flags & TCF_FUN_UNBRAND_THIS) {
if (js_Emit1(cx, cg, JSOP_THIS) < 0)
return false;
if (js_Emit1(cx, cg, JSOP_UNBRAND) < 0)
return false;
if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
return false;
if (js_Emit1(cx, cg, JSOP_POP) < 0)
return false;
}

return js_EmitTree(cx, cg, body) &&

@@ -5616,6 +5647,21 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
}
#endif
if (op != JSOP_NOP) {
/*
* Specialize JSOP_SETPROP to JSOP_SETMETHOD to defer or
* avoid null closure cloning. Do this only for assignment
* statements that are not completion values wanted by a
* script evaluator, to ensure that the joined function
* can't escape directly.
*/
if (!wantval &&
PN_TYPE(pn2) == TOK_ASSIGN &&
PN_OP(pn2) == JSOP_NOP &&
PN_OP(pn2->pn_left) == JSOP_SETPROP &&
PN_OP(pn2->pn_right) == JSOP_LAMBDA &&
pn2->pn_right->pn_funbox->joinable()) {
pn2->pn_left->pn_op = JSOP_SETMETHOD;
}
if (!js_EmitTree(cx, cg, pn2))
return JS_FALSE;
if (js_Emit1(cx, cg, op) < 0)

@@ -6097,8 +6143,8 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
#endif
pn2 = pn->pn_kid;

/* See js_FoldConstants for why this assertion holds true. */
JS_ASSERT_IF(op == JSOP_TYPEOF, pn2->pn_type == TOK_NAME);
if (op == JSOP_TYPEOF && pn2->pn_type != TOK_NAME)
op = JSOP_TYPEOFEXPR;

oldflags = cg->flags;
cg->flags &= ~TCF_IN_FOR_INIT;

@@ -6583,7 +6629,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
EMIT_UINT16_IMM_OP(JSOP_NEWARRAY, atomIndex);
break;

case TOK_RC:
case TOK_RC: {
#if JS_HAS_SHARP_VARS
sharpnum = -1;
do_emit_object:

@@ -6606,6 +6652,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
if (!EmitNewInit(cx, cg, JSProto_Object, pn, sharpnum))
return JS_FALSE;

uintN methodInits = 0, slowMethodInits = 0;
for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
/* Emit an index for t[2] for later consumption by JSOP_INITELEM. */
pn3 = pn2->pn_left;

@@ -6638,22 +6685,40 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
if (!ale)
return JS_FALSE;

JSOp initOp = (PN_OP(pn2->pn_right) == JSOP_LAMBDA &&
!(pn2->pn_right->pn_funbox->tcflags
& (TCF_FUN_USES_ARGUMENTS | TCF_FUN_USES_OWN_NAME))
/* Check whether we can optimize to JSOP_INITMETHOD. */
JSParseNode *init = pn2->pn_right;
bool lambda = PN_OP(init) == JSOP_LAMBDA;
if (lambda)
++methodInits;
if (
#if JS_HAS_GETTER_SETTER
&& op != JSOP_GETTER && op != JSOP_SETTER
op == JSOP_INITPROP &&
#else
JS_ASSERT(op == JSOP_INITPROP),
#endif
)
? JSOP_INITMETHOD
: JSOP_INITPROP;
EMIT_INDEX_OP(initOp, ALE_INDEX(ale));
lambda &&
init->pn_funbox->joinable())
{
op = JSOP_INITMETHOD;
pn2->pn_op = uint8(op);
} else {
op = JSOP_INITPROP;
if (lambda)
++slowMethodInits;
}

EMIT_INDEX_OP(op, ALE_INDEX(ale));
}
}

if (cg->funbox && cg->funbox->shouldUnbrand(methodInits, slowMethodInits)) {
if (js_Emit1(cx, cg, JSOP_UNBRAND) < 0)
return JS_FALSE;
}
if (!EmitEndInit(cx, cg, pn->pn_count))
return JS_FALSE;
break;
}

#if JS_HAS_SHARP_VARS
case TOK_DEFSHARP:
@@ -236,6 +236,11 @@ struct JSTreeContext { /* tree context for semantic checks */
*/
int sharpSlotBase;
bool ensureSharpSlots();

// Return true if there is a generator function within |skip| lexical scopes
// (going upward) from this context's lexical scope. Always return true if
// this context is itself a generator.
bool skipSpansGenerator(unsigned skip);
};

#define TCF_COMPILING 0x01 /* JSTreeContext is JSCodeGenerator */

@@ -282,18 +287,26 @@ struct JSTreeContext { /* tree context for semantic checks */
#define TCF_NEED_MUTABLE_SCRIPT 0x20000

/*
* This function/global/eval code body contained a Use Strict
* Directive. Treat certain strict warnings as errors, and forbid
* the use of 'with'. See also TSF_STRICT_MODE_CODE,
* JSScript::strictModeCode, and JSREPORT_STRICT_ERROR.
* This function/global/eval code body contained a Use Strict Directive. Treat
* certain strict warnings as errors, and forbid the use of 'with'. See also
* TSF_STRICT_MODE_CODE, JSScript::strictModeCode, and JSREPORT_STRICT_ERROR.
*/
#define TCF_STRICT_MODE_CODE 0x40000
#define TCF_STRICT_MODE_CODE 0x40000

/* Function has parameter named 'eval'. */
#define TCF_FUN_PARAM_EVAL 0x80000
#define TCF_FUN_PARAM_EVAL 0x80000

/*
* Flags to propagate out of the blocks.
* Flag signifying that the current function seems to be a constructor that
* sets this.foo to define "methods", at least one of which can't be a null
* closure, so we should avoid over-specializing property cache entries and
* trace inlining guards to method function object identity, which will vary
* per instance.
*/
#define TCF_FUN_UNBRAND_THIS 0x100000

/*
* Flags to check for return; vs. return expr; in a function.
*/
#define TCF_RETURN_FLAGS (TCF_RETURN_EXPR | TCF_RETURN_VOID)
@@ -81,6 +81,8 @@

#include "jsatominlines.h"

using namespace js;

static inline void
SetOverriddenArgsLength(JSObject *obj)
{

@@ -113,7 +115,7 @@ GetArgsLength(JSObject *obj)
}

static inline void
SetArgsPrivateNative(JSObject *argsobj, js_ArgsPrivateNative *apn)
SetArgsPrivateNative(JSObject *argsobj, ArgsPrivateNative *apn)
{
JS_ASSERT(STOBJ_GET_CLASS(argsobj) == &js_ArgumentsClass);
uintptr_t p = (uintptr_t) apn;

@@ -227,7 +229,9 @@ js_GetArgsObject(JSContext *cx, JSStackFrame *fp)
* We must be in a function activation; the function must be lightweight
* or else fp must have a variable object.
*/
JS_ASSERT(fp->fun && (!(fp->fun->flags & JSFUN_HEAVYWEIGHT) || fp->varobj));
JS_ASSERT(fp->fun);
JS_ASSERT_IF(fp->fun->flags & JSFUN_HEAVYWEIGHT,
fp->varobj(js_ContainingCallStack(cx, fp)));

/* Skip eval and debugger frames. */
while (fp->flags & JSFRAME_SPECIAL)

@@ -281,7 +285,7 @@ js_PutArgsObject(JSContext *cx, JSStackFrame *fp)
#ifdef JS_TRACER
JSObject * JS_FASTCALL
js_Arguments(JSContext *cx, JSObject *parent, uint32 argc, JSObject *callee,
double *argv, js_ArgsPrivateNative *apn)
double *argv, ArgsPrivateNative *apn)
{
JSObject *argsobj = NewArguments(cx, parent, argc, callee);
if (!argsobj)

@@ -509,11 +513,11 @@ ArgGetter(JSContext *cx, JSObject *obj, jsval idval, jsval *vp)
uintN arg = uintN(JSVAL_TO_INT(idval));
if (arg < GetArgsLength(obj)) {
#ifdef JS_TRACER
js_ArgsPrivateNative *argp = js_GetArgsPrivateNative(obj);
ArgsPrivateNative *argp = js_GetArgsPrivateNative(obj);
if (argp) {
if (js_NativeToValue(cx, *vp, argp->typemap()[arg], &argp->argv[arg]))
if (NativeToValue(cx, *vp, argp->typemap()[arg], &argp->argv[arg]))
return true;
js_LeaveTrace(cx);
LeaveTrace(cx);
return false;
}
#endif

@@ -562,7 +566,7 @@ ArgSetter(JSContext *cx, JSObject *obj, jsval idval, jsval *vp)
// For simplicity, we just leave trace, since this is presumably not
// a common operation.
if (JS_ON_TRACE(cx)) {
js_DeepBail(cx);
DeepBail(cx);
return false;
}
#endif

@@ -754,7 +758,7 @@ CheckForEscapingClosure(JSContext *cx, JSObject *obj, jsval *vp)
* still has an active stack frame associated with it.
*/
if (fun->needsWrapper()) {
js_LeaveTrace(cx);
LeaveTrace(cx);

JSStackFrame *fp = (JSStackFrame *) obj->getPrivate();
if (fp) {

@@ -779,6 +783,17 @@ CalleeGetter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
return CheckForEscapingClosure(cx, obj, vp);
}

static JSObject *
NewCallObject(JSContext *cx, JSFunction *fun, JSObject *scopeChain)
{
JSObject *callobj = js_NewObjectWithGivenProto(cx, &js_CallClass, NULL, scopeChain);
if (!callobj ||
!js_EnsureReservedSlots(cx, callobj, fun->countArgsAndVars())) {
return NULL;
}
return callobj;
}

JSObject *
js_GetCallObject(JSContext *cx, JSStackFrame *fp)
{

@@ -823,11 +838,9 @@ js_GetCallObject(JSContext *cx, JSStackFrame *fp)
}
}

callobj = js_NewObjectWithGivenProto(cx, &js_CallClass, NULL, fp->scopeChain);
if (!callobj ||
!js_EnsureReservedSlots(cx, callobj, fp->fun->countArgsAndVars())) {
callobj = NewCallObject(cx, fp->fun, fp->scopeChain);
if (!callobj)
return NULL;
}

callobj->setPrivate(fp);
JS_ASSERT(fp->argv);

@@ -840,10 +853,22 @@ js_GetCallObject(JSContext *cx, JSStackFrame *fp)
* variables object.
*/
fp->scopeChain = callobj;
fp->varobj = callobj;
return callobj;
}

JSObject * JS_FASTCALL
js_CreateCallObjectOnTrace(JSContext *cx, JSFunction *fun, JSObject *callee, JSObject *scopeChain)
{
JS_ASSERT(!js_IsNamedLambda(fun));
JSObject *callobj = NewCallObject(cx, fun, scopeChain);
if (!callobj)
return NULL;
STOBJ_SET_SLOT(callobj, JSSLOT_CALLEE, OBJECT_TO_JSVAL(callee));
return callobj;
}

JS_DEFINE_CALLINFO_4(extern, OBJECT, js_CreateCallObjectOnTrace, CONTEXT, FUNCTION, OBJECT, OBJECT, 0, 0)

JSFunction *
js_GetCallObjectFunction(JSObject *obj)
{

@@ -859,6 +884,13 @@ js_GetCallObjectFunction(JSObject *obj)
return GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(v));
}

inline static void
CopyValuesToCallObject(JSObject *callobj, int nargs, jsval *argv, int nvars, jsval *slots)
{
memcpy(callobj->dslots, argv, nargs * sizeof(jsval));
memcpy(callobj->dslots + nargs, slots, nvars * sizeof(jsval));
}

void
js_PutCallObject(JSContext *cx, JSStackFrame *fp)
{

@@ -885,15 +917,11 @@ js_PutCallObject(JSContext *cx, JSStackFrame *fp)
if (n != 0) {
JS_ASSERT(STOBJ_NSLOTS(callobj) >= JS_INITIAL_NSLOTS + n);
n += JS_INITIAL_NSLOTS;
JS_LOCK_OBJ(cx, callobj);
memcpy(callobj->dslots, fp->argv, fun->nargs * sizeof(jsval));
memcpy(callobj->dslots + fun->nargs, fp->slots,
fun->u.i.nvars * sizeof(jsval));
JS_UNLOCK_OBJ(cx, callobj);
CopyValuesToCallObject(callobj, fun->nargs, fp->argv, fun->u.i.nvars, fp->slots);
}

/* Clear private pointers to fp, which is about to go away (js_Invoke). */
if ((fun->flags & JSFUN_LAMBDA) && fun->atom) {
if (js_IsNamedLambda(fun)) {
JSObject *env = STOBJ_GET_PARENT(callobj);

JS_ASSERT(STOBJ_GET_CLASS(env) == &js_DeclEnvClass);

@@ -905,6 +933,22 @@ js_PutCallObject(JSContext *cx, JSStackFrame *fp)
fp->callobj = NULL;
}

JSBool JS_FASTCALL
js_PutCallObjectOnTrace(JSContext *cx, JSObject *scopeChain, uint32 nargs, jsval *argv,
uint32 nvars, jsval *slots)
{
JS_ASSERT(scopeChain->hasClass(&js_CallClass));
JS_ASSERT(!scopeChain->getPrivate());

uintN n = nargs + nvars;
if (n != 0)
CopyValuesToCallObject(scopeChain, nargs, argv, nvars, slots);

return true;
}

JS_DEFINE_CALLINFO_6(extern, BOOL, js_PutCallObjectOnTrace, CONTEXT, OBJECT, UINT32, JSVALPTR, UINT32, JSVALPTR, 0, 0)

static JSBool
call_enumerate(JSContext *cx, JSObject *obj)
{

@@ -1896,7 +1940,7 @@ js_fun_call(JSContext *cx, uintN argc, jsval *vp)
void *mark;
JSBool ok;

js_LeaveTrace(cx);
LeaveTrace(cx);

obj = JS_THIS_OBJECT(cx, vp);
if (!obj || !obj->defaultValue(cx, JSTYPE_FUNCTION, &vp[1]))

@@ -1964,7 +2008,7 @@ js_fun_apply(JSContext *cx, uintN argc, jsval *vp)
return js_fun_call(cx, argc, vp);
}

js_LeaveTrace(cx);
LeaveTrace(cx);

obj = JS_THIS_OBJECT(cx, vp);
if (!obj || !obj->defaultValue(cx, JSTYPE_FUNCTION, &vp[1]))

@@ -2403,7 +2447,7 @@ js_NewFunction(JSContext *cx, JSObject *funobj, JSNative native, uintN nargs,
return fun;
}

JSObject *
JSObject * JS_FASTCALL
js_CloneFunctionObject(JSContext *cx, JSFunction *fun, JSObject *parent)
{
/*

@@ -2417,6 +2461,8 @@ js_CloneFunctionObject(JSContext *cx, JSFunction *fun, JSObject *parent)
return clone;
}

JS_DEFINE_CALLINFO_3(extern, OBJECT, js_CloneFunctionObject, CONTEXT, FUNCTION, OBJECT, 0, 0)

/*
* Create a new flat closure, but don't initialize the imported upvar
* values. The tracer calls this function and then initializes the upvar
@ -163,6 +163,11 @@ struct JSFunction : public JSObject {
|
|||
bool optimizedClosure() const { return FUN_KIND(this) > JSFUN_INTERPRETED; }
|
||||
bool needsWrapper() const { return FUN_NULL_CLOSURE(this) && u.i.skipmin != 0; }
|
||||
|
||||
uintN countVars() const {
|
||||
JS_ASSERT(FUN_INTERPRETED(this));
|
||||
return u.i.nvars;
|
||||
}
|
||||
|
||||
uintN countArgsAndVars() const {
|
||||
JS_ASSERT(FUN_INTERPRETED(this));
|
||||
return nargs + u.i.nvars;
|
||||
|
@ -247,14 +252,14 @@ js_IsInternalFunctionObject(JSObject *funobj)
|
|||
return funobj == fun && (fun->flags & JSFUN_LAMBDA) && !funobj->getParent();
|
||||
}
|
||||
|
||||
struct js_ArgsPrivateNative;
|
||||
namespace js { struct ArgsPrivateNative; }
|
||||
|
||||
inline js_ArgsPrivateNative *
|
||||
inline js::ArgsPrivateNative *
|
||||
js_GetArgsPrivateNative(JSObject *argsobj)
|
||||
{
|
||||
JS_ASSERT(STOBJ_GET_CLASS(argsobj) == &js_ArgumentsClass);
|
||||
uintptr_t p = (uintptr_t) argsobj->getPrivate();
|
||||
return (js_ArgsPrivateNative *) (p & 2 ? p & ~2 : NULL);
|
||||
return (js::ArgsPrivateNative *) (p & 2 ? p & ~2 : NULL);
|
||||
}
|
||||
|
||||
extern JSObject *
|
||||
|
@ -273,7 +278,7 @@ js_TraceFunction(JSTracer *trc, JSFunction *fun);
|
|||
extern void
|
||||
js_FinalizeFunction(JSContext *cx, JSFunction *fun);
|
||||
|
||||
extern JSObject *
|
||||
extern JSObject * JS_FASTCALL
|
||||
js_CloneFunctionObject(JSContext *cx, JSFunction *fun, JSObject *parent);
|
||||
|
||||
extern JS_REQUIRES_STACK JSObject *
|
||||
|
@ -310,9 +315,16 @@ js_ReportIsNotFunction(JSContext *cx, jsval *vp, uintN flags);
|
|||
extern JSObject *
|
||||
js_GetCallObject(JSContext *cx, JSStackFrame *fp);
|
||||
|
||||
extern JSObject * JS_FASTCALL
|
||||
js_CreateCallObjectOnTrace(JSContext *cx, JSFunction *fun, JSObject *callee, JSObject *scopeChain);
|
||||
|
||||
extern void
|
||||
js_PutCallObject(JSContext *cx, JSStackFrame *fp);
|
||||
|
||||
extern JSBool JS_FASTCALL
|
||||
js_PutCallObjectOnTrace(JSContext *cx, JSObject *scopeChain, uint32 nargs, jsval *argv,
|
||||
uint32 nvars, jsval *slots);
|
||||
|
||||
extern JSFunction *
|
||||
js_GetCallObjectFunction(JSObject *obj);
|
||||
|
||||
|
@ -359,6 +371,9 @@ js_GetArgsObject(JSContext *cx, JSStackFrame *fp);
|
|||
extern void
|
||||
js_PutArgsObject(JSContext *cx, JSStackFrame *fp);
|
||||
|
||||
inline bool
|
||||
js_IsNamedLambda(JSFunction *fun) { return (fun->flags & JSFUN_LAMBDA) && fun->atom; }
|
||||
|
||||
/*
|
||||
* Reserved slot structure for Arguments objects:
|
||||
*
|
||||
|
|
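Aside: with NewCallObject factored out, the interpreter path (js_GetCallObject) and the trace path (js_CreateCallObjectOnTrace) allocate Call objects identically and cannot drift apart. A minimal sketch of the shared-helper pattern, using only the functions shown above (illustrative, not part of the patch):

    // Sketch only: both entry points funnel through the one allocator.
    JSObject *FromInterpreter(JSContext *cx, JSStackFrame *fp) {
        return NewCallObject(cx, fp->fun, fp->scopeChain);       // interpreter
    }
    JSObject *FromTrace(JSContext *cx, JSFunction *fun, JSObject *callee,
                        JSObject *scopeChain) {
        JSObject *callobj = NewCallObject(cx, fun, scopeChain);  // trace
        if (callobj)
            STOBJ_SET_SLOT(callobj, JSSLOT_CALLEE, OBJECT_TO_JSVAL(callee));
        return callobj;
    }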
js/src/jsgc.cpp (1300 changed lines)
The diff for this file is not shown because of its large size.
@@ -303,20 +303,20 @@ js_NewGCXML(JSContext *cx)
}
#endif

-struct JSGCArenaInfo;
+struct JSGCArena;
struct JSGCChunkInfo;

struct JSGCArenaList {
-    JSGCArenaInfo *head;    /* list start */
-    JSGCArenaInfo *cursor;  /* arena with free things */
+    JSGCArena *head;        /* list start */
+    JSGCArena *cursor;      /* arena with free things */
    uint32 thingKind;       /* one of JSFinalizeGCThingKind */
    uint32 thingSize;       /* size of things to allocate on this list */
};

struct JSGCDoubleArenaList {
-    JSGCArenaInfo *head;    /* list start */
-    JSGCArenaInfo *cursor;  /* next arena with free cells */
+    JSGCArena *head;        /* list start */
+    JSGCArena *cursor;      /* next arena with free cells */
};

struct JSGCFreeLists {
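Aside: the rename from JSGCArenaInfo to JSGCArena keeps the list shape intact — head anchors the list and cursor marks the first arena that may still have free things. A hedged sketch of how an allocator might walk such a list (the `next` link and `hasFreeThings()` test are hypothetical, only the two struct fields above are from the patch):

    // Sketch only: scan from the cursor so exhausted arenas before it are
    // never revisited; assumes hypothetical JSGCArena::next/hasFreeThings().
    JSGCArena *FindArenaWithSpace(JSGCArenaList *list) {
        for (JSGCArena *a = list->cursor; a; a = a->next) {
            if (a->hasFreeThings()) {
                list->cursor = a;   // remember where to resume next time
                return a;
            }
        }
        return NULL;                // caller must allocate a fresh arena
    }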
@@ -75,6 +75,7 @@
#include "jsvector.h"

#include "jsatominlines.h"
+#include "jsobjinlines.h"
#include "jsscopeinlines.h"
#include "jsscriptinlines.h"
#include "jsstrinlines.h"

@@ -89,6 +90,8 @@

#include "jsautooplen.h"

+using namespace js;
+
/* jsinvoke_cpp___ indicates inclusion from jsinvoke.cpp. */
#if !JS_LONE_INTERPRET ^ defined jsinvoke_cpp___

@@ -204,7 +207,8 @@ js_FillPropertyCache(JSContext *cx, JSObject *obj,
            break;
        }

-        if (SPROP_HAS_STUB_GETTER(sprop) &&
+        if (!scope->generic() &&
+            SPROP_HAS_STUB_GETTER(sprop) &&
            SPROP_HAS_VALID_SLOT(sprop, scope)) {
            v = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot);
            if (VALUE_IS_FUNCTION(cx, v)) {

@@ -280,7 +284,7 @@ js_FillPropertyCache(JSContext *cx, JSObject *obj,
         * that on the third and subsequent iterations the cache will
         * be hit because the shape is no longer updated.
         */
-        JS_ASSERT(scope->owned());
+        JS_ASSERT(!scope->isSharedEmpty());
        if (sprop->parent) {
            kshape = sprop->parent->shape;
        } else {

@@ -730,7 +734,7 @@ js_GetScopeChain(JSContext *cx, JSStackFrame *fp)
    }

    /* We don't handle cloning blocks on trace. */
-    js_LeaveTrace(cx);
+    LeaveTrace(cx);

    /*
     * We have one or more lexical scopes to reflect into fp->scopeChain, so

@@ -892,7 +896,6 @@ js_ComputeGlobalThis(JSContext *cx, JSBool lazy, jsval *argv)
        !OBJ_GET_PARENT(cx, JSVAL_TO_OBJECT(argv[-2]))) {
        thisp = cx->globalObject;
    } else {
-        JSStackFrame *fp;
        jsid id;
        jsval v;
        uintN attrs;

@@ -908,24 +911,26 @@ js_ComputeGlobalThis(JSContext *cx, JSBool lazy, jsval *argv)
         * FIXME: 417851 -- this access check should not be required, as it
         * imposes a performance penalty on all js_ComputeGlobalThis calls,
         * and it represents a maintenance hazard.
+         *
+         * When the above FIXME is fixed, the whole GC reachable frame
+         * mechanism can be removed as well.
         */
-        fp = js_GetTopStackFrame(cx);   /* quell GCC overwarning */
+        JSStackFrame *fp = js_GetTopStackFrame(cx);
+        JSGCReachableFrame reachable;
        if (lazy) {
            JS_ASSERT(fp->argv == argv);
-            fp->dormantNext = cx->dormantFrameChain;
-            cx->dormantFrameChain = fp;
-            cx->fp = fp->down;
-            fp->down = NULL;
+            cx->pushGCReachableFrame(reachable, fp);
        }
        thisp = JSVAL_TO_OBJECT(argv[-2]);
        id = ATOM_TO_JSID(cx->runtime->atomState.parentAtom);

        ok = thisp->checkAccess(cx, id, JSACC_PARENT, &v, &attrs);
        if (lazy) {
-            cx->dormantFrameChain = fp->dormantNext;
-            fp->dormantNext = NULL;
-            fp->down = cx->fp;
-            cx->fp = fp;
+            cx->popGCReachableFrame();
        }
        if (!ok)
            return NULL;
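Aside: js_ComputeGlobalThis now brackets the checkAccess call with pushGCReachableFrame/popGCReachableFrame instead of splicing frames onto the dormant chain. One could go further and wrap the pair in a small RAII guard; a sketch under that assumption (the guard type is hypothetical, the push/pop calls are the ones used above):

    // Sketch only: RAII wrapper around the push/pop pair, so an early
    // return cannot leave a stale entry on the reachable-frame list.
    class AutoGCReachableFrame {
        JSContext *cx;
      public:
        AutoGCReachableFrame(JSContext *cx, JSGCReachableFrame &reachable,
                             JSStackFrame *fp) : cx(cx) {
            cx->pushGCReachableFrame(reachable, fp);
        }
        ~AutoGCReachableFrame() { cx->popGCReachableFrame(); }
    };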
@@ -1095,6 +1100,7 @@ JS_REQUIRES_STACK JS_FRIEND_API(JSBool)
js_Invoke(JSContext *cx, uintN argc, jsval *vp, uintN flags)
{
    void *mark;
+    CallStack callStack(cx);
    JSStackFrame frame;
    jsval *sp, *argv, *newvp;
    jsval v;

@@ -1109,6 +1115,7 @@ js_Invoke(JSContext *cx, uintN argc, jsval *vp, uintN flags)
    uint32 rootedArgsFlag;
    JSInterpreterHook hook;
    void *hookData;
+    bool pushCall;

    JS_ASSERT(argc <= JS_ARGS_LENGTH_MAX);

@@ -1299,7 +1306,6 @@ have_fun:
     * Initialize the frame.
     */
    frame.thisv = vp[1];
-    frame.varobj = NULL;
    frame.callobj = NULL;
    frame.argsobj = NULL;
    frame.script = script;

@@ -1317,10 +1323,18 @@ have_fun:
    frame.imacpc = NULL;
    frame.slots = NULL;
    frame.flags = flags | rootedArgsFlag;
-    frame.dormantNext = NULL;
    frame.displaySave = NULL;

    MUST_FLOW_THROUGH("out");
+    pushCall = !cx->fp;
+    if (pushCall) {
+        /*
+         * The initialVarObj is left NULL since fp->callobj is NULL and, for
+         * interpreted functions, fp->varobj() == fp->callobj.
+         */
+        callStack.setInitialFrame(&frame);
+        cx->pushCallStack(&callStack);
+    }
    cx->fp = &frame;

    /* Init these now in case we goto out before first hook call. */

@@ -1328,15 +1342,13 @@ have_fun:
    hookData = NULL;

    if (native) {
-        /* If native, use caller varobj and scopeChain for eval. */
-        JS_ASSERT(!frame.varobj);
        JS_ASSERT(!frame.scopeChain);
+        /* Slow natives expect the caller's scopeChain as their scopeChain. */
        if (frame.down) {
-            frame.varobj = frame.down->varobj;
+            JS_ASSERT(!pushCall);
            frame.scopeChain = frame.down->scopeChain;
        }

-        /* But ensure that we have a scope chain. */
+        /* Ensure that we have a scope chain. */
        if (!frame.scopeChain)
            frame.scopeChain = parent;
    } else {

@@ -1404,6 +1416,8 @@ out:
        *vp = frame.rval;

    /* Restore cx->fp now that we're done releasing frame objects. */
+    if (pushCall)
+        cx->popCallStack();
    cx->fp = frame.down;

out2:

@@ -1427,7 +1441,7 @@ js_InternalInvoke(JSContext *cx, JSObject *obj, jsval fval, uintN flags,
    void *mark;
    JSBool ok;

-    js_LeaveTrace(cx);
+    LeaveTrace(cx);
    invokevp = js_AllocStack(cx, 2 + argc, &mark);
    if (!invokevp)
        return JS_FALSE;

@@ -1465,7 +1479,7 @@ JSBool
js_InternalGetOrSet(JSContext *cx, JSObject *obj, jsid id, jsval fval,
                    JSAccessMode mode, uintN argc, jsval *argv, jsval *rval)
{
-    js_LeaveTrace(cx);
+    LeaveTrace(cx);

    /*
     * js_InternalInvoke could result in another try to get or set the same id

@@ -1476,38 +1490,69 @@ js_InternalGetOrSet(JSContext *cx, JSObject *obj, jsid id, jsval fval,
    return js_InternalCall(cx, obj, fval, argc, argv, rval);
}

+CallStack *
+js_ContainingCallStack(JSContext *cx, JSStackFrame *target)
+{
+    JS_ASSERT(cx->fp);
+
+    /* The active callstack's top frame is cx->fp. */
+    CallStack *cs = cx->activeCallStack();
+    JSStackFrame *f = cx->fp;
+    JSStackFrame *stop = cs->getInitialFrame()->down;
+    for (; f != stop; f = f->down) {
+        if (f == target)
+            return cs;
+    }
+
+    /* A suspended callstack's top frame is its suspended frame. */
+    for (cs = cs->getPrevious(); cs; cs = cs->getPrevious()) {
+        f = cs->getSuspendedFrame();
+        stop = cs->getInitialFrame()->down;
+        for (; f != stop; f = f->down) {
+            if (f == target)
+                return cs;
+        }
+    }
+
+    return NULL;
+}
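Aside: js_Execute (next hunk) is the one caller that needs this slow path — when |down| is not cx->fp it cannot assume the active callstack and must search. A sketch of the intended calling convention, using only the functions shown in this patch:

    // Sketch only: resolve a frame's variables object whether or not the
    // frame belongs to the active callstack.
    JSObject *VarObjForFrame(JSContext *cx, JSStackFrame *fp) {
        if (fp == cx->fp)
            return fp->varobj(cx);                      // fast path
        CallStack *cs = js_ContainingCallStack(cx, fp); // slow linear search
        return fp->varobj(cs);
    }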
JSBool
js_Execute(JSContext *cx, JSObject *chain, JSScript *script,
           JSStackFrame *down, uintN flags, jsval *result)
{
-    JSInterpreterHook hook;
-    void *hookData, *mark;
-    JSStackFrame *oldfp, frame;
-    JSObject *obj, *tmp;
-    JSBool ok;
-
    if (script->isEmpty()) {
        if (result)
            *result = JSVAL_VOID;
        return JS_TRUE;
    }

-    js_LeaveTrace(cx);
+    LeaveTrace(cx);

#ifdef INCLUDE_MOZILLA_DTRACE
-    if (JAVASCRIPT_EXECUTE_START_ENABLED())
-        jsdtrace_execute_start(script);
+    struct JSDNotifyGuard {
+        JSScript *script;
+        JSDNotifyGuard(JSScript *s) : script(s) {
+            if (JAVASCRIPT_EXECUTE_START_ENABLED())
+                jsdtrace_execute_start(script);
+        }
+        ~JSDNotifyGuard() {
+            if (JAVASCRIPT_EXECUTE_DONE_ENABLED())
+                jsdtrace_execute_done(script);
+        }
+    } jsdNotifyGuard(script);
#endif

-    hook = cx->debugHooks->executeHook;
-    hookData = mark = NULL;
-    oldfp = js_GetTopStackFrame(cx);
+    JSInterpreterHook hook = cx->debugHooks->executeHook;
+    void *hookData = NULL;
+    JSStackFrame frame;
+    CallStack callStack(cx);
+    frame.script = script;
    if (down) {
        /* Propagate arg state for eval and the debugger API. */
        frame.callobj = down->callobj;
        frame.argsobj = down->argsobj;
-        frame.varobj = down->varobj;
        frame.fun = (script->staticLevel > 0) ? down->fun : NULL;
        frame.thisv = down->thisv;
        if (down->flags & JSFRAME_COMPUTED_THIS)

@@ -1515,29 +1560,49 @@ js_Execute(JSContext *cx, JSObject *chain, JSScript *script,
        frame.argc = down->argc;
        frame.argv = down->argv;
        frame.annotation = down->annotation;
+
+        /*
+         * We want to call |down->varobj()|, but this requires knowing the
+         * CallStack of |down|. If |down == cx->fp|, the callstack is simply
+         * the context's active callstack, so we can use |down->varobj(cx)|.
+         * When |down != cx->fp|, we need to do a slow linear search. Luckily,
+         * this only happens with eval and JS_EvaluateInStackFrame.
+         */
+        if (down == cx->fp) {
+            callStack.setInitialVarObj(down->varobj(cx));
+        } else {
+            CallStack *cs = js_ContainingCallStack(cx, down);
+            callStack.setInitialVarObj(down->varobj(cs));
+        }
    } else {
        frame.callobj = NULL;
        frame.argsobj = NULL;
-        obj = chain;
+        JSObject *obj = chain;
        if (cx->options & JSOPTION_VAROBJFIX) {
-            while ((tmp = OBJ_GET_PARENT(cx, obj)) != NULL)
+            while (JSObject *tmp = OBJ_GET_PARENT(cx, obj))
                obj = tmp;
        }
-        frame.varobj = obj;
        frame.fun = NULL;
        frame.thisv = OBJECT_TO_JSVAL(chain);
        frame.argc = 0;
        frame.argv = NULL;
        frame.annotation = NULL;
+        callStack.setInitialVarObj(obj);
    }

    frame.imacpc = NULL;

+    struct RawStackGuard {
+        JSContext *cx;
+        void *mark;
+        RawStackGuard(JSContext *cx) : cx(cx), mark(NULL) {}
+        ~RawStackGuard() { if (mark) js_FreeRawStack(cx, mark); }
+    } rawStackGuard(cx);
+
    if (script->nslots != 0) {
-        frame.slots = js_AllocRawStack(cx, script->nslots, &mark);
-        if (!frame.slots) {
-            ok = JS_FALSE;
-            goto out;
-        }
+        frame.slots = js_AllocRawStack(cx, script->nslots, &rawStackGuard.mark);
+        if (!frame.slots)
+            return false;
        memset(frame.slots, 0, script->nfixed * sizeof(jsval));

#if JS_HAS_SHARP_VARS

@@ -1552,10 +1617,8 @@ js_Execute(JSContext *cx, JSObject *chain, JSScript *script,
            int base = (down->fun && !(down->flags & JSFRAME_SPECIAL))
                       ? down->fun->sharpSlotBase(cx)
                       : down->script->nfixed - SHARP_NSLOTS;
-            if (base < 0) {
-                ok = JS_FALSE;
-                goto out;
-            }
+            if (base < 0)
+                return false;
            sharps[0] = down->slots[base];
            sharps[1] = down->slots[base + 1];
        } else {

@@ -1572,40 +1635,43 @@ js_Execute(JSContext *cx, JSObject *chain, JSScript *script,
    frame.scopeChain = chain;
    frame.regs = NULL;
    frame.flags = flags;
-    frame.dormantNext = NULL;
    frame.blockChain = NULL;

    /*
-     * Here we wrap the call to js_Interpret with code to (conditionally)
-     * save and restore the old stack frame chain into a chain of 'dormant'
-     * frame chains. Since we are replacing cx->fp, we were running into
-     * the problem that if GC was called under this frame, some of the GC
-     * things associated with the old frame chain (available here only in
-     * the C variable 'oldfp') were not rooted and were being collected.
-     *
-     * So, now we preserve the links to these 'dormant' frame chains in cx
-     * before calling js_Interpret and cleanup afterwards. The GC walks
-     * these dormant chains and marks objects in the same way that it marks
-     * objects in the primary cx->fp chain.
+     * We need to push/pop a new callstack if there is no existing callstack
+     * or the current callstack needs to be suspended (so that its frames are
+     * marked by GC).
     */
-    if (oldfp && oldfp != down) {
-        JS_ASSERT(!oldfp->dormantNext);
-        oldfp->dormantNext = cx->dormantFrameChain;
-        cx->dormantFrameChain = oldfp;
+    JSStackFrame *oldfp = cx->fp;
+    bool newCallStack = !oldfp || oldfp != down;
+    if (newCallStack) {
+        callStack.setInitialFrame(&frame);
+        cx->pushCallStack(&callStack);
    }

    cx->fp = &frame;

+    struct FinishGuard {
+        JSContext *cx;
+        JSStackFrame *oldfp;
+        bool newCallStack;
+        FinishGuard(JSContext *cx, JSStackFrame *oldfp, bool newCallStack)
+          : cx(cx), oldfp(oldfp), newCallStack(newCallStack) {}
+        ~FinishGuard() {
+            if (newCallStack)
+                cx->popCallStack();
+            cx->fp = oldfp;
+        }
+    } finishGuard(cx, oldfp, newCallStack);
+
    if (!down) {
        OBJ_TO_INNER_OBJECT(cx, chain);
        if (!chain)
-            return JS_FALSE;
+            return false;
        frame.scopeChain = chain;

        JSObject *thisp = JSVAL_TO_OBJECT(frame.thisv)->thisObject(cx);
-        if (!thisp) {
-            ok = JS_FALSE;
-            goto out2;
-        }
+        if (!thisp)
+            return false;
        frame.thisv = OBJECT_TO_JSVAL(thisp);
        frame.flags |= JSFRAME_COMPUTED_THIS;
    }

@@ -1615,7 +1681,7 @@ js_Execute(JSContext *cx, JSObject *chain, JSScript *script,
                            cx->debugHooks->executeHookData);
    }

-    ok = js_Interpret(cx);
+    JSBool ok = js_Interpret(cx);
    if (result)
        *result = frame.rval;

@@ -1625,22 +1691,6 @@ js_Execute(JSContext *cx, JSObject *chain, JSScript *script,
        hook(cx, &frame, JS_FALSE, &ok, hookData);
    }

-out2:
-    if (mark)
-        js_FreeRawStack(cx, mark);
-    cx->fp = oldfp;
-
-    if (oldfp && oldfp != down) {
-        JS_ASSERT(cx->dormantFrameChain == oldfp);
-        cx->dormantFrameChain = oldfp->dormantNext;
-        oldfp->dormantNext = NULL;
-    }
-
-out:
-#ifdef INCLUDE_MOZILLA_DTRACE
-    if (JAVASCRIPT_EXECUTE_DONE_ENABLED())
-        jsdtrace_execute_done(script);
-#endif
    return ok;
}

@@ -2647,7 +2697,7 @@ JS_STATIC_ASSERT(JSOP_INCNAME_LENGTH == JSOP_NAMEDEC_LENGTH);
# define ABORT_RECORDING(cx, reason) \
    JS_BEGIN_MACRO \
        if (TRACE_RECORDER(cx)) \
-            js_AbortRecording(cx, reason); \
+            AbortRecording(cx, reason); \
    JS_END_MACRO
#else
# define ABORT_RECORDING(cx, reason) ((void) 0)

@@ -2881,7 +2931,7 @@ js_Interpret(JSContext *cx)
#define MONITOR_BRANCH(reason) \
    JS_BEGIN_MACRO \
        if (TRACING_ENABLED(cx)) { \
-            if (js_MonitorLoopEdge(cx, inlineCallCount, reason)) { \
+            if (MonitorLoopEdge(cx, inlineCallCount, reason)) { \
                JS_ASSERT(TRACE_RECORDER(cx)); \
                MONITOR_BRANCH_TRACEVIS; \
                ENABLE_INTERRUPTS(); \

@@ -3013,7 +3063,7 @@ js_Interpret(JSContext *cx)
     * after cx->fp->regs is set.
     */
    if (TRACE_RECORDER(cx))
-        js_AbortRecording(cx, "attempt to reenter interpreter while recording");
+        AbortRecording(cx, "attempt to reenter interpreter while recording");
#endif

    /*

@@ -3103,7 +3153,7 @@ js_Interpret(JSContext *cx)
         * For now just bail on any sign of trouble.
         */
        if (TRACE_RECORDER(cx))
-            js_AbortRecording(cx, "error or exception while recording");
+            AbortRecording(cx, "error or exception while recording");
#endif

        if (!cx->throwing) {

@@ -3285,7 +3335,7 @@ js_Interpret(JSContext *cx)
    JS_ASSERT(fp->regs == &regs);
#ifdef JS_TRACER
    if (TRACE_RECORDER(cx))
-        js_AbortRecording(cx, "recording out of js_Interpret");
+        AbortRecording(cx, "recording out of js_Interpret");
#endif
#if JS_HAS_GENERATORS
    if (JS_UNLIKELY(fp->flags & JSFRAME_YIELDING)) {
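Aside: the js_Execute rewrite above trades the old goto out/out2 cleanup labels for three local RAII guards (JSDNotifyGuard, RawStackGuard, FinishGuard), so every early return still unwinds the dtrace notification, the raw stack, and the callstack/fp state, in reverse declaration order. The idiom, reduced to a hedged generic sketch (names hypothetical):

    // Sketch only: the guard idiom js_Execute now relies on. The destructor
    // runs on every exit path, replacing explicit cleanup labels.
    struct ScopedCleanup {
        void (*fn)(void *);
        void *arg;
        ScopedCleanup(void (*fn)(void *), void *arg) : fn(fn), arg(arg) {}
        ~ScopedCleanup() { fn(arg); }
    };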
@@ -56,6 +56,7 @@ typedef struct JSFrameRegs {
    jsval *sp;          /* stack pointer */
} JSFrameRegs;

/*
 * JS stack frame, may be allocated on the C stack by native callers. Always
 * allocated on cx->stackPool for calls from the interpreter to an interpreted

@@ -73,7 +74,6 @@ struct JSStackFrame {
    JSObject *callobj;  /* lazily created Call object */
    jsval argsobj;      /* lazily created arguments object, must be
                           JSVAL_OBJECT */
-    JSObject *varobj;   /* variables object, where vars go */
    JSScript *script;   /* script being interpreted */
    JSFunction *fun;    /* function being called or null */
    jsval thisv;        /* "this" pointer if in method */

@@ -123,7 +123,6 @@ struct JSStackFrame {
    JSObject *blockChain;

    uint32 flags;       /* frame flags -- see below */
-    JSStackFrame *dormantNext;  /* next dormant frame chain */
    JSStackFrame *displaySave;  /* previous value of display entry for
                                   script->staticLevel */

@@ -155,9 +154,26 @@ struct JSStackFrame {
    JSObject *callee() {
        return argv ? JSVAL_TO_OBJECT(argv[-2]) : NULL;
    }

+    /*
+     * Get the object associated with the Execution Context's
+     * VariableEnvironment (ES5 10.3). The given CallStack must contain this
+     * stack frame.
+     */
+    JSObject *varobj(js::CallStack *cs);
+
+    /* Short for: varobj(cx->activeCallStack()). */
+    JSObject *varobj(JSContext *cx);
};

#ifdef __cplusplus
+/*
+ * Perform a linear search of all frames in all callstacks in the given context
+ * for the given frame, returning the callstack, if found, and NULL otherwise.
+ */
+extern js::CallStack *
+js_ContainingCallStack(JSContext *cx, JSStackFrame *target);
+
static JS_INLINE uintN
FramePCOffset(JSStackFrame* fp)
{
@@ -72,6 +72,8 @@
#include "jsxml.h"
#endif

+using namespace js;
+
JS_STATIC_ASSERT(JSSLOT_ITER_FLAGS < JS_INITIAL_NSLOTS);

#if JS_HAS_GENERATORS

@@ -416,7 +418,7 @@ js_ValueToIterator(JSContext *cx, uintN flags, jsval *vp)
        if (!InitNativeIterator(cx, iterobj, obj, flags))
            goto bad;
    } else {
-        js_LeaveTrace(cx);
+        LeaveTrace(cx);
        arg = BOOLEAN_TO_JSVAL((flags & JSITER_FOREACH) == 0);
        if (!js_InternalInvoke(cx, obj, *vp, JSINVOKE_ITERATOR, 1, &arg,
                               vp)) {

@@ -712,8 +714,8 @@ JS_FRIEND_DATA(JSClass) js_GeneratorClass = {
 * from the activation in fp, so we can steal away fp->callobj and fp->argsobj
 * if they are non-null.
 */
-JSObject *
-js_NewGenerator(JSContext *cx, JSStackFrame *fp)
+JS_REQUIRES_STACK JSObject *
+js_NewGenerator(JSContext *cx)
{
    JSObject *obj;
    uintN argc, nargs, nslots;

@@ -725,6 +727,7 @@ js_NewGenerator(JSContext *cx, JSStackFrame *fp)
        return NULL;

    /* Load and compute stack slot counts. */
+    JSStackFrame *fp = cx->fp;
    argc = fp->argc;
    nargs = JS_MAX(argc, fp->fun->nargs);
    nslots = 2 + nargs + fp->script->nslots;

@@ -750,7 +753,6 @@ js_NewGenerator(JSContext *cx, JSStackFrame *fp)
    }

    /* These two references can be shared with fp until it goes away. */
-    gen->frame.varobj = fp->varobj;
    gen->frame.thisv = fp->thisv;

    /* Copy call-invariant script and function references. */

@@ -784,7 +786,6 @@ js_NewGenerator(JSContext *cx, JSStackFrame *fp)
    gen->frame.regs = &gen->savedRegs;

    gen->frame.flags = (fp->flags & ~JSFRAME_ROOTED_ARGV) | JSFRAME_GENERATOR;
-    gen->frame.dormantNext = NULL;

    /* JSOP_GENERATOR appears in the prologue, outside all blocks. */
    JS_ASSERT(!fp->blockChain);

@@ -924,7 +925,7 @@ generator_op(JSContext *cx, JSGeneratorOp op, jsval *vp, uintN argc)
    JSObject *obj;
    jsval arg;

-    js_LeaveTrace(cx);
+    LeaveTrace(cx);

    obj = JS_THIS_OBJECT(cx, vp);
    if (!JS_InstanceOf(cx, obj, &js_GeneratorClass, vp + 2))

@@ -117,7 +117,7 @@ struct JSGenerator {
    ((JSGenerator *) ((uint8 *)(fp) - offsetof(JSGenerator, frame)))

extern JSObject *
-js_NewGenerator(JSContext *cx, JSStackFrame *fp);
+js_NewGenerator(JSContext *cx);

#endif
@@ -520,16 +520,18 @@ js_NudgeOtherContexts(JSContext *cx)
 * specific thread.
 */
static void
-NudgeThread(JSThread *thread)
+NudgeThread(JSRuntime *rt, JSThread *thread)
{
-    JSCList *link;
-    JSContext *acx;
+    JS_ASSERT(thread);

-    link = &thread->contextList;
-    while ((link = link->next) != &thread->contextList) {
-        acx = CX_FROM_THREAD_LINKS(link);
-        JS_ASSERT(acx->thread == thread);
-        if (acx->requestDepth)
+    /*
+     * We cannot walk here over thread->contextList as that is manipulated
+     * outside the GC lock and must be accessed only from the thread that
+     * owns JSThread.
+     */
+    JSContext *acx = NULL;
+    while ((acx = js_NextActiveContext(rt, acx)) != NULL) {
+        if (acx->thread == thread)
            JS_TriggerOperationCallback(acx);
    }
}

@@ -545,48 +547,45 @@ NudgeThread(JSThread *thread)
static JSBool
ClaimTitle(JSTitle *title, JSContext *cx)
{
-    JSRuntime *rt;
-    JSContext *ownercx;
-    uint32 requestDebit;
+    JSRuntime *rt = cx->runtime;
+    JS_ASSERT_IF(cx->requestDepth == 0,
+                 cx->thread == rt->gcThread && rt->gcRunning);

-    rt = cx->runtime;
    JS_RUNTIME_METER(rt, claimAttempts);
    JS_LOCK_GC(rt);

    /* Reload in case ownercx went away while we blocked on the lock. */
-    while ((ownercx = title->ownercx) != NULL) {
+    while (JSContext *ownercx = title->ownercx) {
        /*
         * Avoid selflock if ownercx is dead, or is not running a request, or
-         * has the same thread as cx. Set title->ownercx to cx so that the
-         * matching JS_UNLOCK_SCOPE or JS_UNLOCK_OBJ macro call will take the
-         * fast path around the corresponding js_UnlockTitle or js_UnlockObj
-         * function call.
+         * has the same thread as cx, or cx->thread runs the GC (in which case
+         * all other requests must be suspended), or ownercx->thread runs a GC
+         * and the GC waits for all requests to finish. Set title->ownercx to
+         * cx so that the matching JS_UNLOCK_SCOPE or JS_UNLOCK_OBJ macro call
+         * will take the fast path around the corresponding js_UnlockTitle or
+         * js_UnlockObj function call.
         *
         * If title->u.link is non-null, title has already been inserted on
         * the rt->titleSharingTodo list, because another thread's context
         * already wanted to lock title while ownercx was running a request.
-         * That context must still be in request and cannot be dead. We can
-         * claim it if its thread matches ours but only if cx itself is in a
-         * request.
-         *
-         * The latter check covers the case when the embedding triggers a call
-         * to js_GC on a cx outside a request while having ownercx running a
-         * request on the same thread, and then js_GC calls a mark hook or a
-         * finalizer accessing the title. In this case we cannot claim the
-         * title but must share it now as no title-sharing JS_EndRequest will
-         * follow.
+         * That context must still be in request and cannot be dead. Moreover,
+         * the GC cannot run at this moment as it must wait until all the
+         * titles are shared and the threads that want to lock them finish
+         * their requests. Thus we can claim the title if its thread matches
+         * ours.
         */
        bool canClaim;
        if (title->u.link) {
            JS_ASSERT(js_ValidContextPointer(rt, ownercx));
            JS_ASSERT(ownercx->requestDepth > 0);
-            JS_ASSERT_IF(cx->requestDepth == 0, cx->thread == rt->gcThread);
-            canClaim = (ownercx->thread == cx->thread &&
-                        cx->requestDepth > 0);
+            JS_ASSERT(!rt->gcRunning);
+            canClaim = (ownercx->thread == cx->thread);
        } else {
            canClaim = (!js_ValidContextPointer(rt, ownercx) ||
                        !ownercx->requestDepth ||
-                        ownercx->thread == cx->thread);
+                        cx->thread == ownercx->thread ||
+                        cx->thread == rt->gcThread ||
+                        ownercx->thread->gcWaiting);
        }
        if (canClaim) {
            title->ownercx = cx;

@@ -607,14 +606,8 @@ ClaimTitle(JSTitle *title, JSContext *cx)
         * so that control would unwind properly once these locks became
         * "thin" or "fat". The engine promotes a title from exclusive to
         * shared access only when locking, never when holding or unlocking.
-         *
-         * Avoid deadlock before any of this title/context cycle detection if
-         * cx is on the active GC's thread, because in that case, no requests
-         * will run until the GC completes. Any title wanted by the GC (from
-         * a finalizer or a mark hook) that can't be claimed must become
-         * shared.
         */
-        if (rt->gcThread == cx->thread || WillDeadlock(ownercx, cx->thread)) {
+        if (WillDeadlock(ownercx, cx->thread)) {
            ShareTitle(cx, title);
            break;
        }

@@ -625,26 +618,10 @@ ClaimTitle(JSTitle *title, JSContext *cx)
         * non-null test, and avoid double-insertion bugs.
         */
        if (!title->u.link) {
-            TITLE_TO_SCOPE(title)->hold();
            title->u.link = rt->titleSharingTodo;
            rt->titleSharingTodo = title;
        }

-        /*
-         * Discount all the requests running on the current thread so a
-         * possible GC can proceed on another thread while we wait on
-         * rt->titleSharingDone.
-         */
-        requestDebit = js_DiscountRequestsForGC(cx);
-        if (title->ownercx != ownercx) {
-            /*
-             * js_DiscountRequestsForGC released and reacquired the GC lock,
-             * and the title was taken or shared. Start over.
-             */
-            js_RecountRequestsAfterGC(rt, requestDebit);
-            continue;
-        }
-
        /*
         * We know that some other thread's context owns title, which is now
         * linked onto rt->titleSharingTodo, awaiting the end of that other

@@ -652,7 +629,7 @@ ClaimTitle(JSTitle *title, JSContext *cx)
         * But before waiting, we force the operation callback for that other
         * thread so it can quickly suspend.
         */
-        NudgeThread(ownercx->thread);
+        NudgeThread(rt, ownercx->thread);

        JS_ASSERT(!cx->thread->titleToShare);
        cx->thread->titleToShare = title;

@@ -661,21 +638,6 @@ ClaimTitle(JSTitle *title, JSContext *cx)
#endif
        PR_WaitCondVar(rt->titleSharingDone, PR_INTERVAL_NO_TIMEOUT);
        JS_ASSERT(stat != PR_FAILURE);
-
-        js_RecountRequestsAfterGC(rt, requestDebit);
-
-        /*
-         * Don't clear titleToShare until after we're through waiting on
-         * all condition variables protected by rt->gcLock -- that includes
-         * rt->titleSharingDone *and* rt->gcDone (hidden in the call to
-         * js_RecountRequestsAfterGC immediately above).
-         *
-         * Otherwise, the GC could easily deadlock with another thread that
-         * owns a title wanted by a finalizer. By keeping cx->titleToShare
-         * set till here, we ensure that such deadlocks are detected, which
-         * results in the finalized object's title being shared (it must, of
-         * course, have other, live objects sharing it).
-         */
        cx->thread->titleToShare = NULL;
    }

@@ -693,24 +655,15 @@ js_ShareWaitingTitles(JSContext *cx)
    todop = &cx->runtime->titleSharingTodo;
    shared = false;
    while ((title = *todop) != NO_TITLE_SHARING_TODO) {
-        if (title->ownercx != cx) {
+        if (title->ownercx->thread != cx->thread) {
            todop = &title->u.link;
            continue;
        }
        *todop = title->u.link;
        title->u.link = NULL;   /* null u.link for sanity ASAP */

-        /*
-         * If JSScope::drop returns false, we held the last ref to scope. The
-         * waiting thread(s) must have been killed, after which the GC
-         * collected the object that held this scope. Unlikely, because it
-         * requires that the GC ran (e.g., from an operation callback)
-         * during this request, but possible.
-         */
-        if (TITLE_TO_SCOPE(title)->drop(cx, NULL)) {
-            FinishSharingTitle(cx, title); /* set ownercx = NULL */
-            shared = true;
-        }
+        FinishSharingTitle(cx, title); /* set ownercx = NULL */
+        shared = true;
    }
    if (shared)
        JS_NOTIFY_ALL_CONDVAR(cx->runtime->titleSharingDone);

@@ -1315,17 +1268,18 @@ js_UnlockTitle(JSContext *cx, JSTitle *title)
 * dropped the last reference to oldtitle.
 */
void
-js_TransferTitle(JSContext *cx, JSTitle *oldtitle, JSTitle *newtitle)
+js_DropAllEmptyScopeLocks(JSContext *cx, JSScope *scope)
{
-    JS_ASSERT(JS_IS_TITLE_LOCKED(cx, newtitle));
+    JS_ASSERT(!CX_OWNS_SCOPE_TITLE(cx,scope));
+    JS_ASSERT(scope->isSharedEmpty());
+    JS_ASSERT(JS_IS_TITLE_LOCKED(cx, &scope->title));

    /*
-     * If the last reference to oldtitle went away, newtitle needs no lock
-     * state update.
+     * Shared empty scope cannot be sealed so we do not need to deal with
+     * cx->lockedSealedTitle.
     */
-    if (!oldtitle)
-        return;
-    JS_ASSERT(JS_IS_TITLE_LOCKED(cx, oldtitle));
+    JS_ASSERT(!scope->sealed());
+    JS_ASSERT(cx->lockedSealedTitle != &scope->title);

    /*
     * Special case in js_LockTitle and js_UnlockTitle for the GC calling

@@ -1336,46 +1290,9 @@ js_TransferTitle(JSContext *cx, JSTitle *oldtitle, JSTitle *newtitle)
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;

-    /*
-     * Special case in js_LockObj and js_UnlockTitle for locking the sealed
-     * scope of an object that owns that scope (the prototype or mutated obj
-     * for which OBJ_SCOPE(obj)->object == obj), and unlocking it.
-     */
-    JS_ASSERT(cx->lockedSealedTitle != newtitle);
-    if (cx->lockedSealedTitle == oldtitle) {
-        JS_ASSERT(newtitle->ownercx == cx ||
-                  (!newtitle->ownercx && newtitle->u.count == 1));
-        cx->lockedSealedTitle = NULL;
-        return;
-    }
-
-    /*
-     * If oldtitle is single-threaded, there's nothing to do.
-     */
-    if (oldtitle->ownercx) {
-        JS_ASSERT(oldtitle->ownercx == cx);
-        JS_ASSERT(newtitle->ownercx == cx ||
-                  (!newtitle->ownercx && newtitle->u.count == 1));
-        return;
-    }
-
-    /*
-     * We transfer oldtitle->u.count only if newtitle is not single-threaded.
-     * Flow unwinds from here through some number of JS_UNLOCK_TITLE and/or
-     * JS_UNLOCK_OBJ macro calls, which will decrement newtitle->u.count only
-     * if they find newtitle->ownercx != cx.
-     */
-    if (newtitle->ownercx != cx) {
-        JS_ASSERT(!newtitle->ownercx);
-        newtitle->u.count = oldtitle->u.count;
-    }
-
-    /*
-     * Reset oldtitle's lock state so that it is completely unlocked.
-     */
-    LOGIT(oldtitle, '0');
-    oldtitle->u.count = 0;
-    ThinUnlock(&oldtitle->lock, CX_THINLOCK_ID(cx));
+    LOGIT(&scope->title, '0');
+    scope->title.u.count = 0;
+    ThinUnlock(&scope->title.lock, CX_THINLOCK_ID(cx));
}

void

@@ -154,11 +154,13 @@ struct JSTitle {
 * are for optimizations above the JSObjectOps layer, under which object locks
 * normally hide.
 */
-#define JS_LOCK_OBJ(cx,obj) ((OBJ_SCOPE(obj)->title.ownercx == (cx)) \
+#define CX_OWNS_SCOPE_TITLE(cx,scope) ((scope)->title.ownercx == (cx))
+
+#define JS_LOCK_OBJ(cx,obj) (CX_OWNS_SCOPE_TITLE(cx, OBJ_SCOPE(obj)) \
                             ? (void)0 \
                             : (js_LockObj(cx, obj), \
                                JS_SET_OBJ_INFO(obj,__FILE__,__LINE__)))
-#define JS_UNLOCK_OBJ(cx,obj) ((OBJ_SCOPE(obj)->title.ownercx == (cx)) \
+#define JS_UNLOCK_OBJ(cx,obj) (CX_OWNS_SCOPE_TITLE(cx, OBJ_SCOPE(obj)) \
                               ? (void)0 : js_UnlockObj(cx, obj))

/*

@@ -182,9 +184,12 @@ struct JSTitle {
#define JS_LOCK_SCOPE(cx,scope) JS_LOCK_TITLE(cx,&(scope)->title)
#define JS_UNLOCK_SCOPE(cx,scope) JS_UNLOCK_TITLE(cx,&(scope)->title)

-#define JS_TRANSFER_SCOPE_LOCK(cx, scope, newscope) \
-    js_TransferTitle(cx, &scope->title, &newscope->title)
+#define JS_DROP_ALL_EMPTY_SCOPE_LOCKS(cx,scope) \
+    JS_BEGIN_MACRO \
+        JS_ASSERT((scope)->isSharedEmpty()); \
+        if (!CX_OWNS_SCOPE_TITLE(cx, scope)) \
+            js_DropAllEmptyScopeLocks(cx, scope); \
+    JS_END_MACRO

extern void js_Lock(JSContext *cx, JSThinLock *tl);
extern void js_Unlock(JSContext *cx, JSThinLock *tl);

@@ -199,7 +204,7 @@ extern void js_LockTitle(JSContext *cx, JSTitle *title);
extern void js_UnlockTitle(JSContext *cx, JSTitle *title);
extern int js_SetupLocks(int,int);
extern void js_CleanupLocks();
-extern void js_TransferTitle(JSContext *, JSTitle *, JSTitle *);
+extern void js_DropAllEmptyScopeLocks(JSContext *cx, JSScope *scope);
extern JS_FRIEND_API(jsval)
js_GetSlotThreadSafe(JSContext *, JSObject *, uint32);
extern void js_SetSlotThreadSafe(JSContext *, JSObject *, uint32, jsval);

@@ -259,6 +264,7 @@ extern void js_SetScopeInfo(JSScope *scope, const char *file, int line);

#define JS_LOCK_RUNTIME(rt) ((void)0)
#define JS_UNLOCK_RUNTIME(rt) ((void)0)
+#define CX_OWNS_SCOPE_TITLE(cx,obj) true
#define JS_LOCK_OBJ(cx,obj) ((void)0)
#define JS_UNLOCK_OBJ(cx,obj) ((void)0)
#define JS_LOCK_OBJ_IF_SHAPE(cx,obj,shape) (OBJ_SHAPE(obj) == (shape))

@@ -266,7 +272,7 @@ extern void js_SetScopeInfo(JSScope *scope, const char *file, int line);
#define JS_LOCK_OBJ_VOID(cx,obj,e) (e)
#define JS_LOCK_SCOPE(cx,scope) ((void)0)
#define JS_UNLOCK_SCOPE(cx,scope) ((void)0)
-#define JS_TRANSFER_SCOPE_LOCK(c,o,n) ((void)0)
+#define JS_DROP_ALL_EMPTY_SCOPE_LOCKS(cx,scope) ((void)0)

#define JS_IS_RUNTIME_LOCKED(rt) 1
#define JS_IS_OBJ_LOCKED(cx,obj) 1
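Aside: ClaimTitle's new canClaim logic folds the GC special cases into the predicate itself instead of short-circuiting before the deadlock check. Restated as a hedged sketch (pure condition, no side effects; all names are from the hunks above):

    // Sketch only: the claim conditions after this patch, for both cases.
    static bool CanClaim(JSRuntime *rt, JSTitle *title,
                         JSContext *cx, JSContext *ownercx) {
        if (title->u.link)                      // already queued for sharing
            return ownercx->thread == cx->thread;
        return !js_ValidContextPointer(rt, ownercx) ||  // owner is dead
               !ownercx->requestDepth ||                // owner not in a request
               cx->thread == ownercx->thread ||         // same thread as owner
               cx->thread == rt->gcThread ||            // we are the GC thread
               ownercx->thread->gcWaiting;              // owner waits for the GC
    }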
js/src/jsobj.cpp (110 changed lines)
@@ -102,6 +102,8 @@

#include "jsautooplen.h"

+using namespace js;
+
#ifdef JS_THREADSAFE
#define NATIVE_DROP_PROPERTY js_DropProperty

@@ -1254,11 +1256,11 @@ obj_eval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
    JSBool ok;
    JSScript **bucket = NULL;   /* avoid GCC warning with early decl&init */
#if JS_HAS_EVAL_THIS_SCOPE
-    JSObject *callerScopeChain = NULL, *callerVarObj = NULL;
-    JSObject *withObject = NULL;
-    JSBool setCallerScopeChain = JS_FALSE, setCallerVarObj = JS_FALSE;
-    JSTempValueRooter scopetvr, varobjtvr;
+    JSObject *callerScopeChain = NULL;
+    JSBool setCallerScopeChain = JS_FALSE;
+    JSTempValueRooter scopetvr;
#endif
+    JSObject *withObject = NULL;

    fp = js_GetTopStackFrame(cx);
    caller = js_GetScriptedCaller(cx, fp);

@@ -1310,7 +1312,7 @@ obj_eval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
     * object, then we need to provide one for the compiler to stick any
     * declared (var) variables into.
     */
-    if (!caller->varobj && !js_GetCallObject(cx, caller))
+    if (caller->fun && !caller->callobj && !js_GetCallObject(cx, caller))
        return JS_FALSE;

    /* Accept an optional trailing argument that overrides the scope object. */

@@ -1359,22 +1361,11 @@ obj_eval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)

        /* NB: We know obj is a global object here. */
        JS_ASSERT(!OBJ_GET_PARENT(cx, obj));
-        scopeobj = obj;
-
-        /* Set fp->scopeChain too, for the compiler. */
-        caller->scopeChain = fp->scopeChain = scopeobj;
+        caller->scopeChain = scopeobj = obj;

        /* Remember scopeobj so we can null its private when done. */
        setCallerScopeChain = JS_TRUE;
        JS_PUSH_TEMP_ROOT_OBJECT(cx, callerScopeChain, &scopetvr);
-
-        callerVarObj = caller->varobj;
-        if (obj != callerVarObj) {
-            /* Set fp->varobj too, for the compiler. */
-            caller->varobj = fp->varobj = obj;
-            setCallerVarObj = JS_TRUE;
-            JS_PUSH_TEMP_ROOT_OBJECT(cx, callerVarObj, &varobjtvr);
-        }
    } else {
        /*
         * Compile using the caller's current scope object.

@@ -1551,17 +1542,13 @@ obj_eval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
out:
#if JS_HAS_EVAL_THIS_SCOPE
    /* Restore OBJ_GET_PARENT(scopeobj) not callerScopeChain in case of Call. */
-    if (setCallerVarObj) {
-        caller->varobj = callerVarObj;
-        JS_POP_TEMP_ROOT(cx, &varobjtvr);
-    }
    if (setCallerScopeChain) {
        caller->scopeChain = callerScopeChain;
        JS_POP_TEMP_ROOT(cx, &scopetvr);
    }
+#endif
    if (withObject)
        withObject->setPrivate(NULL);
-#endif
    return ok;
}

@@ -1769,7 +1756,7 @@ Object_p_hasOwnProperty(JSContext* cx, JSObject* obj, JSString *str)
    JSProperty *prop;
    if (!js_ValueToStringId(cx, STRING_TO_JSVAL(str), &id) ||
        !js_HasOwnProperty(cx, obj->map->ops->lookupProperty, obj, id, &pobj, &prop)) {
-        js_SetBuiltinError(cx);
+        SetBuiltinError(cx);
        return JSVAL_TO_BOOLEAN(JSVAL_VOID);
    }

@@ -1815,7 +1802,7 @@ Object_p_propertyIsEnumerable(JSContext* cx, JSObject* obj, JSString *str)
    jsval v;

    if (!js_PropertyIsEnumerable(cx, obj, id, &v)) {
-        js_SetBuiltinError(cx);
+        SetBuiltinError(cx);
        return JSVAL_TO_BOOLEAN(JSVAL_VOID);
    }

@@ -2912,7 +2899,7 @@ InitScopeForObject(JSContext* cx, JSObject* obj, JSObject* proto, JSObjectOps* o
        JS_ASSERT(scope->freeslot >= JSSLOT_PRIVATE);
        if (scope->freeslot > JS_INITIAL_NSLOTS &&
            !AllocSlots(cx, obj, scope->freeslot)) {
-            JSScope::destroy(cx, scope);
+            scope->destroy(cx);
            goto bad;
        }
    }

@@ -3114,7 +3101,7 @@ js_NewInstance(JSContext *cx, JSClass *clasp, JSObject *ctor)
        if (scope->title.ownercx != cx)
            return NULL;
#endif
-        if (!scope->owned()) {
+        if (scope->isSharedEmpty()) {
            scope = js_GetMutableScope(cx, ctor);
            if (!scope)
                return NULL;

@@ -3426,20 +3413,13 @@ js_CloneBlockObject(JSContext *cx, JSObject *proto, JSStackFrame *fp)
    if (!clone)
        return NULL;

-    JSScope *scope = OBJ_SCOPE(proto);
-    scope->hold();
-    JS_ASSERT(!scope->owned());
-    clone->map = scope;
-
-    clone->classword = jsuword(&js_BlockClass);
-    clone->setProto(proto);
-    clone->setParent(NULL);     // caller's responsibility
-    clone->setPrivate(fp);
+    /* The caller sets parent on its own. */
+    clone->init(&js_BlockClass, proto, NULL, reinterpret_cast<jsval>(fp));
    clone->fslots[JSSLOT_BLOCK_DEPTH] = proto->fslots[JSSLOT_BLOCK_DEPTH];
-    JS_ASSERT(scope->freeslot == JSSLOT_BLOCK_DEPTH + 1);
-    for (uint32 i = JSSLOT_BLOCK_DEPTH + 1; i < JS_INITIAL_NSLOTS; ++i)
-        clone->fslots[i] = JSVAL_VOID;
-    clone->dslots = NULL;
+
+    JS_ASSERT(cx->runtime->emptyBlockScope->freeslot == JSSLOT_BLOCK_DEPTH + 1);
+    clone->map = cx->runtime->emptyBlockScope;
+    cx->runtime->emptyBlockScope->hold();
    JS_ASSERT(OBJ_IS_CLONED_BLOCK(clone));
    return clone;
}

@@ -3687,11 +3667,6 @@ js_XDRBlockObject(JSXDRState *xdr, JSObject **objp)
            return false;
        }
    }
-
-    if (xdr->mode == JSXDR_DECODE) {
-        /* Do as the parser does and make this block scope shareable. */
-        OBJ_SCOPE(obj)->object = NULL;
-    }
    return true;
}

@@ -3858,12 +3833,8 @@ js_InitClass(JSContext *cx, JSObject *obj, JSObject *parent_proto,
     *
     * All callers of JSObject::initSharingEmptyScope depend on this.
     */
-    {
-        JSScope *scope = OBJ_SCOPE(proto)->getEmptyScope(cx, clasp);
-        if (!scope)
-            goto bad;
-        scope->drop(cx, NULL);
-    }
+    if (!OBJ_SCOPE(proto)->ensureEmptyScope(cx, clasp))
+        goto bad;

    /* If this is a standard class, cache its prototype. */
    if (key != JSProto_Null && !js_SetClassObject(cx, obj, key, ctor))

@@ -3998,7 +3969,7 @@ js_EnsureReservedSlots(JSContext *cx, JSObject *obj, size_t nreserved)
        return false;

    JSScope *scope = OBJ_SCOPE(obj);
-    if (scope->owned()) {
+    if (!scope->isSharedEmpty()) {
#ifdef JS_THREADSAFE
        JS_ASSERT(scope->title.ownercx->thread == cx->thread);
#endif

@@ -4380,7 +4351,7 @@ PurgeProtoChain(JSContext *cx, JSObject *obj, jsid id)
         * the global shape. jstracer.cpp assumes that the global shape
         * never changes on trace, so we must deep-bail here.
         */
-        js_LeaveTrace(cx);
+        LeaveTrace(cx);
    }
    return JS_TRUE;
}

@@ -4501,7 +4472,7 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
    JSBool added;

    JS_ASSERT((defineHow & ~(JSDNP_CACHE_RESULT | JSDNP_DONT_PURGE | JSDNP_SET_METHOD)) == 0);
-    js_LeaveTraceIfGlobalObject(cx, obj);
+    LeaveTraceIfGlobalObject(cx, obj);

    /* Convert string indices to integers if appropriate. */
    id = js_CheckForStringIndex(id);

@@ -4750,12 +4721,12 @@ js_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, jsid id, uintN flags,
         * "too bad!" case.
         */
        scope = OBJ_SCOPE(obj2);
-        if (scope->owned())
+        if (!scope->isSharedEmpty())
            sprop = scope->lookup(id);
    }
    if (sprop) {
        JS_ASSERT(scope == OBJ_SCOPE(obj2));
-        JS_ASSERT(scope->owned());
+        JS_ASSERT(!scope->isSharedEmpty());
        obj = obj2;
    } else if (obj2 != obj) {
        if (OBJ_IS_NATIVE(obj2))

@@ -4775,7 +4746,7 @@ js_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, jsid id, uintN flags,
        JS_LOCK_OBJ(cx, obj);
        JS_ASSERT(OBJ_IS_NATIVE(obj));
        scope = OBJ_SCOPE(obj);
-        if (scope->owned())
+        if (!scope->isSharedEmpty())
            sprop = scope->lookup(id);
    }

@@ -4863,11 +4834,11 @@ js_FindPropertyHelper(JSContext *cx, jsid id, JSBool cacheResult,
                JS_ASSERT(OBJ_GET_CLASS(cx, pobj) == clasp);
                if (clasp == &js_BlockClass) {
                    /*
-                     * Block instances on the scope chain are immutable and
-                     * always share their scope with compile-time prototypes.
+                     * A block instance on the scope chain is immutable and
+                     * the compile-time prototype provides all its properties.
                     */
-                    JS_ASSERT(pobj == obj);
-                    JS_ASSERT(protoIndex == 0);
+                    JS_ASSERT(pobj == obj->getProto());
+                    JS_ASSERT(protoIndex == 1);
                } else {
                    /* Call and DeclEnvClass objects have no prototypes. */
                    JS_ASSERT(!OBJ_GET_PROTO(cx, obj));

@@ -5001,7 +4972,7 @@ JSBool
js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj,
             JSScopeProperty *sprop, uintN getHow, jsval *vp)
{
-    js_LeaveTraceIfGlobalObject(cx, pobj);
+    LeaveTraceIfGlobalObject(cx, pobj);

    JSScope *scope;
    uint32 slot;

@@ -5054,7 +5025,7 @@ JSBool
js_NativeSet(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, bool added,
             jsval *vp)
{
-    js_LeaveTraceIfGlobalObject(cx, obj);
+    LeaveTraceIfGlobalObject(cx, obj);

    JSScope *scope;
    uint32 slot;

@@ -5172,7 +5143,7 @@ js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN getHow,

                /* Do not warn about tests like (obj[prop] == undefined). */
                if (cx->resolveFlags == JSRESOLVE_INFER) {
-                    js_LeaveTrace(cx);
+                    LeaveTrace(cx);
                    pc += js_CodeSpec[op].length;
                    if (Detecting(cx, pc))
                        return JS_TRUE;

@@ -6614,7 +6585,7 @@ js_TraceObject(JSTracer *trc, JSObject *obj)

    JSContext *cx = trc->context;
    JSScope *scope = OBJ_SCOPE(obj);
-    if (scope->owned() && IS_GC_MARKING_TRACER(trc)) {
+    if (!scope->isSharedEmpty() && IS_GC_MARKING_TRACER(trc)) {
        /*
         * Check whether we should shrink the object's slots. Skip this check
         * if the scope is shared, since for Block objects and flat closures

@@ -6655,7 +6626,7 @@ js_TraceObject(JSTracer *trc, JSObject *obj)
     * above.
     */
    uint32 nslots = STOBJ_NSLOTS(obj);
-    if (scope->owned() && scope->freeslot < nslots)
+    if (!scope->isSharedEmpty() && scope->freeslot < nslots)
        nslots = scope->freeslot;
    JS_ASSERT(nslots >= JSSLOT_START(clasp));

@@ -6681,7 +6652,7 @@ js_Clear(JSContext *cx, JSObject *obj)
     */
    JS_LOCK_OBJ(cx, obj);
    scope = OBJ_SCOPE(obj);
-    if (scope->owned()) {
+    if (!scope->isSharedEmpty()) {
        /* Now that we're done using scope->lastProp/table, clear scope. */
        scope->clear(cx);

@@ -6774,7 +6745,7 @@ js_SetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, jsval v)
     * use STOBJ_NSLOTS(obj) rather than rely on freeslot.
     */
    JSScope *scope = OBJ_SCOPE(obj);
-    if (scope->owned() && slot >= scope->freeslot)
+    if (!scope->isSharedEmpty() && slot >= scope->freeslot)
        scope->freeslot = slot + 1;

    STOBJ_SET_SLOT(obj, slot, v);

@@ -7030,7 +7001,7 @@ js_DumpObject(JSObject *obj)

    fprintf(stderr, "slots:\n");
    reservedEnd = i + JSCLASS_RESERVED_SLOTS(clasp);
-    slots = (OBJ_IS_NATIVE(obj) && OBJ_SCOPE(obj)->owned())
+    slots = (OBJ_IS_NATIVE(obj) && !OBJ_SCOPE(obj)->isSharedEmpty())
            ? OBJ_SCOPE(obj)->freeslot
            : STOBJ_NSLOTS(obj);
    for (; i < slots; i++) {

@@ -7117,7 +7088,6 @@ js_DumpStackFrame(JSStackFrame *fp)
    fprintf(stderr, "  argv: %p (argc: %u)\n", (void *) fp->argv, (unsigned) fp->argc);
    MaybeDumpObject("callobj", fp->callobj);
    MaybeDumpObject("argsobj", JSVAL_TO_OBJECT(fp->argsobj));
-    MaybeDumpObject("varobj", fp->varobj);
    MaybeDumpValue("this", fp->thisv);
    fprintf(stderr, "  rval: ");
    dumpValue(fp->rval);

@@ -7153,8 +7123,6 @@ js_DumpStackFrame(JSStackFrame *fp)
    if (fp->blockChain)
        fprintf(stderr, "  blockChain: (JSObject *) %p\n", (void *) fp->blockChain);

-    if (fp->dormantNext)
-        fprintf(stderr, "  dormantNext: (JSStackFrame *) %p\n", (void *) fp->dormantNext);
    if (fp->displaySave)
        fprintf(stderr, "  displaySave: (JSStackFrame *) %p\n", (void *) fp->displaySave);

@@ -234,6 +234,10 @@ struct JSObject {
        return (JSClass *) (classword & ~JSSLOT_CLASS_MASK_BITS);
    }

+    bool hasClass(const JSClass *clasp) const {
+        return clasp == getClass();
+    }
+
    bool isDelegate() const {
        return (classword & jsuword(1)) != jsuword(0);
    }

@@ -406,6 +410,8 @@ struct JSObject {
    inline bool isFunction() const;
    inline bool isRegExp() const;
    inline bool isXML() const;
+
+    inline bool unbrand(JSContext *cx);
};

/* Compatibility macros. */

@@ -602,7 +608,7 @@ extern JSBool
js_DefineBlockVariable(JSContext *cx, JSObject *obj, jsid id, intN index);

#define OBJ_BLOCK_COUNT(cx,obj) \
-    (OBJ_SCOPE(obj)->entryCount)
+    (OBJ_SCOPE(OBJ_IS_CLONED_BLOCK(obj) ? obj->getProto() : obj)->entryCount)
#define OBJ_BLOCK_DEPTH(cx,obj) \
    JSVAL_TO_INT(STOBJ_GET_SLOT(obj, JSSLOT_BLOCK_DEPTH))
#define OBJ_SET_BLOCK_DEPTH(cx,obj,depth) \

@@ -64,4 +64,23 @@ JSObject::freeSlotsArray(JSContext *cx)
    cx->free(dslots - 1);
}

+inline bool
+JSObject::unbrand(JSContext *cx)
+{
+    if (OBJ_IS_NATIVE(this)) {
+        JS_LOCK_OBJ(cx, this);
+        JSScope *scope = OBJ_SCOPE(this);
+        if (scope->isSharedEmpty()) {
+            scope = js_GetMutableScope(cx, this);
+            if (!scope) {
+                JS_UNLOCK_OBJ(cx, this);
+                return false;
+            }
+        }
+        scope->setGeneric();
+        JS_UNLOCK_SCOPE(cx, scope);
+    }
+    return true;
+}
+
#endif /* jsobjinlines_h___ */
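Aside: note the blanket rename through jsobj.cpp — every scope->owned() test becomes !scope->isSharedEmpty(), asking "is this the runtime's shared empty scope?" rather than "does this object own its scope?". The unbrand() inline just added is the native half of the new JSOP_UNBRAND bytecode handled in the interpreter hunks below. A hedged sketch of the predicate relation the rename assumes:

    // Sketch only: the equivalence this merge relies on. An object-owned
    // scope is never the shared empty scope, so the tests are interchangeable
    // with inverted polarity.
    static bool OwnedEquivalent(JSScope *scope) {
        return !scope->isSharedEmpty();   // what scope->owned() used to answer
    }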
@ -2525,8 +2525,13 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
|
|||
break;
|
||||
|
||||
case SRC_HIDDEN:
|
||||
/* Hide this pop, it's from a goto in a with or for/in. */
|
||||
/*
|
||||
* Hide this pop. Don't adjust our stack depth model if
|
||||
* it's from a goto in a with or for/in.
|
||||
*/
|
||||
todo = -2;
|
||||
if (lastop == JSOP_UNBRAND)
|
||||
(void) POP_STR();
|
||||
break;
|
||||
|
||||
case SRC_DECL:
|
||||
|
|
|
@@ -601,5 +601,6 @@ OPDEF(JSOP_CONCATN, 234,"concatn", NULL, 3, -1, 1, 13, JOF_UINT16
*/
OPDEF(JSOP_SETMETHOD, 235,"setmethod", NULL, 3, 2, 1, 3, JOF_ATOM|JOF_PROP|JOF_SET|JOF_DETECTING)
OPDEF(JSOP_INITMETHOD, 236,"initmethod", NULL, 3, 2, 1, 3, JOF_ATOM|JOF_PROP|JOF_SET|JOF_DETECTING)
OPDEF(JSOP_UNBRAND, 237,"unbrand", NULL, 1, 1, 1, 0, JOF_BYTE)

OPDEF(JSOP_SHARPINIT, 237,"sharpinit", NULL, 3, 0, 0, 0, JOF_UINT16|JOF_SHARPSLOT)
OPDEF(JSOP_SHARPINIT, 238,"sharpinit", NULL, 3, 0, 0, 0, JOF_UINT16|JOF_SHARPSLOT)

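Note: opcode numbers are positional in jsopcode.tbl, so inserting JSOP_UNBRAND at 237 renumbers JSOP_SHARPINIT from 237 to 238 -- that is why the same OPDEF appears twice above with different values. Illustrative enum (names hypothetical):

    enum {
        OP_INITMETHOD = 236,
        OP_UNBRAND    = 237,   // newly inserted by this commit
        OP_SHARPINIT  = 238    // was 237 before the insertion
    };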
@@ -52,7 +52,7 @@
if (handler) {
#ifdef JS_TRACER
if (TRACE_RECORDER(cx))
js_AbortRecording(cx, "interrupt handler");
AbortRecording(cx, "interrupt handler");
#endif
switch (handler(cx, script, regs.pc, &rval,
cx->debugHooks->interruptHandlerData)) {

@@ -670,7 +670,7 @@ END_CASE(JSOP_PICK)

BEGIN_CASE(JSOP_SETCONST)
LOAD_ATOM(0);
obj = fp->varobj;
obj = fp->varobj(cx);
rval = FETCH_OPND(-1);
if (!obj->defineProperty(cx, ATOM_TO_JSID(atom), rval,
JS_PropertyStub, JS_PropertyStub,

@@ -1037,7 +1037,7 @@ BEGIN_CASE(JSOP_CONCATN)
if (imacro) {
argc = GET_ARGC(fp->imacpc);
if (!recording)
js_ConcatPostImacroStackCleanup(argc, regs, NULL);
ConcatPostImacroStackCleanup(argc, regs, NULL);
} else {
#endif /* JS_TRACER */
argc = GET_ARGC(regs.pc);

@@ -1441,7 +1441,8 @@ BEGIN_CASE(JSOP_GVARINC)
DO_OP();
}
slot = JSVAL_TO_INT(lval);
rval = OBJ_GET_SLOT(cx, fp->varobj, slot);
JS_ASSERT(fp->varobj(cx) == cx->activeCallStack()->getInitialVarObj());
rval = OBJ_GET_SLOT(cx, cx->activeCallStack()->getInitialVarObj(), slot);
if (JS_LIKELY(CAN_DO_FAST_INC_DEC(rval))) {
PUSH_OPND(rval + incr2);
rval += incr;

@@ -1453,7 +1454,7 @@ BEGIN_CASE(JSOP_GVARINC)
rval = regs.sp[-1];
--regs.sp;
}
OBJ_SET_SLOT(cx, fp->varobj, slot, rval);
OBJ_SET_SLOT(cx, fp->varobj(cx), slot, rval);
len = JSOP_INCGVAR_LENGTH; /* all gvar incops are same length */
JS_ASSERT(len == js_CodeSpec[op].length);
DO_NEXT_OP(len);

@@ -1685,6 +1686,14 @@ BEGIN_CASE(JSOP_CALLPROP)
}
END_CASE(JSOP_CALLPROP)

BEGIN_CASE(JSOP_UNBRAND)
JS_ASSERT(regs.sp - fp->slots >= 1);
lval = FETCH_OPND(-1);
obj = JSVAL_TO_OBJECT(lval);
if (!obj->unbrand(cx))
goto error;
END_CASE(JSOP_UNBRAND)

BEGIN_CASE(JSOP_SETNAME)
BEGIN_CASE(JSOP_SETPROP)
BEGIN_CASE(JSOP_SETMETHOD)

@@ -1755,7 +1764,7 @@ BEGIN_CASE(JSOP_SETMETHOD)

/* The cache entry doesn't apply. vshape mismatch. */
checkForAdd = false;
} else if (scope->owned()) {
} else if (!scope->isSharedEmpty()) {
if (sprop == scope->lastProperty() || scope->hasProperty(sprop)) {
fast_set_propcache_hit:
PCMETER(cache->pchits++);

@@ -2144,7 +2153,6 @@ BEGIN_CASE(JSOP_APPLY)
newsp += nframeslots;
newifp->frame.callobj = NULL;
newifp->frame.argsobj = NULL;
newifp->frame.varobj = NULL;
newifp->frame.script = script;
newifp->frame.fun = fun;
newifp->frame.argc = argc;

@@ -2154,7 +2162,6 @@ BEGIN_CASE(JSOP_APPLY)
newifp->frame.annotation = NULL;
newifp->frame.scopeChain = parent = OBJ_GET_PARENT(cx, obj);
newifp->frame.flags = flags;
newifp->frame.dormantNext = NULL;
newifp->frame.blockChain = NULL;
if (script->staticLevel < JS_DISPLAY_SIZE) {
JSStackFrame **disp = &cx->display[script->staticLevel];

@@ -2773,7 +2780,8 @@ BEGIN_CASE(JSOP_CALLGVAR)
op = (op == JSOP_GETGVAR) ? JSOP_NAME : JSOP_CALLNAME;
DO_OP();
}
obj = fp->varobj;
JS_ASSERT(fp->varobj(cx) == cx->activeCallStack()->getInitialVarObj());
obj = cx->activeCallStack()->getInitialVarObj();
slot = JSVAL_TO_INT(lval);
rval = OBJ_GET_SLOT(cx, obj, slot);
PUSH_OPND(rval);

@@ -2786,7 +2794,8 @@ BEGIN_CASE(JSOP_SETGVAR)
JS_ASSERT(slot < GlobalVarCount(fp));
METER_SLOT_OP(op, slot);
rval = FETCH_OPND(-1);
obj = fp->varobj;
JS_ASSERT(fp->varobj(cx) == cx->activeCallStack()->getInitialVarObj());
obj = cx->activeCallStack()->getInitialVarObj();
lval = fp->slots[slot];
if (JSVAL_IS_NULL(lval)) {
/*

@@ -2796,7 +2805,7 @@ BEGIN_CASE(JSOP_SETGVAR)
*/
#ifdef JS_TRACER
if (TRACE_RECORDER(cx))
js_AbortRecording(cx, "SETGVAR with NULL slot");
AbortRecording(cx, "SETGVAR with NULL slot");
#endif
LOAD_ATOM(0);
id = ATOM_TO_JSID(atom);

@@ -2825,7 +2834,7 @@ BEGIN_CASE(JSOP_DEFVAR)
* code below we need the absolute value.
*/
index += atoms - script->atomMap.vector;
obj = fp->varobj;
obj = fp->varobj(cx);
JS_ASSERT(obj->map->ops->defineProperty == js_DefineProperty);
attrs = JSPROP_ENUMERATE;
if (!(fp->flags & JSFRAME_EVAL))

@@ -2874,11 +2883,10 @@ BEGIN_CASE(JSOP_DEFVAR)
SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) &&
SPROP_HAS_STUB_SETTER(sprop)) {
/*
* Fast globals use frame variables to map the global
* name's atom index to the permanent fp->varobj slot
* number, tagged as a jsval. The atom index for the
* global's name literal is identical to its variable
* index.
* Fast globals use frame variables to map the global name's atom
* index to the permanent varobj slot number, tagged as a jsval.
* The atom index for the global's name literal is identical to its
* variable index.
*/
fp->slots[index] = INT_TO_JSVAL(sprop->slot);
}

@@ -2983,7 +2991,7 @@ BEGIN_CASE(JSOP_DEFFUN)
* current scope chain even for the case of function expression statements
* and functions defined by eval inside let or with blocks.
*/
parent = fp->varobj;
parent = fp->varobj(cx);
JS_ASSERT(parent);

/*

@@ -3031,7 +3039,7 @@ BEGIN_CASE(JSOP_DEFFUN)
: parent->defineProperty(cx, id, rval, getter, setter, attrs);

restore_scope:
/* Restore fp->scopeChain now that obj is defined in fp->varobj. */
/* Restore fp->scopeChain now that obj is defined in fp->callobj. */
fp->scopeChain = obj2;
if (!ok)
goto error;

@@ -3059,7 +3067,7 @@ BEGIN_CASE(JSOP_DEFFUN_DBGFC)
rval = JSVAL_VOID;
}

parent = fp->varobj;
parent = fp->varobj(cx);
JS_ASSERT(parent);

id = ATOM_TO_JSID(fun->atom);

@@ -3111,7 +3119,7 @@ BEGIN_CASE(JSOP_DEFLOCALFUN)
if (OBJ_GET_PARENT(cx, obj) != parent) {
#ifdef JS_TRACER
if (TRACE_RECORDER(cx))
js_AbortRecording(cx, "DEFLOCALFUN for closure");
AbortRecording(cx, "DEFLOCALFUN for closure");
#endif
obj = js_CloneFunctionObject(cx, fun, parent);
if (!obj)

@@ -3467,7 +3475,7 @@ BEGIN_CASE(JSOP_INITMETHOD)
}
JS_ASSERT(sprop2 == sprop);
} else {
JS_ASSERT(scope->owned());
JS_ASSERT(!scope->isSharedEmpty());
scope->extend(cx, sprop);
}

@@ -4086,7 +4094,7 @@ END_CASE(JSOP_LEAVEBLOCK)

BEGIN_CASE(JSOP_CALLBUILTIN)
#ifdef JS_TRACER
obj = js_GetBuiltinFunction(cx, GET_INDEX(regs.pc));
obj = GetBuiltinFunction(cx, GET_INDEX(regs.pc));
if (!obj)
goto error;
rval = FETCH_OPND(-1);

@@ -4101,7 +4109,7 @@ END_CASE(JSOP_CALLBUILTIN)
BEGIN_CASE(JSOP_GENERATOR)
ASSERT_NOT_THROWING(cx);
regs.pc += JSOP_GENERATOR_LENGTH;
obj = js_NewGenerator(cx, fp);
obj = js_NewGenerator(cx);
if (!obj)
goto error;
JS_ASSERT(!fp->callobj && !fp->argsobj);

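Note: the recurring change in this file is that JSStackFrame::varobj stops being a stored field and becomes a varobj(cx) accessor; for the global-variable opcodes the interpreter now asserts the accessor agrees with the active call stack's initial variables object. Condensed from the hunks above (CallStack/activeCallStack come from this commit's context changes):

    JSObject *varobj = fp->varobj(cx);
    JS_ASSERT(varobj == cx->activeCallStack()->getInitialVarObj());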
@@ -147,8 +147,8 @@ static JSMemberParser MemberExpr;
static JSPrimaryParser PrimaryExpr;
static JSParenParser ParenExpr;

static bool RecognizeDirectivePrologue(JSContext *cx, JSTokenStream *ts,
JSTreeContext *tc, JSParseNode *pn);
static bool
RecognizeDirectivePrologue(JSContext *cx, JSTreeContext *tc, JSParseNode *pn);

/*
* Insist that the next token be of type tt, or report errno and return null.

@@ -255,7 +255,7 @@ JSCompiler::newObjectBox(JSObject *obj)

/*
* We use JSContext.tempPool to allocate parsed objects and place them on
* a list in JSTokenStream to ensure GC safety. Thus the tempPool arenas
* a list in this JSCompiler to ensure GC safety. Thus the tempPool arenas
* containing the entries must be alive until we are done with scanning,
* parsing and code generation for the whole script or top-level function.
*/

@@ -280,7 +280,7 @@ JSCompiler::newFunctionBox(JSObject *obj, JSParseNode *fn, JSTreeContext *tc)

/*
* We use JSContext.tempPool to allocate parsed objects and place them on
* a list in JSTokenStream to ensure GC safety. Thus the tempPool arenas
* a list in this JSCompiler to ensure GC safety. Thus the tempPool arenas
* containing the entries must be alive until we are done with scanning,
* parsing and code generation for the whole script or top-level function.
*/

@@ -300,6 +300,7 @@ JSCompiler::newFunctionBox(JSObject *obj, JSParseNode *fn, JSTreeContext *tc)
++tc->compiler->functionCount;
funbox->kids = NULL;
funbox->parent = tc->funbox;
funbox->methods = NULL;
funbox->queued = false;
funbox->inLoop = false;
for (JSStmtInfo *stmt = tc->topStmt; stmt; stmt = stmt->down) {

@@ -313,6 +314,27 @@ JSCompiler::newFunctionBox(JSObject *obj, JSParseNode *fn, JSTreeContext *tc)
return funbox;
}

bool
JSFunctionBox::joinable() const
{
return FUN_NULL_CLOSURE((JSFunction *) object) &&
!(tcflags & (TCF_FUN_USES_ARGUMENTS | TCF_FUN_USES_OWN_NAME));
}

bool
JSFunctionBox::shouldUnbrand(uintN methods, uintN slowMethods) const
{
if (slowMethods != 0) {
for (const JSFunctionBox *funbox = this; funbox; funbox = funbox->parent) {
if (!(funbox->node->pn_dflags & PND_MODULEPAT))
return true;
if (funbox->inLoop)
return true;
}
}
return false;
}

void
JSCompiler::trace(JSTracer *trc)
{

@@ -913,7 +935,7 @@ JSCompiler::compileScript(JSContext *cx, JSObject *scopeChain, JSStackFrame *cal
JS_ASSERT(!cg.blockNode);

if (inDirectivePrologue)
inDirectivePrologue = RecognizeDirectivePrologue(cx, &jsc.tokenStream, &cg, pn);
inDirectivePrologue = RecognizeDirectivePrologue(cx, &cg, pn);

if (!js_FoldConstants(cx, pn, &cg))
goto out;

@@ -2037,15 +2059,48 @@ JSCompiler::setFunctionKinds(JSFunctionBox *funbox, uint32& tcflags)
#else
# define FUN_METER(x) ((void)0)
#endif
JSFunctionBox *parent = funbox->parent;

for (;;) {
JSParseNode *fn = funbox->node;
JSParseNode *pn = fn->pn_body;

if (funbox->kids)
if (funbox->kids) {
setFunctionKinds(funbox->kids, tcflags);

JSParseNode *pn = fn->pn_body;
/*
* We've unwound from recursively setting our kids' kinds, which
* also classifies enclosing functions holding upvars referenced in
* those descendants' bodies. So now we can check our "methods".
*
* Despecialize from branded method-identity-based shape to sprop-
* or slot-based shape if this function smells like a constructor
* and too many of its methods are *not* joinable null closures
* (i.e., they have one or more upvars fetched via the display).
*/
JSParseNode *pn2 = pn;
if (PN_TYPE(pn2) == TOK_UPVARS)
pn2 = pn2->pn_tree;
if (PN_TYPE(pn2) == TOK_ARGSBODY)
pn2 = pn2->last();

#if JS_HAS_EXPR_CLOSURES
if (PN_TYPE(pn2) == TOK_LC)
#endif
if (!(funbox->tcflags & TCF_RETURN_EXPR)) {
uintN methodSets = 0, slowMethodSets = 0;

for (JSParseNode *method = funbox->methods; method; method = method->pn_link) {
JS_ASSERT(PN_OP(method) == JSOP_LAMBDA || PN_OP(method) == JSOP_LAMBDA_FC);
++methodSets;
if (!method->pn_funbox->joinable())
++slowMethodSets;
}

if (funbox->shouldUnbrand(methodSets, slowMethodSets))
funbox->tcflags |= TCF_FUN_UNBRAND_THIS;
}
}

JSFunction *fun = (JSFunction *) funbox->object;

FUN_METER(allfun);

@@ -2333,52 +2388,47 @@ JSCompiler::setFunctionKinds(JSFunctionBox *funbox, uint32& tcflags)
}
}

if (FUN_KIND(fun) == JSFUN_INTERPRETED) {
if (pn->pn_type != TOK_UPVARS) {
if (parent)
parent->tcflags |= TCF_FUN_HEAVYWEIGHT;
} else {
JSAtomList upvars(pn->pn_names);
JS_ASSERT(upvars.count != 0);
if (FUN_KIND(fun) == JSFUN_INTERPRETED && pn->pn_type == TOK_UPVARS) {
/*
* One or more upvars cannot be safely snapshot into a flat
* closure's dslot (see JSOP_GETDSLOT), so we loop again over
* all upvars, and for each non-free upvar, ensure that its
* containing function has been flagged as heavyweight.
*
* The emitter must see TCF_FUN_HEAVYWEIGHT accurately before
* generating any code for a tree of nested functions.
*/
JSAtomList upvars(pn->pn_names);
JS_ASSERT(upvars.count != 0);

JSAtomListIterator iter(&upvars);
JSAtomListElement *ale;
JSAtomListIterator iter(&upvars);
JSAtomListElement *ale;

/*
* One or more upvars cannot be safely snapshot into a flat
* closure's dslot (see JSOP_GETDSLOT), so we loop again over
* all upvars, and for each non-free upvar, ensure that its
* containing function has been flagged as heavyweight.
*
* The emitter must see TCF_FUN_HEAVYWEIGHT accurately before
* generating any code for a tree of nested functions.
*/
while ((ale = iter()) != NULL) {
JSDefinition *lexdep = ALE_DEFN(ale)->resolve();
while ((ale = iter()) != NULL) {
JSDefinition *lexdep = ALE_DEFN(ale)->resolve();

if (!lexdep->isFreeVar()) {
JSFunctionBox *afunbox = funbox->parent;
uintN lexdepLevel = lexdep->frameLevel();
if (!lexdep->isFreeVar()) {
JSFunctionBox *afunbox = funbox->parent;
uintN lexdepLevel = lexdep->frameLevel();

while (afunbox) {
/*
* NB: afunbox->level is the static level of
* the definition or expression of the function
* parsed into afunbox, not the static level of
* its body. Therefore we must add 1 to match
* lexdep's level to find the afunbox whose
* body contains the lexdep definition.
*/
if (afunbox->level + 1U == lexdepLevel ||
(lexdepLevel == 0 && lexdep->isLet())) {
afunbox->tcflags |= TCF_FUN_HEAVYWEIGHT;
break;
}
afunbox = afunbox->parent;
while (afunbox) {
/*
* NB: afunbox->level is the static level of
* the definition or expression of the function
* parsed into afunbox, not the static level of
* its body. Therefore we must add 1 to match
* lexdep's level to find the afunbox whose
* body contains the lexdep definition.
*/
if (afunbox->level + 1U == lexdepLevel ||
(lexdepLevel == 0 && lexdep->isLet())) {
afunbox->tcflags |= TCF_FUN_HEAVYWEIGHT;
break;
}
if (!afunbox && (tcflags & TCF_IN_FUNCTION))
tcflags |= TCF_FUN_HEAVYWEIGHT;
afunbox = afunbox->parent;
}
if (!afunbox && (tcflags & TCF_IN_FUNCTION))
tcflags |= TCF_FUN_HEAVYWEIGHT;
}
}
}

@@ -2386,8 +2436,8 @@ JSCompiler::setFunctionKinds(JSFunctionBox *funbox, uint32& tcflags)
funbox = funbox->siblings;
if (!funbox)
break;
JS_ASSERT(funbox->parent == parent);
}

#undef FUN_METER
}

@@ -2440,7 +2490,8 @@ LeaveFunction(JSParseNode *fn, JSTreeContext *funtc, JSTreeContext *tc,
{
tc->blockidGen = funtc->blockidGen;

fn->pn_funbox->tcflags |= funtc->flags & (TCF_FUN_FLAGS | TCF_COMPILE_N_GO);
JSFunctionBox *funbox = fn->pn_funbox;
funbox->tcflags |= funtc->flags & (TCF_FUN_FLAGS | TCF_COMPILE_N_GO | TCF_RETURN_EXPR);

fn->pn_dflags |= PND_INITIALIZED;
JS_ASSERT_IF(tc->atTopLevel() && lambda == 0 && funAtom,

@@ -2475,12 +2526,12 @@ LeaveFunction(JSParseNode *fn, JSTreeContext *funtc, JSTreeContext *tc,
* than to call itself, flag this function specially.
*/
if (dn->isFunArg())
fn->pn_funbox->tcflags |= TCF_FUN_USES_OWN_NAME;
funbox->tcflags |= TCF_FUN_USES_OWN_NAME;
foundCallee = 1;
continue;
}

if (!(fn->pn_funbox->tcflags & TCF_FUN_SETS_OUTER_NAME) &&
if (!(funbox->tcflags & TCF_FUN_SETS_OUTER_NAME) &&
dn->isAssigned()) {
/*
* Make sure we do not fail to set TCF_FUN_SETS_OUTER_NAME if

@@ -2490,7 +2541,7 @@ LeaveFunction(JSParseNode *fn, JSTreeContext *funtc, JSTreeContext *tc,
*/
for (JSParseNode *pnu = dn->dn_uses; pnu; pnu = pnu->pn_link) {
if (pnu->isAssigned() && pnu->pn_blockid >= funtc->bodyid) {
fn->pn_funbox->tcflags |= TCF_FUN_SETS_OUTER_NAME;
funbox->tcflags |= TCF_FUN_SETS_OUTER_NAME;
break;
}
}

@@ -3030,8 +3081,7 @@ FunctionExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
* if it can't possibly be a directive, now or in the future.
*/
static bool
RecognizeDirectivePrologue(JSContext *cx, JSTokenStream *ts,
JSTreeContext *tc, JSParseNode *pn)
RecognizeDirectivePrologue(JSContext *cx, JSTreeContext *tc, JSParseNode *pn)
{
if (!pn->isDirectivePrologueMember())
return false;

@@ -3039,7 +3089,7 @@ RecognizeDirectivePrologue(JSContext *cx, JSTokenStream *ts,
JSAtom *directive = pn->pn_kid->pn_atom;
if (directive == cx->runtime->atomState.useStrictAtom) {
tc->flags |= TCF_STRICT_MODE_CODE;
ts->flags |= TSF_STRICT_MODE_CODE;
tc->compiler->tokenStream.flags |= TSF_STRICT_MODE_CODE;
}
}
return true;

@@ -3087,15 +3137,8 @@ Statements(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
return NULL;
}

if (inDirectivePrologue) {
if (RecognizeDirectivePrologue(cx, ts, tc, pn2)) {
/* A Directive Prologue member is dead code. Omit it from the statement list. */
RecycleTree(pn2, tc);
continue;
} else {
inDirectivePrologue = false;
}
}
if (inDirectivePrologue)
inDirectivePrologue = RecognizeDirectivePrologue(cx, tc, pn2);

if (pn2->pn_type == TOK_FUNCTION) {
/*

@@ -3270,12 +3313,6 @@ PopStatement(JSTreeContext *tc)
continue;
tc->decls.remove(tc->compiler, atom);
}

/*
* The block scope will not be modified again. It may be shared. Clear
* scope->object to make scope->owned() false.
*/
scope->object = NULL;
}
js_PopStatement(tc);
}

@@ -5701,18 +5738,35 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
pn->pn_pos = pn2->pn_pos;
pn->pn_kid = pn2;

/*
* Specialize JSOP_SETPROP into JSOP_SETMETHOD to defer or avoid null
* closure cloning. Do this here rather than in AssignExpr as only now
* do we know that the uncloned (unjoined in ES3 terms) function object
* result of the assignment expression can't escape.
*/
if (PN_TYPE(pn2) == TOK_ASSIGN && PN_OP(pn2) == JSOP_NOP &&
PN_OP(pn2->pn_left) == JSOP_SETPROP &&
PN_OP(pn2->pn_right) == JSOP_LAMBDA &&
!(pn2->pn_right->pn_funbox->tcflags
& (TCF_FUN_USES_ARGUMENTS | TCF_FUN_USES_OWN_NAME))) {
pn2->pn_left->pn_op = JSOP_SETMETHOD;
switch (PN_TYPE(pn2)) {
case TOK_LP:
/*
* Flag lambdas immediately applied as statements as instances of
* the JS "module pattern". See CheckForImmediatelyAppliedLambda.
*/
if (PN_TYPE(pn2->pn_head) == TOK_FUNCTION &&
!pn2->pn_head->pn_funbox->node->isFunArg()) {
pn2->pn_head->pn_funbox->node->pn_dflags |= PND_MODULEPAT;
}
break;
case TOK_ASSIGN:
/*
* Keep track of all apparent methods created by assignments such
* as this.foo = function (...) {...} in a function that could end
* up a constructor function. See JSCompiler::setFunctionKinds.
*/
if (tc->funbox &&
PN_OP(pn2) == JSOP_NOP &&
PN_OP(pn2->pn_left) == JSOP_SETPROP &&
PN_OP(pn2->pn_left->pn_expr) == JSOP_THIS &&
PN_OP(pn2->pn_right) == JSOP_LAMBDA) {
JS_ASSERT(!pn2->pn_defn);
JS_ASSERT(!pn2->pn_used);
pn2->pn_right->pn_link = tc->funbox->methods;
tc->funbox->methods = pn2->pn_right;
}
break;
default:;
}
break;
}

@@ -6990,8 +7044,6 @@ ArgumentList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
static JSParseNode *
CheckForImmediatelyAppliedLambda(JSParseNode *pn)
{
while (pn->pn_type == TOK_RP)
pn = pn->pn_kid;
if (pn->pn_type == TOK_FUNCTION) {
JS_ASSERT(pn->pn_arity == PN_FUNC);

@@ -7177,7 +7229,6 @@ MemberExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
return NULL;
pn2->pn_op = JSOP_CALL;

/* CheckForImmediatelyAppliedLambda skips useless TOK_RP nodes. */
pn = CheckForImmediatelyAppliedLambda(pn);
if (pn->pn_op == JSOP_NAME) {
if (pn->pn_atom == cx->runtime->atomState.evalAtom) {

@@ -8177,15 +8228,19 @@ PrimaryExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
}

tt = js_GetToken(cx, ts);
op = JSOP_INITPROP;
#if JS_HAS_GETTER_SETTER
if (tt == TOK_NAME) {
tt = CheckGetterOrSetter(cx, ts, TOK_COLON);
if (tt == TOK_ERROR)
return NULL;
op = CURRENT_TOKEN(ts).t_op;
}
#endif

if (tt != TOK_COLON) {
if (tt == TOK_COLON) {
pnval = AssignExpr(cx, ts, tc);
} else {
#if JS_HAS_DESTRUCTURING_SHORTHAND
if (tt != TOK_COMMA && tt != TOK_RC) {
#endif

@@ -8206,11 +8261,7 @@ PrimaryExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
pnval->pn_arity = PN_NAME;
InitNameNodeCommon(pnval, tc);
}
op = JSOP_NOP;
#endif
} else {
op = CURRENT_TOKEN(ts).t_op;
pnval = AssignExpr(cx, ts, tc);
}

pn2 = NewBinary(TOK_COLON, op, pn3, pnval, tc);

@@ -8228,13 +8279,13 @@ PrimaryExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
*/
if (tc->needStrictChecks()) {
unsigned attributesMask;
if (op == JSOP_NOP)
if (op == JSOP_INITPROP) {
attributesMask = JSPROP_GETTER | JSPROP_SETTER;
else if (op == JSOP_GETTER)
} else if (op == JSOP_GETTER) {
attributesMask = JSPROP_GETTER;
else if (op == JSOP_SETTER)
} else if (op == JSOP_SETTER) {
attributesMask = JSPROP_SETTER;
else {
} else {
JS_NOT_REACHED("bad opcode in object initializer");
attributesMask = 0;
}

@@ -8978,8 +9029,13 @@ js_FoldConstants(JSContext *cx, JSParseNode *pn, JSTreeContext *tc, bool inCond)
/* Propagate inCond through logical connectives. */
bool cond = inCond && (pn->pn_type == TOK_OR || pn->pn_type == TOK_AND);

/* Don't fold a parenthesized call expression. See bug 537673. */
pn1 = pn2 = pn->pn_head;
if ((pn->pn_type == TOK_LP || pn->pn_type == TOK_NEW) && pn2->pn_parens)
pn2 = pn2->pn_next;

/* Save the list head in pn1 for later use. */
for (pn1 = pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
for (; pn2; pn2 = pn2->pn_next) {
if (!js_FoldConstants(cx, pn2, tc, cond))
return JS_FALSE;
}

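Note: the parser pieces above cooperate -- Statement() threads each this.foo = function () {...} lambda onto funbox->methods, and setFunctionKinds() later decides whether to unbrand. A condensed, illustrative restatement of that decision loop:

    uintN methodSets = 0, slowMethodSets = 0;
    for (JSParseNode *m = funbox->methods; m; m = m->pn_link) {
        ++methodSets;
        if (!m->pn_funbox->joinable())      // not a shareable null closure
            ++slowMethodSets;
    }
    if (funbox->shouldUnbrand(methodSets, slowMethodSets))
        funbox->tcflags |= TCF_FUN_UNBRAND_THIS;    // emitter will emit JSOP_UNBRAND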
@@ -80,7 +80,7 @@ JS_BEGIN_EXTERN_C
* defined in enclosing scopes, or ultimately not
* defined (free variables, either global property
* references or reference errors).
* pn_argsbody: TOK_ARGSBODY or TOK_LC node
* pn_tree: TOK_ARGSBODY or TOK_LC node
*
* <Statements>
* TOK_LC list pn_head: list of pn_count statements

@@ -301,7 +301,9 @@ struct JSParseNode {
JSTokenPos pn_pos; /* two 16-bit pairs here, for 64 bits */
int32 pn_offset; /* first generated bytecode offset */
JSParseNode *pn_next; /* intrinsic link in parent PN_LIST */
JSParseNode *pn_link; /* def/use link (alignment freebie) */
JSParseNode *pn_link; /* def/use link (alignment freebie);
also links JSFunctionBox::methods
lists of would-be |this| methods */
union {
struct { /* list of next-linked nodes */
JSParseNode *head; /* first node in list */

@@ -417,6 +419,9 @@ struct JSParseNode {
#define PND_PLACEHOLDER 0x80 /* placeholder definition for lexdep */
#define PND_FUNARG 0x100 /* downward or upward funarg usage */
#define PND_BOUND 0x200 /* bound to a stack or global slot */
#define PND_MODULEPAT 0x400 /* "module pattern", i.e., a lambda
that is immediately applied and the
whole of an expression statement */

/* Flags to propagate from uses to definition. */
#define PND_USE2DEF_FLAGS (PND_ASSIGNED | PND_FUNARG)

@@ -486,8 +491,8 @@ struct JSParseNode {
* we'll need additional flags that we can test here.
*/
bool isDirectivePrologueMember() const {
if (PN_TYPE(this) == TOK_SEMI &&
pn_arity == PN_UNARY) {
if (PN_TYPE(this) == TOK_SEMI) {
JS_ASSERT(pn_arity == PN_UNARY);
JSParseNode *kid = pn_kid;
return kid && PN_TYPE(kid) == TOK_STRING && !kid->pn_parens;
}

@@ -795,10 +800,29 @@ struct JSFunctionBox : public JSObjectBox
JSFunctionBox *siblings;
JSFunctionBox *kids;
JSFunctionBox *parent;
JSParseNode *methods; /* would-be methods set on this;
these nodes are linked via
pn_link, since lambdas are
neither definitions nor uses
of a binding */
uint32 queued:1,
inLoop:1, /* in a loop in parent function */
level:JSFB_LEVEL_BITS;
uint32 tcflags;

bool joinable() const;

/*
* Unbrand an object being initialized or constructed if any method cannot
* be joined to one compiler-created null closure shared among N different
* closure environments.
*
* We despecialize from caching function objects, caching slots or sprops
* instead, because an unbranded object may still have joined methods (for
* which sprop->isMethod), since js_FillPropertyCache gives precedence to
* joined methods over branded methods.
*/
bool shouldUnbrand(uintN methods, uintN slowMethods) const;
};

struct JSFunctionBoxQueue {

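Note: the widened pn_link comment above matches the list threading added in jsparse.cpp -- a method lambda is neither a definition nor a use of a binding, so its def/use link is free to chain JSFunctionBox::methods:

    pn2->pn_right->pn_link = tc->funbox->methods;   // push lambda onto the methods list
    tc->funbox->methods = pn2->pn_right;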
@@ -101,6 +101,7 @@ typedef struct JSParseNode JSParseNode;
typedef struct JSPropCacheEntry JSPropCacheEntry;
typedef struct JSProperty JSProperty;
typedef struct JSSharpObjectMap JSSharpObjectMap;
typedef struct JSEmptyScope JSEmptyScope;
typedef struct JSTempValueRooter JSTempValueRooter;
typedef struct JSThread JSThread;
typedef struct JSThreadData JSThreadData;

@@ -108,7 +109,6 @@ typedef struct JSToken JSToken;
typedef struct JSTokenPos JSTokenPos;
typedef struct JSTokenPtr JSTokenPtr;
typedef struct JSTokenStream JSTokenStream;
typedef struct JSTraceMonitor JSTraceMonitor;
typedef struct JSTreeContext JSTreeContext;
typedef struct JSTryNote JSTryNote;
typedef struct JSWeakRoots JSWeakRoots;

@@ -146,6 +146,10 @@ extern "C++" {

namespace js {

class TraceRecorder;
class TraceMonitor;
class CallStack;

class ContextAllocPolicy;
class SystemAllocPolicy;

@@ -128,10 +128,10 @@ AssertDownFrameIsConsistent(JSContext* cx, VMSideExit* anchor, FrameInfo* fi)
JS_ASSERT(anchor->recursive_down->callerHeight == fi->callerHeight);

unsigned downPostSlots = fi->callerHeight;
JSTraceType* typeMap = fi->get_typemap();
TraceType* typeMap = fi->get_typemap();

js_CaptureStackTypes(cx, 1, typeMap);
const JSTraceType* m1 = anchor->recursive_down->get_typemap();
CaptureStackTypes(cx, 1, typeMap);
const TraceType* m1 = anchor->recursive_down->get_typemap();
for (unsigned i = 0; i < downPostSlots; i++) {
if (m1[i] == typeMap[i])
continue;

@@ -154,15 +154,15 @@ TraceRecorder::downSnapshot(FrameInfo* downFrame)
unsigned downPostSlots = downFrame->callerHeight;
unsigned ngslots = tree->globalSlots->length();
unsigned exitTypeMapLen = downPostSlots + 1 + ngslots;
JSTraceType* exitTypeMap = (JSTraceType*)alloca(sizeof(JSTraceType) * exitTypeMapLen);
JSTraceType* typeMap = downFrame->get_typemap();
TraceType* exitTypeMap = (TraceType*)alloca(sizeof(TraceType) * exitTypeMapLen);
TraceType* typeMap = downFrame->get_typemap();
for (unsigned i = 0; i < downPostSlots; i++)
exitTypeMap[i] = typeMap[i];
exitTypeMap[downPostSlots] = determineSlotType(&stackval(-1));
determineGlobalTypes(&exitTypeMap[downPostSlots + 1]);

VMSideExit* exit = (VMSideExit*)
traceMonitor->traceAlloc->alloc(sizeof(VMSideExit) + sizeof(JSTraceType) * exitTypeMapLen);
traceMonitor->traceAlloc->alloc(sizeof(VMSideExit) + sizeof(TraceType) * exitTypeMapLen);

memset(exit, 0, sizeof(VMSideExit));
exit->from = fragment;

@@ -180,7 +180,7 @@ TraceRecorder::downSnapshot(FrameInfo* downFrame)
exit->rp_adj = exit->calldepth * sizeof(FrameInfo*);
exit->nativeCalleeWord = 0;
exit->lookupFlags = js_InferFlags(cx, 0);
memcpy(exit->fullTypeMap(), exitTypeMap, sizeof(JSTraceType) * exitTypeMapLen);
memcpy(exit->fullTypeMap(), exitTypeMap, sizeof(TraceType) * exitTypeMapLen);
#if defined JS_JIT_SPEW
TreevisLogExit(cx, exit);
#endif

@@ -225,7 +225,7 @@ TraceRecorder::upRecursion()
*/
unsigned totalSlots = NativeStackSlots(cx, 1);
unsigned downPostSlots = totalSlots - NativeStackSlots(cx, 0);
FrameInfo* fi = (FrameInfo*)alloca(sizeof(FrameInfo) + totalSlots * sizeof(JSTraceType));
FrameInfo* fi = (FrameInfo*)alloca(sizeof(FrameInfo) + totalSlots * sizeof(TraceType));
fi->block = cx->fp->blockChain;
fi->pc = (jsbytecode*)return_pc;
fi->imacpc = NULL;

@@ -255,11 +255,11 @@ TraceRecorder::upRecursion()
* Case 1: Guess that down-recursion has to started back out, infer types
* from the down frame.
*/
js_CaptureStackTypes(cx, 1, fi->get_typemap());
CaptureStackTypes(cx, 1, fi->get_typemap());
} else {
/* Case 2: Guess that up-recursion is backing out, infer types from our Tree. */
JS_ASSERT(tree->nStackTypes == downPostSlots + 1);
JSTraceType* typeMap = fi->get_typemap();
TraceType* typeMap = fi->get_typemap();
for (unsigned i = 0; i < downPostSlots; i++)
typeMap[i] = tree->typeMap[i];
}

@@ -300,11 +300,11 @@ TraceRecorder::upRecursion()
get(&stackval(-1)) :
NULL;
JS_ASSERT(rval_ins != NULL);
JSTraceType returnType = exit->stackTypeMap()[downPostSlots];
TraceType returnType = exit->stackTypeMap()[downPostSlots];
if (returnType == TT_INT32) {
JS_ASSERT(determineSlotType(&stackval(-1)) == TT_INT32);
JS_ASSERT(isPromoteInt(rval_ins));
rval_ins = ::demote(lir, rval_ins);
rval_ins = demote(lir, rval_ins);
}

UpRecursiveSlotMap slotMap(*this, downPostSlots, rval_ins);

@@ -329,7 +329,7 @@ class SlurpInfo
{
public:
unsigned curSlot;
JSTraceType* typeMap;
TraceType* typeMap;
VMSideExit* exit;
unsigned slurpFailSlot;
};

@@ -428,7 +428,7 @@ TraceRecorder::slurpDownFrames(jsbytecode* return_pc)
unsigned safeSlots = NativeStackSlots(cx, frameDepth) + 1 + numGlobalSlots;
jsbytecode* recursive_pc = return_pc + JSOP_CALL_LENGTH;
VMSideExit* exit = (VMSideExit*)
traceMonitor->traceAlloc->alloc(sizeof(VMSideExit) + sizeof(JSTraceType) * safeSlots);
traceMonitor->traceAlloc->alloc(sizeof(VMSideExit) + sizeof(TraceType) * safeSlots);
memset(exit, 0, sizeof(VMSideExit));
exit->pc = (jsbytecode*)recursive_pc;
exit->from = fragment;

@@ -442,10 +442,10 @@ TraceRecorder::slurpDownFrames(jsbytecode* return_pc)
* Build the exit typemap. This may capture extra types, but they are
* thrown away.
*/
JSTraceType* typeMap = exit->stackTypeMap();
TraceType* typeMap = exit->stackTypeMap();
jsbytecode* oldpc = cx->fp->regs->pc;
cx->fp->regs->pc = exit->pc;
js_CaptureStackTypes(cx, frameDepth, typeMap);
CaptureStackTypes(cx, frameDepth, typeMap);
cx->fp->regs->pc = oldpc;
if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT)
typeMap[downPostSlots] = determineSlotType(&stackval(-1));

@@ -466,13 +466,13 @@ TraceRecorder::slurpDownFrames(jsbytecode* return_pc)
* grabbed safely.
*/
LIns* rval_ins;
JSTraceType returnType = exit->stackTypeMap()[downPostSlots];
TraceType returnType = exit->stackTypeMap()[downPostSlots];
if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) {
rval_ins = get(&stackval(-1));
if (returnType == TT_INT32) {
JS_ASSERT(determineSlotType(&stackval(-1)) == TT_INT32);
JS_ASSERT(isPromoteInt(rval_ins));
rval_ins = ::demote(lir, rval_ins);
rval_ins = demote(lir, rval_ins);
}
/*
* The return value must be written out early, before slurping can fail,

@@ -524,6 +524,10 @@ TraceRecorder::slurpDownFrames(jsbytecode* return_pc)
slurpSlot(addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, argsobj)), "argsobj"),
&fp->argsobj,
&info);
/* scopeChain */
slurpSlot(addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, scopeChain)), "scopeChain"),
(jsval*) &fp->scopeChain,
&info);
/* vars */
LIns* slots_ins = addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, slots)),
"slots");

@@ -596,7 +600,7 @@ TraceRecorder::downRecursion()

/* Adjust the stack by the budget the down-frame needs. */
int slots = NativeStackSlots(cx, 1) - NativeStackSlots(cx, 0);
JS_ASSERT(unsigned(slots) == NativeStackSlots(cx, 1) - fp->argc - 2 - fp->script->nfixed - 1);
JS_ASSERT(unsigned(slots) == NativeStackSlots(cx, 1) - fp->argc - 2 - fp->script->nfixed - 2);

/* Guard that there is enough stack space. */
JS_ASSERT(tree->maxNativeStackSlots >= tree->nativeStackBase / sizeof(double));

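Note: this file's changes are mechanical renames (JSTraceType to TraceType, js_CaptureStackTypes to CaptureStackTypes, and dropping the :: qualifier on demote); the allocation idiom itself is unchanged, e.g. from downSnapshot above:

    unsigned exitTypeMapLen = downPostSlots + 1 + ngslots;
    TraceType *exitTypeMap = (TraceType *) alloca(sizeof(TraceType) * exitTypeMapLen);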
@@ -74,6 +74,8 @@ using namespace avmplus;
using namespace nanojit;
#endif

using namespace js;

typedef enum REOp {
#define REOP_DEF(opcode, name) opcode,
#include "jsreops.tbl"

@@ -1999,6 +2001,8 @@ CompileRegExpToAST(JSContext* cx, JSTokenStream* ts,
#ifdef JS_TRACER
typedef js::Vector<LIns *, 4, js::ContextAllocPolicy> LInsList;

namespace js {

struct REFragment : public nanojit::Fragment
{
REFragment(const void* _ip verbose_only(, uint32_t profFragID))

@@ -2006,12 +2010,14 @@ struct REFragment : public nanojit::Fragment
{}
};

} /* namespace js */

/* Return the cached fragment for the given regexp, or create one. */
static Fragment*
LookupNativeRegExp(JSContext* cx, uint16 re_flags,
const jschar* re_chars, size_t re_length)
{
JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
TraceMonitor *tm = &JS_TRACE_MONITOR(cx);
VMAllocator &alloc = *tm->dataAlloc;
REHashMap &table = *tm->reFragments;

@@ -2020,7 +2026,7 @@ LookupNativeRegExp(JSContext* cx, uint16 re_flags,

if (!frag) {
verbose_only(
uint32_t profFragID = (js_LogController.lcbits & LC_FragProfile)
uint32_t profFragID = (LogController.lcbits & LC_FragProfile)
? (++(tm->lastFragID)) : 0;
)
frag = new (alloc) REFragment(0 verbose_only(, profFragID));

@@ -2290,7 +2296,7 @@ class RegExpNativeCompiler {
Fragment* fragment;
LirWriter* lir;
#ifdef DEBUG
LirWriter* sanity_filter;
LirWriter* validate_writer;
#endif
#ifdef NJ_VERBOSE
LirWriter* verbose_filter;

@@ -2703,7 +2709,7 @@ class RegExpNativeCompiler {
LIns *belowBr = lir->insBranch(LIR_jt, belowCnd, NULL);
LIns *aboveCnd = lir->ins2(LIR_ugt, chr, lir->insImm(0x200A));
LIns *aboveBr = lir->insBranch(LIR_jt, aboveCnd, NULL);
LIns *intervalMatchBr = lir->ins2(LIR_j, NULL, NULL);
LIns *intervalMatchBr = lir->insBranch(LIR_j, NULL, NULL);

/* Handle [0xA0,0x2000). */
LIns *belowLbl = lir->ins0(LIR_label);

@@ -2714,7 +2720,7 @@ class RegExpNativeCompiler {
LIns *eq2Br = lir->insBranch(LIR_jt, eq2Cnd, NULL);
LIns *eq3Cnd = lir->ins2(LIR_eq, chr, lir->insImm(0x180E));
LIns *eq3Br = lir->insBranch(LIR_jt, eq3Cnd, NULL);
LIns *belowMissBr = lir->ins2(LIR_j, NULL, NULL);
LIns *belowMissBr = lir->insBranch(LIR_j, NULL, NULL);

/* Handle (0x200A, max). */
LIns *aboveLbl = lir->ins0(LIR_label);

@@ -2729,7 +2735,7 @@ class RegExpNativeCompiler {
LIns *eq7Br = lir->insBranch(LIR_jt, eq7Cnd, NULL);
LIns *eq8Cnd = lir->ins2(LIR_eq, chr, lir->insImm(0x3000));
LIns *eq8Br = lir->insBranch(LIR_jt, eq8Cnd, NULL);
LIns *aboveMissBr = lir->ins2(LIR_j, NULL, NULL);
LIns *aboveMissBr = lir->insBranch(LIR_j, NULL, NULL);

/* Handle [0,0x20]. */
LIns *tableLbl = lir->ins0(LIR_label);

@@ -2743,7 +2749,7 @@ class RegExpNativeCompiler {
asciiMissBr->setTarget(missLbl);
belowMissBr->setTarget(missLbl);
aboveMissBr->setTarget(missLbl);
LIns *missBr = lir->ins2(LIR_j, NULL, NULL);
LIns *missBr = lir->insBranch(LIR_j, NULL, NULL);
if (node->op == REOP_SPACE) {
if (!fails.append(missBr))
return NULL;

@@ -2758,7 +2764,7 @@ class RegExpNativeCompiler {
eq5Br->setTarget(matchLbl); eq6Br->setTarget(matchLbl);
eq7Br->setTarget(matchLbl); eq8Br->setTarget(matchLbl);
if (node->op == REOP_NONSPACE) {
LIns *matchBr = lir->ins2(LIR_j, NULL, NULL);
LIns *matchBr = lir->insBranch(LIR_j, NULL, NULL);
if (!fails.append(matchBr))
return NULL;
}

@@ -2829,7 +2835,7 @@ class RegExpNativeCompiler {
*/
lir->insStorei(branchEnd, state,
offsetof(REGlobalData, stateStack));
LIns *leftSuccess = lir->ins2(LIR_j, NULL, NULL);
LIns *leftSuccess = lir->insBranch(LIR_j, NULL, NULL);

/* Try right branch. */
targetCurrentPoint(kidFails);

@@ -2946,7 +2952,7 @@ class RegExpNativeCompiler {

/* End iteration: store loop variables, increment, jump */
lir->insStorei(iterEnd, state, offsetof(REGlobalData, stateStack));
lir->ins2(LIR_j, NULL, loopTop);
lir->insBranch(LIR_j, NULL, loopTop);

/*
* Using '+' as branch, the intended control flow is:

@@ -2974,9 +2980,9 @@ class RegExpNativeCompiler {
* conditionally executed, and we (currently) don't have real phi
* nodes, we need only consider insns defined in A and used in E.
*/
lir->ins1(LIR_live, state);
lir->ins1(LIR_live, cpend);
lir->ins1(LIR_live, start);
lir->ins1(LIR_plive, state);
lir->ins1(LIR_plive, cpend);
lir->ins1(LIR_plive, start);

/* After the loop: reload 'pos' from memory and continue. */
targetCurrentPoint(kidFails);

@@ -3106,8 +3112,8 @@ class RegExpNativeCompiler {
if (loopLabel) {
lir->insBranch(LIR_j, NULL, loopLabel);
LirBuffer* lirbuf = fragment->lirbuf;
lir->ins1(LIR_live, lirbuf->state);
lir->ins1(LIR_live, lirbuf->param1);
lir->ins1(LIR_plive, lirbuf->state);
lir->ins1(LIR_plive, lirbuf->param1);
}

Allocator &alloc = *JS_TRACE_MONITOR(cx).dataAlloc;

@@ -3138,7 +3144,7 @@ class RegExpNativeCompiler {
{
fragment->lirbuf = lirbuf;
#ifdef DEBUG
LabelMap* labels = new (tempAlloc) LabelMap(tempAlloc, &js_LogController);
LabelMap* labels = new (tempAlloc) LabelMap(tempAlloc, &LogController);
lirbuf->names = new (tempAlloc) LirNameMap(tempAlloc, labels);
#endif
}

@@ -3153,11 +3159,11 @@ class RegExpNativeCompiler {
GuardRecord* guard = NULL;
const jschar* re_chars;
size_t re_length;
JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
TraceMonitor* tm = &JS_TRACE_MONITOR(cx);
Assembler *assm = tm->assembler;
LIns* loopLabel = NULL;

if (outOfMemory() || js_OverfullJITCache(tm))
if (outOfMemory() || OverfullJITCache(tm))
return JS_FALSE;

re->source->getCharsAndLength(re_chars, re_length);

@@ -3175,19 +3181,19 @@ class RegExpNativeCompiler {
if (outOfMemory())
goto fail;
/* FIXME Use bug 463260 smart pointer when available. */
lir = lirBufWriter = new LirBufWriter(lirbuf);
lir = lirBufWriter = new LirBufWriter(lirbuf, nanojit::AvmCore::config);

/* FIXME Use bug 463260 smart pointer when available. */
#ifdef NJ_VERBOSE
debug_only_stmt(
if (js_LogController.lcbits & LC_TMRegexp) {
if (LogController.lcbits & LC_TMRegexp) {
lir = verbose_filter = new VerboseWriter(tempAlloc, lir, lirbuf->names,
&js_LogController);
&LogController);
}
)
#endif
#ifdef DEBUG
lir = sanity_filter = new SanityFilter(lir);
lir = validate_writer = new ValidateWriter(lir, "regexp writer pipeline");
#endif

/*

@@ -3210,7 +3216,7 @@ class RegExpNativeCompiler {
// If profiling, record where the loop label is, so that the
// assembler can insert a frag-entry-counter increment at that
// point
verbose_only( if (js_LogController.lcbits & LC_FragProfile) {
verbose_only( if (LogController.lcbits & LC_FragProfile) {
NanoAssert(!fragment->loopLabel);
fragment->loopLabel = loopLabel;
})

@@ -3241,41 +3247,42 @@ class RegExpNativeCompiler {
*/
JS_ASSERT(!lirbuf->sp && !lirbuf->rp);

::compile(assm, fragment, tempAlloc verbose_only(, lirbuf->names->labels));
assm->compile(fragment, tempAlloc, /*optimize*/true
verbose_only(, lirbuf->names->labels));
if (assm->error() != nanojit::None)
goto fail;

delete lirBufWriter;
#ifdef DEBUG
delete sanity_filter;
delete validate_writer;
#endif
#ifdef NJ_VERBOSE
debug_only_stmt( if (js_LogController.lcbits & LC_TMRegexp)
debug_only_stmt( if (LogController.lcbits & LC_TMRegexp)
delete verbose_filter; )
#endif
return JS_TRUE;
fail:
if (outOfMemory() || js_OverfullJITCache(tm)) {
if (outOfMemory() || OverfullJITCache(tm)) {
delete lirBufWriter;
// recover profiling data from expiring Fragments
verbose_only(
REHashMap::Iter iter(*(tm->reFragments));
while (iter.next()) {
nanojit::Fragment* frag = iter.value();
js_FragProfiling_FragFinalizer(frag, tm);
FragProfiling_FragFinalizer(frag, tm);
}
)
js_FlushJITCache(cx);
FlushJITCache(cx);
} else {
if (!guard) insertGuard(loopLabel, re_chars, re_length);
re->flags |= JSREG_NOCOMPILE;
delete lirBufWriter;
}
#ifdef DEBUG
delete sanity_filter;
delete validate_writer;
#endif
#ifdef NJ_VERBOSE
debug_only_stmt( if (js_LogController.lcbits & LC_TMRegexp)
debug_only_stmt( if (LogController.lcbits & LC_TMRegexp)
delete lir; )
#endif
return JS_FALSE;

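Note: besides the LogController/OverfullJITCache renames, every unconditional LIR_j in this file is now built with insBranch() instead of ins2(), matching how the conditional LIR_jt/LIR_jf branches were already constructed. The pattern, condensed from the hunks above:

    LIns *jmp = lir->insBranch(LIR_j, NULL, NULL);   // was: lir->ins2(LIR_j, NULL, NULL)
    /* ... once the destination label exists ... */
    jmp->setTarget(missLbl);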
@@ -145,15 +145,6 @@ js_CheckKeyword(const jschar *str, size_t length)
return kw ? kw->tokentype : TOK_EOF;
}

JS_FRIEND_API(void)
js_MapKeywords(JSMapKeywordFun mapfun)
{
size_t i;

for (i = 0; i != KEYWORD_COUNT; ++i)
mapfun(keyword_defs[i].chars);
}

JSBool
js_IsIdentifier(JSString *str)
{

@@ -371,9 +371,6 @@ js_CheckKeyword(const jschar *chars, size_t length);
*/
typedef void (*JSMapKeywordFun)(const char *);

extern JS_FRIEND_API(void)
js_MapKeywords(JSMapKeywordFun mapfun);

/*
* Check that str forms a valid JS identifier name. The function does not
* check if str is a JS keyword.

@@ -63,6 +63,8 @@

#include "jsscopeinlines.h"

using namespace js;

uint32
js_GenerateShape(JSContext *cx, bool gcLocked)
{

@@ -95,7 +97,7 @@ js_GetMutableScope(JSContext *cx, JSObject *obj)

scope = OBJ_SCOPE(obj);
JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, scope));
if (scope->owned())
if (!scope->isSharedEmpty())
return scope;

/*

@@ -106,7 +108,10 @@ js_GetMutableScope(JSContext *cx, JSObject *obj)
newscope = JSScope::create(cx, scope->ops, obj->getClass(), obj, scope->shape);
if (!newscope)
return NULL;
JS_LOCK_SCOPE(cx, newscope);

/* The newly allocated scope is single-threaded and, as such, is locked. */
JS_ASSERT(CX_OWNS_SCOPE_TITLE(cx, newscope));
JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, newscope));
obj->map = newscope;

JS_ASSERT(newscope->freeslot == JSSLOT_FREE(STOBJ_GET_CLASS(obj)));

@@ -124,10 +129,8 @@ js_GetMutableScope(JSContext *cx, JSObject *obj)
if (newscope->freeslot < freeslot)
newscope->freeslot = freeslot;
}
JS_TRANSFER_SCOPE_LOCK(cx, scope, newscope);
JS_ATOMIC_DECREMENT(&scope->nrefs);
if (scope->nrefs == 0)
JSScope::destroy(cx, scope);
JS_DROP_ALL_EMPTY_SCOPE_LOCKS(cx, scope);
static_cast<JSEmptyScope *>(scope)->drop(cx);
return newscope;
}

@@ -212,7 +215,6 @@ JSScope::create(JSContext *cx, const JSObjectOps *ops, JSClass *clasp,
if (!scope)
return NULL;

scope->nrefs = 1;
scope->freeslot = JSSLOT_FREE(clasp);
scope->flags = cx->runtime->gcRegenShapesScopeFlag;
scope->initMinimal(cx, shape);

@@ -225,31 +227,24 @@ JSScope::create(JSContext *cx, const JSObjectOps *ops, JSClass *clasp,
return scope;
}

JSEmptyScope *
JSScope::createEmptyScope(JSContext *cx, JSClass *clasp)
JSEmptyScope::JSEmptyScope(JSContext *cx, const JSObjectOps *ops,
JSClass *clasp)
: JSScope(ops, NULL), clasp(clasp)
{
JS_ASSERT(!emptyScope);

JSEmptyScope *scope = cx->create<JSEmptyScope>(ops, clasp);
if (!scope)
return NULL;

/*
* This scope holds a reference to the new empty scope. Our only caller,
* getEmptyScope, also promises to incref on behalf of its caller.
*/
scope->nrefs = 2;
scope->freeslot = JSSLOT_FREE(clasp);
scope->flags = OWN_SHAPE | cx->runtime->gcRegenShapesScopeFlag;
scope->initMinimal(cx, js_GenerateShape(cx, false));
nrefs = 2;
freeslot = JSSLOT_FREE(clasp);
flags = OWN_SHAPE | cx->runtime->gcRegenShapesScopeFlag;
initMinimal(cx, js_GenerateShape(cx, false));

#ifdef JS_THREADSAFE
js_InitTitle(cx, &scope->title);
js_InitTitle(cx, &title);
#endif
JS_RUNTIME_METER(cx->runtime, liveScopes);
JS_RUNTIME_METER(cx->runtime, totalScopes);
emptyScope = scope;
return scope;
}

#ifdef DEBUG

@@ -260,19 +255,46 @@ JSScope::createEmptyScope(JSContext *cx, JSClass *clasp)
#endif

void
JSScope::destroy(JSContext *cx, JSScope *scope)
JSScope::destroy(JSContext *cx)
{
#ifdef JS_THREADSAFE
js_FinishTitle(cx, &scope->title);
js_FinishTitle(cx, &title);
#endif
if (scope->table)
cx->free(scope->table);
if (scope->emptyScope)
scope->emptyScope->drop(cx, NULL);
if (table)
cx->free(table);

LIVE_SCOPE_METER(cx, cx->runtime->liveScopeProps -= scope->entryCount);
/*
* The scopes containing empty scopes are only destroyed from the GC
* thread.
*/
if (emptyScope)
emptyScope->dropFromGC(cx);

LIVE_SCOPE_METER(cx, cx->runtime->liveScopeProps -= entryCount);
JS_RUNTIME_UNMETER(cx->runtime, liveScopes);
cx->free(scope);
cx->free(this);
}

/* static */
bool
JSScope::initRuntimeState(JSContext *cx)
{
cx->runtime->emptyBlockScope = cx->create<JSEmptyScope>(cx, &js_ObjectOps,
&js_BlockClass);
JS_ASSERT(cx->runtime->emptyBlockScope->nrefs == 2);
cx->runtime->emptyBlockScope->nrefs = 1;
return !!cx->runtime->emptyBlockScope;
}

/* static */
void
JSScope::finishRuntimeState(JSContext *cx)
{
JSRuntime *rt = cx->runtime;
if (rt->emptyBlockScope) {
rt->emptyBlockScope->drop(cx);
rt->emptyBlockScope = NULL;
}
}

JS_STATIC_ASSERT(sizeof(JSHashNumber) == 4);

@@ -1016,7 +1038,8 @@ JSScope::getChildProperty(JSContext *cx, JSScopeProperty *parent,
}
return NULL;
}

child.flags &= ~SPROP_IN_DICTIONARY;
JSScopeProperty *sprop = GetPropertyTreeChild(cx, parent, child);
if (sprop) {
JS_ASSERT(sprop->parent == parent);

@@ -1093,7 +1116,7 @@ JSScope::generateOwnShape(JSContext *cx)
{
#ifdef JS_TRACER
if (object) {
js_LeaveTraceIfGlobalObject(cx, object);
LeaveTraceIfGlobalObject(cx, object);

/*
* The JIT must have arranged to re-guard after any unpredictable shape

@@ -1107,7 +1130,7 @@ JSScope::generateOwnShape(JSContext *cx)
* Any subsequent property operation upon object on the trace currently
* being recorded will re-guard (and re-memoize).
*/
JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
TraceMonitor *tm = &JS_TRACE_MONITOR(cx);
if (TraceRecorder *tr = tm->recorder)
tr->forgetGuardedShapesForObject(object);
}

@@ -1650,7 +1673,7 @@ JSScope::clear(JSContext *cx)
js_free(table);
clearDictionaryMode();
clearOwnShape();
js_LeaveTraceIfGlobalObject(cx, object);
LeaveTraceIfGlobalObject(cx, object);

JSClass *clasp = object->getClass();
JSObject *proto = object->getProto();

@@ -1907,8 +1930,9 @@ DumpSubtree(JSContext *cx, JSScopeProperty *sprop, int level, FILE *fp)
}

fprintf(fp, " g/s %p/%p slot %u attrs %x flags %x shortid %d\n",
(void *) sprop->getter, (void *) sprop->setter, sprop->slot,
sprop->attrs, sprop->flags, sprop->shortid);
JS_FUNC_TO_DATA_PTR(void *, sprop->getter),
JS_FUNC_TO_DATA_PTR(void *, sprop->setter),
sprop->slot, sprop->attrs, sprop->flags, sprop->shortid);
kids = sprop->kids;
if (kids) {
++level;

js/src/jsscope.h
|
@ -210,9 +210,7 @@ struct JSScope : public JSObjectMap
|
|||
JSTitle title; /* lock state */
|
||||
#endif
|
||||
JSObject *object; /* object that owns this scope */
|
||||
jsrefcount nrefs; /* count of all referencing objects */
|
||||
uint32 freeslot; /* index of next free slot in object */
|
||||
JSEmptyScope *emptyScope; /* cache for getEmptyScope below */
|
||||
uint8 flags; /* flags, see below */
|
||||
int8 hashShift; /* multiplicative hash shift */
|
||||
|
||||
|
@ -220,6 +218,7 @@ struct JSScope : public JSObjectMap
|
|||
uint32 entryCount; /* number of entries in table */
|
||||
uint32 removedCount; /* removed entry sentinels in table */
|
||||
JSScopeProperty **table; /* table of ptrs to shared tree nodes */
JSEmptyScope *emptyScope;   /* cache for getEmptyScope below */

/*
* A little information hiding for scope->lastProp, in case it ever becomes

@@ -259,14 +258,17 @@ struct JSScope : public JSObjectMap
inline void updateShape(JSContext *cx);
inline void updateFlags(const JSScopeProperty *sprop);

protected:
void initMinimal(JSContext *cx, uint32 newShape);

private:
bool createTable(JSContext *cx, bool report);
bool changeTable(JSContext *cx, int change);
void reportReadOnlyScope(JSContext *cx);
void generateOwnShape(JSContext *cx);
JSScopeProperty **searchTable(jsid id, bool adding);
inline JSScopeProperty **search(jsid id, bool adding);
JSEmptyScope *createEmptyScope(JSContext *cx, JSClass *clasp);
inline JSEmptyScope *createEmptyScope(JSContext *cx, JSClass *clasp);

JSScopeProperty *addPropertyHelper(JSContext *cx, jsid id,
JSPropertyOp getter, JSPropertyOp setter,

@@ -275,17 +277,14 @@ struct JSScope : public JSObjectMap
JSScopeProperty **spp);

public:
explicit JSScope(const JSObjectOps *ops, JSObject *obj = NULL)
JSScope(const JSObjectOps *ops, JSObject *obj)
: JSObjectMap(ops, 0), object(obj) {}

/* Create a mutable, owned, empty scope. */
static JSScope *create(JSContext *cx, const JSObjectOps *ops, JSClass *clasp,
JSObject *obj, uint32 shape);
static JSScope *create(JSContext *cx, const JSObjectOps *ops,
JSClass *clasp, JSObject *obj, uint32 shape);

static void destroy(JSContext *cx, JSScope *scope);

inline void hold();
inline bool drop(JSContext *cx, JSObject *obj);
void destroy(JSContext *cx);

/*
* Return an immutable, shareable, empty scope with the same ops as this

@@ -296,6 +295,8 @@ struct JSScope : public JSObjectMap
*/
inline JSEmptyScope *getEmptyScope(JSContext *cx, JSClass *clasp);

inline bool ensureEmptyScope(JSContext *cx, JSClass *clasp);

inline bool canProvideEmptyScope(JSObjectOps *ops, JSClass *clasp);

JSScopeProperty *lookup(jsid id);

@@ -377,7 +378,10 @@ struct JSScope : public JSObjectMap
* This flag toggles with each shape-regenerating GC cycle.
* See JSRuntime::gcRegenShapesScopeFlag.
*/
SHAPE_REGEN = 0x0040
SHAPE_REGEN = 0x0040,

/* The anti-branded flag, to avoid overspecializing. */
GENERIC = 0x0080
};

bool inDictionaryMode() { return flags & DICTIONARY_MODE; }

@@ -390,16 +394,22 @@ struct JSScope : public JSObjectMap
* sealed.
*/
bool sealed() { return flags & SEALED; }
void setSealed() { flags |= SEALED; }
void setSealed() {
JS_ASSERT(!isSharedEmpty());
flags |= SEALED;
}

/*
* A branded scope's object contains plain old methods (function-valued
* properties without magic getters and setters), and its scope->shape
* evolves whenever a function value changes.
*/
bool branded() { return flags & BRANDED; }
bool branded() { JS_ASSERT(!generic()); return flags & BRANDED; }
void setBranded() { flags |= BRANDED; }

bool generic() { return flags & GENERIC; }
void setGeneric() { flags |= GENERIC; }

bool hadIndexedProperties() { return flags & INDEXED_PROPERTIES; }
void setIndexedProperties() { flags |= INDEXED_PROPERTIES; }

@@ -461,15 +471,45 @@ struct JSScope : public JSObjectMap
bool
brandedOrHasMethodBarrier() { return flags & (BRANDED | METHOD_BARRIER); }

bool owned() { return object != NULL; }
bool isSharedEmpty() const { return !object; }

static bool initRuntimeState(JSContext *cx);
static void finishRuntimeState(JSContext *cx);
};

struct JSEmptyScope : public JSScope
{
JSClass * const clasp;
jsrefcount nrefs;   /* count of all referencing objects */

explicit JSEmptyScope(const JSObjectOps *ops, JSClass *clasp)
: JSScope(ops), clasp(clasp) {}
JSEmptyScope(JSContext *cx, const JSObjectOps *ops, JSClass *clasp);

void hold() {
/* The method is only called for already held objects. */
JS_ASSERT(nrefs >= 1);
JS_ATOMIC_INCREMENT(&nrefs);
}

void drop(JSContext *cx) {
JS_ASSERT(nrefs >= 1);
JS_ATOMIC_DECREMENT(&nrefs);
if (nrefs == 0)
destroy(cx);
}

/*
* Optimized version of the drop method to use from the object finalizer
* to skip expensive JS_ATOMIC_DECREMENT.
*/
void dropFromGC(JSContext *cx) {
#ifdef JS_THREADSAFE
JS_ASSERT(CX_THREAD_IS_RUNNING_GC(cx));
#endif
JS_ASSERT(nrefs >= 1);
--nrefs;
if (nrefs == 0)
destroy(cx);
}
};

inline bool

@@ -779,43 +819,6 @@ JSScope::canProvideEmptyScope(JSObjectOps *ops, JSClass *clasp)
return this->ops == ops && (!emptyScope || emptyScope->clasp == clasp);
}

inline JSEmptyScope *
JSScope::getEmptyScope(JSContext *cx, JSClass *clasp)
{
if (emptyScope) {
JS_ASSERT(clasp == emptyScope->clasp);
emptyScope->hold();
return emptyScope;
}
return createEmptyScope(cx, clasp);
}

inline void
JSScope::hold()
{
JS_ASSERT(nrefs >= 0);
JS_ATOMIC_INCREMENT(&nrefs);
}

inline bool
JSScope::drop(JSContext *cx, JSObject *obj)
{
#ifdef JS_THREADSAFE
/* We are called from only js_ShareWaitingTitles and js_FinalizeObject. */
JS_ASSERT(!obj || CX_THREAD_IS_RUNNING_GC(cx));
#endif
JS_ASSERT(nrefs > 0);
--nrefs;

if (nrefs == 0) {
destroy(cx, this);
return false;
}
if (object == obj)
object = NULL;
return true;
}

inline bool
JSScopeProperty::get(JSContext* cx, JSObject* obj, JSObject *pobj, jsval* vp)
{
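Note: the hold/drop/dropFromGC protocol the hunk above introduces for JSEmptyScope can be summarized with a small standalone model. This is illustrative C++ only, not part of the patch; SharedEmptyScopeModel and main are invented names.

#include <atomic>
#include <cassert>

// Model of the JSEmptyScope refcount protocol: hold() on an already-held
// shared scope, drop() from ordinary (possibly concurrent) code, and a
// non-atomic dropFromGC() fast path usable while the GC has stopped all
// other threads.
struct SharedEmptyScopeModel {
    std::atomic<int> nrefs;

    SharedEmptyScopeModel() : nrefs(1) {}   // the creator holds one reference

    void hold() {                           // only called on held scopes
        assert(nrefs >= 1);
        ++nrefs;
    }

    void drop() {                           // atomic decrement, free at zero
        assert(nrefs >= 1);
        if (--nrefs == 0)
            delete this;
    }

    void dropFromGC() {                     // GC-only: plain loads/stores suffice
        assert(nrefs >= 1);
        int n = nrefs.load(std::memory_order_relaxed) - 1;
        nrefs.store(n, std::memory_order_relaxed);
        if (n == 0)
            delete this;
    }
};

int main() {
    SharedEmptyScopeModel *s = new SharedEmptyScopeModel;  // nrefs == 1
    s->hold();                                             // a second object shares it
    s->drop();                                             // first sharer goes away
    s->dropFromGC();                                       // finalizer drops the last ref
}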
@@ -1,4 +1,4 @@
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1

@@ -46,12 +46,46 @@
#include "jsobj.h"
#include "jsscope.h"

inline JSEmptyScope *
JSScope::createEmptyScope(JSContext *cx, JSClass *clasp)
{
JS_ASSERT(!emptyScope);
emptyScope = cx->create<JSEmptyScope>(cx, ops, clasp);
return emptyScope;
}

inline JSEmptyScope *
JSScope::getEmptyScope(JSContext *cx, JSClass *clasp)
{
if (emptyScope) {
JS_ASSERT(clasp == emptyScope->clasp);
emptyScope->hold();
return emptyScope;
}
return createEmptyScope(cx, clasp);
}

inline bool
JSScope::ensureEmptyScope(JSContext *cx, JSClass *clasp)
{
if (emptyScope) {
JS_ASSERT(clasp == emptyScope->clasp);
return true;
}
if (!createEmptyScope(cx, clasp))
return false;

/* We are going to have only single ref to the scope. */
JS_ASSERT(emptyScope->nrefs == 2);
emptyScope->nrefs = 1;
return true;
}

inline void
JSScope::updateShape(JSContext *cx)
{
JS_ASSERT(object);
js_LeaveTraceIfGlobalObject(cx, object);

js::LeaveTraceIfGlobalObject(cx, object);
shape = (hasOwnShape() || !lastProp) ? js_GenerateShape(cx, false) : lastProp->shape;
}
@@ -67,6 +67,8 @@

#include "jsscriptinlines.h"

using namespace js;

const uint32 JSSLOT_EXEC_DEPTH = JSSLOT_PRIVATE + 1;
const uint32 JSSCRIPT_RESERVED_SLOTS = 1;

@@ -328,12 +330,12 @@ script_exec_sub(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
*
* Unlike eval, which the compiler detects, Script.prototype.exec may be
* called from a lightweight function, or even from native code (in which
* case fp->varobj and fp->scopeChain are null). If exec is called from
* a lightweight function, we will need to get a Call object representing
* its frame, to act as the var object and scope chain head.
* fp->scopeChain is null). If exec is called from a lightweight function,
* we will need to get a Call object representing its frame, to act as the
* var object and scope chain head.
*/
caller = js_GetScriptedCaller(cx, NULL);
if (caller && !caller->varobj) {
if (caller && !caller->varobj(cx)) {
/* Called from a lightweight function. */
JS_ASSERT(caller->fun && !JSFUN_HEAVYWEIGHT_TEST(caller->fun->flags));

@@ -347,7 +349,7 @@ script_exec_sub(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
if (caller) {
/*
* Load caller->scopeChain after the conditional js_GetCallObject
* call above, which resets scopeChain as well as varobj.
* call above, which resets scopeChain as well as the callobj.
*/
scopeobj = js_GetScopeChain(cx, caller);
if (!scopeobj)

@@ -1762,7 +1764,7 @@ js_DestroyScript(JSContext *cx, JSScript *script)
}

#ifdef JS_TRACER
js_PurgeScriptFragments(cx, script);
PurgeScriptFragments(cx, script);
#endif

cx->free(script);
@@ -78,6 +78,8 @@
#include "jsvector.h"
#include "jsstrinlines.h"

using namespace js;

#define JSSTRDEP_RECURSION_LIMIT 100

JS_STATIC_ASSERT(size_t(JSString::MAX_LENGTH) <= size_t(JSVAL_INT_MAX));

@@ -1748,7 +1750,7 @@ FindReplaceLength(JSContext *cx, ReplaceData &rdata, size_t *sizep)
if (lambda) {
uintN i, m, n;

js_LeaveTrace(cx);
LeaveTrace(cx);

/*
* In the lambda case, not only do we find the replacement string's

@@ -2880,6 +2882,18 @@ const char *JSString::deflatedIntStringTable[] = {
#undef L2
#undef L3

/* Static table for common UTF8 encoding */
#define U8(c) char(((c) >> 6) | 0xc0), char(((c) & 0x3f) | 0x80), 0
#define U(c) U8(c), U8(c+1), U8(c+2), U8(c+3), U8(c+4), U8(c+5), U8(c+6), U8(c+7)

const char JSString::deflatedUnitStringTable[] = {
U(0x80), U(0x88), U(0x90), U(0x98), U(0xa0), U(0xa8), U(0xb0), U(0xb8),
U(0xc0), U(0xc8), U(0xd0), U(0xd8), U(0xe0), U(0xe8), U(0xf0), U(0xf8)
};

#undef U
#undef U8

#undef C

#undef O0

@@ -3098,10 +3112,10 @@ js_NewString(JSContext *cx, jschar *chars, size_t length)
* If we can't leave the trace, signal OOM condition, otherwise
* exit from trace before throwing.
*/
if (!js_CanLeaveTrace(cx))
if (!CanLeaveTrace(cx))
return NULL;

js_LeaveTrace(cx);
LeaveTrace(cx);
}
js_ReportAllocationOverflow(cx);
return NULL;

@@ -3821,11 +3835,14 @@ js_GetStringBytes(JSContext *cx, JSString *str)
if (JSString::isUnitString(str)) {
#ifdef IS_LITTLE_ENDIAN
/* Unit string data is {c, 0, 0, 0} so we can just cast. */
return (char *)str->chars();
bytes = (char *)str->chars();
#else
/* Unit string data is {0, c, 0, 0} so we can point into the middle. */
return (char *)str->chars() + 1;
#endif
bytes = (char *)str->chars() + 1;
#endif
return ((*bytes & 0x80) && js_CStringsAreUTF8)
? JSString::deflatedUnitStringTable + ((*bytes & 0x7f) * 3)
: bytes;
}

if (JSString::isIntString(str)) {
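Note: the U8() macro added above builds each table entry from the standard two-byte UTF-8 encoding of U+0080..U+00FF. A standalone check of that arithmetic (illustrative only, not part of the patch):

#include <cstdio>

int main() {
    unsigned c = 0xE9;                     // U+00E9 ('é'), in the 0x80..0xFF range
    unsigned char b0 = (c >> 6) | 0xC0;    // leading byte:      110xxxxx
    unsigned char b1 = (c & 0x3F) | 0x80;  // continuation byte: 10xxxxxx
    std::printf("U+%04X -> %02X %02X\n", c, b0, b1);  // prints: U+00E9 -> C3 A9
    return 0;
}

Each deflatedUnitStringTable entry is those two bytes followed by a NUL, which is why the js_GetStringBytes() change above indexes the table with (*bytes & 0x7f) * 3.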
@@ -58,8 +58,6 @@ JS_BEGIN_EXTERN_C
#define JSSTRING_BIT(n) ((size_t)1 << (n))
#define JSSTRING_BITMASK(n) (JSSTRING_BIT(n) - 1)

class TraceRecorder;

enum {
UNIT_STRING_LIMIT = 256U,
INT_STRING_LIMIT = 256U

@@ -100,7 +98,7 @@ JS_STATIC_ASSERT(JS_BITS_PER_WORD >= 32);
* NB: Always use the length() and chars() accessor methods.
*/
struct JSString {
friend class TraceRecorder;
friend class js::TraceRecorder;

friend JSAtom *
js_AtomizeString(JSContext *cx, JSString *str, uintN flags);

@@ -328,6 +326,7 @@ struct JSString {
static JSString unitStringTable[];
static JSString intStringTable[];
static const char *deflatedIntStringTable[];
static const char deflatedUnitStringTable[];

static JSString *unitString(jschar c);
static JSString *getUnitString(JSContext *cx, JSString *str, size_t index);
@@ -42,6 +42,8 @@

#include "jsbit.h"

#include <new>

namespace js {

/* JavaScript Template Library. */

@@ -237,6 +239,55 @@ class SystemAllocPolicy
void reportAllocOverflow() const {}
};

/*
* Small utility for lazily constructing objects without using dynamic storage.
* When a LazilyConstructed<T> is constructed, it is |empty()|, i.e., no value
* of T has been constructed and no T destructor will be called when the
* LazilyConstructed<T> is destroyed. Upon calling |construct|, a T object will
* be constructed with the given arguments and that object will be destroyed
* when the owning LazilyConstructed<T> is destroyed.
*/
template <class T>
class LazilyConstructed
{
char bytes[sizeof(T)];
bool constructed;
T &asT() { return *reinterpret_cast<T *>(bytes); }

public:
LazilyConstructed() : constructed(false) {}
~LazilyConstructed() { if (constructed) asT().~T(); }

bool empty() const { return !constructed; }

void construct() {
JS_ASSERT(!constructed);
new(bytes) T();
constructed = true;
}

template <class T1>
void construct(const T1 &t1) {
JS_ASSERT(!constructed);
new(bytes) T(t1);
constructed = true;
}

template <class T1, class T2>
void construct(const T1 &t1, const T2 &t2) {
JS_ASSERT(!constructed);
new(bytes) T(t1, t2);
constructed = true;
}

template <class T1, class T2, class T3>
void construct(const T1 &t1, const T2 &t2, const T3 &t3) {
JS_ASSERT(!constructed);
new(bytes) T(t1, t2, t3);
constructed = true;
}
};

} /* namespace js */

#endif /* jstl_h_ */
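Note: a minimal standalone usage sketch of the LazilyConstructed<T> utility added above (illustrative only; Guard and maybeGuarded are invented names, and the class is reduced to the one construct() overload the sketch needs):

#include <cassert>
#include <cstdio>
#include <new>

// Reduced copy of the utility above, enough for the sketch.
template <class T>
class LazilyConstructed {
    char bytes[sizeof(T)];   // as in the patch, assumed adequately aligned for T
    bool constructed;
    T &asT() { return *reinterpret_cast<T *>(bytes); }
  public:
    LazilyConstructed() : constructed(false) {}
    ~LazilyConstructed() { if (constructed) asT().~T(); }
    bool empty() const { return !constructed; }
    template <class T1>
    void construct(const T1 &t1) {
        assert(!constructed);
        new(bytes) T(t1);
        constructed = true;
    }
};

struct Guard {
    const char *what;
    explicit Guard(const char *w) : what(w) { std::printf("acquire %s\n", what); }
    ~Guard() { std::printf("release %s\n", what); }
};

void maybeGuarded(bool needGuard) {
    LazilyConstructed<Guard> guard;    // declared up front, nothing constructed yet
    if (needGuard)
        guard.construct("resource");   // constructed in place, no heap allocation
    /* ... work that may or may not need the guard ... */
}                                      // Guard's destructor runs only if constructed

int main() {
    maybeGuarded(false);   // prints nothing
    maybeGuarded(true);    // prints acquire/release
}

The point of the idiom is that a guard can be declared at an outer scope but only pays construction and destruction costs on the paths that actually need it.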
js/src/jstracer.cpp (1302 lines changed)
The diff for this file is not shown because of its large size.
@@ -53,6 +53,8 @@
#include "jsnum.h"
#include "jsvector.h"

namespace js {

#if defined(DEBUG) && !defined(JS_JIT_SPEW)
#define JS_JIT_SPEW
#endif

@@ -255,27 +257,27 @@ static void debug_only_printf(int mask, const char *fmt, ...) {}
#elif defined(JS_JIT_SPEW)

// Top level logging controller object.
extern nanojit::LogControl js_LogController;
extern nanojit::LogControl LogController;

// Top level profiling hook, needed to harvest profile info from Fragments
// whose logical lifetime is about to finish
extern void js_FragProfiling_FragFinalizer(nanojit::Fragment* f, JSTraceMonitor*);
extern void FragProfiling_FragFinalizer(nanojit::Fragment* f, TraceMonitor*);

#define debug_only_stmt(stmt) \
stmt

#define debug_only_printf(mask, fmt, ...) \
JS_BEGIN_MACRO \
if ((js_LogController.lcbits & (mask)) > 0) { \
js_LogController.printf(fmt, __VA_ARGS__); \
if ((LogController.lcbits & (mask)) > 0) { \
LogController.printf(fmt, __VA_ARGS__); \
fflush(stdout); \
} \
JS_END_MACRO

#define debug_only_print0(mask, str) \
JS_BEGIN_MACRO \
if ((js_LogController.lcbits & (mask)) > 0) { \
js_LogController.printf("%s", str); \
if ((LogController.lcbits & (mask)) > 0) { \
LogController.printf("%s", str); \
fflush(stdout); \
} \
JS_END_MACRO

@@ -335,7 +337,7 @@ public:
* otherwise work correctly. A static assertion in jstracer.cpp verifies that
* this requirement is correctly enforced by these compilers.
*/
enum JSTraceType_
enum TraceType_
#if defined(_MSC_VER) && _MSC_VER >= 1400
: int8_t
#endif

@@ -356,9 +358,9 @@ __attribute__((packed))
;

#ifdef USE_TRACE_TYPE_ENUM
typedef JSTraceType_ JSTraceType;
typedef TraceType_ TraceType;
#else
typedef int8_t JSTraceType;
typedef int8_t TraceType;
#endif

/*

@@ -370,16 +372,16 @@ const uint32 TT_INVALID = uint32(-1);

typedef Queue<uint16> SlotList;

class TypeMap : public Queue<JSTraceType> {
class TypeMap : public Queue<TraceType> {
public:
TypeMap(nanojit::Allocator* alloc) : Queue<JSTraceType>(alloc) {}
TypeMap(nanojit::Allocator* alloc) : Queue<TraceType>(alloc) {}
void set(unsigned stackSlots, unsigned ngslots,
const JSTraceType* stackTypeMap, const JSTraceType* globalTypeMap);
const TraceType* stackTypeMap, const TraceType* globalTypeMap);
JS_REQUIRES_STACK void captureTypes(JSContext* cx, JSObject* globalObj, SlotList& slots, unsigned callDepth);
JS_REQUIRES_STACK void captureMissingGlobalTypes(JSContext* cx, JSObject* globalObj, SlotList& slots,
unsigned stackSlots);
bool matches(TypeMap& other) const;
void fromRaw(JSTraceType* other, unsigned numSlots);
void fromRaw(TraceType* other, unsigned numSlots);
};

#define JS_TM_EXITCODES(_) \

@@ -457,7 +459,7 @@ struct VMSideExit : public nanojit::SideExit
FrameInfo* recursive_down;
unsigned hitcount;
unsigned slurpFailSlot;
JSTraceType slurpType;
TraceType slurpType;

/*
* Ordinarily 0. If a slow native function is atop the stack, the 1 bit is

@@ -477,20 +479,20 @@ struct VMSideExit : public nanojit::SideExit
nativeCalleeWord = uintptr_t(callee) | (constructing ? 1 : 0);
}

inline JSTraceType* stackTypeMap() {
return (JSTraceType*)(this + 1);
inline TraceType* stackTypeMap() {
return (TraceType*)(this + 1);
}

inline JSTraceType& stackType(unsigned i) {
inline TraceType& stackType(unsigned i) {
JS_ASSERT(i < numStackSlots);
return stackTypeMap()[i];
}

inline JSTraceType* globalTypeMap() {
return (JSTraceType*)(this + 1) + this->numStackSlots;
inline TraceType* globalTypeMap() {
return (TraceType*)(this + 1) + this->numStackSlots;
}

inline JSTraceType* fullTypeMap() {
inline TraceType* fullTypeMap() {
return stackTypeMap();
}

@@ -633,8 +635,8 @@ struct FrameInfo {
bool is_constructing() const { return (argc & CONSTRUCTING_FLAG) != 0; }

// The typemap just before the callee is called.
JSTraceType* get_typemap() { return (JSTraceType*) (this+1); }
const JSTraceType* get_typemap() const { return (JSTraceType*) (this+1); }
TraceType* get_typemap() { return (TraceType*) (this+1); }
const TraceType* get_typemap() const { return (TraceType*) (this+1); }
};

struct UnstableExit

@@ -728,10 +730,10 @@ struct TreeFragment : public LinkableFragment
inline unsigned nGlobalTypes() {
return typeMap.length() - nStackTypes;
}
inline JSTraceType* globalTypeMap() {
inline TraceType* globalTypeMap() {
return typeMap.data() + nStackTypes;
}
inline JSTraceType* stackTypeMap() {
inline TraceType* stackTypeMap() {
return typeMap.data();
}

@@ -746,32 +748,32 @@ VMFragment::toTreeFragment()
return static_cast<TreeFragment*>(this);
}

typedef enum JSBuiltinStatus {
JSBUILTIN_BAILED = 1,
JSBUILTIN_ERROR = 2
} JSBuiltinStatus;
typedef enum BuiltinStatus {
BUILTIN_BAILED = 1,
BUILTIN_ERROR = 2
} BuiltinStatus;

// Arguments objects created on trace have a private value that points to an
// instance of this struct. The struct includes a typemap that is allocated
// as part of the object.
struct js_ArgsPrivateNative {
struct ArgsPrivateNative {
double *argv;

static js_ArgsPrivateNative *create(VMAllocator &alloc, unsigned argc)
static ArgsPrivateNative *create(VMAllocator &alloc, unsigned argc)
{
return (js_ArgsPrivateNative*) new (alloc) char[sizeof(js_ArgsPrivateNative) + argc];
return (ArgsPrivateNative*) new (alloc) char[sizeof(ArgsPrivateNative) + argc];
}

JSTraceType *typemap()
TraceType *typemap()
{
return (JSTraceType*) (this+1);
return (TraceType*) (this+1);
}
};

static JS_INLINE void
js_SetBuiltinError(JSContext *cx)
SetBuiltinError(JSContext *cx)
{
cx->interpState->builtinStatus |= JSBUILTIN_ERROR;
cx->interpState->builtinStatus |= BUILTIN_ERROR;
}

#ifdef DEBUG_RECORDING_STATUS_NOT_BOOL

@@ -906,9 +908,9 @@ enum TypeConsensus
};

#ifdef DEBUG
# define js_AbortRecording(cx, reason) js_AbortRecordingImpl(cx, reason)
# define AbortRecording(cx, reason) AbortRecordingImpl(cx, reason)
#else
# define js_AbortRecording(cx, reason) js_AbortRecordingImpl(cx)
# define AbortRecording(cx, reason) AbortRecordingImpl(cx)
#endif

class TraceRecorder

@@ -919,7 +921,7 @@ class TraceRecorder
JSContext* const cx;

/* Cached value of JS_TRACE_MONITOR(cx). */
JSTraceMonitor* const traceMonitor;
TraceMonitor* const traceMonitor;

/* The Fragment being recorded by this recording session. */
VMFragment* const fragment;

@@ -1026,8 +1028,8 @@ class TraceRecorder
/* Temporary JSSpecializedNative used to describe non-specialized fast natives. */
JSSpecializedNative generatedSpecializedNative;

/* Temporary JSTraceType array used to construct temporary typemaps. */
js::Vector<JSTraceType, 256> tempTypeMap;
/* Temporary TraceType array used to construct temporary typemaps. */
js::Vector<TraceType, 256> tempTypeMap;

/************************************************************* 10 bajillion member functions */

@@ -1063,10 +1065,10 @@ class TraceRecorder
JS_REQUIRES_STACK ptrdiff_t nativeStackOffset(jsval* p) const;
JS_REQUIRES_STACK ptrdiff_t nativeStackSlot(jsval* p) const;
JS_REQUIRES_STACK ptrdiff_t nativespOffset(jsval* p) const;
JS_REQUIRES_STACK void import(nanojit::LIns* base, ptrdiff_t offset, jsval* p, JSTraceType t,
JS_REQUIRES_STACK void import(nanojit::LIns* base, ptrdiff_t offset, jsval* p, TraceType t,
const char *prefix, uintN index, JSStackFrame *fp);
JS_REQUIRES_STACK void import(TreeFragment* tree, nanojit::LIns* sp, unsigned stackSlots,
unsigned callDepth, unsigned ngslots, JSTraceType* typeMap);
unsigned callDepth, unsigned ngslots, TraceType* typeMap);
void trackNativeStackUse(unsigned slots);

JS_REQUIRES_STACK bool isValidSlot(JSScope* scope, JSScopeProperty* sprop);

@@ -1128,7 +1130,8 @@ class TraceRecorder
JSScopeProperty *sprop; // sprop name was resolved to
};

JS_REQUIRES_STACK nanojit::LIns* scopeChain() const;
JS_REQUIRES_STACK nanojit::LIns* scopeChain();
JS_REQUIRES_STACK nanojit::LIns* entryScopeChain() const;
JS_REQUIRES_STACK JSStackFrame* frameIfInRange(JSObject* obj, unsigned* depthp = NULL) const;
JS_REQUIRES_STACK RecordingStatus traverseScopeChain(JSObject *obj, nanojit::LIns *obj_ins, JSObject *obj2, nanojit::LIns *&obj2_ins);
JS_REQUIRES_STACK AbortableRecordingStatus scopeChainProp(JSObject* obj, jsval*& vp, nanojit::LIns*& ins, NameResult& nr);

@@ -1212,6 +1215,7 @@ class TraceRecorder
void stobj_set_slot(nanojit::LIns* obj_ins, unsigned slot, nanojit::LIns*& dslots_ins,
nanojit::LIns* v_ins);

nanojit::LIns* stobj_get_const_fslot(nanojit::LIns* obj_ins, unsigned slot);
nanojit::LIns* stobj_get_fslot(nanojit::LIns* obj_ins, unsigned slot);
nanojit::LIns* stobj_get_dslot(nanojit::LIns* obj_ins, unsigned index,
nanojit::LIns*& dslots_ins);

@@ -1231,11 +1235,19 @@ class TraceRecorder
}

JS_REQUIRES_STACK AbortableRecordingStatus name(jsval*& vp, nanojit::LIns*& ins, NameResult& nr);
JS_REQUIRES_STACK AbortableRecordingStatus prop(JSObject* obj, nanojit::LIns* obj_ins, uint32 *slotp,
nanojit::LIns** v_insp, jsval* outp);
JS_REQUIRES_STACK AbortableRecordingStatus prop(JSObject* obj, nanojit::LIns* obj_ins,
uint32 *slotp, nanojit::LIns** v_insp,
jsval* outp);
JS_REQUIRES_STACK AbortableRecordingStatus propTail(JSObject* obj, nanojit::LIns* obj_ins,
JSObject* obj2, jsuword pcval,
uint32 *slotp, nanojit::LIns** v_insp,
jsval* outp);
JS_REQUIRES_STACK RecordingStatus denseArrayElement(jsval& oval, jsval& idx, jsval*& vp,
nanojit::LIns*& v_ins,
nanojit::LIns*& addr_ins);
nanojit::LIns*& v_ins,
nanojit::LIns*& addr_ins);
JS_REQUIRES_STACK AbortableRecordingStatus typedArrayElement(jsval& oval, jsval& idx, jsval*& vp,
nanojit::LIns*& v_ins,
nanojit::LIns*& addr_ins);
JS_REQUIRES_STACK AbortableRecordingStatus getProp(JSObject* obj, nanojit::LIns* obj_ins);
JS_REQUIRES_STACK AbortableRecordingStatus getProp(jsval& v);
JS_REQUIRES_STACK RecordingStatus getThis(nanojit::LIns*& this_ins);

@@ -1277,6 +1289,8 @@ class TraceRecorder
JS_REQUIRES_STACK nanojit::LIns* unbox_jsval(jsval v, nanojit::LIns* v_ins, VMSideExit* exit);
JS_REQUIRES_STACK bool guardClass(JSObject* obj, nanojit::LIns* obj_ins, JSClass* clasp,
VMSideExit* exit);
JS_REQUIRES_STACK bool guardConstClass(JSObject* obj, nanojit::LIns* obj_ins, JSClass* clasp,
VMSideExit* exit);
JS_REQUIRES_STACK bool guardDenseArray(JSObject* obj, nanojit::LIns* obj_ins,
ExitType exitType = MISMATCH_EXIT);
JS_REQUIRES_STACK bool guardDenseArray(JSObject* obj, nanojit::LIns* obj_ins,

@@ -1293,7 +1307,7 @@ class TraceRecorder
JS_REQUIRES_STACK void clearEntryFrameSlotsFromTracker(Tracker& which);
JS_REQUIRES_STACK void clearCurrentFrameSlotsFromTracker(Tracker& which);
JS_REQUIRES_STACK void clearFrameSlotsFromTracker(Tracker& which, JSStackFrame* fp, unsigned nslots);
JS_REQUIRES_STACK void putArguments();
JS_REQUIRES_STACK void putActivationObjects();
JS_REQUIRES_STACK RecordingStatus guardCallee(jsval& callee);
JS_REQUIRES_STACK JSStackFrame *guardArguments(JSObject *obj, nanojit::LIns* obj_ins,
unsigned *depthp);

@@ -1332,7 +1346,7 @@ class TraceRecorder

JS_REQUIRES_STACK jsatomid getFullIndex(ptrdiff_t pcoff = 0);

JS_REQUIRES_STACK JSTraceType determineSlotType(jsval* vp);
JS_REQUIRES_STACK TraceType determineSlotType(jsval* vp);

JS_REQUIRES_STACK AbortableRecordingStatus compile();
JS_REQUIRES_STACK AbortableRecordingStatus closeLoop();

@@ -1344,7 +1358,7 @@ class TraceRecorder
JS_REQUIRES_STACK void adjustCallerTypes(TreeFragment* f);
JS_REQUIRES_STACK void prepareTreeCall(TreeFragment* inner, nanojit::LIns*& inner_sp_ins);
JS_REQUIRES_STACK void emitTreeCall(TreeFragment* inner, VMSideExit* exit, nanojit::LIns* inner_sp_ins);
JS_REQUIRES_STACK void determineGlobalTypes(JSTraceType* typeMap);
JS_REQUIRES_STACK void determineGlobalTypes(TraceType* typeMap);
JS_REQUIRES_STACK VMSideExit* downSnapshot(FrameInfo* downFrame);
JS_REQUIRES_STACK TreeFragment* findNestedCompatiblePeer(TreeFragment* f);
JS_REQUIRES_STACK AbortableRecordingStatus attemptTreeCall(TreeFragment* inner,

@@ -1369,7 +1383,7 @@ class TraceRecorder

JS_REQUIRES_STACK
TraceRecorder(JSContext* cx, VMSideExit*, VMFragment*,
unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap,
unsigned stackSlots, unsigned ngslots, TraceType* typeMap,
VMSideExit* expectedInnerExit, jsbytecode* outerTree,
uint32 outerArgc, RecordReason reason);

@@ -1390,14 +1404,14 @@ class TraceRecorder
friend class DetermineTypesVisitor;
friend class RecursiveSlotMap;
friend class UpRecursiveSlotMap;
friend jsval* js_ConcatPostImacroStackCleanup(uint32, JSFrameRegs &, TraceRecorder *);
friend bool js_MonitorLoopEdge(JSContext*, uintN&, RecordReason);
friend void js_AbortRecording(JSContext*, const char*);
friend jsval* ConcatPostImacroStackCleanup(uint32, JSFrameRegs &, TraceRecorder *);
friend bool MonitorLoopEdge(JSContext*, uintN&, RecordReason);
friend void AbortRecording(JSContext*, const char*);

public:
static bool JS_REQUIRES_STACK
startRecorder(JSContext*, VMSideExit*, VMFragment*,
unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap,
unsigned stackSlots, unsigned ngslots, TraceType* typeMap,
VMSideExit* expectedInnerExit, jsbytecode* outerTree,
uint32 outerArgc, RecordReason reason);

@@ -1452,7 +1466,7 @@ public:
if (TraceRecorder* tr_ = TRACE_RECORDER(cx)) { \
AbortableRecordingStatus status = tr_->record_##x args; \
if (StatusAbortsRecording(status)) { \
js_AbortRecording(cx, #x); \
AbortRecording(cx, #x); \
if (status == ARECORD_ERROR) \
goto error; \
} \

@@ -1466,37 +1480,37 @@ public:
#define TRACE_2(x,a,b) TRACE_ARGS(x, (a, b))

extern JS_REQUIRES_STACK bool
js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount, RecordReason reason);
MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount, RecordReason reason);

extern JS_REQUIRES_STACK void
js_AbortRecording(JSContext* cx, const char* reason);
AbortRecording(JSContext* cx, const char* reason);

extern void
js_InitJIT(JSTraceMonitor *tm);
InitJIT(TraceMonitor *tm);

extern void
js_FinishJIT(JSTraceMonitor *tm);
FinishJIT(TraceMonitor *tm);

extern void
js_PurgeScriptFragments(JSContext* cx, JSScript* script);
PurgeScriptFragments(JSContext* cx, JSScript* script);

extern bool
js_OverfullJITCache(JSTraceMonitor* tm);
OverfullJITCache(TraceMonitor* tm);

extern void
js_FlushJITCache(JSContext* cx);
FlushJITCache(JSContext* cx);

extern void
js_PurgeJITOracle();
PurgeJITOracle();

extern JSObject *
js_GetBuiltinFunction(JSContext *cx, uintN index);
GetBuiltinFunction(JSContext *cx, uintN index);

extern void
js_SetMaxCodeCacheBytes(JSContext* cx, uint32 bytes);
SetMaxCodeCacheBytes(JSContext* cx, uint32 bytes);

extern bool
js_NativeToValue(JSContext* cx, jsval& v, JSTraceType type, double* slot);
NativeToValue(JSContext* cx, jsval& v, TraceType type, double* slot);

#ifdef MOZ_TRACEVIS

@@ -1504,15 +1518,13 @@ extern JS_FRIEND_API(bool)
JS_StartTraceVis(const char* filename);

extern JS_FRIEND_API(JSBool)
js_StartTraceVis(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
jsval *rval);
StartTraceVis(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval);

extern JS_FRIEND_API(bool)
JS_StopTraceVis();

extern JS_FRIEND_API(JSBool)
js_StopTraceVis(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
jsval *rval);
StopTraceVis(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval);

/* Must contain no more than 16 items. */
enum TraceVisState {

@@ -1533,7 +1545,7 @@ enum TraceVisState {
enum TraceVisExitReason {
R_NONE,
R_ABORT,
/* Reasons in js_MonitorLoopEdge */
/* Reasons in MonitorLoopEdge */
R_INNER_SIDE_EXIT,
R_DOUBLES,
R_CALLBACK_PENDING,

@@ -1572,10 +1584,10 @@ extern FILE* traceVisLogFile;
extern JSHashTable *traceVisScriptTable;

extern JS_FRIEND_API(void)
js_StoreTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r);
StoreTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r);

static inline void
js_LogTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r)
LogTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r)
{
if (traceVisLogFile) {
unsigned long long sllu = s;

@@ -1584,31 +1596,31 @@ js_LogTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r)
fwrite(&d, sizeof(d), 1, traceVisLogFile);
}
if (traceVisScriptTable) {
js_StoreTraceVisState(cx, s, r);
StoreTraceVisState(cx, s, r);
}
}

/*
* Although this runs the same code as js_LogTraceVisState, it is a separate
* Although this runs the same code as LogTraceVisState, it is a separate
* function because the meaning of the log entry is different. Also, the entry
* formats may diverge someday.
*/
static inline void
js_LogTraceVisEvent(JSContext *cx, TraceVisState s, TraceVisFlushReason r)
LogTraceVisEvent(JSContext *cx, TraceVisState s, TraceVisFlushReason r)
{
js_LogTraceVisState(cx, s, (TraceVisExitReason) r);
LogTraceVisState(cx, s, (TraceVisExitReason) r);
}

static inline void
js_EnterTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r)
EnterTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r)
{
js_LogTraceVisState(cx, s, r);
LogTraceVisState(cx, s, r);
}

static inline void
js_ExitTraceVisState(JSContext *cx, TraceVisExitReason r)
ExitTraceVisState(JSContext *cx, TraceVisExitReason r)
{
js_LogTraceVisState(cx, S_EXITLAST, r);
LogTraceVisState(cx, S_EXITLAST, r);
}

struct TraceVisStateObj {

@@ -1617,20 +1629,21 @@ struct TraceVisStateObj {

inline TraceVisStateObj(JSContext *cx, TraceVisState s) : r(R_NONE)
{
js_EnterTraceVisState(cx, s, R_NONE);
EnterTraceVisState(cx, s, R_NONE);
mCx = cx;
}
inline ~TraceVisStateObj()
{
js_ExitTraceVisState(mCx, r);
ExitTraceVisState(mCx, r);
}
};

#endif /* MOZ_TRACEVIS */

extern jsval *
js_ConcatPostImacroStackCleanup(uint32 argc, JSFrameRegs &regs,
TraceRecorder *recorder);
ConcatPostImacroStackCleanup(uint32 argc, JSFrameRegs &regs,
TraceRecorder *recorder);
} /* namespace js */

#else /* !JS_TRACER */
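Note: VMSideExit, FrameInfo, and ArgsPrivateNative above all use the "(TraceType*)(this + 1)" idiom: the typemap lives in the same allocation, immediately after the header struct, so no extra pointer field is needed. A standalone model of the layout (illustrative only, not the SpiderMonkey types):

#include <cstdint>
#include <cstdlib>

typedef int8_t TraceType;

struct ExitModel {
    unsigned numStackSlots;
    unsigned numGlobalSlots;

    // Stack types start right after the header; global types follow them.
    TraceType *stackTypeMap()  { return (TraceType *)(this + 1); }
    TraceType *globalTypeMap() { return stackTypeMap() + numStackSlots; }

    static ExitModel *create(unsigned nstack, unsigned nglobal) {
        ExitModel *e = (ExitModel *)
            malloc(sizeof(ExitModel) + (nstack + nglobal) * sizeof(TraceType));
        if (!e)
            abort();
        e->numStackSlots = nstack;
        e->numGlobalSlots = nglobal;
        return e;
    }
};

int main() {
    ExitModel *e = ExitModel::create(4, 2);
    for (unsigned i = 0; i < e->numStackSlots; i++)
        e->stackTypeMap()[i] = 0;     // fill stack slot types
    for (unsigned i = 0; i < e->numGlobalSlots; i++)
        e->globalTypeMap()[i] = 1;    // fill global slot types
    free(e);
}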
@@ -40,8 +40,6 @@
#ifndef jsvector_h_
#define jsvector_h_

#include <new>

#include "jstl.h"

namespace js {
@@ -205,7 +205,7 @@ JS_XDRFindClassById(JSXDRState *xdr, uint32 id);
* before deserialization of bytecode. If the saved version does not match
* the current version, abort deserialization and invalidate the file.
*/
#define JSXDR_BYTECODE_VERSION (0xb973c0de - 58)
#define JSXDR_BYTECODE_VERSION (0xb973c0de - 59)

/*
* Library-private functions.
@@ -691,7 +691,7 @@ static JSBool
Namespace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
{
return NamespaceHelper(cx,
JS_IsConstructing(cx) ? obj : NULL,
cx->isConstructing() ? obj : NULL,
argc, argv, rval);
}

@@ -825,7 +825,7 @@ out:
static JSBool
QName(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
{
return QNameHelper(cx, JS_IsConstructing(cx) ? obj : NULL,
return QNameHelper(cx, cx->isConstructing() ? obj : NULL,
&js_QNameClass.base, argc, argv, rval);
}

@@ -833,7 +833,7 @@ static JSBool
AttributeName(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
jsval *rval)
{
return QNameHelper(cx, JS_IsConstructing(cx) ? obj : NULL,
return QNameHelper(cx, cx->isConstructing() ? obj : NULL,
&js_AttributeNameClass, argc, argv, rval);
}

@@ -7119,7 +7119,7 @@ XML(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
*rval = OBJECT_TO_JSVAL(xobj);
xml = (JSXML *) xobj->getPrivate();

if (JS_IsConstructing(cx) && !JSVAL_IS_PRIMITIVE(v)) {
if (cx->isConstructing() && !JSVAL_IS_PRIMITIVE(v)) {
vobj = JSVAL_TO_OBJECT(v);
clasp = OBJ_GET_CLASS(cx, vobj);
if (clasp == &js_XMLClass ||

@@ -7147,7 +7147,7 @@ XMLList(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v))
v = STRING_TO_JSVAL(cx->runtime->emptyString);

if (JS_IsConstructing(cx) && !JSVAL_IS_PRIMITIVE(v)) {
if (cx->isConstructing() && !JSVAL_IS_PRIMITIVE(v)) {
vobj = JSVAL_TO_OBJECT(v);
if (OBJECT_IS_XML(cx, vobj)) {
xml = (JSXML *) vobj->getPrivate();

@@ -7274,31 +7274,6 @@ js_FinalizeXML(JSContext *cx, JSXML *xml)
#endif
}

JSObject *
js_ParseNodeToXMLObject(JSCompiler *jsc, JSParseNode *pn)
{
jsval nsval;
JSObject *ns;
JSXMLArray nsarray;
JSXML *xml;

if (!js_GetDefaultXMLNamespace(jsc->context, &nsval))
return NULL;
JS_ASSERT(!JSVAL_IS_PRIMITIVE(nsval));
ns = JSVAL_TO_OBJECT(nsval);

if (!XMLArrayInit(jsc->context, &nsarray, 1))
return NULL;

XMLARRAY_APPEND(jsc->context, &nsarray, ns);
xml = ParseNodeToXML(jsc, pn, &nsarray, XSF_PRECOMPILED_ROOT);
XMLArrayFinish(jsc->context, &nsarray);
if (!xml)
return NULL;

return xml->object;
}

JSObject *
js_NewXMLObject(JSContext *cx, JSXMLClass xml_class)
{

@@ -7531,8 +7506,8 @@ js_GetFunctionNamespace(JSContext *cx, jsval *vp)
* Note the asymmetry between js_GetDefaultXMLNamespace and js_SetDefaultXML-
* Namespace. Get searches fp->scopeChain for JS_DEFAULT_XML_NAMESPACE_ID,
* while Set sets JS_DEFAULT_XML_NAMESPACE_ID in fp->varobj. There's no
* requirement that fp->varobj lie directly on fp->scopeChain, although it
* should be reachable using the prototype chain from a scope object (cf.
* requirement that fp->varobj lie directly on fp->scopeChain, although
* it should be reachable using the prototype chain from a scope object (cf.
* JSOPTION_VAROBJFIX in jsapi.h).
*
* If Get can't find JS_DEFAULT_XML_NAMESPACE_ID along the scope chain, it

@@ -7592,9 +7567,10 @@ js_SetDefaultXMLNamespace(JSContext *cx, jsval v)
v = OBJECT_TO_JSVAL(ns);

fp = js_GetTopStackFrame(cx);
varobj = fp->varobj;
varobj = fp->varobj(cx);
if (!varobj->defineProperty(cx, JS_DEFAULT_XML_NAMESPACE_ID, v,
JS_PropertyStub, JS_PropertyStub, JSPROP_PERMANENT)) {
JS_PropertyStub, JS_PropertyStub,
JSPROP_PERMANENT)) {
return JS_FALSE;
}
return JS_TRUE;
@@ -151,9 +151,6 @@ js_TraceXML(JSTracer *trc, JSXML *xml);
extern void
js_FinalizeXML(JSContext *cx, JSXML *xml);

extern JSObject *
js_ParseNodeToXMLObject(JSCompiler *jsc, JSParseNode *pn);

extern JSObject *
js_NewXMLObject(JSContext *cx, JSXMLClass xml_class);
@@ -82,7 +82,8 @@ CLASS( LOP_Q_Q, 1, 0) // 14% (none)
CLASS( LOP_F_F, 0, 2) // 16% LIR_fneg

CLASS( LOP_I_II, 0, 16) // 32% LIR_add, LIR_and, LIR_eq, etc.
CLASS( LOP_Q_QQ, 1, 9) // 41% LIR_qiadd, LIR_qiand, LIR_qeq, etc.
CLASS( LOP_Q_QQ, 1, 7) // 39% LIR_qiadd, LIR_qiand, LIR_qeq, etc.
CLASS( LOP_Q_QI, 1, 2) // 41% LIR_qilsh, LIR_qirsh, LIR_qursh
CLASS( LOP_F_FF, 0, 10) // 51% LIR_fadd, etc.

// cmov has a low weight because is also used with LIR_div/LIR_mod.

@@ -95,8 +96,8 @@ CLASS( LOP_B_FF, 0, 3) // 63% LIR_feq, LIR_flt, etc

CLASS( LOP_Q_I, 1, 2) // 65% LIR_i2q, LIR_u2q
CLASS( LOP_F_I, 0, 2) // 67% LIR_i2f, LIR_u2f

CLASS( LOP_I_F, 0, 2) // 69% LIR_qlo, LIR_qhi
CLASS( LOP_I_Q, 1, 1) // 68% LIR_q2i
CLASS( LOP_I_F, 0, 1) // 69% LIR_qlo, LIR_qhi, LIR_f2i
CLASS( LOP_F_II, 0, 1) // 70% LIR_qjoin

CLASS( LLD_I, 0, 3) // 73% LIR_ld, LIR_ldc, LIR_ld*b, LIR_ld*s
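Note: the final column of each CLASS() entry is a selection weight, and the percentages in the comments are the running cumulative share. A standalone sketch of how such a weight table drives weighted random selection (illustrative only, not the lirasm sources):

#include <cstdlib>
#include <vector>

struct WeightedClass { int id; int weight; };

// Picks class c with probability c.weight / totalWeight.
int pickClass(const std::vector<WeightedClass> &classes, int totalWeight) {
    int r = std::rand() % totalWeight;   // uniform in [0, totalWeight)
    for (size_t i = 0; i < classes.size(); i++) {
        if (r < classes[i].weight)
            return classes[i].id;
        r -= classes[i].weight;
    }
    return classes.back().id;            // not reached if weights sum correctly
}

int main() {
    WeightedClass a = { 1, 16 };         // cf. LOP_I_II's weight of 16
    WeightedClass b = { 2, 7 };          // cf. LOP_Q_QQ's new weight of 7
    std::vector<WeightedClass> table;
    table.push_back(a);
    table.push_back(b);
    int id = pickClass(table, 16 + 7);   // returns 1 with probability 16/23
    return (id == 1 || id == 2) ? 0 : 1;
}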
@@ -73,6 +73,6 @@ CFLAGS += -EHsc
CXXFLAGS += -EHsc
endif

check::
$(srcdir)/testlirc.sh
check:: $(PROGRAM)
$(srcdir)/testlirc.sh $(PROGRAM)
@@ -247,8 +247,8 @@ public:
Lirasm(bool verbose);
~Lirasm();

void assemble(istream &in);
void assembleRandom(int nIns);
void assemble(istream &in, bool optimize);
void assembleRandom(int nIns, bool optimize);
bool lookupFunction(const string &name, CallInfo *&ci);

LirBuffer *mLirbuf;

@@ -273,7 +273,7 @@ private:

class FragmentAssembler {
public:
FragmentAssembler(Lirasm &parent, const string &fragmentName);
FragmentAssembler(Lirasm &parent, const string &fragmentName, bool optimize);
~FragmentAssembler();

void assembleFragment(LirTokenStream &in,

@@ -293,6 +293,7 @@ private:
Lirasm &mParent;
const string mFragName;
Fragment *mFragment;
bool optimize;
vector<CallInfo*> mCallInfos;
map<string, LIns*> mLabels;
LirWriter *mLir;

@@ -300,6 +301,8 @@ private:
LirWriter *mCseFilter;
LirWriter *mExprFilter;
LirWriter *mVerboseWriter;
LirWriter *mValidateWriter1;
LirWriter *mValidateWriter2;
multimap<string, LIns *> mFwdJumps;

size_t mLineno;

@@ -482,9 +485,10 @@ dump_srecords(ostream &, Fragment *)
uint32_t
FragmentAssembler::sProfId = 0;

FragmentAssembler::FragmentAssembler(Lirasm &parent, const string &fragmentName)
: mParent(parent), mFragName(fragmentName),
mBufWriter(NULL), mCseFilter(NULL), mExprFilter(NULL), mVerboseWriter(NULL)
FragmentAssembler::FragmentAssembler(Lirasm &parent, const string &fragmentName, bool optimize)
: mParent(parent), mFragName(fragmentName), optimize(optimize),
mBufWriter(NULL), mCseFilter(NULL), mExprFilter(NULL), mVerboseWriter(NULL),
mValidateWriter1(NULL), mValidateWriter2(NULL)
{
mFragment = new Fragment(NULL verbose_only(, (mParent.mLogc.lcbits &
nanojit::LC_FragProfile) ?

@@ -492,9 +496,17 @@ FragmentAssembler::FragmentAssembler(Lirasm &parent, const string &fragmentName)
mFragment->lirbuf = mParent.mLirbuf;
mParent.mFragments[mFragName].fragptr = mFragment;

mLir = mBufWriter = new LirBufWriter(mParent.mLirbuf);
mLir = mCseFilter = new CseFilter(mLir, mParent.mAlloc);
mLir = mExprFilter = new ExprFilter(mLir);
mLir = mBufWriter = new LirBufWriter(mParent.mLirbuf, nanojit::AvmCore::config);
if (optimize) {
#ifdef DEBUG
mLir = mValidateWriter2 = new ValidateWriter(mLir, "end of writer pipeline");
#endif
mLir = mCseFilter = new CseFilter(mLir, mParent.mAlloc);
mLir = mExprFilter = new ExprFilter(mLir);
}
#ifdef DEBUG
mLir = mValidateWriter1 = new ValidateWriter(mLir, "start of writer pipeline");
#endif

#ifdef DEBUG
if (mParent.mVerbose) {

@@ -514,6 +526,8 @@ FragmentAssembler::FragmentAssembler(Lirasm &parent, const string &fragmentName)

FragmentAssembler::~FragmentAssembler()
{
delete mValidateWriter1;
delete mValidateWriter2;
delete mVerboseWriter;
delete mExprFilter;
delete mCseFilter;

@@ -659,9 +673,9 @@ FragmentAssembler::assemble_call(const string &op)
size_t argc = mTokens.size();
for (size_t i = 0; i < argc; ++i) {
args[i] = ref(mTokens[mTokens.size() - (i+1)]);
if (args[i]->isFloat()) ty = ARGSIZE_F;
else if (args[i]->isQuad()) ty = ARGSIZE_Q;
else ty = ARGSIZE_I;
if (args[i]->isF64()) ty = ARGSIZE_F;
else if (args[i]->isI64()) ty = ARGSIZE_Q;
else ty = ARGSIZE_I;
// Nb: i+1 because argMask() uses 1-based arg counting.
ci->_argtypes |= argMask(ty, i+1, argc);
}

@@ -746,7 +760,7 @@ FragmentAssembler::endFragment()
mFragment->lastIns =
mLir->insGuard(LIR_x, NULL, createGuardRecord(createSideExit()));

::compile(&mParent.mAssm, mFragment, mParent.mAlloc
mParent.mAssm.compile(mFragment, mParent.mAlloc, optimize
verbose_only(, mParent.mLabelMap));

if (mParent.mAssm.error() != nanojit::None) {

@@ -878,17 +892,23 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
break;

case LIR_live:
case LIR_qlive:
case LIR_flive:
case LIR_neg:
case LIR_fneg:
case LIR_not:
case LIR_qlo:
case LIR_qhi:
case LIR_q2i:
case LIR_ov:
case LIR_i2q:
case LIR_u2q:
case LIR_i2f:
case LIR_u2f:
case LIR_f2i:
#if defined NANOJIT_IA32 || defined NANOJIT_X64
case LIR_mod:
#endif
need(1);
ins = mLir->ins1(mOpcode,
ref(mTokens[0]));

@@ -901,13 +921,11 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
case LIR_mul:
#if defined NANOJIT_IA32 || defined NANOJIT_X64
case LIR_div:
case LIR_mod:
#endif
case LIR_fadd:
case LIR_fsub:
case LIR_fmul:
case LIR_fdiv:
case LIR_fmod:
case LIR_qiadd:
case LIR_and:
case LIR_or:

@@ -1070,6 +1088,7 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
case LIR_line:
case LIR_xtbl:
case LIR_jtbl:
case LIR_qret:
nyi(op);
break;

@@ -1244,19 +1263,20 @@ const CallInfo ci_N_IQF = CI(f_N_IQF, argMask(I32, 1, 3) |
// sufficiently big that it's spread across multiple chunks.
//
// The following instructions aren't generated yet:
// - iparam/qparam (hard to test beyond what is auto-generated in fragment
// - LIR_iparam/LIR_qparam (hard to test beyond what is auto-generated in fragment
//   prologues)
// - live/flive
// - callh
// - x/xt/xf/xtbl (hard to test without having multiple fragments; when we
//   only have one fragment we don't really want to leave it early)
// - ret/fret (hard to test without having multiple fragments)
// - j/jt/jf/ji/label (ji is not implemented in NJ)
// - ov (takes an arithmetic (int or FP) value as operand, and must
// - LIR_live/LIR_qlive/LIR_flive
// - LIR_callh
// - LIR_x/LIR_xt/LIR_xf/LIR_xtbl (hard to test without having multiple
//   fragments; when we only have one fragment we don't really want to leave
//   it early)
// - LIR_ret/LIR_qret/LIR_fret (hard to test without having multiple fragments)
// - LIR_j/LIR_jt/LIR_jf/LIR_jtbl/LIR_label
// - LIR_ov (takes an arithmetic (int or FP) value as operand, and must
//   immediately follow it to be safe... not that that really matters in
//   randomly generated code)
// - file/line (#ifdef VTUNE only)
// - fmod (not implemented in NJ)
// - LIR_file/LIR_line (#ifdef VTUNE only)
// - LIR_fmod (not implemented in NJ backends)
//
void
FragmentAssembler::assembleRandomFragment(int nIns)

@@ -1279,7 +1299,9 @@ FragmentAssembler::assembleRandomFragment(int nIns)

vector<LOpcode> I_II_ops;
I_II_ops.push_back(LIR_add);
#if !defined NANOJIT_64BIT
I_II_ops.push_back(LIR_iaddp);
#endif
I_II_ops.push_back(LIR_sub);
I_II_ops.push_back(LIR_mul);
#if defined NANOJIT_IA32 || defined NANOJIT_X64

@@ -1299,9 +1321,11 @@ FragmentAssembler::assembleRandomFragment(int nIns)
Q_QQ_ops.push_back(LIR_qiand);
Q_QQ_ops.push_back(LIR_qior);
Q_QQ_ops.push_back(LIR_qxor);
Q_QQ_ops.push_back(LIR_qilsh);
Q_QQ_ops.push_back(LIR_qirsh);
Q_QQ_ops.push_back(LIR_qursh);

vector<LOpcode> Q_QI_ops;
Q_QI_ops.push_back(LIR_qilsh);
Q_QI_ops.push_back(LIR_qirsh);
Q_QI_ops.push_back(LIR_qursh);

vector<LOpcode> F_FF_ops;
F_FF_ops.push_back(LIR_fadd);

@@ -1348,13 +1372,19 @@ FragmentAssembler::assembleRandomFragment(int nIns)
Q_I_ops.push_back(LIR_i2q);
Q_I_ops.push_back(LIR_u2q);

vector<LOpcode> I_Q_ops;
I_Q_ops.push_back(LIR_q2i);

vector<LOpcode> F_I_ops;
F_I_ops.push_back(LIR_i2f);
F_I_ops.push_back(LIR_u2f);

vector<LOpcode> I_F_ops;
#if !defined NANOJIT_64BIT
I_F_ops.push_back(LIR_qlo);
I_F_ops.push_back(LIR_qhi);
#endif
I_F_ops.push_back(LIR_f2i);

vector<LOpcode> F_II_ops;
F_II_ops.push_back(LIR_qjoin);

@@ -1608,6 +1638,14 @@ FragmentAssembler::assembleRandomFragment(int nIns)
}
break;

case LOP_Q_QI:
if (!Qs.empty() && !Is.empty()) {
ins = mLir->ins2(rndPick(Q_QI_ops), rndPick(Qs), rndPick(Is));
addOrReplace(Qs, ins);
n++;
}
break;

case LOP_F_FF:
if (!Fs.empty()) {
ins = mLir->ins2(rndPick(F_FF_ops), rndPick(Fs), rndPick(Fs));

@@ -1676,6 +1714,14 @@ FragmentAssembler::assembleRandomFragment(int nIns)
}
break;

case LOP_I_Q:
if (!Qs.empty()) {
ins = mLir->ins1(rndPick(I_Q_ops), rndPick(Qs));
addOrReplace(Is, ins);
n++;
}
break;

case LOP_I_F:
// XXX: NativeX64 doesn't implement qhi yet (and it may not need to).
#if !defined NANOJIT_X64

@@ -1913,7 +1959,7 @@ Lirasm::lookupFunction(const string &name, CallInfo *&ci)
}

void
Lirasm::assemble(istream &in)
Lirasm::assemble(istream &in, bool optimize)
{
LirTokenStream ts(in);
bool first = true;

@@ -1936,13 +1982,13 @@ Lirasm::assemble(istream &in)
if (!ts.eat(NEWLINE))
bad("extra junk after .begin " + name);

FragmentAssembler assembler(*this, name);
FragmentAssembler assembler(*this, name, optimize);
assembler.assembleFragment(ts, false, NULL);
first = false;
} else if (op == ".end") {
bad(".end without .begin");
} else if (first) {
FragmentAssembler assembler(*this, "main");
FragmentAssembler assembler(*this, "main", optimize);
assembler.assembleFragment(ts, true, &token);
break;
} else {

@@ -1952,10 +1998,10 @@ Lirasm::assemble(istream &in)
}

void
Lirasm::assembleRandom(int nIns)
Lirasm::assembleRandom(int nIns, bool optimize)
{
string name = "main";
FragmentAssembler assembler(*this, name);
FragmentAssembler assembler(*this, name, optimize);
assembler.assembleRandomFragment(nIns);
}

@@ -1997,7 +2043,8 @@ usageAndQuit(const string& progname)
"  -h --help        print this message\n"
"  -v --verbose     print LIR and assembly code\n"
"  --execute        execute LIR\n"
"  --random [N]     generate a random LIR block of size N (default=100)\n"
"  --[no-]optimize  enable or disable optimization of the LIR (default=off)\n"
"  --random [N]     generate a random LIR block of size N (default=1000)\n"
"i386-specific options:\n"
"  --sse            use SSE2 instructions\n"
"ARM-specific options:\n"

@@ -2018,6 +2065,7 @@ struct CmdLineOptions {
string progname;
bool verbose;
bool execute;
bool optimize;
int random;
string filename;
};

@@ -2029,6 +2077,7 @@ processCmdLine(int argc, char **argv, CmdLineOptions& opts)
opts.verbose = false;
opts.execute = false;
opts.random = 0;
opts.optimize = false;

// Architecture-specific options.
#if defined NANOJIT_IA32

@@ -2048,6 +2097,10 @@ processCmdLine(int argc, char **argv, CmdLineOptions& opts)
opts.verbose = true;
else if (arg == "--execute")
opts.execute = true;
else if (arg == "--optimize")
opts.optimize = true;
else if (arg == "--no-optimize")
opts.optimize = false;
else if (arg == "--random") {
const int defaultSize = 100;
if (i == argc - 1) {

@@ -2066,6 +2119,7 @@ processCmdLine(int argc, char **argv, CmdLineOptions& opts)
}
}
}

// Architecture-specific flags.
#if defined NANOJIT_IA32
else if (arg == "--sse") {

@@ -2112,12 +2166,12 @@ processCmdLine(int argc, char **argv, CmdLineOptions& opts)
avmplus::AvmCore::config.fixed_esp = true;
#elif defined NANOJIT_ARM
// Note that we don't check for sensible configurations here!
avmplus::AvmCore::config.arch = arm_arch;
avmplus::AvmCore::config.vfp = arm_vfp;
avmplus::AvmCore::config.arm_arch = arm_arch;
avmplus::AvmCore::config.arm_vfp = arm_vfp;
avmplus::AvmCore::config.soft_float = !arm_vfp;
// This doesn't allow us to test ARMv6T2 (which also supports Thumb2), but this shouldn't
// really matter here.
avmplus::AvmCore::config.thumb2 = (arm_arch >= 7);
avmplus::AvmCore::config.arm_thumb2 = (arm_arch >= 7);
#endif
}

@@ -2129,12 +2183,12 @@ main(int argc, char **argv)

Lirasm lasm(opts.verbose);
if (opts.random) {
lasm.assembleRandom(opts.random);
lasm.assembleRandom(opts.random, opts.optimize);
} else {
ifstream in(opts.filename.c_str());
if (!in)
errMsgAndQuit(opts.progname, "unable to open file " + opts.filename);
lasm.assemble(in);
lasm.assemble(in, opts.optimize);
}

Fragments::const_iterator i;
|
|||
|
||||
set -eu
|
||||
|
||||
LIRASM=$1
|
||||
|
||||
TESTS_DIR=`dirname "$0"`/tests
|
||||
|
||||
for infile in "$TESTS_DIR"/*.in
|
||||
|
@ -13,21 +15,24 @@ do
|
|||
exit 1
|
||||
fi
|
||||
|
||||
# If it has the special name "random.in", replace filename with --random.
|
||||
# Treat "random.in" and "random-opt.in" specially.
|
||||
if [ `basename $infile` = "random.in" ]
|
||||
then
|
||||
infile="--random 1000"
|
||||
infile="--random 1000000"
|
||||
elif [ `basename $infile` = "random-opt.in" ]
|
||||
then
|
||||
infile="--random 1000000 --optimize"
|
||||
fi
|
||||
|
||||
if ./lirasm --execute $infile > testoutput.txt && cmp -s testoutput.txt $outfile
|
||||
if $LIRASM --execute $infile | tr -d '\r' > testoutput.txt && cmp -s testoutput.txt $outfile
|
||||
then
|
||||
echo "$0: output correct for $infile"
|
||||
echo "TEST-PASS | lirasm | lirasm --execute $infile"
|
||||
else
|
||||
echo "$0: incorrect output for $infile"
|
||||
echo "$0: === actual output ==="
|
||||
echo "TEST-UNEXPECTED-FAIL | lirasm | lirasm --execute $infile"
|
||||
echo "expected output"
|
||||
cat $outfile
|
||||
echo "actual output"
|
||||
cat testoutput.txt
|
||||
echo "$0: === expected output ==="
|
||||
cat $outfile
|
||||
fi
|
||||
done
|
||||
|
||||
|
|
|
@@ -0,0 +1,6 @@
a = alloc 8
d = float 5.0
stfi d a 0
x = ldf a 0
i = f2i x
ret i

@@ -0,0 +1 @@
Output is: 5

@@ -1,7 +1,8 @@
; See bug 541232 for why the params are commented out.
.begin avg
p1 = param 0 0
p2 = param 1 0
sum = add p1 p2
oneh = int 100 ; should be: p1 = param 0 0
twoh = int 200 ; should be: p2 = param 1 0
sum = add oneh twoh ; should be: sum = add p1 p2
one = int 1
avg = rsh sum one
ret avg

@@ -0,0 +1 @@
# dummy file, contents aren't used

@@ -0,0 +1 @@
Output is: 0

@@ -1 +1 @@
a19809f7ba60b4381b77b84363bebf0ff7cf9629
f4ece4c13545709edbd5b8f856ec39f155223892
@ -66,6 +66,9 @@ namespace nanojit
|
|||
, _branchStateMap(alloc)
|
||||
, _patches(alloc)
|
||||
, _labels(alloc)
|
||||
#if NJ_USES_QUAD_CONSTANTS
|
||||
, _quadConstants(alloc)
|
||||
#endif
|
||||
, _epilogue(NULL)
|
||||
, _err(None)
|
||||
#if PEDANTIC
|
||||
|
@ -128,7 +131,7 @@ namespace nanojit
|
|||
validateQuick();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
inline void AR::clear()
|
||||
|
@ -165,6 +168,9 @@ namespace nanojit
|
|||
_branchStateMap.clear();
|
||||
_patches.clear();
|
||||
_labels.clear();
|
||||
#if NJ_USES_QUAD_CONSTANTS
|
||||
_quadConstants.clear();
|
||||
#endif
|
||||
}
|
||||
|
||||
void Assembler::registerResetAll()
|
||||
|
@ -179,20 +185,35 @@ namespace nanojit
#endif
}

// Finds a register in 'allow' to store the result of 'ins', evicting one
// if necessary. Doesn't consider the prior state of 'ins' (except that
// ins->isUsed() must be true).
Register Assembler::registerAlloc(LIns* ins, RegisterMask allow)
// Legend for register sets: A = allowed, P = preferred, F = free, S = SavedReg.
//
// Finds a register in 'setA___' to store the result of 'ins' (one from
// 'set_P__' if possible), evicting one if necessary. Doesn't consider
// the prior state of 'ins'.
//
// Nb: 'setA___' comes from the instruction's use, 'set_P__' comes from its def.
// Eg. in 'add(call(...), ...)':
// - the call's use means setA___==GpRegs;
// - the call's def means set_P__==rmask(retRegs[0]).
//
Register Assembler::registerAlloc(LIns* ins, RegisterMask setA___, RegisterMask set_P__)
{
RegisterMask allowedAndFree = allow & _allocator.free;
Register r;
NanoAssert(ins->isUsed());
RegisterMask set__F_ = _allocator.free;
RegisterMask setA_F_ = setA___ & set__F_;

if (setA_F_) {
RegisterMask set___S = SavedRegs;
RegisterMask setA_FS = setA_F_ & set___S;
RegisterMask setAPF_ = setA_F_ & set_P__;
RegisterMask setAPFS = setA_FS & set_P__;
RegisterMask set;

if (setAPFS) set = setAPFS;
else if (setAPF_) set = setAPF_;
else if (setA_FS) set = setA_FS;
else set = setA_F_;

if (allowedAndFree) {
// At least one usable register is free -- no need to steal.
// Pick a preferred one if possible.
RegisterMask preferredAndFree = allowedAndFree & SavedRegs;
RegisterMask set = ( preferredAndFree ? preferredAndFree : allowedAndFree );
r = nRegisterAllocFromSet(set);
_allocator.addActive(r, ins);
ins->setReg(r);
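The set names in the rewritten registerAlloc() encode which properties a candidate register has: Allowed, Preferred, Free, Saved. The selection order can be summarised with a minimal standalone sketch (an editorial illustration of the cascade above, not part of the patch; RegisterMask is assumed to be a plain bitmask):

typedef unsigned int RegisterMask;   // assumption: one bit per register

// Model of the A/P/F/S cascade: prefer a register with all four
// properties, then drop 'preferred', then 'saved', keeping only
// 'allowed and free' as the final fallback. An empty result means
// nothing is free and the caller must evict a victim instead.
RegisterMask pickSet(RegisterMask setA___, RegisterMask set_P__,
                     RegisterMask free, RegisterMask saved)
{
    RegisterMask setA_F_ = setA___ & free;
    if (!setA_F_)
        return 0;                        // eviction path
    RegisterMask setA_FS = setA_F_ & saved;
    RegisterMask setAPF_ = setA_F_ & set_P__;
    RegisterMask setAPFS = setAPF_ & saved;
    if (setAPFS) return setAPFS;
    if (setAPF_) return setAPF_;
    if (setA_FS) return setA_FS;
    return setA_F_;
}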
@ -201,8 +222,8 @@ namespace nanojit

// Nothing free, steal one.
// LSRA says pick the one with the furthest use.
LIns* vic = findVictim(allow);
NanoAssert(vic->isUsed());
LIns* vic = findVictim(setA___);
NanoAssert(vic->isInReg());
r = vic->getReg();

evict(vic);

@ -224,8 +245,7 @@ namespace nanojit
Register Assembler::registerAllocTmp(RegisterMask allow)
{
LIns dummyIns;
dummyIns.markAsUsed();
Register r = registerAlloc(&dummyIns, allow);
Register r = registerAlloc(&dummyIns, allow, /*prefer*/0);

// Mark r as free, ready for use as a temporary value.
_allocator.removeActive(r);
@ -302,7 +322,6 @@ namespace nanojit
LIns* ins = _entries[i];
if (!ins)
continue;
Register r = ins->getReg();
uint32_t arIndex = ins->getArIndex();
NanoAssert(arIndex != 0);
if (ins->isop(LIR_alloc)) {

@ -313,14 +332,15 @@ namespace nanojit
NanoAssert(arIndex == (uint32_t)n-1);
i = n-1;
}
else if (ins->isQuad()) {
else if (ins->isI64() || ins->isF64()) {
NanoAssert(_entries[i + 1]==ins);
i += 1; // skip high word
}
else {
NanoAssertMsg(arIndex == i, "Stack record index mismatch");
}
NanoAssertMsg(r == UnknownReg || regs.isConsistent(r, ins), "Register record mismatch");
NanoAssertMsg(!ins->isInReg() || regs.isConsistent(ins->getReg(), ins),
"Register record mismatch");
}
}

@ -365,35 +385,25 @@ namespace nanojit
}
#endif /* _DEBUG */

void Assembler::findRegFor2(RegisterMask allow, LIns* ia, Register& ra, LIns* ib, Register& rb)
void Assembler::findRegFor2(RegisterMask allowa, LIns* ia, Register& ra,
RegisterMask allowb, LIns* ib, Register& rb)
{
// There should be some overlap between 'allowa' and 'allowb', else
// there's no point calling this function.
NanoAssert(allowa & allowb);

if (ia == ib) {
ra = rb = findRegFor(ia, allow);
ra = rb = findRegFor(ia, allowa & allowb); // use intersection(allowa, allowb)

} else if (ib->isInRegMask(allowb)) {
// 'ib' is already in an allowable reg -- don't let it get evicted
// when finding 'ra'.
rb = ib->getReg();
ra = findRegFor(ia, allowa & ~rmask(rb));

} else {
// You might think we could just do this:
//
// ra = findRegFor(ia, allow);
// rb = findRegFor(ib, allow & ~rmask(ra));
//
// But if 'ib' was already in an allowed register, the first
// findRegFor() call could evict it, whereupon the second
// findRegFor() call would immediately restore it, which is
// sub-optimal. What we effectively do instead is this:
//
// ra = findRegFor(ia, allow & ~rmask(rb));
// rb = findRegFor(ib, allow & ~rmask(ra));
//
// but we have to determine what 'rb' initially is to avoid the
// mutual dependency between the assignments.
bool rbDone = !ib->isUnusedOrHasUnknownReg() && (rb = ib->getReg(), allow & rmask(rb));
if (rbDone) {
allow &= ~rmask(rb); // ib already in an allowable reg, keep that one
}
ra = findRegFor(ia, allow);
if (!rbDone) {
allow &= ~rmask(ra);
rb = findRegFor(ib, allow);
}
ra = findRegFor(ia, allowa);
rb = findRegFor(ib, allowb & ~rmask(ra));
}
}

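The two-mask form removes the old single-mask workaround entirely: because the masks are independent, the second allocation can simply exclude whatever the first one returned. A toy model of that invariant (an editorial sketch; pickFrom() stands in for findRegFor()):

#include <cassert>
typedef unsigned int RegisterMask;

// Stand-in for findRegFor(): returns the lowest set bit of 'allow'.
static RegisterMask pickFrom(RegisterMask allow) {
    return allow & (~allow + 1u);
}

// Model of the rewritten else-branch: masking 'ra' out of 'allowb'
// guarantees the pair never aliases, with no mutual dependency
// between the two assignments.
static void findRegFor2Model(RegisterMask allowa, RegisterMask allowb,
                             RegisterMask& ra, RegisterMask& rb) {
    ra = pickFrom(allowa);
    rb = pickFrom(allowb & ~ra);
    assert(ra && rb && ra != rb);   // assumes the masks overlap enough
}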
@ -426,6 +436,27 @@ namespace nanojit
return findRegFor(i, allow);
}

// Like findRegFor2(), but used for stores where the base value has the
// same type as the stored value, eg. in asm_store32() on 32-bit platforms
// and asm_store64() on 64-bit platforms. Similar to getBaseReg(),
// findRegFor2() can be called instead, but this function can optimize the
// case where the base value is a LIR_alloc.
void Assembler::getBaseReg2(RegisterMask allowValue, LIns* value, Register& rv,
RegisterMask allowBase, LIns* base, Register& rb, int &d)
{
#if !PEDANTIC
if (base->isop(LIR_alloc)) {
rb = FP;
d += findMemFor(base);
rv = findRegFor(value, allowValue);
return;
}
#else
(void) d;
#endif
findRegFor2(allowValue, value, rv, allowBase, base, rb);
}

// Finds a register in 'allow' to hold the result of 'ins'. Used when we
// encounter a use of 'ins'. The actions depend on the prior regstate of
// 'ins':
@ -444,17 +475,9 @@ namespace nanojit

Register r;

if (!ins->isUsed()) {
// 'ins' is unused, ie. dead after this point. Mark it as used
// and allocate it a register.
ins->markAsUsed();
RegisterMask prefer = hint(ins, allow);
r = registerAlloc(ins, prefer);

} else if (!ins->hasKnownReg()) {
// 'ins' is in a spill slot. Allocate it a register.
RegisterMask prefer = hint(ins, allow);
r = registerAlloc(ins, prefer);
if (!ins->isInReg()) {
// 'ins' isn't in a register (must be in a spill slot or nowhere).
r = registerAlloc(ins, allow, hint(ins));

} else if (rmask(r = ins->getReg()) & allow) {
// 'ins' is in an allowed register.

@ -462,7 +485,6 @@ namespace nanojit

} else {
// 'ins' is in a register (r) that's not in 'allow'.
RegisterMask prefer = hint(ins, allow);
#ifdef NANOJIT_IA32
if (((rmask(r)&XmmRegs) && !(allow&XmmRegs)) ||
((rmask(r)&x87Regs) && !(allow&x87Regs)))

@ -470,14 +492,14 @@ namespace nanojit
// x87 <-> xmm copy required
//_nvprof("fpu-evict",1);
evict(ins);
r = registerAlloc(ins, prefer);
r = registerAlloc(ins, allow, hint(ins));
} else
#elif defined(NANOJIT_PPC)
if (((rmask(r)&GpRegs) && !(allow&GpRegs)) ||
((rmask(r)&FpRegs) && !(allow&FpRegs)))
{
evict(ins);
r = registerAlloc(ins, prefer);
r = registerAlloc(ins, allow, hint(ins));
} else
#endif
{

@ -491,7 +513,7 @@ namespace nanojit
//
Register s = r;
_allocator.retire(r);
r = registerAlloc(ins, prefer);
r = registerAlloc(ins, allow, hint(ins));

// 'ins' is in 'allow', in register r (different to the old r);
// s is the old r.

@ -518,11 +540,9 @@ namespace nanojit
findMemFor(ins);
}

NanoAssert(ins->isUnusedOrHasUnknownReg());
NanoAssert(!ins->isInReg());
NanoAssert(_allocator.free & rmask(r));

if (!ins->isUsed())
ins->markAsUsed();
ins->setReg(r);
_allocator.removeFree(r);
_allocator.addActive(r, ins);
@ -530,31 +550,46 @@ namespace nanojit
return r;
}

#if NJ_USES_QUAD_CONSTANTS
const uint64_t* Assembler::findQuadConstant(uint64_t q)
{
uint64_t* p = _quadConstants.get(q);
if (!p)
{
p = new (_dataAlloc) uint64_t;
*p = q;
_quadConstants.put(q, p);
}
return p;
}
#endif

int Assembler::findMemFor(LIns *ins)
{
if (!ins->isUsed())
ins->markAsUsed();
if (!ins->getArIndex()) {
#if NJ_USES_QUAD_CONSTANTS
NanoAssert(!ins->isconstq());
#endif
if (!ins->isInAr()) {
uint32_t const arIndex = arReserve(ins);
ins->setArIndex(arIndex);
NanoAssert(_activation.isValidEntry(ins->getArIndex(), ins) == (arIndex != 0));
}
return disp(ins);
return arDisp(ins);
}

// XXX: this function is dangerous and should be phased out;
// See bug 513615. Calls to it should be replaced with a
// prepareResultReg() / generate code / freeResourcesOf() sequence.
Register Assembler::prepResultReg(LIns *ins, RegisterMask allow)
Register Assembler::deprecated_prepResultReg(LIns *ins, RegisterMask allow)
{
#ifdef NANOJIT_IA32
const bool pop = (allow & rmask(FST0)) &&
(ins->isUnusedOrHasUnknownReg() || ins->getReg() != FST0);
(!ins->isInReg() || ins->getReg() != FST0);
#else
const bool pop = false;
#endif
Register r = findRegFor(ins, allow);
freeRsrcOf(ins, pop);
deprecated_freeRsrcOf(ins, pop);
return r;
}

@ -599,7 +634,7 @@ namespace nanojit
// a spill slot, or 'ins' is in an XMM register.
#ifdef NANOJIT_IA32
const bool pop = (allow & rmask(FST0)) &&
(ins->isUnusedOrHasUnknownReg() || ins->getReg() != FST0);
(!ins->isInReg() || ins->getReg() != FST0);
#else
const bool pop = false;
#endif
@ -610,35 +645,39 @@ namespace nanojit

void Assembler::asm_spilli(LInsp ins, bool pop)
{
int d = disp(ins);
int d = ins->isInAr() ? arDisp(ins) : 0;
Register r = ins->getReg();
verbose_only( if (d && (_logc->lcbits & LC_Assembly)) {
setOutputForEOL(" <= spill %s",
_thisfrag->lirbuf->names->formatRef(ins)); } )
asm_spill(r, d, pop, ins->isQuad());
asm_spill(r, d, pop, ins->isI64() || ins->isF64());
}

// XXX: This function is error-prone and should be phased out; see bug 513615.
void Assembler::freeRsrcOf(LIns *ins, bool pop)
void Assembler::deprecated_freeRsrcOf(LIns *ins, bool pop)
{
Register r = ins->getReg();
if (isKnownReg(r)) {
if (ins->isInReg()) {
asm_spilli(ins, pop);
_allocator.retire(r); // free any register associated with entry
_allocator.retire(ins->getReg()); // free any register associated with entry
ins->clearReg();
}
if (ins->isInAr()) {
arFree(ins); // free any AR space associated with entry
ins->clearArIndex();
}
arFreeIfInUse(ins); // free any stack space associated with entry
ins->markAsClear();
}

// Frees all record of registers and spill slots used by 'ins'.
void Assembler::freeResourcesOf(LIns *ins)
{
Register r = ins->getReg();
if (isKnownReg(r)) {
_allocator.retire(r); // free any register associated with entry
if (ins->isInReg()) {
_allocator.retire(ins->getReg()); // free any register associated with entry
ins->clearReg();
}
if (ins->isInAr()) {
arFree(ins); // free any AR space associated with entry
ins->clearArIndex();
}
arFreeIfInUse(ins); // free any stack space associated with entry
ins->markAsClear();
}

// Frees 'r' in the RegAlloc regstate, if it's not already free.
@ -677,8 +716,7 @@ namespace nanojit
asm_restore(vic, r);

_allocator.retire(r);
if (vic->isUsed())
vic->setReg(UnknownReg);
vic->clearReg();

// At this point 'vic' is unused (if rematerializable), or in a spill
// slot (if not).
@ -787,6 +825,155 @@ namespace nanojit
return jmpTarget;
}

void Assembler::compile(Fragment* frag, Allocator& alloc, bool optimize verbose_only(, LabelMap* labels))
{
verbose_only(
bool anyVerb = (_logc->lcbits & 0xFFFF & ~LC_FragProfile) > 0;
bool asmVerb = (_logc->lcbits & 0xFFFF & LC_Assembly) > 0;
bool liveVerb = (_logc->lcbits & 0xFFFF & LC_Liveness) > 0;
)

/* BEGIN decorative preamble */
verbose_only(
if (anyVerb) {
_logc->printf("========================================"
"========================================\n");
_logc->printf("=== BEGIN LIR::compile(%p, %p)\n",
(void*)this, (void*)frag);
_logc->printf("===\n");
})
/* END decorative preamble */

verbose_only( if (liveVerb) {
_logc->printf("\n");
_logc->printf("=== Results of liveness analysis:\n");
_logc->printf("===\n");
LirReader br(frag->lastIns);
LirFilter* lir = &br;
if (optimize) {
StackFilter* sf = new (alloc) StackFilter(lir, alloc, frag->lirbuf->sp, frag->lirbuf->rp);
lir = sf;
}
live(lir, alloc, frag, _logc);
})

/* Set up the generic text output cache for the assembler */
verbose_only( StringList asmOutput(alloc); )
verbose_only( _outputCache = &asmOutput; )

beginAssembly(frag);
if (error())
return;

//_logc->printf("recompile trigger %X kind %d\n", (int)frag, frag->kind);

verbose_only( if (anyVerb) {
_logc->printf("=== Translating LIR fragments into assembly:\n");
})

// now the main trunk
verbose_only( if (anyVerb) {
_logc->printf("=== -- Compile trunk %s: begin\n",
labels->format(frag));
})

// Used for debug printing, if needed
debug_only(ValidateReader *validate = NULL;)
verbose_only(
ReverseLister *pp_init = NULL;
ReverseLister *pp_after_sf = NULL;
)

// The LIR passes through these filters as listed in this
// function, viz, top to bottom.

// set up backwards pipeline: assembler <- StackFilter <- LirReader
LirFilter* lir = new (alloc) LirReader(frag->lastIns);

#ifdef DEBUG
// VALIDATION
validate = new (alloc) ValidateReader(lir);
lir = validate;
#endif

// INITIAL PRINTING
verbose_only( if (_logc->lcbits & LC_ReadLIR) {
pp_init = new (alloc) ReverseLister(lir, alloc, frag->lirbuf->names, _logc,
"Initial LIR");
lir = pp_init;
})

// STACKFILTER
if (optimize) {
StackFilter* stackfilter =
new (alloc) StackFilter(lir, alloc, frag->lirbuf->sp, frag->lirbuf->rp);
lir = stackfilter;
}

verbose_only( if (_logc->lcbits & LC_AfterSF) {
pp_after_sf = new (alloc) ReverseLister(lir, alloc, frag->lirbuf->names, _logc,
"After StackFilter");
lir = pp_after_sf;
})

assemble(frag, lir);

// If we were accumulating debug info in the various ReverseListers,
// call finish() to emit whatever contents they have accumulated.
verbose_only(
if (pp_init) pp_init->finish();
if (pp_after_sf) pp_after_sf->finish();
)

verbose_only( if (anyVerb) {
_logc->printf("=== -- Compile trunk %s: end\n",
labels->format(frag));
})

verbose_only(
if (asmVerb)
outputf("## compiling trunk %s", labels->format(frag));
)
endAssembly(frag);

// Reverse output so that assembly is displayed low-to-high.
// Up to this point, _outputCache has been non-NULL, and so has been
// accumulating output. Now we set it to NULL, traverse the entire
// list of stored strings, and hand them a second time to output.
// Since _outputCache is now NULL, outputf just hands these strings
// directly onwards to _logc->printf.
verbose_only( if (anyVerb) {
_logc->printf("\n");
_logc->printf("=== Aggregated assembly output: BEGIN\n");
_logc->printf("===\n");
_outputCache = 0;
for (Seq<char*>* p = asmOutput.get(); p != NULL; p = p->tail) {
char *str = p->head;
outputf("  %s", str);
}
_logc->printf("===\n");
_logc->printf("=== Aggregated assembly output: END\n");
});

if (error())
frag->fragEntry = 0;

verbose_only( frag->nCodeBytes += codeBytes; )
verbose_only( frag->nExitBytes += exitBytes; )

/* BEGIN decorative postamble */
verbose_only( if (anyVerb) {
_logc->printf("\n");
_logc->printf("===\n");
_logc->printf("=== END LIR::compile(%p, %p)\n",
(void*)this, (void*)frag);
_logc->printf("========================================"
"========================================\n");
_logc->printf("\n");
});
/* END decorative postamble */
}

void Assembler::beginAssembly(Fragment *frag)
{
verbose_only( codeBytes = 0; )
@ -891,8 +1078,10 @@ namespace nanojit
if (error()) {
// something went wrong, release all allocated code memory
_codeAlloc.freeAll(codeList);
_codeAlloc.free(exitStart, exitEnd);
if (_nExitIns)
_codeAlloc.free(exitStart, exitEnd);
_codeAlloc.free(codeStart, codeEnd);
codeList = NULL;
return;
}

@ -905,15 +1094,19 @@ namespace nanojit
// save used parts of current block on fragment's code list, free the rest
#ifdef NANOJIT_ARM
// [codeStart, _nSlot) ... gap ... [_nIns, codeEnd)
_codeAlloc.addRemainder(codeList, exitStart, exitEnd, _nExitSlot, _nExitIns);
if (_nExitIns) {
_codeAlloc.addRemainder(codeList, exitStart, exitEnd, _nExitSlot, _nExitIns);
verbose_only( exitBytes -= (_nExitIns - _nExitSlot) * sizeof(NIns); )
}
_codeAlloc.addRemainder(codeList, codeStart, codeEnd, _nSlot, _nIns);
verbose_only( exitBytes -= (_nExitIns - _nExitSlot) * sizeof(NIns); )
verbose_only( codeBytes -= (_nIns - _nSlot) * sizeof(NIns); )
#else
// [codeStart ... gap ... [_nIns, codeEnd))
_codeAlloc.addRemainder(codeList, exitStart, exitEnd, exitStart, _nExitIns);
if (_nExitIns) {
_codeAlloc.addRemainder(codeList, exitStart, exitEnd, exitStart, _nExitIns);
verbose_only( exitBytes -= (_nExitIns - exitStart) * sizeof(NIns); )
}
_codeAlloc.addRemainder(codeList, codeStart, codeEnd, codeStart, _nIns);
verbose_only( exitBytes -= (_nExitIns - exitStart) * sizeof(NIns); )
verbose_only( codeBytes -= (_nIns - codeStart) * sizeof(NIns); )
#endif
@ -943,11 +1136,7 @@ namespace nanojit
// Clear reg allocation, preserve stack allocation.
_allocator.retire(r);
NanoAssert(r == ins->getReg());
ins->setReg(UnknownReg);

if (!ins->getArIndex()) {
ins->markAsClear();
}
ins->clearReg();
}
}
}

@ -1010,18 +1199,20 @@ namespace nanojit
{
NanoAssert(_thisfrag->nStaticExits == 0);

// trace must end with LIR_x, LIR_[f]ret, LIR_xtbl, or LIR_[f]live
NanoAssert(reader->pos()->isop(LIR_x) ||
reader->pos()->isop(LIR_ret) ||
reader->pos()->isop(LIR_fret) ||
reader->pos()->isop(LIR_xtbl) ||
reader->pos()->isop(LIR_flive) ||
reader->pos()->isop(LIR_live));
// The trace must end with one of these opcodes.
NanoAssert(reader->pos()->isop(LIR_x) ||
reader->pos()->isop(LIR_xtbl) ||
reader->pos()->isop(LIR_ret) ||
reader->pos()->isop(LIR_qret) ||
reader->pos()->isop(LIR_fret) ||
reader->pos()->isop(LIR_live) ||
reader->pos()->isop(LIR_qlive) ||
reader->pos()->isop(LIR_flive));

InsList pending_lives(alloc);

for (LInsp ins = reader->read(); !ins->isop(LIR_start) && !error();
ins = reader->read())
NanoAssert(!error());
for (LInsp ins = reader->read(); !ins->isop(LIR_start); ins = reader->read())
{
/* What's going on here: we're visiting all the LIR instructions
in the buffer, working strictly backwards in buffer-order, and
@ -1097,8 +1288,9 @@ namespace nanojit
evictAllActiveRegs();
break;

case LIR_flive:
case LIR_live: {
case LIR_live:
case LIR_qlive:
case LIR_flive: {
countlir_live();
LInsp op1 = ins->oprnd1();
// alloca's are meant to live until the point of the LIR_live instruction, marking

@ -1116,8 +1308,9 @@ namespace nanojit
break;
}

case LIR_fret:
case LIR_ret: {
case LIR_ret:
case LIR_qret:
case LIR_fret: {
countlir_ret();
asm_ret(ins);
break;

@ -1127,12 +1320,12 @@ namespace nanojit
// is the address of the stack space.
case LIR_alloc: {
countlir_alloc();
NanoAssert(ins->getArIndex() != 0);
Register r = ins->getReg();
if (isKnownReg(r)) {
NanoAssert(ins->isInAr());
if (ins->isInReg()) {
Register r = ins->getReg();
asm_restore(ins, r);
_allocator.retire(r);
ins->setReg(UnknownReg);
ins->clearReg();
}
freeResourcesOf(ins);
break;

@ -1154,7 +1347,7 @@ namespace nanojit
case LIR_callh:
{
// return result of quad-call in register
prepResultReg(ins, rmask(retRegs[1]));
deprecated_prepResultReg(ins, rmask(retRegs[1]));
// if hi half was used, we must use the call to ensure it happens
findSpecificRegFor(ins->oprnd1(), retRegs[0]);
break;

@ -1166,6 +1359,12 @@ namespace nanojit
asm_param(ins);
break;
}
case LIR_q2i:
{
countlir_alu();
asm_q2i(ins);
break;
}
case LIR_qlo:
{
countlir_qlo();

@ -1285,6 +1484,12 @@ namespace nanojit
asm_u2f(ins);
break;
}
case LIR_f2i:
{
countlir_fpu();
asm_f2i(ins);
break;
}
case LIR_i2q:
case LIR_u2q:
{
@ -1649,10 +1854,10 @@ namespace nanojit
void Assembler::reserveSavedRegs()
{
LirBuffer *b = _thisfrag->lirbuf;
for (int i=0, n = NumSavedRegs; i < n; i++) {
LIns *p = b->savedRegs[i];
if (p)
findMemFor(p);
for (int i = 0, n = NumSavedRegs; i < n; i++) {
LIns *ins = b->savedRegs[i];
if (ins)
findMemFor(ins);
}
}

@ -1671,15 +1876,22 @@ namespace nanojit
// ensure that exprs spanning the loop are marked live at the end of the loop
reserveSavedRegs();
for (Seq<LIns*> *p = pending_lives.get(); p != NULL; p = p->tail) {
LIns *i = p->head;
NanoAssert(i->isop(LIR_live) || i->isop(LIR_flive));
LIns *op1 = i->oprnd1();
LIns *ins = p->head;
NanoAssert(ins->isop(LIR_live) || ins->isop(LIR_qlive) || ins->isop(LIR_flive));
LIns *op1 = ins->oprnd1();
// must findMemFor even if we're going to findRegFor; loop-carried
// operands may spill on another edge, and we need them to always
// spill to the same place.
findMemFor(op1);
#if NJ_USES_QUAD_CONSTANTS
// exception: if quad constants are true constants, we should
// never call findMemFor on those ops
if (!op1->isconstq())
#endif
{
findMemFor(op1);
}
if (! (op1->isconst() || op1->isconstf() || op1->isconstq()))
findRegFor(op1, i->isop(LIR_flive) ? FpRegs : GpRegs);
findRegFor(op1, ins->isop(LIR_flive) ? FpRegs : GpRegs);
}

// clear this list since we have now dealt with those lifetimes. extending

@ -1725,8 +1937,7 @@ namespace nanojit
continue;
}

const char* rname = ins->isQuad() ? fpn(r) : gpn(r);
VMPI_sprintf(s, " %s(%s)", rname, n);
VMPI_sprintf(s, " %s(%s)", gpn(r), n);
s += VMPI_strlen(s);
}
}
@ -1851,13 +2062,13 @@ namespace nanojit
return i;
}

void Assembler::arFreeIfInUse(LIns* ins)
void Assembler::arFree(LIns* ins)
{
NanoAssert(ins->isInAr());
uint32_t arIndex = ins->getArIndex();
if (arIndex) {
NanoAssert(_activation.isValidEntry(arIndex, ins));
_activation.freeEntryAt(arIndex); // free any stack space associated with entry
}
NanoAssert(arIndex);
NanoAssert(_activation.isValidEntry(arIndex, ins));
_activation.freeEntryAt(arIndex); // free any stack space associated with entry
}

/**

@ -116,7 +116,7 @@ namespace nanojit

public:
AR();

uint32_t stackSlotsNeeded() const;

void clear();

@ -145,13 +145,13 @@ namespace nanojit

inline AR::AR()
{
_entries[0] = NULL;
_entries[0] = NULL;
clear();
}

inline /*static*/ uint32_t AR::nStackSlotsFor(LIns* ins)
{
return ins->isop(LIR_alloc) ? (ins->size()>>2) : (ins->isQuad() ? 2 : 1);
return ins->isop(LIR_alloc) ? (ins->size()>>2) : ((ins->isI64() || ins->isF64()) ? 2 : 1);
}

inline uint32_t AR::stackSlotsNeeded() const
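As a worked example of the slot accounting in nStackSlotsFor() (an editorial sketch, not part of the patch): activation-record slots are 4 bytes wide, so a LIR_alloc reserves size/4 slots, a 64-bit value (now spelled isI64()/isF64() rather than isQuad()) takes two, and anything else takes one.

// Standalone model of AR::nStackSlotsFor().
unsigned nStackSlotsFor(bool isAlloc, unsigned sizeBytes, bool is64bit)
{
    if (isAlloc)
        return sizeBytes >> 2;   // eg. "a = alloc 8" -> 2 slots
    return is64bit ? 2 : 1;      // eg. a double -> 2 slots, an int -> 1
}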
@ -197,6 +197,9 @@ namespace nanojit

typedef SeqBuilder<NIns*> NInsList;
typedef HashMap<NIns*, LIns*> NInsMap;
#if NJ_USES_QUAD_CONSTANTS
typedef HashMap<uint64_t, uint64_t*> QuadConstantMap;
#endif

#ifdef VTUNE
class avmplus::CodegenLIR;

@ -224,8 +227,6 @@ namespace nanojit
LabelState *get(LIns *);
};

typedef SeqBuilder<char*> StringList;

/** map tracking the register allocation state at each bailout point
 * (represented by SideExit*) in a trace fragment. */
typedef HashMap<SideExit*, RegAlloc*> RegAllocMap;

@ -240,11 +241,8 @@ namespace nanojit
class Assembler
{
friend class VerboseBlockReader;
public:
#ifdef NJ_VERBOSE
// Log controller object. Contains what-stuff-should-we-print
// bits, and a sink function for debug printing.
LogControl* _logc;
public:
// Buffer for holding text as we generate it in reverse order.
StringList* _outputCache;

@ -253,6 +251,10 @@ namespace nanojit
void outputf(const char* format, ...);

private:
// Log controller object. Contains what-stuff-should-we-print
// bits, and a sink function for debug printing.
LogControl* _logc;

// Buffer used in most of the output functions. It must be big enough
// to hold both the output line and the 'outlineEOL' buffer, which
// is concatenated onto 'outline' just before it is printed.
@ -280,6 +282,9 @@ namespace nanojit

Assembler(CodeAlloc& codeAlloc, Allocator& dataAlloc, Allocator& alloc, AvmCore* core, LogControl* logc);

void compile(Fragment *frag, Allocator& alloc, bool optimize
verbose_only(, LabelMap*));

void endAssembly(Fragment* frag);
void assemble(Fragment* frag, LirFilter* reader);
void beginAssembly(Fragment *frag);

@ -301,20 +306,20 @@ namespace nanojit
debug_only( void resourceConsistencyCheck(); )
debug_only( void registerConsistencyCheck(); )

Stats _stats;
CodeList* codeList; // finished blocks of code.

private:
Stats _stats;

void gen(LirFilter* toCompile);
NIns* genPrologue();
NIns* genEpilogue();

uint32_t arReserve(LIns* ins);
void arFreeIfInUse(LIns* ins);
void arFree(LIns* ins);
void arReset();

Register registerAlloc(LIns* ins, RegisterMask allow);
Register registerAlloc(LIns* ins, RegisterMask allow, RegisterMask prefer);
Register registerAllocTmp(RegisterMask allow);
void registerResetAll();
void evictAllActiveRegs();

@ -326,25 +331,37 @@ namespace nanojit
LInsp findVictim(RegisterMask allow);

Register getBaseReg(LIns *i, int &d, RegisterMask allow);
void getBaseReg2(RegisterMask allowValue, LIns* value, Register& rv,
RegisterMask allowBase, LIns* base, Register& rb, int &d);
#if NJ_USES_QUAD_CONSTANTS
const uint64_t*
findQuadConstant(uint64_t q);
#endif
int findMemFor(LIns* i);
Register findRegFor(LIns* i, RegisterMask allow);
void findRegFor2(RegisterMask allow, LIns* ia, Register &ra, LIns *ib, Register &rb);
void findRegFor2(RegisterMask allowa, LIns* ia, Register &ra,
RegisterMask allowb, LIns *ib, Register &rb);
Register findSpecificRegFor(LIns* i, Register r);
Register findSpecificRegForUnallocated(LIns* i, Register r);
Register prepResultReg(LIns *i, RegisterMask allow);
Register deprecated_prepResultReg(LIns *i, RegisterMask allow);
Register prepareResultReg(LIns *i, RegisterMask allow);
void freeRsrcOf(LIns *i, bool pop);
void deprecated_freeRsrcOf(LIns *i, bool pop);
void freeResourcesOf(LIns *ins);
void evictIfActive(Register r);
void evict(LIns* vic);
RegisterMask hint(LIns*i, RegisterMask allow);
RegisterMask hint(LIns* ins); // mask==0 means there's no preferred register(s)

void codeAlloc(NIns *&start, NIns *&end, NIns *&eip
verbose_only(, size_t &nBytes));
bool canRemat(LIns*);

// njn
// njn
// njn
// njn
// njn
bool isKnownReg(Register r) {
return r != UnknownReg;
return r != deprecated_UnknownReg;
}

Allocator& alloc; // for items with same lifetime as this Assembler
@ -354,6 +371,9 @@ namespace nanojit
RegAllocMap _branchStateMap;
NInsMap _patches;
LabelStateMap _labels;
#if NJ_USES_QUAD_CONSTANTS
QuadConstantMap _quadConstants;
#endif

// We generate code into two places: normal code chunks, and exit
// code chunks (for exit stubs). We use a hack to avoid having to

@ -369,13 +389,12 @@ namespace nanojit
NIns *exitStart, *exitEnd; // current exit code chunk
NIns* _nIns; // current instruction in current normal code chunk
NIns* _nExitIns; // current instruction in current exit code chunk
// note: _nExitIns == NULL until the first side exit is seen.
#ifdef NJ_VERBOSE
public:
size_t codeBytes; // bytes allocated in normal code chunks
size_t exitBytes; // bytes allocated in exit code chunks
#endif

private:
#define SWAP(t, a, b) do { t tmp = a; a = b; b = tmp; } while (0)
void swapCodeChunks();

@ -415,6 +434,8 @@ namespace nanojit
void asm_fop(LInsp ins);
void asm_i2f(LInsp ins);
void asm_u2f(LInsp ins);
void asm_f2i(LInsp ins);
void asm_q2i(LInsp ins);
void asm_promote(LIns *ins);
void asm_nongp_copy(Register r, Register s);
void asm_call(LInsp);

@ -457,10 +478,16 @@ namespace nanojit
avmplus::Config &config;
};

inline int32_t disp(LIns* ins)
inline int32_t arDisp(LIns* ins)
{
// even on 64bit cpu's, we allocate stack area in 4byte chunks
return -4 * int32_t(ins->getArIndex());
}
// XXX: deprecated, use arDisp() instead. See bug 538924.
inline int32_t deprecated_disp(LIns* ins)
{
// even on 64bit cpu's, we allocate stack area in 4byte chunks
return -4 * int32_t(ins->deprecated_getArIndex());
}
}
#endif // __nanojit_Assembler__

@ -70,14 +70,14 @@ namespace nanojit
void CodeAlloc::reset() {
// give all memory back to gcheap. Assumption is that all
// code is done being used by now.
for (CodeList* b = heapblocks; b != 0; ) {
for (CodeList* hb = heapblocks; hb != 0; ) {
_nvprof("free page",1);
CodeList* next = b->next;
void *mem = firstBlock(b);
VMPI_setPageProtection(mem, bytesPerAlloc, false /* executable */, true /* writable */);
freeCodeChunk(mem, bytesPerAlloc);
CodeList* next = hb->next;
CodeList* fb = firstBlock(hb);
markBlockWrite(fb);
freeCodeChunk(fb, bytesPerAlloc);
totalAllocated -= bytesPerAlloc;
b = next;
hb = next;
}
NanoAssert(!totalAllocated);
heapblocks = availblocks = 0;

@ -89,9 +89,10 @@ namespace nanojit
return (CodeList*) (end - (uintptr_t)bytesPerAlloc);
}

int round(size_t x) {
static int round(size_t x) {
return (int)((x + 512) >> 10);
}

void CodeAlloc::logStats() {
size_t total = 0;
size_t frag_size = 0;

@ -112,9 +113,19 @@ namespace nanojit
round(total), round(free_size), frag_size);
}

inline void CodeAlloc::markBlockWrite(CodeList* b) {
NanoAssert(b->terminator != NULL);
CodeList* term = b->terminator;
if (term->isExec) {
markCodeChunkWrite(firstBlock(term), bytesPerAlloc);
term->isExec = false;
}
}

void CodeAlloc::alloc(NIns* &start, NIns* &end) {
// Reuse a block if possible.
if (availblocks) {
markBlockWrite(availblocks);
CodeList* b = removeBlock(availblocks);
b->isFree = false;
start = b->start();

@ -128,7 +139,6 @@ namespace nanojit
totalAllocated += bytesPerAlloc;
NanoAssert(mem != NULL); // see allocCodeChunk contract in CodeAlloc.h
_nvprof("alloc page", uintptr_t(mem)>>12);
VMPI_setPageProtection(mem, bytesPerAlloc, true/*executable*/, true/*writable*/);
CodeList* b = addMem(mem, bytesPerAlloc);
b->isFree = false;
start = b->start();

@ -225,7 +235,7 @@ namespace nanojit
void* mem = hb->lower;
*prev = hb->next;
_nvprof("free page",1);
VMPI_setPageProtection(mem, bytesPerAlloc, false /* executable */, true /* writable */);
markBlockWrite(firstBlock(hb));
freeCodeChunk(mem, bytesPerAlloc);
totalAllocated -= bytesPerAlloc;
} else {
@ -347,9 +357,12 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
|
|||
// create a tiny terminator block, add to fragmented list, this way
|
||||
// all other blocks have a valid block at b->higher
|
||||
CodeList* terminator = b->higher;
|
||||
b->terminator = terminator;
|
||||
terminator->lower = b;
|
||||
terminator->end = 0; // this is how we identify the terminator
|
||||
terminator->isFree = false;
|
||||
terminator->isExec = false;
|
||||
terminator->terminator = 0;
|
||||
debug_only(sanity_check();)
|
||||
|
||||
// add terminator to heapblocks list so we can track whole blocks
|
||||
|
@ -365,7 +378,7 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
|
|||
|
||||
CodeList* CodeAlloc::removeBlock(CodeList* &blocks) {
|
||||
CodeList* b = blocks;
|
||||
NanoAssert(b);
|
||||
NanoAssert(b != NULL);
|
||||
blocks = b->next;
|
||||
b->next = 0;
|
||||
return b;
|
||||
|
@ -399,6 +412,7 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
|
|||
// b1 b2
|
||||
CodeList* b1 = getBlock(start, end);
|
||||
CodeList* b2 = (CodeList*) (uintptr_t(holeEnd) - offsetof(CodeList, code));
|
||||
b2->terminator = b1->terminator;
|
||||
b2->isFree = false;
|
||||
b2->next = 0;
|
||||
b2->higher = b1->higher;
|
||||
|
@ -421,10 +435,12 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
|
|||
b2->lower = b1;
|
||||
b2->higher = b3;
|
||||
b2->isFree = false; // redundant, since we're about to free, but good hygiene
|
||||
b2->terminator = b1->terminator;
|
||||
b3->lower = b2;
|
||||
b3->end = end;
|
||||
b3->isFree = false;
|
||||
b3->higher->lower = b3;
|
||||
b3->terminator = b1->terminator;
|
||||
b2->next = 0;
|
||||
b3->next = 0;
|
||||
debug_only(sanity_check();)
|
||||
|
@ -518,5 +534,14 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
|
|||
#endif /* CROSS_CHECK_FREE_LIST */
|
||||
}
|
||||
#endif
|
||||
|
||||
void CodeAlloc::markAllExec() {
|
||||
for (CodeList* hb = heapblocks; hb != NULL; hb = hb->next) {
|
||||
if (!hb->isExec) {
|
||||
hb->isExec = true;
|
||||
markCodeChunkExec(firstBlock(hb), bytesPerAlloc);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif // FEATURE_NANOJIT
|
||||
|
|
|
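markBlockWrite() and markAllExec() together give each code chunk a simple write/exec toggle, keyed off the terminator block's isExec flag, so the expensive page-protection call is made only on real state transitions. A minimal standalone model of the protocol (an editorial sketch; the real calls are markCodeChunkWrite()/markCodeChunkExec()):

// Toy model of the per-chunk W^X toggle.
struct Chunk {
    bool isExec;          // protection state of the whole chunk
};

void modelMarkWrite(Chunk& c) {
    if (c.isExec) {       // only flip (and pay for protection) when needed
        // markCodeChunkWrite(firstBlock, bytesPerAlloc);  // real call elided
        c.isExec = false;
    }
}

void modelMarkExec(Chunk& c) {
    if (!c.isExec) {
        // markCodeChunkExec(firstBlock, bytesPerAlloc);   // real call elided
        c.isExec = true;
    }
}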
@ -64,8 +64,16 @@ namespace nanojit
for splitting and coalescing blocks. */
CodeList* lower;

/** pointer to the heapblock terminal that represents the code chunk containing this block */
CodeList* terminator;

/** true if block is free, false otherwise */
bool isFree;

/** (only valid for terminator blocks). Set true just before calling
 * markCodeChunkExec() and false just after markCodeChunkWrite() */
bool isExec;

union {
// this union is used in lieu of pointer punning in code
// the end of this block is always the address of the next higher block

@ -142,9 +150,17 @@ namespace nanojit

/** free a block previously allocated by allocCodeMem. nbytes will
 * match the previous allocCodeMem, but is provided here as well
 * to mirror the mmap()/munmap() api. */
 * to mirror the mmap()/munmap() api. markCodeChunkWrite() will have
 * been called if necessary, so it is not necessary for freeCodeChunk()
 * to do it again. */
void freeCodeChunk(void* addr, size_t nbytes);

/** make this specific extent ready to execute (might remove write) */
void markCodeChunkExec(void* addr, size_t nbytes);

/** make this extent ready to modify (might remove exec) */
void markCodeChunkWrite(void* addr, size_t nbytes);

public:
CodeAlloc();
~CodeAlloc();

@ -198,6 +214,12 @@ namespace nanojit

/** return any completely empty pages */
void sweep();

/** protect all code in this code alloc */
void markAllExec();

/** unprotect the code chunk containing just this one block */
void markBlockWrite(CodeList* b);
};
}

(The diff for this file is not shown because it is too large.)
@ -40,15 +40,6 @@
#ifndef __nanojit_LIR__
#define __nanojit_LIR__

/**
 * Fundamentally, the arguments to the various operands can be grouped along
 * two dimensions. One dimension is size: can the arguments fit into a 32-bit
 * register, or not? The other dimension is whether the argument is an integer
 * (including pointers) or a floating-point value. In all comments below,
 * "integer" means integer of any size, including 64-bit, unless otherwise
 * specified. All floating-point values are always 64-bit. Below, "quad" is
 * used for a 64-bit value that might be either integer or floating-point.
 */
namespace nanojit
{
enum LOpcode

@ -93,7 +84,9 @@ namespace nanojit
LIR_puge = PTR_SIZE(LIR_uge, LIR_quge),
LIR_alloc = PTR_SIZE(LIR_ialloc, LIR_qalloc),
LIR_pcall = PTR_SIZE(LIR_icall, LIR_qcall),
LIR_param = PTR_SIZE(LIR_iparam, LIR_qparam)
LIR_param = PTR_SIZE(LIR_iparam, LIR_qparam),
LIR_plive = PTR_SIZE(LIR_live, LIR_qlive),
LIR_pret = PTR_SIZE(LIR_ret, LIR_qret)
};

struct GuardRecord;
@ -137,7 +130,19 @@ namespace nanojit
verbose_only ( const char* _name; )

uint32_t _count_args(uint32_t mask) const;
uint32_t get_sizes(ArgSize*) const;
// Nb: uses right-to-left order, eg. sizes[0] is the size of the right-most arg.
uint32_t get_sizes(ArgSize* sizes) const;

inline ArgSize returnType() const {
return ArgSize(_argtypes & ARGSIZE_MASK_ANY);
}

// Note that this indexes arguments *backwards*, that is to
// get the Nth arg, you have to ask for index (numargs - N).
// See mozilla bug 525815 for fixing this.
inline ArgSize argType(uint32_t arg) const {
return ArgSize((_argtypes >> (ARGSIZE_SHIFT * (arg+1))) & ARGSIZE_MASK_ANY);
}

inline bool isIndirect() const {
return _address < 256;

@ -169,7 +174,7 @@ namespace nanojit
(op >= LIR_quad && op <= LIR_quge);
}
inline bool isRetOpcode(LOpcode op) {
return op == LIR_ret || op == LIR_fret;
return op == LIR_ret || op == LIR_qret || op == LIR_fret;
}
LOpcode f64arith_to_i32arith(LOpcode op);
LOpcode i32cmp_to_i64cmp(LOpcode op);

@ -177,16 +182,23 @@ namespace nanojit
// Array holding the 'repKind' field from LIRopcode.tbl.
extern const uint8_t repKinds[];

enum LTy {
LTy_Void, // no value/no type
LTy_I32, // 32-bit integer
LTy_I64, // 64-bit integer
LTy_F64 // 64-bit float
enum LTy { // Nb: enum values must be 0..n for typeStrings[] to work.
LTy_Void = 0, // no value/no type
LTy_I32 = 1, // 32-bit integer
LTy_I64 = 2, // 64-bit integer
LTy_F64 = 3, // 64-bit float

LTy_Ptr = PTR_SIZE(LTy_I32, LTy_I64) // word-sized integer
};

// Array holding the 'retType' field from LIRopcode.tbl.
extern const LTy retTypes[];

inline RegisterMask rmask(Register r)
{
return RegisterMask(1) << r;
}

//-----------------------------------------------------------------------
// Low-level instructions. This is a bit complicated, because we have a
// variable-width representation to minimise space usage.
@ -291,14 +303,18 @@ namespace nanojit
class LIns
{
private:
// LastWord: fields shared by all LIns kinds. The .arIndex, .reg and
// .used fields form a "reservation" that is used temporarily during
// assembly to record information relating to register allocation.
// See class RegAlloc for more details.
// LastWord: fields shared by all LIns kinds. The .inReg, .reg,
// .inAr and .arIndex fields form a "reservation" that is used
// temporarily during assembly to record information relating to
// register allocation. See class RegAlloc for more details.
//
// Note: all combinations of .inReg/.inAr are possible, ie. 0/0, 0/1,
// 1/0, 1/1.
struct LastWord {
uint32_t arIndex:16; // index into stack frame. displ is -4*arIndex
Register reg:7; // register UnknownReg implies not in register
uint32_t used:1; // when set, the reservation is active
uint32_t inReg:1; // if 1, 'reg' is active
Register reg:7;
uint32_t inAr:1; // if 1, 'arIndex' is active
uint32_t arIndex:15; // index into stack frame; displ is -4*arIndex

LOpcode opcode:8; // instruction's opcode
};
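The rewritten LastWord packs the whole reservation plus the opcode into one 32-bit word: 1+7+1+15+8 bits. A self-contained model (an editorial sketch; 'Register' is modelled as a plain bitfield here) showing the packing and the four legal inReg/inAr states:

#include <cassert>
#include <cstdint>

struct LastWordModel {
    uint32_t inReg:1;     // if 1, 'reg' is active
    uint32_t reg:7;
    uint32_t inAr:1;      // if 1, 'arIndex' is active
    uint32_t arIndex:15;  // displacement is -4*arIndex
    uint32_t opcode:8;
};

int main() {
    assert(sizeof(LastWordModel) == 4);   // 1+7+1+15+8 == 32 bits
    LastWordModel w = {};                 // state 0/0: no reservation
    w.inReg = 1; w.reg = 3;               // 1/0: in register only
    w.inAr = 1;  w.arIndex = 2;           // 1/1: register and stack slot
    w.inReg = 0;                          // 0/1: spilled, stack slot only
    return 0;
}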
@ -346,39 +362,63 @@ namespace nanojit

LOpcode opcode() const { return lastWord.opcode; }

void markAsUsed() {
lastWord.reg = UnknownReg;
lastWord.arIndex = 0;
lastWord.used = 1;
// XXX: old reservation manipulating functions. See bug 538924.
// Replacement strategy:
// - deprecated_markAsClear() --> clearReg() and/or clearArIndex()
// - deprecated_hasKnownReg() --> isInReg()
// - deprecated_getReg() --> getReg() after checking isInReg()
//
void deprecated_markAsClear() {
lastWord.inReg = 0;
lastWord.inAr = 0;
}
void markAsClear() {
lastWord.used = 0;
}
bool isUsed() {
return lastWord.used;
}
bool hasKnownReg() {
bool deprecated_hasKnownReg() {
NanoAssert(isUsed());
return getReg() != UnknownReg;
return isInReg();
}
Register deprecated_getReg() {
NanoAssert(isUsed());
return ( isInReg() ? lastWord.reg : deprecated_UnknownReg );
}
uint32_t deprecated_getArIndex() {
NanoAssert(isUsed());
return ( isInAr() ? lastWord.arIndex : 0 );
}

// Reservation manipulation.
bool isUsed() {
return isInReg() || isInAr();
}
bool isInReg() {
return lastWord.inReg;
}
bool isInRegMask(RegisterMask allow) {
return isInReg() && (rmask(getReg()) & allow);
}
Register getReg() {
NanoAssert(isUsed());
NanoAssert(isInReg());
return lastWord.reg;
}
void setReg(Register r) {
NanoAssert(isUsed());
lastWord.inReg = 1;
lastWord.reg = r;
}
void clearReg() {
lastWord.inReg = 0;
}
bool isInAr() {
return lastWord.inAr;
}
uint32_t getArIndex() {
NanoAssert(isUsed());
NanoAssert(isInAr());
return lastWord.arIndex;
}
void setArIndex(uint32_t arIndex) {
NanoAssert(isUsed());
lastWord.inAr = 1;
lastWord.arIndex = arIndex;
}
bool isUnusedOrHasUnknownReg() {
return !isUsed() || !hasKnownReg();
void clearArIndex() {
lastWord.inAr = 0;
}

// For various instruction kinds.
@ -417,7 +457,7 @@ namespace nanojit
inline void setSize(int32_t nbytes);

// For LInsC.
inline LIns* arg(uint32_t i) const;
inline LIns* arg(uint32_t i) const; // right-to-left-order: arg(0) is rightmost
inline uint32_t argc() const;
inline LIns* callArgN(uint32_t n) const;
inline const CallInfo* callInfo() const;

@ -492,14 +532,12 @@ namespace nanojit
bool isop(LOpcode o) const {
return opcode() == o;
}
bool isQuad() const {
LTy ty = retTypes[opcode()];
return ty == LTy_I64 || ty == LTy_F64;
}
bool isCond() const {
return (isop(LIR_ov)) || isCmp();
return isop(LIR_ov) || isCmp();
}
bool isOverflowable() const {
return isop(LIR_neg) || isop(LIR_add) || isop(LIR_sub) || isop(LIR_mul);
}
bool isFloat() const; // not inlined because it contains a switch
bool isCmp() const {
LOpcode op = opcode();
return (op >= LIR_eq && op <= LIR_uge) ||

@ -550,11 +588,26 @@ namespace nanojit
return isop(LIR_jt) || isop(LIR_jf) || isop(LIR_j) || isop(LIR_jtbl);
}

bool isPtr() {
LTy retType() const {
return retTypes[opcode()];
}
bool isVoid() const {
return retType() == LTy_Void;
}
bool isI32() const {
return retType() == LTy_I32;
}
bool isI64() const {
return retType() == LTy_I64;
}
bool isF64() const {
return retType() == LTy_F64;
}
bool isPtr() const {
#ifdef NANOJIT_64BIT
return retTypes[opcode()] == LTy_I64;
return isI64();
#else
return retTypes[opcode()] == LTy_I32;
return isI32();
#endif
}

|
|||
if (isCall())
|
||||
return !isCse();
|
||||
else
|
||||
return retTypes[opcode()] == LTy_Void;
|
||||
return isVoid();
|
||||
}
|
||||
|
||||
inline void* constvalp() const
|
||||
|
@ -586,6 +639,7 @@ namespace nanojit
|
|||
|
||||
typedef LIns* LInsp;
|
||||
typedef SeqBuilder<LIns*> InsList;
|
||||
typedef SeqBuilder<char*> StringList;
|
||||
|
||||
|
||||
// 0-operand form. Used for LIR_start and LIR_label.
|
||||
|
@ -717,7 +771,7 @@ namespace nanojit
|
|||
LIns* getLIns() { return &ins; };
|
||||
};
|
||||
|
||||
// Used for LIR_iparam.
|
||||
// Used for LIR_iparam, LIR_qparam.
|
||||
class LInsP
|
||||
{
|
||||
private:
|
||||
|
@ -762,8 +816,9 @@ namespace nanojit
|
|||
LIns* getLIns() { return &ins; };
|
||||
};
|
||||
|
||||
// Used for LIR_jtbl. oprnd_1 must be a uint32_t index in
|
||||
// Used for LIR_jtbl. 'oprnd_1' must be a uint32_t index in
|
||||
// the range 0 <= index < size; no range check is performed.
|
||||
// 'table' is an array of labels.
|
||||
class LInsJtbl
|
||||
{
|
||||
private:
|
||||
|
@ -799,25 +854,29 @@ namespace nanojit
LInsJtbl*LIns::toLInsJtbl()const { return (LInsJtbl*)(uintptr_t(this+1) - sizeof(LInsJtbl)); }

void LIns::initLInsOp0(LOpcode opcode) {
markAsClear();
clearReg();
clearArIndex();
lastWord.opcode = opcode;
NanoAssert(isLInsOp0());
}
void LIns::initLInsOp1(LOpcode opcode, LIns* oprnd1) {
markAsClear();
clearReg();
clearArIndex();
lastWord.opcode = opcode;
toLInsOp1()->oprnd_1 = oprnd1;
NanoAssert(isLInsOp1());
}
void LIns::initLInsOp2(LOpcode opcode, LIns* oprnd1, LIns* oprnd2) {
markAsClear();
clearReg();
clearArIndex();
lastWord.opcode = opcode;
toLInsOp2()->oprnd_1 = oprnd1;
toLInsOp2()->oprnd_2 = oprnd2;
NanoAssert(isLInsOp2());
}
void LIns::initLInsOp3(LOpcode opcode, LIns* oprnd1, LIns* oprnd2, LIns* oprnd3) {
markAsClear();
clearReg();
clearArIndex();
lastWord.opcode = opcode;
toLInsOp3()->oprnd_1 = oprnd1;
toLInsOp3()->oprnd_2 = oprnd2;

@ -825,14 +884,16 @@ namespace nanojit
NanoAssert(isLInsOp3());
}
void LIns::initLInsLd(LOpcode opcode, LIns* val, int32_t d) {
markAsClear();
clearReg();
clearArIndex();
lastWord.opcode = opcode;
toLInsLd()->oprnd_1 = val;
toLInsLd()->disp = d;
NanoAssert(isLInsLd());
}
void LIns::initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d) {
markAsClear();
clearReg();
clearArIndex();
lastWord.opcode = opcode;
toLInsSti()->oprnd_1 = val;
toLInsSti()->oprnd_2 = base;

@ -840,20 +901,23 @@ namespace nanojit
NanoAssert(isLInsSti());
}
void LIns::initLInsSk(LIns* prevLIns) {
markAsClear();
clearReg();
clearArIndex();
lastWord.opcode = LIR_skip;
toLInsSk()->prevLIns = prevLIns;
NanoAssert(isLInsSk());
}
void LIns::initLInsC(LOpcode opcode, LIns** args, const CallInfo* ci) {
markAsClear();
clearReg();
clearArIndex();
lastWord.opcode = opcode;
toLInsC()->args = args;
toLInsC()->ci = ci;
NanoAssert(isLInsC());
}
void LIns::initLInsP(int32_t arg, int32_t kind) {
markAsClear();
clearReg();
clearArIndex();
lastWord.opcode = LIR_param;
NanoAssert(isU8(arg) && isU8(kind));
toLInsP()->arg = arg;

@ -861,20 +925,23 @@ namespace nanojit
NanoAssert(isLInsP());
}
void LIns::initLInsI(LOpcode opcode, int32_t imm32) {
markAsClear();
clearReg();
clearArIndex();
lastWord.opcode = opcode;
toLInsI()->imm32 = imm32;
NanoAssert(isLInsI());
}
void LIns::initLInsN64(LOpcode opcode, int64_t imm64) {
markAsClear();
clearReg();
clearArIndex();
lastWord.opcode = opcode;
toLInsN64()->imm64_0 = int32_t(imm64);
toLInsN64()->imm64_1 = int32_t(imm64 >> 32);
NanoAssert(isLInsN64());
}
void LIns::initLInsJtbl(LIns* index, uint32_t size, LIns** table) {
markAsClear();
clearReg();
clearArIndex();
lastWord.opcode = LIR_jtbl;
toLInsJtbl()->oprnd_1 = index;
toLInsJtbl()->table = table;
@ -1163,15 +1230,20 @@ namespace nanojit
InsList code;
LirNameMap* names;
LogControl* logc;
const char* const prefix;
bool const always_flush;
public:
VerboseWriter(Allocator& alloc, LirWriter *out,
LirNameMap* names, LogControl* logc)
: LirWriter(out), code(alloc), names(names), logc(logc)
LirNameMap* names, LogControl* logc, const char* prefix = "", bool always_flush = false)
: LirWriter(out), code(alloc), names(names), logc(logc), prefix(prefix), always_flush(always_flush)
{}

LInsp add(LInsp i) {
if (i)
if (i) {
code.add(i);
if (always_flush)
flush();
}
return i;
}

@ -1186,7 +1258,7 @@ namespace nanojit
if (!code.isEmpty()) {
int32_t count = 0;
for (Seq<LIns*>* p = code.get(); p != NULL; p = p->tail) {
logc->printf(" %s\n",names->formatIns(p->head));
logc->printf("%s %s\n",prefix,names->formatIns(p->head));
count++;
}
code.clear();
|
|||
|
||||
class LirBufWriter : public LirWriter
|
||||
{
|
||||
LirBuffer* _buf; // underlying buffer housing the instructions
|
||||
LirBuffer* _buf; // underlying buffer housing the instructions
|
||||
const avmplus::Config& _config;
|
||||
|
||||
public:
|
||||
LirBufWriter(LirBuffer* buf)
|
||||
: LirWriter(0), _buf(buf) {
|
||||
LirBufWriter(LirBuffer* buf, const avmplus::Config& config)
|
||||
: LirWriter(0), _buf(buf), _config(config) {
|
||||
}
|
||||
|
||||
// LirWriter interface
|
||||
|
@ -1475,14 +1548,10 @@ namespace nanojit
|
|||
}
|
||||
};
|
||||
|
||||
class Assembler;
|
||||
|
||||
void compile(Assembler *assm, Fragment *frag, Allocator& alloc verbose_only(, LabelMap*));
|
||||
verbose_only(void live(Allocator& alloc, Fragment* frag, LogControl*);)
|
||||
verbose_only(void live(LirFilter* in, Allocator& alloc, Fragment* frag, LogControl*);)
|
||||
|
||||
class StackFilter: public LirFilter
|
||||
{
|
||||
LirBuffer *lirbuf;
|
||||
LInsp sp;
|
||||
LInsp rp;
|
||||
BitSet spStk;
|
||||
|
@ -1492,7 +1561,7 @@ namespace nanojit
|
|||
void getTops(LInsp br, int& spTop, int& rpTop);
|
||||
|
||||
public:
|
||||
StackFilter(LirFilter *in, Allocator& alloc, LirBuffer *lirbuf, LInsp sp, LInsp rp);
|
||||
StackFilter(LirFilter *in, Allocator& alloc, LInsp sp, LInsp rp);
|
||||
bool ignoreStore(LInsp ins, int top, BitSet* stk);
|
||||
LInsp read();
|
||||
};
|
||||
|
@ -1529,16 +1598,92 @@ namespace nanojit
|
|||
};
|
||||
|
||||
#ifdef DEBUG
|
||||
class SanityFilter : public LirWriter
|
||||
// This class does thorough checking of LIR. It checks *implicit* LIR
|
||||
// instructions, ie. LIR instructions specified via arguments -- to
|
||||
// methods like insLoad() -- that have not yet been converted into
|
||||
// *explicit* LIns objects in a LirBuffer. The reason for this is that if
|
||||
// we wait until the LIR instructions are explicit, they will have gone
|
||||
// through the entire writer pipeline and been optimised. By checking
|
||||
// implicit LIR instructions we can check the LIR code at the start of the
|
||||
// writer pipeline, exactly as it is generated by the compiler front-end.
|
||||
//
|
||||
// A general note about the errors produced by this class: for
|
||||
// TraceMonkey, they won't include special names for instructions that
|
||||
// have them unless TMFLAGS is specified.
|
||||
class ValidateWriter : public LirWriter
|
||||
{
|
||||
private:
|
||||
const char* _whereInPipeline;
|
||||
|
||||
const char* type2string(LTy type);
|
||||
void typeCheckArgs(LOpcode op, int nArgs, LTy formals[], LIns* args[]);
|
||||
void errorStructureShouldBe(LOpcode op, const char* argDesc, int argN, LIns* arg,
|
||||
const char* shouldBeDesc);
|
||||
void errorPlatformShouldBe(LOpcode op, int nBits);
|
||||
void checkLInsHasOpcode(LOpcode op, int argN, LIns* ins, LOpcode op2);
|
||||
void checkLInsIsACondOrConst(LOpcode op, int argN, LIns* ins);
|
||||
void checkLInsIsNull(LOpcode op, int argN, LIns* ins);
|
||||
void checkLInsIsOverflowable(LOpcode op, int argN, LIns* ins);
|
||||
void checkIs32BitPlatform(LOpcode op);
|
||||
void checkIs64BitPlatform(LOpcode op);
|
||||
void checkOprnd1ImmediatelyPrecedes(LIns* ins);
|
||||
|
||||
public:
|
||||
SanityFilter(LirWriter* out) : LirWriter(out)
|
||||
{ }
|
||||
ValidateWriter(LirWriter* out, const char* stageName);
|
||||
LIns* insLoad(LOpcode op, LIns* base, int32_t d);
|
||||
LIns* insStore(LOpcode op, LIns* value, LIns* base, int32_t d);
|
||||
LIns* ins0(LOpcode v);
|
||||
LIns* ins1(LOpcode v, LIns* a);
|
||||
LIns* ins2(LOpcode v, LIns* a, LIns* b);
|
||||
LIns* ins3(LOpcode v, LIns* a, LIns* b, LIns* c);
|
||||
LIns* insParam(int32_t arg, int32_t kind);
|
||||
LIns* insImm(int32_t imm);
|
||||
LIns* insImmq(uint64_t imm);
|
||||
LIns* insImmf(double d);
|
||||
LIns* insCall(const CallInfo *call, LIns* args[]);
|
||||
LIns* insGuard(LOpcode v, LIns *c, GuardRecord *gr);
|
||||
LIns* insBranch(LOpcode v, LIns* condition, LIns* to);
|
||||
LIns* insAlloc(int32_t size);
|
||||
LIns* insJtbl(LIns* index, uint32_t size);
|
||||
};
|
||||
|
||||
// This just checks things that aren't possible to check in
// ValidateWriter, eg. whether all branch targets are set and are labels.
class ValidateReader: public LirFilter {
public:
LIns* ins1(LOpcode v, LIns* s0);
LIns* ins2(LOpcode v, LIns* s0, LIns* s1);
LIns* ins3(LOpcode v, LIns* s0, LIns* s1, LIns* s2);
ValidateReader(LirFilter* in);
LIns* read();
};
#endif
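To make the role of the two checkers concrete, here is a minimal sketch of how a debug build might thread them into a writer pipeline. The surrounding names (lirbuf, alloc, config) and the placement-new-on-Allocator idiom are illustrative assumptions, not code from this patch:

#ifdef DEBUG
// Hypothetical wiring; the stage-name strings are arbitrary labels that
// ValidateWriter includes in its error messages.
LirBufWriter bufWriter(lirbuf, config);
LirWriter* w = &bufWriter;
w = new (alloc) ValidateWriter(w, "end of writer pipeline");
// ... optimising filters (CSE, expression folding, ...) would go here ...
w = new (alloc) ValidateWriter(w, "start of writer pipeline");
// The front-end emits into 'w', so each instruction is checked both before
// and after the optimisation filters; ValidateReader then runs on the read
// side, where branch targets are finally known.
#endif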
#ifdef NJ_VERBOSE
/* A listing filter for LIR, going through backwards. It merely
passes its input to its output, but notes it down too. When
finish() is called, prints out what went through. Is intended to be
used to print arbitrary intermediate transformation stages of
LIR. */
class ReverseLister : public LirFilter
{
Allocator& _alloc;
LirNameMap* _names;
const char* _title;
StringList _strs;
LogControl* _logc;
public:
ReverseLister(LirFilter* in, Allocator& alloc,
LirNameMap* names, LogControl* logc, const char* title)
: LirFilter(in)
, _alloc(alloc)
, _names(names)
, _title(title)
, _strs(alloc)
, _logc(logc)
{ }

void finish();
LInsp read();
};
#endif

}
#endif // __nanojit_LIR__
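As a usage note, a verbose build can drop a ReverseLister between two reader-pipeline stages to get a per-stage dump. This sketch assumes the usual lirbuf->names and logc objects of a verbose build (assumed names), and uses the new StackFilter signature from this patch:

#ifdef NJ_VERBOSE
ReverseLister* beforeSF = new (alloc)
    ReverseLister(reader, alloc, lirbuf->names, logc, "Before StackFilter");
StackFilter sf(beforeSF, alloc, sp, rp);
// ... run the assembler loop over 'sf' ...
beforeSF->finish();   // prints everything that flowed through this point
#endif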
@@ -78,9 +78,9 @@ OPDEF(stb, 9, Sti, Void) // 8-bit integer store
OPDEF(ld, 10, Ld, I32) // 32-bit integer load
OPDEF(ialloc, 11, I, I32) // alloc some stack space (value is 32bit address)
OPDEF(sti, 12, Sti, Void) // 32-bit integer store
OPDEF(ret, 13, Op1, Void) // return a word-sized value
OPDEF(live, 14, Op1, Void) // extend live range of reference
OPDEF(flive, 15, Op1, Void) // extend live range of a floating point value reference
OPDEF(ret, 13, Op1, Void) // return a 32-bit integer
OPDEF(live, 14, Op1, Void) // extend live range of a 32-bit integer
OPDEF(flive, 15, Op1, Void) // extend live range of a 64-bit float
OPDEF(icall, 16, C, I32) // subroutine call returning a 32-bit value
OPDEF(sts, 17, Sti, Void) // 16-bit integer store

@@ -103,7 +103,11 @@ OPDEF(jtbl, 23, Jtbl, Void) // jump to address in table

OPDEF(int, 24, I, I32) // constant 32-bit integer
OPDEF(cmov, 25, Op3, I32) // conditional move
OPDEF(callh, 26, Op1, I32) // get the high 32 bits of a call returning a 64-bit value in two 32bit registers
// LIR_callh is a hack that's only used on 32-bit platforms that use SoftFloat.
// Its operand is always a LIR_fcall, and it indicates that the 64-bit float
// return value is being returned via two 32-bit registers. The result is
// always used as the first operand of a LIR_qjoin.
OPDEF(callh, 26, Op1, I32)
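To make that comment concrete, a SoftFloat build would reassemble a double-returning call roughly as below; the writer 'w', the argument array, and the call-info record are assumed for illustration:

// Hypothetical SoftFloat lowering of a call returning a 64-bit float.
LIns* args[] = { arg0 };
LIns* lo = w->insCall(&ci_fn_returning_double, args); // LIR_fcall: low 32 bits
LIns* hi = w->ins1(LIR_callh, lo);                    // high 32 bits of the same call
LIns* d  = w->ins2(LIR_qjoin, lo, hi);                // stitched back into an F64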

// feq through fge must only be used on float arguments. They return integers.
// For all except feq, (op ^ 1) is the op which flips the

@@ -127,7 +131,10 @@ OPDEF(add, 36, Op2, I32) // integer addition
OPDEF(sub, 37, Op2, I32) // integer subtraction
OPDEF(mul, 38, Op2, I32) // integer multiplication
OPDEF(div, 39, Op2, I32) // integer division
OPDEF(mod, 40, Op1, I32) // hack: get the modulus from a LIR_div result, for x86 only
// LIR_mod is a hack. It's only used on i386/X64. The operand is the result
// of a LIR_div because on i386/X64 div and mod results are computed by the
// same instruction.
OPDEF(mod, 40, Op1, I32) // integer modulus
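A short sketch of the intended pairing, with the writer 'w' and operands assumed: on i386/X64 the divide instruction produces quotient and remainder together, so the front-end emits the remainder as a read off the LIR_div result:

LIns* q = w->ins2(LIR_div, a, b); // quotient  (EAX after idiv)
LIns* r = w->ins1(LIR_mod, q);    // remainder (EDX of the same idiv)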

OPDEF(and, 41, Op2, I32) // 32-bit bitwise AND
OPDEF(or, 42, Op2, I32) // 32-bit bitwise OR

@@ -148,7 +155,16 @@ OPDEF(qhi, 51, Op1, I32) // get the high 32 bits of a 64-bit value
OPDEF(ldcsb, 52, Ld, I32) // non-volatile 8-bit integer load, sign-extended to 32-bit
OPDEF(ldcss, 53, Ld, I32) // non-volatile 16-bit integer load, sign-extended to 32-bit

OPDEF(ov, 54, Op1, I32) // test for overflow; value must have just been computed
// This is an overflow test. The operand is the result of a 32-bit integer
// operation that may have overflowed (eg. a LIR_add). It's a nasty hack
// because this is a lie -- the true operand is not the result of the LIR_add
// but rather an overflow condition code that is set implicitly as a result of
// the LIR_add. But there's no way to model multiple outputs from an
// instruction in LIR. So we rely on the LIR_ov always being right after the
// LIR_add so that the back-ends can generate code in such a way that the
// machine condition codes can be checked immediately after the addition is
// performed.
OPDEF(ov, 54, Op1, I32)
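A sketch of overflow-checked addition as a front-end would emit it; this ordering is exactly the property that ValidateWriter's checkOprnd1ImmediatelyPrecedes() can enforce ('w' and the guard record 'gr' are assumed):

LIns* sum = w->ins2(LIR_add, a, b);
LIns* ov  = w->ins1(LIR_ov, sum);     // reads the condition codes set by the add
w->insGuard(LIR_xt, ov, gr);          // side-exit if the addition overflowed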

// Integer (32-bit) relational operators. (op ^ 1) is the op which flips the
// left and right sides of the comparison, so (lt ^ 1) == gt, or the operator

@@ -174,7 +190,7 @@ OPDEF(line, 66, Op1, Void) // source line number for debug symbols
OPDEF(xbarrier, 67, Op2, Void) // memory barrier; doesn't exit, but flushes all values to the stack
OPDEF(xtbl, 68, Op2, Void) // exit via indirect jump

OPDEF(__69, 69, None, Void)
OPDEF(qlive, 69, Op1, Void) // extend live range of a 64-bit integer
OPDEF(__70, 70, None, Void)
OPDEF(qaddp, 71, Op2, I64) // integer addition for temp pointer calculations (64bit only)
OPDEF(qparam, 72, P, I64) // load a parameter (64bit register or stk location)

@@ -185,17 +201,17 @@ OPDEF(ldq, 74, Ld, I64) // 64-bit integer load
OPDEF(qalloc, 75, I, I64) // allocate some stack space (value is 64bit address)

OPDEF(stqi, 76, Sti, Void) // 64-bit integer store
OPDEF(fret, 77, Op1, Void)

OPDEF(st32f, 78, Sti, Void) // store 64-bit float as a 32-bit float (dropping precision)
OPDEF(ld32f, 79, Ld, F64) // load 32-bit float and widen to 64-bit float
OPDEF(st32f, 77, Sti, Void) // store 64-bit float as a 32-bit float (dropping precision)
OPDEF(ld32f, 78, Ld, F64) // load 32-bit float and widen to 64-bit float

OPDEF(fcall, 80, C, F64) // subroutine call returning 64-bit (quad) double value
OPDEF(qcall, 81, C, I64) // subroutine call returning 64-bit (quad) integer value
OPDEF(fcall, 79, C, F64) // subroutine call returning 64-bit (quad) double value
OPDEF(qcall, 80, C, I64) // subroutine call returning 64-bit (quad) integer value

OPDEF(stfi, 82, Sti, Void) // 64-bit float store
OPDEF(stfi, 81, Sti, Void) // 64-bit float store

OPDEF(__83, 83, None, Void)
OPDEF(fret, 82, Op1, Void) // return a 64-bit float
OPDEF(qret, 83, Op1, Void) // return a 64-bit integer
OPDEF(__84, 84, None, Void)
OPDEF(__85, 85, None, Void)
OPDEF(__86, 86, None, Void)

@@ -210,8 +226,8 @@ OPDEF(i2q, 90, Op1, I64) // sign-extend i32 to i64
OPDEF(u2q, 91, Op1, I64) // zero-extend u32 to u64
OPDEF(i2f, 92, Op1, F64) // convert a signed 32-bit integer to a float
OPDEF(u2f, 93, Op1, F64) // convert an unsigned 32-bit integer to a float
OPDEF(f2i, 94, Op1, I32) // f2i conversion, no exception raised, platform rounding rules.

OPDEF(__94, 94, None, Void)
OPDEF(__95, 95, None, Void)
OPDEF(__96, 96, None, Void)

@@ -223,20 +239,24 @@ OPDEF(fadd, 100, Op2, F64) // floating-point addition
OPDEF(fsub, 101, Op2, F64) // floating-point subtraction
OPDEF(fmul, 102, Op2, F64) // floating-point multiplication
OPDEF(fdiv, 103, Op2, F64) // floating-point division
OPDEF(fmod, 104, Op2, F64) // floating-point modulus(?)
// LIR_fmod is just a place-holder opcode, eg. the back-ends cannot generate
// code for it. It's used in TraceMonkey briefly but is always demoted to a
// LIR_mod or converted to a function call before Nanojit has to do anything
// serious with it.
OPDEF(fmod, 104, Op2, F64) // floating-point modulus

OPDEF(qiand, 105, Op2, I64) // 64-bit bitwise AND
OPDEF(qior, 106, Op2, I64) // 64-bit bitwise OR
OPDEF(qxor, 107, Op2, I64) // 64-bit bitwise XOR
OPDEF(__108, 108, None, Void)
OPDEF(qilsh, 109, Op2, I64) // 64-bit left shift
OPDEF(qirsh, 110, Op2, I64) // 64-bit signed right shift
OPDEF(qursh, 111, Op2, I64) // 64-bit unsigned right shift
OPDEF(qilsh, 109, Op2, I64) // 64-bit left shift; 2nd operand is a 32-bit integer
OPDEF(qirsh, 110, Op2, I64) // 64-bit signed right shift; 2nd operand is a 32-bit integer
OPDEF(qursh, 111, Op2, I64) // 64-bit unsigned right shift; 2nd operand is a 32-bit integer
OPDEF(qiadd, 112, Op2, I64) // 64-bit bitwise ADD

OPDEF(ldc32f, 113, Ld, F64) // non-volatile load 32-bit float and widen to 64-bit float
OPDEF(qjoin, 114, Op2, F64) // join two 32-bit values (1st arg is low bits, 2nd is high)
OPDEF(__115, 115, None, Void)
OPDEF(q2i, 115, Op1, I32) // truncate i64 to i32
OPDEF(__116, 116, None, Void)
OPDEF(__117, 117, None, Void)
OPDEF(float, 118, N64, F64) // 64-bit float constant value
@@ -76,6 +76,14 @@
# define NJ_EXPANDED_LOADSTORE_SUPPORTED 0
#endif

#ifndef NJ_USES_QUAD_CONSTANTS
# define NJ_USES_QUAD_CONSTANTS 0
#endif

#ifndef NJ_F2I_SUPPORTED
# define NJ_F2I_SUPPORTED 0
#endif

namespace nanojit {

inline Register nextreg(Register r) {

@@ -124,7 +132,6 @@ namespace nanojit {
#ifdef NJ_NO_VARIADIC_MACROS
static void asm_output(const char *f, ...) {}
#define gpn(r) regNames[(r)]
#define fpn(r) regNames[(r)]
#elif defined(NJ_VERBOSE)
// Used for printing native instructions. Like Assembler::outputf(),
// but only outputs if LC_Assembly is set. Also prepends the output

@@ -139,11 +146,9 @@ namespace nanojit {
} \
} while (0) /* no semi */
#define gpn(r) regNames[(r)]
#define fpn(r) regNames[(r)]
#else
#define asm_output(...)
#define gpn(r)
#define fpn(r)
#endif /* NJ_VERBOSE */

#endif // __nanojit_Native__
(Diff for one file not shown because of its large size.)
@@ -62,16 +62,6 @@ namespace nanojit
# define NJ_ARM_EABI 1
#endif

// default to ARMv5
#if !defined(ARM_ARCH)
# define ARM_ARCH 5
#endif

// default to no-thumb2
#if !defined(ARM_THUMB2)
# define ARM_THUMB2 0
#endif

// only d0-d6 are actually used; we'll use d7 as s14-s15 for i2f/u2f/etc.
#define NJ_VFP_MAX_REGISTERS 8
#define NJ_MAX_REGISTERS (11 + NJ_VFP_MAX_REGISTERS)

@@ -79,7 +69,8 @@ namespace nanojit
#define NJ_MAX_PARAMETERS 16
#define NJ_ALIGN_STACK 8
#define NJ_JTBL_SUPPORTED 1
#define NJ_EXPANDED_LOADSTORE_SUPPORTED 0
#define NJ_EXPANDED_LOADSTORE_SUPPORTED 1
#define NJ_F2I_SUPPORTED 1

#define NJ_CONSTANT_POOLS
const int NJ_MAX_CPOOL_OFFSET = 4096;

@@ -128,10 +119,9 @@ typedef enum {

FirstReg = R0,
LastReg = D6,
UnknownReg = 32,
deprecated_UnknownReg = 32,

// special value referring to S14
FpSingleScratch = 24
S14 = 24
} Register;

/* ARM condition codes */
@@ -359,7 +349,7 @@ enum {
// S - bit, 0 or 1, whether the CPSR register is updated
// rd - destination register
// rl - first (left) operand register
// rr - first (left) operand register
// rr - second (right) operand register
// sh - a ShiftOperator
// imm - immediate argument to shift operator, 5 bits (0..31)
#define ALUr_shi(cond, op, S, rd, rl, rr, sh, imm) do {\

@@ -372,9 +362,11 @@ enum {
NanoAssert((imm)>=0 && (imm)<32);\
*(--_nIns) = (NIns) ((cond)<<28 |(ARM_##op)<<21 | (S)<<20 | (rl)<<16 | (rd)<<12 | (imm)<<7 | (sh)<<4 | (rr));\
if (ARM_##op == ARM_mov || ARM_##op == ARM_mvn) { \
NanoAssert(rl==0); \
asm_output("%s%s%s %s, %s, %s #%d", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rr), shiftNames[sh], (imm));\
} else if (ARM_##op >= ARM_tst && ARM_##op <= ARM_cmn) { \
NanoAssert(S==1);\
NanoAssert(rd==0);\
asm_output("%s%s %s, %s, %s #%d", #op, condNames[cond], gpn(rl), gpn(rr), shiftNames[sh], (imm));\
} else { \
asm_output("%s%s%s %s, %s, %s, %s #%d", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rl), gpn(rr), shiftNames[sh], (imm));\
@@ -466,42 +458,25 @@ enum {
// --------

// [_d_hi,_d] = _l * _r
#define SMULL_dont_check_op1(_d, _d_hi, _l, _r) do { \
underrunProtect(4); \
NanoAssert((ARM_ARCH >= 6) || ((_d) != (_l))); \
NanoAssert(IsGpReg(_d) && IsGpReg(_d_hi) && IsGpReg(_l) && IsGpReg(_r)); \
NanoAssert(((_d) != PC) && ((_d_hi) != PC) && ((_l) != PC) && ((_r) != PC));\
*(--_nIns) = (NIns)( COND_AL | 0xc00090 | (_d_hi)<<16 | (_d)<<12 | (_r)<<8 | (_l) );\
asm_output("smull %s, %s, %s, %s",gpn(_d),gpn(_d_hi),gpn(_l),gpn(_r)); \
#define SMULL(_d, _d_hi, _l, _r) do { \
underrunProtect(4); \
NanoAssert((config.arm_arch >= 6) || ((_d ) != (_l))); \
NanoAssert((config.arm_arch >= 6) || ((_d_hi) != (_l))); \
NanoAssert(IsGpReg(_d) && IsGpReg(_d_hi) && IsGpReg(_l) && IsGpReg(_r)); \
NanoAssert(((_d) != PC) && ((_d_hi) != PC) && ((_l) != PC) && ((_r) != PC)); \
*(--_nIns) = (NIns)( COND_AL | 0xc00090 | (_d_hi)<<16 | (_d)<<12 | (_r)<<8 | (_l) ); \
asm_output("smull %s, %s, %s, %s",gpn(_d),gpn(_d_hi),gpn(_l),gpn(_r)); \
} while(0)

#if NJ_ARM_ARCH >= NJ_ARM_V6
#define SMULL(_d, _d_hi, _l, _r) SMULL_dont_check_op1(_d, _d_hi, _l, _r)
#else
#define SMULL(_d, _d_hi, _l, _r) do { \
NanoAssert( (_d)!=(_l)); \
NanoAssert((_d_hi)!=(_l)); \
SMULL_dont_check_op1(_d, _d_hi, _l, _r); \
} while(0)
#endif

// _d = _l * _r
#define MUL_dont_check_op1(_d, _l, _r) do { \
#define MUL(_d, _l, _r) do { \
underrunProtect(4); \
NanoAssert((ARM_ARCH >= 6) || ((_d) != (_l))); \
NanoAssert((config.arm_arch >= 6) || ((_d) != (_l))); \
NanoAssert(IsGpReg(_d) && IsGpReg(_l) && IsGpReg(_r)); \
NanoAssert(((_d) != PC) && ((_l) != PC) && ((_r) != PC)); \
*(--_nIns) = (NIns)( COND_AL | (_d)<<16 | (_r)<<8 | 0x90 | (_l) ); \
asm_output("mul %s, %s, %s",gpn(_d),gpn(_l),gpn(_r)); } while(0)

#if NJ_ARM_ARCH >= NJ_ARM_V6
#define MUL(_d, _l, _r) MUL_dont_check_op1(_d, _l, _r)
#else
#define MUL(_d, _l, _r) do { \
NanoAssert((_d)!=(_l)); \
MUL_dont_check_op1(_d, _l, _r); \
} while(0)
#endif
asm_output("mul %s, %s, %s",gpn(_d),gpn(_l),gpn(_r)); \
} while(0)
// RSBS _d, _r
// _d = 0 - _r

@@ -637,13 +612,26 @@ enum {
asm_output("ldrb %s, [%s,#%d]", gpn(_d),gpn(_n),(_off)); \
} while(0)

// Load and sign-extend a half word (16 bits). The offset range is ±255, and
// must be aligned to two bytes on some architectures, but we never make
// unaligned accesses so a simple assertion is sufficient here.
#define LDRH(_d,_n,_off) do { \
/* TODO: This is actually LDRSH. Is this correct? */ \
// Load a byte (8 bits), sign-extend to 32 bits. The offset range is
// ±255 (different from LDRB, same as LDRH/LDRSH)
#define LDRSB(_d,_n,_off) do { \
NanoAssert(IsGpReg(_d) && IsGpReg(_n)); \
underrunProtect(4); \
if (_off < 0) { \
NanoAssert(isU8(-_off)); \
*(--_nIns) = (NIns)( COND_AL | (0x15<<20) | ((_n)<<16) | ((_d)<<12) | ((0xD)<<4) | (((-_off)&0xf0)<<4) | ((-_off)&0xf) ); \
} else { \
NanoAssert(isU8(_off)); \
*(--_nIns) = (NIns)( COND_AL | (0x1D<<20) | ((_n)<<16) | ((_d)<<12) | ((0xD)<<4) | (((_off)&0xf0)<<4) | ((_off)&0xf) ); \
} \
asm_output("ldrsb %s, [%s,#%d]", gpn(_d),gpn(_n),(_off)); \
} while(0)

// Load and sign-extend a half word (16 bits). The offset range is ±255, and
// must be aligned to two bytes on some architectures (the caller is responsible
// for ensuring appropriate alignment)
#define LDRH(_d,_n,_off) do { \
NanoAssert(IsGpReg(_d) && IsGpReg(_n)); \
NanoAssert(((_off) & ~1) == (_off)); \
underrunProtect(4); \
if (_off < 0) { \
NanoAssert(isU8(-_off)); \

@@ -652,9 +640,26 @@ enum {
NanoAssert(isU8(_off)); \
*(--_nIns) = (NIns)( COND_AL | (0x1D<<20) | ((_n)<<16) | ((_d)<<12) | ((0xB)<<4) | (((_off)&0xf0)<<4) | ((_off)&0xf) ); \
} \
asm_output("ldrh %s, [%s,#%d]", gpn(_d),gpn(_n),(_off)); \
} while(0)

// Load and sign-extend a half word (16 bits). The offset range is ±255, and
// must be aligned to two bytes on some architectures (the caller is responsible
// for ensuring appropriate alignment)
#define LDRSH(_d,_n,_off) do { \
NanoAssert(IsGpReg(_d) && IsGpReg(_n)); \
underrunProtect(4); \
if (_off < 0) { \
NanoAssert(isU8(-_off)); \
*(--_nIns) = (NIns)( COND_AL | (0x15<<20) | ((_n)<<16) | ((_d)<<12) | ((0xF)<<4) | (((-_off)&0xf0)<<4) | ((-_off)&0xf) ); \
} else { \
NanoAssert(isU8(_off)); \
*(--_nIns) = (NIns)( COND_AL | (0x1D<<20) | ((_n)<<16) | ((_d)<<12) | ((0xF)<<4) | (((_off)&0xf0)<<4) | ((_off)&0xf) ); \
} \
asm_output("ldrsh %s, [%s,#%d]", gpn(_d),gpn(_n),(_off)); \
} while(0)
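The ±255 offset handling in LDRSB/LDRH/LDRSH above all follows the same ARM addressing-mode-3 pattern: the magnitude is split into two nibbles (imm[7:4] goes to instruction bits 11:8, imm[3:0] to bits 3:0) and the sign selects the U (add/subtract) bit, which is exactly the difference between the 0x15 and 0x1D opcode constants. A small self-contained sketch of that split (a hypothetical helper, not part of the patch):

#include <cassert>
#include <cstdint>

// Encode an addressing-mode-3 immediate: returns the bits to OR into the
// instruction word. 'off' must be in [-255, 255]; bit 23 is the U (add) bit.
static uint32_t encodeMode3Offset(int32_t off) {
    uint32_t u = off >= 0 ? (1u << 23) : 0;       // add vs. subtract base
    uint32_t mag = off >= 0 ? off : -off;
    assert(mag <= 255);
    return u | ((mag & 0xF0) << 4) | (mag & 0xF); // imm[7:4]->bits 11:8, imm[3:0]->bits 3:0
}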

// Valid offset for STR and STRB is +/- 4095, STRH only has +/- 255
#define STR(_d,_n,_off) do { \
NanoAssert(IsGpReg(_d) && IsGpReg(_n)); \
NanoAssert(isU12(_off) || isU12(-_off)); \

@@ -664,6 +669,29 @@ enum {
asm_output("str %s, [%s, #%d]", gpn(_d), gpn(_n), (_off)); \
} while(0)

#define STRB(_d,_n,_off) do { \
NanoAssert(IsGpReg(_d) && IsGpReg(_n)); \
NanoAssert(isU12(_off) || isU12(-_off)); \
underrunProtect(4); \
if ((_off)<0) *(--_nIns) = (NIns)( COND_AL | (0x54<<20) | ((_n)<<16) | ((_d)<<12) | ((-(_off))&0xFFF) ); \
else *(--_nIns) = (NIns)( COND_AL | (0x5C<<20) | ((_n)<<16) | ((_d)<<12) | ((_off)&0xFFF) ); \
asm_output("strb %s, [%s, #%d]", gpn(_d), gpn(_n), (_off)); \
} while(0)

// Only +/- 255 range, unlike STRB/STR
#define STRH(_d,_n,_off) do { \
NanoAssert(IsGpReg(_d) && IsGpReg(_n)); \
underrunProtect(4); \
if ((_off)<0) { \
NanoAssert(isU8(-_off)); \
*(--_nIns) = (NIns)( COND_AL | (0x14<<20) | ((_n)<<16) | ((_d)<<12) | (((-(_off))&0xF0)<<4) | (0xB<<4) | ((-(_off))&0xF) ); \
} else { \
NanoAssert(isU8(_off)); \
*(--_nIns) = (NIns)( COND_AL | (0x1C<<20) | ((_n)<<16) | ((_d)<<12) | (((_off)&0xF0)<<4) | (0xB<<4) | ((_off)&0xF) ); \
} \
asm_output("strh %s, [%s, #%d]", gpn(_d), gpn(_n), (_off)); \
} while(0)
// Encode a breakpoint. The ID is not important and is ignored by the
// processor, but it can be useful as a marker when debugging emitted code.
#define BKPT_insn ((NIns)( COND_AL | (0x12<<20) | (0x7<<4) ))

@@ -799,7 +827,7 @@ enum {

#define FMDRR(_Dm,_Rd,_Rn) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(config.arm_vfp); \
NanoAssert(IsFpReg(_Dm) && IsGpReg(_Rd) && IsGpReg(_Rn)); \
*(--_nIns) = (NIns)( COND_AL | (0xC4<<20) | ((_Rn)<<16) | ((_Rd)<<12) | (0xB1<<4) | (FpRegNum(_Dm)) ); \
asm_output("fmdrr %s,%s,%s", gpn(_Dm), gpn(_Rd), gpn(_Rn)); \

@@ -807,7 +835,7 @@ enum {

#define FMRRD(_Rd,_Rn,_Dm) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(config.arm_vfp); \
NanoAssert(IsGpReg(_Rd) && IsGpReg(_Rn) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xC5<<20) | ((_Rn)<<16) | ((_Rd)<<12) | (0xB1<<4) | (FpRegNum(_Dm)) ); \
asm_output("fmrrd %s,%s,%s", gpn(_Rd), gpn(_Rn), gpn(_Dm)); \

@@ -815,7 +843,7 @@ enum {

#define FMRDH(_Rd,_Dn) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(config.arm_vfp); \
NanoAssert(IsGpReg(_Rd) && IsFpReg(_Dn)); \
*(--_nIns) = (NIns)( COND_AL | (0xE3<<20) | (FpRegNum(_Dn)<<16) | ((_Rd)<<12) | (0xB<<8) | (1<<4) ); \
asm_output("fmrdh %s,%s", gpn(_Rd), gpn(_Dn)); \

@@ -823,17 +851,17 @@ enum {

#define FMRDL(_Rd,_Dn) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(config.arm_vfp); \
NanoAssert(IsGpReg(_Rd) && IsFpReg(_Dn)); \
*(--_nIns) = (NIns)( COND_AL | (0xE1<<20) | (FpRegNum(_Dn)<<16) | ((_Rd)<<12) | (0xB<<8) | (1<<4) ); \
asm_output("fmrdh %s,%s", gpn(_Rd), gpn(_Dn)); \
} while (0)

#define FSTD(_Dd,_Rn,_offs) do { \
#define FSTD_allowD7(_Dd,_Rn,_offs,_allowD7) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(config.arm_vfp); \
NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2)); \
NanoAssert(IsFpReg(_Dd) && !IsFpReg(_Rn)); \
NanoAssert((IsFpReg(_Dd) || ((_allowD7) && (_Dd) == D7)) && !IsFpReg(_Rn)); \
int negflag = 1<<23; \
intptr_t offs = (_offs); \
if (_offs < 0) { \

@@ -844,9 +872,12 @@ enum {
asm_output("fstd %s,%s(%d)", gpn(_Dd), gpn(_Rn), _offs); \
} while (0)

#define FSTD(_Dd,_Rn,_offs) \
FSTD_allowD7(_Dd,_Rn,_offs,0)

#define FLDD_chk(_Dd,_Rn,_offs,_chk) do { \
if(_chk) underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(config.arm_vfp); \
NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2)); \
NanoAssert(IsFpReg(_Dd) && !IsFpReg(_Rn)); \
int negflag = 1<<23; \
@@ -860,34 +891,17 @@ enum {
} while (0)
#define FLDD(_Dd,_Rn,_offs) FLDD_chk(_Dd,_Rn,_offs,1)

#define FSITOD(_Dd,_Sm) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(IsFpReg(_Dd) && ((_Sm) == FpSingleScratch)); \
*(--_nIns) = (NIns)( COND_AL | (0xEB8<<16) | (FpRegNum(_Dd)<<12) | (0x2F<<6) | (0<<5) | (0x7) ); \
asm_output("fsitod %s,%s", gpn(_Dd), gpn(_Sm)); \
} while (0)

#define FUITOD(_Dd,_Sm) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(IsFpReg(_Dd) && ((_Sm) == FpSingleScratch)); \
NanoAssert(config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && ((_Sm) == S14)); \
*(--_nIns) = (NIns)( COND_AL | (0xEB8<<16) | (FpRegNum(_Dd)<<12) | (0x2D<<6) | (0<<5) | (0x7) ); \
asm_output("fuitod %s,%s", gpn(_Dd), gpn(_Sm)); \
} while (0)

#define FMSR(_Sn,_Rd) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(((_Sn) == FpSingleScratch) && IsGpReg(_Rd)); \
*(--_nIns) = (NIns)( COND_AL | (0xE0<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
asm_output("fmsr %s,%s", gpn(_Sn), gpn(_Rd)); \
} while (0)

#define FNEGD(_Dd,_Dm) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xEB1<<16) | (FpRegNum(_Dd)<<12) | (0xB4<<4) | (FpRegNum(_Dm)) ); \
asm_output("fnegd %s,%s", gpn(_Dd), gpn(_Dm)); \

@@ -895,7 +909,7 @@ enum {

#define FADDD(_Dd,_Dn,_Dm) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xE3<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB0<<4) | (FpRegNum(_Dm)) ); \
asm_output("faddd %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm)); \

@@ -903,7 +917,7 @@ enum {

#define FSUBD(_Dd,_Dn,_Dm) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xE3<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB4<<4) | (FpRegNum(_Dm)) ); \
asm_output("fsubd %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm)); \

@@ -911,7 +925,7 @@ enum {

#define FMULD(_Dd,_Dn,_Dm) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xE2<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB0<<4) | (FpRegNum(_Dm)) ); \
asm_output("fmuld %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm)); \

@@ -919,7 +933,7 @@ enum {

#define FDIVD(_Dd,_Dn,_Dm) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xE8<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB0<<4) | (FpRegNum(_Dm)) ); \
asm_output("fmuld %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm)); \

@@ -927,14 +941,14 @@ enum {

#define FMSTAT() do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(config.arm_vfp); \
*(--_nIns) = (NIns)( COND_AL | 0x0EF1FA10); \
asm_output("fmstat"); \
} while (0)

#define FCMPD(_Dd,_Dm,_E) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dm)); \
NanoAssert(((_E)==0) || ((_E)==1)); \
*(--_nIns) = (NIns)( COND_AL | (0xEB4<<16) | (FpRegNum(_Dd)<<12) | (0xB<<8) | ((_E)<<7) | (0x4<<4) | (FpRegNum(_Dm)) ); \

@@ -943,10 +957,116 @@ enum {

#define FCPYD(_Dd,_Dm) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xEB0<<16) | (FpRegNum(_Dd)<<12) | (0xB4<<4) | (FpRegNum(_Dm)) ); \
asm_output("fcpyd %s,%s", gpn(_Dd), gpn(_Dm)); \
} while (0)
}

#define FMRS(_Rd,_Sn) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(((_Sn) == S14) && IsGpReg(_Rd)); \
*(--_nIns) = (NIns)( COND_AL | (0xE1<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
asm_output("fmrs %s,%s", gpn(_Rd), gpn(_Sn)); \
} while (0)

/*
* The following instructions can only be used with S14 as the
* single-precision register; that limitation can be removed if
* needed, but we'd have to teach NJ about all the single precision
* regs, and their encoding is strange (top 4 bits usually in a block,
* low bit elsewhere).
*/

#define FSITOD(_Dd,_Sm) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && ((_Sm) == S14)); \
*(--_nIns) = (NIns)( COND_AL | (0xEB8<<16) | (FpRegNum(_Dd)<<12) | (0x2F<<6) | (0<<5) | (0x7) ); \
asm_output("fsitod %s,%s", gpn(_Dd), gpn(_Sm)); \
} while (0)

#define FMSR(_Sn,_Rd) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(((_Sn) == S14) && IsGpReg(_Rd)); \
*(--_nIns) = (NIns)( COND_AL | (0xE0<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
asm_output("fmsr %s,%s", gpn(_Sn), gpn(_Rd)); \
} while (0)
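One design note worth keeping in mind when reading these macros: Nanojit emits code backwards (every macro stores through *(--_nIns)), so a backend that wants the CPU to execute FMSR and then FSITOD must invoke the macros in the opposite order. A hypothetical int-to-double lowering, with the register names assumed:

// Executes as: fmsr s14, rGpr ; fsitod dDest, s14
// but is emitted bottom-up, so FSITOD is written first:
FSITOD(dDest, S14);   // convert the int bits in s14 to a double in dDest
FMSR(S14, rGpr);      // move the 32-bit integer from rGpr into s14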
#define FCVTSD(_Sd,_Dm) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(((_Sd) == S14) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xEB7<<16) | (0x7<<12) | (0xBC<<4) | (FpRegNum(_Dm)) ); \
asm_output("[0x%08x] fcvtsd s14,%s", *_nIns, gpn(_Dm)); \
} while (0)

#define FCVTDS_allowD7(_Dd,_Sm,_allowD7) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(((_Sm) == S14) && (IsFpReg(_Dd) || ((_allowD7) && (_Dd) == D7))); \
*(--_nIns) = (NIns)( COND_AL | (0xEB7<<16) | (FpRegNum(_Dd)<<12) | (0xAC<<4) | (0x7) ); \
asm_output("[0x%08x] fcvtds %s,s14", *_nIns, gpn(_Dd)); \
} while(0)

#define FCVTDS(_Dd,_Sm) \
FCVTDS_allowD7(_Dd,_Sm,0)

#define FLDS(_Sd,_Rn,_offs) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(((_Sd) == S14) && !IsFpReg(_Rn)); \
NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2)); \
int addflag = 1<<23; \
intptr_t offs = (_offs); \
if (offs < 0) { \
addflag = 0; \
offs = -offs; \
} \
*(--_nIns) = (NIns)( COND_AL | (0xD1<<20) | ((_Rn)<<16) | (0x7<<12) | (0xA << 8) | addflag | ((offs>>2)&0xff) ); \
asm_output("[0x%08x] flds s14, [%s, #%d]", *_nIns, gpn(_Rn), (_offs)); \
} while (0)

#define FSTS(_Sd,_Rn,_offs) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(((_Sd) == S14) && !IsFpReg(_Rn)); \
NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2)); \
int addflag = 1<<23; \
intptr_t offs = (_offs); \
if (offs < 0) { \
addflag = 0; \
offs = -offs; \
} \
*(--_nIns) = (NIns)( COND_AL | (0xD0<<20) | ((_Rn)<<16) | (0x7<<12) | (0xA << 8) | addflag | ((offs>>2)&0xff) ); \
asm_output("[0x%08x] fsts s14, [%s, #%d]", *_nIns, gpn(_Rn), (_offs)); \
} while (0)

#define FTOSID(_Sd,_Dm) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(((_Sd) == S14) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xEBD<<16) | (0x7<<12) | (0xB4<<4) | FpRegNum(_Dm) ); \
asm_output("ftosid s14, %s", gpn(_Dm)); \
} while (0)

} // namespace nanojit
#endif // __nanojit_NativeARM__
@@ -87,6 +87,7 @@ namespace nanojit
* sp+12 sp+24 reserved
*/

const int min_param_area_size = 8*sizeof(void*); // r3-r10
const int linkage_size = 6*sizeof(void*);
const int lr_offset = 2*sizeof(void*); // linkage.lr
const int cr_offset = 1*sizeof(void*); // linkage.cr

@@ -96,8 +97,13 @@ namespace nanojit
// stw r0, lr_offset(sp)
// stwu sp, -framesize(sp)

// param_area must be at least large enough for r3-r10 to be saved,
// regardless of whether we think the callee needs less: e.g., the callee
// might tail-call to a function that uses varargs, which could flush
// r3-r10 to the parameter area.
uint32_t param_area = (max_param_size > min_param_area_size) ? max_param_size : min_param_area_size;
// activation frame is 4 bytes per entry even on 64bit machines
uint32_t stackNeeded = max_param_size + linkage_size + _activation.stackSlotsNeeded() * 4;
uint32_t stackNeeded = param_area + linkage_size + _activation.stackSlotsNeeded() * 4;
uint32_t aligned = alignUp(stackNeeded, NJ_ALIGN_STACK);

UNLESS_PEDANTIC( if (isS16(aligned)) {
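To make the new frame computation concrete: on a 32-bit PPC build (sizeof(void*) == 4) with, say, max_param_size == 16 and ten activation stack slots, the arithmetic works out as in this sketch (the input values are illustrative):

// min_param_area_size = 8*4  = 32   -> param_area = max(16, 32) = 32
// linkage_size        = 6*4  = 24
// slots               = 10*4 = 40   (4 bytes per entry even on 64-bit)
// stackNeeded         = 32 + 24 + 40 = 96
// aligned             = alignUp(96, NJ_ALIGN_STACK /* 16 */) = 96

Before this change, stackNeeded would have used max_param_size (16) directly and under-allocated the parameter area a varargs tail-callee might flush r3-r10 into.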

@@ -137,13 +143,13 @@ namespace nanojit
// okay if r gets recycled.
r = findRegFor(lo, GpRegs);
STW(r, d, FP);
freeRsrcOf(ins, false); // if we had a reg in use, emit a ST to flush it to mem
deprecated_freeRsrcOf(ins, false); // if we had a reg in use, emit a ST to flush it to mem
}

void Assembler::asm_load32(LIns *ins) {
LIns* base = ins->oprnd1();
int d = ins->disp();
Register rr = prepResultReg(ins, GpRegs);
Register rr = deprecated_prepResultReg(ins, GpRegs);
Register ra = getBaseReg(base, d, GpRegs);

switch(ins->opcode()) {

@@ -238,17 +244,17 @@ namespace nanojit

LIns* base = ins->oprnd1();
#ifdef NANOJIT_64BIT
Register rr = ins->getReg();
Register rr = ins->deprecated_getReg();
if (isKnownReg(rr) && (rmask(rr) & FpRegs)) {
// FPR already assigned, fine, use it
freeRsrcOf(ins, false);
deprecated_freeRsrcOf(ins, false);
} else {
// use a GPR register; its okay to copy doubles with GPR's
// but *not* okay to copy non-doubles with FPR's
rr = prepResultReg(ins, GpRegs);
rr = deprecated_prepResultReg(ins, GpRegs);
}
#else
Register rr = prepResultReg(ins, FpRegs);
Register rr = deprecated_prepResultReg(ins, FpRegs);
#endif

int dr = ins->disp();

@@ -313,7 +319,7 @@ namespace nanojit
}

void Assembler::asm_store64(LOpcode op, LIns *value, int32_t dr, LIns *base) {
NanoAssert(value->isQuad());
NanoAssert(value->isI64() || value->isF64());

switch (op) {
case LIR_stfi:

@@ -359,9 +365,9 @@ namespace nanojit
Register rs = findRegFor(value, FpRegs);
#else
// if we have to choose a register, use a GPR
Register rs = ( value->isUnusedOrHasUnknownReg()
Register rs = ( !value->isInReg()
? findRegFor(value, GpRegs & ~rmask(ra))
: value->getReg() );
: value->deprecated_getReg() );

if (rmask(rs) & GpRegs) {
#if !PEDANTIC

@@ -396,7 +402,7 @@ namespace nanojit
LIns *a = ins->oprnd1();
LIns *b = ins->oprnd2();
ConditionRegister cr = CR7;
Register r = prepResultReg(ins, GpRegs);
Register r = deprecated_prepResultReg(ins, GpRegs);
switch (op) {
case LIR_eq: case LIR_feq:
case LIR_qeq:

@@ -631,7 +637,7 @@ namespace nanojit
releaseRegisters();
assignSavedRegs();
LIns *value = ins->oprnd1();
Register r = ins->isop(LIR_ret) ? R3 : F1;
Register r = ins->isop(LIR_fret) ? F1 : R3;
findSpecificRegFor(value, r);
}
@@ -644,35 +650,38 @@ namespace nanojit
void Assembler::asm_restore(LIns *i, Register r) {
int d;
if (i->isop(LIR_alloc)) {
d = disp(i);
d = deprecated_disp(i);
ADDI(r, FP, d);
}
else if (i->isconst()) {
if (!i->getArIndex()) {
i->markAsClear();
if (!i->deprecated_getArIndex()) {
i->deprecated_markAsClear();
}
asm_li(r, i->imm32());
}
else {
d = findMemFor(i);
if (IsFpReg(r)) {
NanoAssert(i->isQuad());
NanoAssert(i->isI64() || i->isF64());
LFD(r, d, FP);
} else if (i->isQuad()) {
} else if (i->isI64() || i->isF64()) {
NanoAssert(IsGpReg(r));
LD(r, d, FP);
} else {
NanoAssert(i->isI32());
NanoAssert(IsGpReg(r));
LWZ(r, d, FP);
}
}
}

void Assembler::asm_int(LIns *ins) {
Register rr = prepResultReg(ins, GpRegs);
Register rr = deprecated_prepResultReg(ins, GpRegs);
asm_li(rr, ins->imm32());
}

void Assembler::asm_fneg(LIns *ins) {
Register rr = prepResultReg(ins, FpRegs);
Register rr = deprecated_prepResultReg(ins, FpRegs);
Register ra = findRegFor(ins->oprnd1(), FpRegs);
FNEG(rr,ra);
}
@@ -685,7 +694,7 @@ namespace nanojit
// first eight args always in R3..R10 for PPC
if (a < 8) {
// incoming arg in register
prepResultReg(ins, rmask(argRegs[a]));
deprecated_prepResultReg(ins, rmask(argRegs[a]));
} else {
// todo: support stack based args, arg 0 is at [FP+off] where off
// is the # of regs to be pushed in genProlog()

@@ -694,13 +703,13 @@ namespace nanojit
}
else {
// saved param
prepResultReg(ins, rmask(savedRegs[a]));
deprecated_prepResultReg(ins, rmask(savedRegs[a]));
}
}

void Assembler::asm_call(LIns *ins) {
Register retReg = ( ins->isop(LIR_fcall) ? F1 : retRegs[0] );
prepResultReg(ins, rmask(retReg));
deprecated_prepResultReg(ins, rmask(retReg));

// Do this after we've handled the call result, so we don't
// force the call result to be spilled unnecessarily.

@@ -770,7 +779,7 @@ namespace nanojit

void Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
{
NanoAssert(r != UnknownReg);
NanoAssert(r != deprecated_UnknownReg);
if (sz & ARGSIZE_MASK_INT)
{
#ifdef NANOJIT_64BIT

@@ -787,20 +796,20 @@ namespace nanojit
asm_li(r, p->imm32());
} else {
if (p->isUsed()) {
if (!p->hasKnownReg()) {
if (!p->deprecated_hasKnownReg()) {
// load it into the arg reg
int d = findMemFor(p);
if (p->isop(LIR_alloc)) {
NanoAssert(isS16(d));
ADDI(r, FP, d);
} else if (p->isQuad()) {
} else if (p->isI64() || p->isF64()) {
LD(r, d, FP);
} else {
LWZ(r, d, FP);
}
} else {
// it must be in a saved reg
MR(r, p->getReg());
MR(r, p->deprecated_getReg());
}
}
else {

@@ -812,7 +821,7 @@ namespace nanojit
}
else if (sz == ARGSIZE_F) {
if (p->isUsed()) {
Register rp = p->getReg();
Register rp = p->deprecated_getReg();
if (!isKnownReg(rp) || !IsFpReg(rp)) {
// load it into the arg reg
int d = findMemFor(p);

@@ -858,7 +867,7 @@ namespace nanojit
LInsp lhs = ins->oprnd1();
LInsp rhs = ins->oprnd2();
RegisterMask allow = GpRegs;
Register rr = prepResultReg(ins, allow);
Register rr = deprecated_prepResultReg(ins, allow);
Register ra = findRegFor(lhs, GpRegs);

if (rhs->isconst()) {

@@ -964,9 +973,9 @@ namespace nanojit
LInsp lhs = ins->oprnd1();
LInsp rhs = ins->oprnd2();
RegisterMask allow = FpRegs;
Register rr = prepResultReg(ins, allow);
Register rr = deprecated_prepResultReg(ins, allow);
Register ra, rb;
findRegFor2(allow, lhs, ra, rhs, rb);
findRegFor2(allow, lhs, ra, allow, rhs, rb);
switch (op) {
case LIR_fadd: FADD(rr, ra, rb); break;
case LIR_fsub: FSUB(rr, ra, rb); break;

@@ -979,7 +988,7 @@ namespace nanojit
}

void Assembler::asm_i2f(LIns *ins) {
Register r = prepResultReg(ins, FpRegs);
Register r = deprecated_prepResultReg(ins, FpRegs);
Register v = findRegFor(ins->oprnd1(), GpRegs);
const int d = 16; // natural aligned

@@ -1002,7 +1011,7 @@ namespace nanojit
}

void Assembler::asm_u2f(LIns *ins) {
Register r = prepResultReg(ins, FpRegs);
Register r = deprecated_prepResultReg(ins, FpRegs);
Register v = findRegFor(ins->oprnd1(), GpRegs);
const int d = 16;

@@ -1023,9 +1032,20 @@ namespace nanojit
#endif
}

void Assembler::asm_f2i(LInsp) {
NanoAssertMsg(0, "NJ_F2I_SUPPORTED not yet supported for this architecture");
}

// XXX: this is sub-optimal, see https://bugzilla.mozilla.org/show_bug.cgi?id=540368#c7.
void Assembler::asm_q2i(LIns *ins) {
Register rr = deprecated_prepResultReg(ins, GpRegs);
int d = findMemFor(ins->oprnd1());
LWZ(rr, d+4, FP);
}

void Assembler::asm_promote(LIns *ins) {
LOpcode op = ins->opcode();
Register r = prepResultReg(ins, GpRegs);
Register r = deprecated_prepResultReg(ins, GpRegs);
Register v = findRegFor(ins->oprnd1(), GpRegs);
switch (op) {
default:
@@ -1042,17 +1062,17 @@ namespace nanojit

void Assembler::asm_quad(LIns *ins) {
#ifdef NANOJIT_64BIT
Register r = ins->getReg();
Register r = ins->deprecated_getReg();
if (isKnownReg(r) && (rmask(r) & FpRegs)) {
// FPR already assigned, fine, use it
freeRsrcOf(ins, false);
deprecated_freeRsrcOf(ins, false);
} else {
// use a GPR register; its okay to copy doubles with GPR's
// but *not* okay to copy non-doubles with FPR's
r = prepResultReg(ins, GpRegs);
r = deprecated_prepResultReg(ins, GpRegs);
}
#else
Register r = prepResultReg(ins, FpRegs);
Register r = deprecated_prepResultReg(ins, FpRegs);
#endif

if (rmask(r) & FpRegs) {

@@ -1173,16 +1193,16 @@ namespace nanojit
}

void Assembler::asm_cmov(LIns *ins) {
NanoAssert(ins->isop(LIR_cmov) || ins->isop(LIR_qcmov));
LIns* cond = ins->oprnd1();
LIns* iftrue = ins->oprnd2();
LIns* iffalse = ins->oprnd3();

NanoAssert(cond->isCmp());
NanoAssert(iftrue->isQuad() == iffalse->isQuad());
NanoAssert((ins->opcode() == LIR_cmov && iftrue->isI32() && iffalse->isI32()) ||
(ins->opcode() == LIR_qcmov && iftrue->isI64() && iffalse->isI64()));

// fixme: we could handle fpu registers here, too, since we're just branching
Register rr = prepResultReg(ins, GpRegs);
Register rr = deprecated_prepResultReg(ins, GpRegs);
findSpecificRegFor(iftrue, rr);
Register rf = findRegFor(iffalse, GpRegs & ~rmask(rr));
NIns *after = _nIns;

@@ -1191,26 +1211,25 @@ namespace nanojit
asm_branch(false, cond, after);
}

RegisterMask Assembler::hint(LIns *i, RegisterMask allow) {
LOpcode op = i->opcode();
RegisterMask prefer = ~0LL;
RegisterMask Assembler::hint(LIns* ins) {
LOpcode op = ins->opcode();
RegisterMask prefer = 0;
if (op == LIR_icall || op == LIR_qcall)
prefer = rmask(R3);
else if (op == LIR_fcall)
prefer = rmask(F1);
else if (op == LIR_param) {
if (i->paramArg() < 8) {
prefer = rmask(argRegs[i->paramArg()]);
if (ins->paramKind() == 0) {
if (ins->paramArg() < 8) {
prefer = rmask(argRegs[ins->paramArg()]);
}
}
}
// narrow the allow set to whatever is preferred and also free
if (_allocator.free & allow & prefer)
allow &= prefer;
return allow;
return prefer;
}
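The reworked hint() no longer narrows the allow set itself; it just returns a preference mask (0 meaning "no preference") and leaves the combining to the caller. A minimal sketch of the caller-side pattern this contract implies; the allocator entry point named here is hypothetical, not this patch's code:

RegisterMask allow  = GpRegs;
RegisterMask prefer = hint(ins);            // 0 == no preference
if (prefer && (_allocator.free & allow & prefer))
    allow &= prefer;                        // only narrow if a preferred reg is free
Register r = registerAlloc(ins, allow);     // hypothetical allocation call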

void Assembler::asm_neg_not(LIns *ins) {
Register rr = prepResultReg(ins, GpRegs);
Register rr = deprecated_prepResultReg(ins, GpRegs);
Register ra = findRegFor(ins->oprnd1(), GpRegs);
if (ins->isop(LIR_neg)) {
NEG(rr, ra);

@@ -1220,13 +1239,13 @@ namespace nanojit
}

void Assembler::asm_qlo(LIns *ins) {
Register rr = prepResultReg(ins, GpRegs);
Register rr = deprecated_prepResultReg(ins, GpRegs);
int d = findMemFor(ins->oprnd1());
LWZ(rr, d+4, FP);
}

void Assembler::asm_qhi(LIns *ins) {
Register rr = prepResultReg(ins, GpRegs);
Register rr = deprecated_prepResultReg(ins, GpRegs);
int d = findMemFor(ins->oprnd1());
LWZ(rr, d, FP);
TODO(asm_qhi);

@@ -1245,9 +1264,6 @@ namespace nanojit
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
IF_PEDANTIC( pedanticTop = _nIns; )
}
if (!_nExitIns) {
codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
}
}

void Assembler::nativePageReset()

@@ -1391,6 +1407,9 @@ namespace nanojit
}

void Assembler::swapCodeChunks() {
if (!_nExitIns) {
codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
}
SWAP(NIns*, _nIns, _nExitIns);
SWAP(NIns*, codeStart, exitStart);
SWAP(NIns*, codeEnd, exitEnd);
@@ -58,6 +58,7 @@ namespace nanojit
#define NJ_ALIGN_STACK 16
#define NJ_JTBL_SUPPORTED 1
#define NJ_EXPANDED_LOADSTORE_SUPPORTED 0
#define NJ_F2I_SUPPORTED 0

enum ConditionRegister {
CR0 = 0,

@@ -159,7 +160,7 @@ namespace nanojit
Rlr = 8,
Rctr = 9,

UnknownReg = 127,
deprecated_UnknownReg = 127,
FirstReg = R0,
LastReg = F31
};

@@ -258,6 +259,9 @@ namespace nanojit
static const int NumSavedRegs = 18; // R13-R30
#endif

static inline bool IsGpReg(Register r) {
return r <= R31;
}
static inline bool IsFpReg(Register r) {
return r >= F0;
}
@@ -154,7 +154,7 @@ namespace nanojit
void Assembler::asm_call(LInsp ins)
{
Register retReg = ( ins->isop(LIR_fcall) ? F0 : retRegs[0] );
prepResultReg(ins, rmask(retReg));
deprecated_prepResultReg(ins, rmask(retReg));

// Do this after we've handled the call result, so we don't
// force the call result to be spilled unnecessarily.

@@ -245,9 +245,9 @@ namespace nanojit
*(uint32_t*)&branch[1] |= (intptr_t)location & 0x3FF;
}

RegisterMask Assembler::hint(LIns* i, RegisterMask allow)
RegisterMask Assembler::hint(LIns* ins)
{
return allow;
return 0;
}

void Assembler::asm_qjoin(LIns *ins)

@@ -258,7 +258,7 @@ namespace nanojit
LIns* lo = ins->oprnd1();
LIns* hi = ins->oprnd2();

Register rr = ins->getReg();
Register rr = ins->deprecated_getReg();
if (isKnownReg(rr) && (rmask(rr) & FpRegs))
evict(ins);

@@ -279,7 +279,7 @@ namespace nanojit
STW32(rl, d, FP);
}

freeRsrcOf(ins, false); // if we had a reg in use, emit a ST to flush it to mem
deprecated_freeRsrcOf(ins, false); // if we had a reg in use, emit a ST to flush it to mem
}

@@ -288,12 +288,12 @@ namespace nanojit
underrunProtect(24);
if (i->isop(LIR_alloc)) {
ADD(FP, L2, r);
int32_t d = disp(i);
int32_t d = deprecated_disp(i);
SET32(d, L2);
}
else if (i->isconst()) {
if (!i->getArIndex()) {
i->markAsClear();
if (!i->deprecated_getArIndex()) {
i->deprecated_markAsClear();
}
int v = i->imm32();
SET32(v, r);

@@ -334,17 +334,13 @@ namespace nanojit
{
// make sure what is in a register
Register ra, rb;
if (base->isop(LIR_alloc)) {
rb = FP;
dr += findMemFor(base);
ra = findRegFor(value, GpRegs);
} else if (base->isconst()) {
if (base->isconst()) {
// absolute address
dr += base->imm32();
ra = findRegFor(value, GpRegs);
rb = G0;
} else {
findRegFor2(GpRegs, value, ra, base, rb);
getBaseReg2(GpRegs, value, ra, GpRegs, base, rb, dr);
}
STW32(ra, dr, rb);
}
@@ -384,9 +380,9 @@ namespace nanojit
underrunProtect(72);
LIns* base = ins->oprnd1();
int db = ins->disp();
Register rr = ins->getReg();
Register rr = ins->deprecated_getReg();

int dr = disp(ins);
int dr = deprecated_disp(ins);
Register rb;
if (base->isop(LIR_alloc)) {
rb = FP;

@@ -394,15 +390,15 @@ namespace nanojit
} else {
rb = findRegFor(base, GpRegs);
}
ins->setReg(UnknownReg);
ins->clearReg();

// don't use an fpu reg to simply load & store the value.
if (dr)
asm_mmq(FP, dr, rb, db);

freeRsrcOf(ins, false);
deprecated_freeRsrcOf(ins, false);

if (rr != UnknownReg)
if (rr != deprecated_UnknownReg)
{
NanoAssert(rmask(rr)&FpRegs);
_allocator.retire(rr);

@@ -473,9 +469,9 @@ namespace nanojit

// if value already in a reg, use that, otherwise
// try to get it into XMM regs before FPU regs.
Register rv = ( value->isUnusedOrHasUnknownReg()
Register rv = ( !value->isInReg()
? findRegFor(value, FpRegs)
: value->getReg() );
: value->deprecated_getReg() );

STDF32(rv, dr, rb);
}

@@ -581,9 +577,7 @@ namespace nanojit
LInsp lhs = cond->oprnd1();
LInsp rhs = cond->oprnd2();

NanoAssert((!lhs->isQuad() && !rhs->isQuad()) || (lhs->isQuad() && rhs->isQuad()));

NanoAssert(!lhs->isQuad() && !rhs->isQuad());
NanoAssert(lhs->isI32() && rhs->isI32());

// ready to issue the compare
if (rhs->isconst())

@@ -601,7 +595,7 @@ namespace nanojit
else
{
Register ra, rb;
findRegFor2(GpRegs, lhs, ra, rhs, rb);
findRegFor2(GpRegs, lhs, ra, GpRegs, rhs, rb);
SUBCC(ra, rb, G0);
}
}

@@ -609,7 +603,7 @@ namespace nanojit
void Assembler::asm_fcond(LInsp ins)
{
// only want certain regs
Register r = prepResultReg(ins, AllowableFlagRegs);
Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
underrunProtect(8);
LOpcode condop = ins->opcode();
NanoAssert(condop >= LIR_feq && condop <= LIR_fge);

@@ -632,7 +626,7 @@ namespace nanojit
underrunProtect(8);
// only want certain regs
LOpcode op = ins->opcode();
Register r = prepResultReg(ins, AllowableFlagRegs);
Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);

if (op == LIR_eq)
MOVEI(1, 1, 0, 0, r);

@@ -665,31 +659,31 @@ namespace nanojit
LInsp lhs = ins->oprnd1();
LInsp rhs = ins->oprnd2();

Register rb = UnknownReg;
Register rb = deprecated_UnknownReg;
RegisterMask allow = GpRegs;
bool forceReg = (op == LIR_mul || !rhs->isconst());

if (lhs != rhs && forceReg)
{
if ((rb = asm_binop_rhs_reg(ins)) == UnknownReg) {
if ((rb = asm_binop_rhs_reg(ins)) == deprecated_UnknownReg) {
rb = findRegFor(rhs, allow);
}
allow &= ~rmask(rb);
}
else if ((op == LIR_add||op == LIR_iaddp) && lhs->isop(LIR_alloc) && rhs->isconst()) {
// add alloc+const, use lea
Register rr = prepResultReg(ins, allow);
Register rr = deprecated_prepResultReg(ins, allow);
int d = findMemFor(lhs) + rhs->imm32();
ADD(FP, L2, rr);
SET32(d, L2);
}

Register rr = prepResultReg(ins, allow);
Register rr = deprecated_prepResultReg(ins, allow);
// if this is last use of lhs in reg, we can re-use result reg
// else, lhs already has a register assigned.
Register ra = ( lhs->isUnusedOrHasUnknownReg()
Register ra = ( !lhs->isInReg()
? findSpecificRegFor(lhs, rr)
: lhs->getReg() );
: lhs->deprecated_getReg() );

if (forceReg)
{
@ -749,14 +743,14 @@ namespace nanojit
|
|||
{
|
||||
underrunProtect(8);
|
||||
LOpcode op = ins->opcode();
|
||||
Register rr = prepResultReg(ins, GpRegs);
|
||||
Register rr = deprecated_prepResultReg(ins, GpRegs);
|
||||
|
||||
LIns* lhs = ins->oprnd1();
|
||||
// if this is last use of lhs in reg, we can re-use result reg
|
||||
// else, lhs already has a register assigned.
|
||||
Register ra = ( lhs->isUnusedOrHasUnknownReg()
|
||||
Register ra = ( !lhs->isInReg()
|
||||
? findSpecificRegFor(lhs, rr)
|
||||
: lhs->getReg() );
|
||||
: lhs->deprecated_getReg() );
|
||||
|
||||
if (op == LIR_not)
|
||||
ORN(G0, rr, rr);
|
||||
|
@ -773,7 +767,7 @@ namespace nanojit
|
|||
LOpcode op = ins->opcode();
|
||||
LIns* base = ins->oprnd1();
|
||||
int d = ins->disp();
|
||||
Register rr = prepResultReg(ins, GpRegs);
+        Register rr = deprecated_prepResultReg(ins, GpRegs);
         Register ra = getBaseReg(base, d, GpRegs);
         switch(op) {
             case LIR_ldzb:

@@ -809,9 +803,9 @@ namespace nanojit
         LIns* iffalse = ins->oprnd3();

         NanoAssert(condval->isCmp());
-        NanoAssert(op == LIR_qcmov || (!iftrue->isQuad() && !iffalse->isQuad()));
+        NanoAssert(op == LIR_cmov && iftrue->isI32() && iffalse->isI32());

-        const Register rr = prepResultReg(ins, GpRegs);
+        const Register rr = deprecated_prepResultReg(ins, GpRegs);

         // this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
         // (This is true on Intel, is it true on all architectures?)

@@ -841,7 +835,7 @@ namespace nanojit
     void Assembler::asm_qhi(LInsp ins)
     {
         underrunProtect(12);
-        Register rr = prepResultReg(ins, GpRegs);
+        Register rr = deprecated_prepResultReg(ins, GpRegs);
         LIns *q = ins->oprnd1();
         int d = findMemFor(q);
         LDSW32(FP, d+4, rr);

@@ -851,13 +845,13 @@ namespace nanojit
     {
         uint32_t a = ins->paramArg();
         uint32_t kind = ins->paramKind();
-        prepResultReg(ins, rmask(argRegs[a]));
+        deprecated_prepResultReg(ins, rmask(argRegs[a]));
     }

     void Assembler::asm_int(LInsp ins)
     {
         underrunProtect(8);
-        Register rr = prepResultReg(ins, GpRegs);
+        Register rr = deprecated_prepResultReg(ins, GpRegs);
         int32_t val = ins->imm32();
         if (val == 0)
             XOR(rr, rr, rr);

@@ -868,21 +862,21 @@ namespace nanojit
     void Assembler::asm_quad(LInsp ins)
     {
         underrunProtect(64);
-        Register rr = ins->getReg();
-        if (rr != UnknownReg)
+        Register rr = ins->deprecated_getReg();
+        if (rr != deprecated_UnknownReg)
         {
             // @todo -- add special-cases for 0 and 1
             _allocator.retire(rr);
-            ins->setReg(UnknownReg);
+            ins->clearReg();
             NanoAssert((rmask(rr) & FpRegs) != 0);
             findMemFor(ins);
-            int d = disp(ins);
+            int d = deprecated_disp(ins);
             LDDF32(FP, d, rr);
         }

         // @todo, if we used xor, ldsd, fldz, etc above, we don't need mem here
-        int d = disp(ins);
-        freeRsrcOf(ins, false);
+        int d = deprecated_disp(ins);
+        deprecated_freeRsrcOf(ins, false);
         if (d)
         {
             STW32(L2, d+4, FP);

@@ -899,13 +893,13 @@ namespace nanojit
     void Assembler::asm_fneg(LInsp ins)
     {
         underrunProtect(4);
-        Register rr = prepResultReg(ins, FpRegs);
+        Register rr = deprecated_prepResultReg(ins, FpRegs);
         LIns* lhs = ins->oprnd1();

         // lhs into reg, prefer same reg as result
         // if this is last use of lhs in reg, we can re-use result reg
         // else, lhs already has a different reg assigned
-        Register ra = ( lhs->isUnusedOrHasUnknownReg()
+        Register ra = ( !lhs->isInReg()
                         ? findSpecificRegFor(lhs, rr)
                         : findRegFor(lhs, FpRegs) );

@@ -923,7 +917,7 @@ namespace nanojit
         Register ra = findRegFor(lhs, FpRegs);
         Register rb = (rhs == lhs) ? ra : findRegFor(rhs, FpRegs);

-        Register rr = prepResultReg(ins, allow);
+        Register rr = deprecated_prepResultReg(ins, allow);

         if (op == LIR_fadd)
             FADDD(ra, rb, rr);

@@ -940,7 +934,7 @@ namespace nanojit
     {
         underrunProtect(32);
         // where our result goes
-        Register rr = prepResultReg(ins, FpRegs);
+        Register rr = deprecated_prepResultReg(ins, FpRegs);
         int d = findMemFor(ins->oprnd1());
         FITOD(rr, rr);
         LDDF32(FP, d, rr);

@@ -950,7 +944,7 @@ namespace nanojit
     {
         underrunProtect(72);
         // where our result goes
-        Register rr = prepResultReg(ins, FpRegs);
+        Register rr = deprecated_prepResultReg(ins, FpRegs);
         Register rt = registerAllocTmp(FpRegs & ~(rmask(rr)));
         Register gr = findRegFor(ins->oprnd1(), GpRegs);
         int disp = -8;

@@ -965,6 +959,10 @@ namespace nanojit
             SETHI(0x43300000, G1);
     }

+    void Assembler::asm_f2i(LInsp) {
+        NanoAssertMsg(0, "NJ_F2I_SUPPORTED not yet supported for this architecture");
+    }
+
     void Assembler::asm_nongp_copy(Register r, Register s)
     {
         underrunProtect(4);
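
The SETHI(0x43300000, G1) emitted above appears to be the classic exponent-bias trick for unsigned int-to-double conversion: 0x43300000'00000000 is the IEEE-754 bit pattern of 2^52, so gluing a 32-bit integer into the low word of that pattern yields the double 2^52 + n, and subtracting 2^52 recovers n exactly. A minimal C++ sketch of the arithmetic (the helper name and test harness are illustrative, not nanojit code):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Build 2^52 + x in the bit domain, then subtract 2^52 in the FP domain.
    double u2f_bias(uint32_t x) {
        uint64_t bits = 0x4330000000000000ull | x;  // high word 0x43300000
        double biased;
        std::memcpy(&biased, &bits, sizeof biased); // reinterpret the bits
        return biased - 4503599627370496.0;         // subtract 2^52
    }

    int main() {
        printf("%.1f\n", u2f_bias(0xFFFFFFFFu));    // prints 4294967295.0
    }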
@@ -1038,7 +1036,7 @@ namespace nanojit

     Register Assembler::asm_binop_rhs_reg(LInsp ins)
     {
-        return UnknownReg;
+        return deprecated_UnknownReg;
     }

     void Assembler::nativePageSetup()

@@ -1046,8 +1044,6 @@ namespace nanojit
         NanoAssert(!_inExit);
         if (!_nIns)
             codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
-        if (!_nExitIns)
-            codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
     }

     // Increment the 32-bit profiling counter at pCtr, without

@@ -1079,16 +1075,22 @@ namespace nanojit
         if (ins->isop(LIR_ret)) {
             findSpecificRegFor(val, retRegs[0]);
         } else {
             NanoAssert(ins->isop(LIR_fret));
             findSpecificRegFor(val, F0);
         }
     }

+    void Assembler::asm_q2i(LIns *) {
+        NanoAssert(0);  // q2i shouldn't occur on 32-bit platforms
+    }
+
     void Assembler::asm_promote(LIns *) {
         // i2q or u2q
-        TODO(asm_promote);
+        NanoAssert(0);  // i2q and u2q shouldn't occur on 32-bit platforms
     }

     void Assembler::swapCodeChunks() {
+        if (!_nExitIns)
+            codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
         SWAP(NIns*, _nIns, _nExitIns);
         SWAP(NIns*, codeStart, exitStart);
         SWAP(NIns*, codeEnd, exitEnd);
@@ -75,6 +75,7 @@ namespace nanojit
 #define NJ_MAX_PARAMETERS               1
 #define NJ_JTBL_SUPPORTED               0
 #define NJ_EXPANDED_LOADSTORE_SUPPORTED 0
+#define NJ_F2I_SUPPORTED                0

 const int NJ_ALIGN_STACK = 16;

@@ -163,7 +164,7 @@ namespace nanojit
         FirstReg = 0,
         LastReg = 29,
-        UnknownReg = 30
+        deprecated_UnknownReg = 30
     }
     Register;
@@ -61,8 +61,6 @@ better code
 - stack based LIR_param

 tracing
-- asm_qjoin
-- asm_qhi
 - nFragExit

 */

@@ -452,6 +450,7 @@ namespace nanojit
     void Assembler::CVTSI2SD(R l, R r)  { emitprr(X64_cvtsi2sd,l,r); asm_output("cvtsi2sd %s, %s",RQ(l),RL(r)); }
     void Assembler::CVTSS2SD(R l, R r)  { emitprr(X64_cvtss2sd,l,r); asm_output("cvtss2sd %s, %s",RQ(l),RL(r)); }
     void Assembler::CVTSD2SS(R l, R r)  { emitprr(X64_cvtsd2ss,l,r); asm_output("cvtsd2ss %s, %s",RL(l),RQ(r)); }
+    void Assembler::CVTSD2SI(R l, R r)  { emitprr(X64_cvtsd2si,l,r); asm_output("cvtsd2si %s, %s",RL(l),RQ(r)); }
     void Assembler::UCOMISD( R l, R r)  { emitprr(X64_ucomisd, l,r); asm_output("ucomisd %s, %s", RQ(l),RQ(r)); }
     void Assembler::MOVQRX(  R l, R r)  { emitprr(X64_movqrx,  r,l); asm_output("movq %s, %s", RQ(l),RQ(r)); } // Nb: r and l are deliberately reversed within the emitprr() call.
     void Assembler::MOVQXR(  R l, R r)  { emitprr(X64_movqxr,  l,r); asm_output("movq %s, %s", RQ(l),RQ(r)); }

@@ -615,16 +614,15 @@ namespace nanojit
 #ifdef _DEBUG
         RegisterMask originalAllow = allow;
 #endif
-        rb = UnknownReg;
         LIns *a = ins->oprnd1();
         LIns *b = ins->oprnd2();
         if (a != b) {
             rb = findRegFor(b, allow);
             allow &= ~rmask(rb);
         }
-        rr = prepResultReg(ins, allow);
+        rr = deprecated_prepResultReg(ins, allow);
         // if this is last use of a in reg, we can re-use result reg
-        if (a->isUnusedOrHasUnknownReg()) {
+        if (!a->isInReg()) {
             ra = findSpecificRegForUnallocated(a, rr);
         } else if (!(allow & rmask(a->getReg()))) {
             // 'a' already has a register assigned, but it's not valid.

@@ -714,7 +712,7 @@ namespace nanojit
         Register rr, ra;
         if (op == LIR_mul) {
             // imul has true 3-addr form, it doesn't clobber ra
-            rr = prepResultReg(ins, GpRegs);
+            rr = deprecated_prepResultReg(ins, GpRegs);
             LIns *a = ins->oprnd1();
             ra = findRegFor(a, GpRegs);
             IMULI(rr, ra, imm);

@@ -761,7 +759,7 @@ namespace nanojit
         if (ins->opcode() == LIR_mod) {
             // LIR_mod expects the LIR_div to be near
             div = ins->oprnd1();
-            prepResultReg(ins, rmask(RDX));
+            deprecated_prepResultReg(ins, rmask(RDX));
         } else {
             div = ins;
             evictIfActive(RDX);

@@ -772,10 +770,10 @@ namespace nanojit
         LIns *lhs = div->oprnd1();
         LIns *rhs = div->oprnd2();

-        prepResultReg(div, rmask(RAX));
+        deprecated_prepResultReg(div, rmask(RAX));

-        Register rhsReg = findRegFor(rhs, (GpRegs ^ (rmask(RAX)|rmask(RDX))));
-        Register lhsReg = lhs->isUnusedOrHasUnknownReg()
+        Register rhsReg = findRegFor(rhs, GpRegs & ~(rmask(RAX)|rmask(RDX)));
+        Register lhsReg = !lhs->isInReg()
                         ? findSpecificRegForUnallocated(lhs, RAX)
                         : lhs->getReg();
         IDIV(rhsReg);

@@ -830,7 +828,10 @@ namespace nanojit

     // binary op with fp registers
     void Assembler::asm_fop(LIns *ins) {
-        Register rr, ra, rb;
+        // NB, rb is always filled in by regalloc_binary,
+        // but compilers can't always tell that: init to UnspecifiedReg
+        // to avoid a warning.
+        Register rr, ra, rb = UnspecifiedReg;
         regalloc_binary(ins, FpRegs, rr, ra, rb);
         switch (ins->opcode()) {
         default: TODO(asm_fop);
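
The `rb = UnspecifiedReg` initialization added to asm_fop exists only to placate the compiler: regalloc_binary always writes rb, but that assignment happens behind a call the optimizer may not see through, which is enough to trigger a maybe-uninitialized warning. A tiny self-contained illustration of the same pattern (all names invented for the example):

    #include <cstdio>

    // The out-param is assigned on every real path, but via a call whose
    // body the compiler may not analyze; warnings result without a pre-init.
    void alwaysSets(int &out) { out = 42; }

    int main() {
        int rb = 0;            // pre-initialize, as asm_fop now does
        alwaysSets(rb);
        printf("%d\n", rb);
    }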
@@ -858,7 +859,7 @@ namespace nanojit

     void Assembler::asm_call(LIns *ins) {
         Register retReg = ( ins->isop(LIR_fcall) ? XMM0 : retRegs[0] );
-        prepResultReg(ins, rmask(retReg));
+        deprecated_prepResultReg(ins, rmask(retReg));

         // Do this after we've handled the call result, so we don't
         // force the call result to be spilled unnecessarily.

@@ -931,7 +932,7 @@ namespace nanojit

     void Assembler::asm_regarg(ArgSize sz, LIns *p, Register r) {
         if (sz == ARGSIZE_I) {
-            NanoAssert(!p->isQuad());
+            NanoAssert(p->isI32());
             if (p->isconst()) {
                 asm_quad(r, int64_t(p->imm32()));
                 return;

@@ -939,7 +940,7 @@ namespace nanojit
             // sign extend int32 to int64
             MOVSXDR(r, r);
         } else if (sz == ARGSIZE_U) {
-            NanoAssert(!p->isQuad());
+            NanoAssert(p->isI32());
             if (p->isconst()) {
                 asm_quad(r, uint64_t(uint32_t(p->imm32())));
                 return;

@@ -964,11 +965,11 @@ namespace nanojit
             MOVQSPR(stk_off, r);    // movq [rsp+d8], r
             if (sz == ARGSIZE_I) {
                 // extend int32 to int64
-                NanoAssert(!p->isQuad());
+                NanoAssert(p->isI32());
                 MOVSXDR(r, r);
             } else if (sz == ARGSIZE_U) {
                 // extend uint32 to uint64
-                NanoAssert(!p->isQuad());
+                NanoAssert(p->isI32());
                 MOVLR(r, r);
             }
         } else {

@@ -976,6 +977,13 @@ namespace nanojit
         }
     }

+    void Assembler::asm_q2i(LIns *ins) {
+        Register rr, ra;
+        regalloc_unary(ins, GpRegs, rr, ra);
+        NanoAssert(IsGpReg(ra));
+        MOVLR(rr, ra);  // 32bit mov zeros the upper 32bits of the target
+    }
+
     void Assembler::asm_promote(LIns *ins) {
         Register rr, ra;
         regalloc_unary(ins, GpRegs, rr, ra);

@@ -993,33 +1001,45 @@ namespace nanojit
     // chains longer.  So we precede with XORPS to clear the target register.

     void Assembler::asm_i2f(LIns *ins) {
-        Register r = prepResultReg(ins, FpRegs);
+        Register r = deprecated_prepResultReg(ins, FpRegs);
         Register b = findRegFor(ins->oprnd1(), GpRegs);
         CVTSI2SD(r, b);     // cvtsi2sd xmmr, b  only writes xmm:0:64
         XORPS(r);           // xorps xmmr,xmmr to break dependency chains
     }

     void Assembler::asm_u2f(LIns *ins) {
-        Register r = prepResultReg(ins, FpRegs);
+        Register r = deprecated_prepResultReg(ins, FpRegs);
         Register b = findRegFor(ins->oprnd1(), GpRegs);
-        NanoAssert(!ins->oprnd1()->isQuad());
+        NanoAssert(ins->oprnd1()->isI32());
         // since oprnd1 value is 32bit, its okay to zero-extend the value without worrying about clobbering.
         CVTSQ2SD(r, b);     // convert int64 to double
         XORPS(r);           // xorps xmmr,xmmr to break dependency chains
         MOVLR(b, b);        // zero extend u32 to int64
     }

+    void Assembler::asm_f2i(LIns *ins) {
+        LIns *lhs = ins->oprnd1();
+
+        NanoAssert(ins->isI32() && lhs->isF64());
+        Register r = prepareResultReg(ins, GpRegs);
+        Register b = findRegFor(lhs, FpRegs);
+
+        CVTSD2SI(r, b);
+
+        freeResourcesOf(ins);
+    }
+
     void Assembler::asm_cmov(LIns *ins) {
         LIns* cond    = ins->oprnd1();
         LIns* iftrue  = ins->oprnd2();
         LIns* iffalse = ins->oprnd3();
         NanoAssert(cond->isCmp());
-        NanoAssert((ins->isop(LIR_qcmov) && iftrue->isQuad() && iffalse->isQuad()) ||
-                   (ins->isop(LIR_cmov) && !iftrue->isQuad() && !iffalse->isQuad()));
+        NanoAssert((ins->isop(LIR_cmov)  && iftrue->isI32() && iffalse->isI32()) ||
+                   (ins->isop(LIR_qcmov) && iftrue->isI64() && iffalse->isI64()));

         // this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
         // (This is true on Intel, is it true on all architectures?)
-        const Register rr = prepResultReg(ins, GpRegs);
+        const Register rr = deprecated_prepResultReg(ins, GpRegs);
         const Register rf = findRegFor(iffalse, GpRegs & ~rmask(rr));

         LOpcode condop = cond->opcode();
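
The ordering in asm_u2f matters for values with the top bit set: since nanojit emits backwards, MOVLR runs first at execution time, zero-extending the 32-bit operand before CVTSQ2SD converts it as a 64-bit signed value. A standalone C++ check of the arithmetic this relies on, independent of the emitters:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t u = 0x80000000u;               // top bit set
        double viaInt32 = (double)(int32_t)u;   // signed 32-bit path: -2147483648.0
        double viaInt64 = (double)(int64_t)u;   // zero-extended path:  2147483648.0
        printf("%.1f %.1f\n", viaInt32, viaInt64);
    }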
@@ -1142,7 +1162,7 @@ namespace nanojit
         LIns *a = cond->oprnd1();
         Register ra, rb;
         if (a != b) {
-            findRegFor2(GpRegs, a, ra, b, rb);
+            findRegFor2(GpRegs, a, ra, GpRegs, b, rb);
         } else {
             // optimize-me: this will produce a const result!
             ra = rb = findRegFor(a, GpRegs);

@@ -1249,7 +1269,7 @@ namespace nanojit
         if (op == LIR_feq) {
             // result = ZF & !PF, must do logic on flags
             // r = al|bl|cl|dl, can only use rh without rex prefix
-            Register r = prepResultReg(ins, 1<<RAX|1<<RCX|1<<RDX|1<<RBX);
+            Register r = deprecated_prepResultReg(ins, 1<<RAX|1<<RCX|1<<RDX|1<<RBX);
             MOVZX8(r, r);       // movzx8   r,rl     r[8:63] = 0
             X86_AND8R(r);       // and      rl,rh    rl &= rh
             X86_SETNP(r);       // setnp    rh       rh = !PF

@@ -1262,7 +1282,7 @@ namespace nanojit
             op = LIR_fge;
             LIns *t = a; a = b; b = t;
         }
-        Register r = prepResultReg(ins, GpRegs); // x64 can use any GPR as setcc target
+        Register r = deprecated_prepResultReg(ins, GpRegs); // x64 can use any GPR as setcc target
         MOVZX8(r, r);
         if (op == LIR_fgt)
             SETA(r);

@@ -1274,38 +1294,37 @@ namespace nanojit

     void Assembler::fcmp(LIns *a, LIns *b) {
         Register ra, rb;
-        findRegFor2(FpRegs, a, ra, b, rb);
+        findRegFor2(FpRegs, a, ra, FpRegs, b, rb);
         UCOMISD(ra, rb);
     }

     void Assembler::asm_restore(LIns *ins, Register r) {
         if (ins->isop(LIR_alloc)) {
-            int d = disp(ins);
+            int d = arDisp(ins);
             LEAQRM(r, d, FP);
         }
         else if (ins->isconst()) {
-            if (!ins->getArIndex()) {
-                ins->markAsClear();
-            }
+            ins->clearReg();
             // unsafe to use xor r,r for zero because it changes cc's
             MOVI(r, ins->imm32());
         }
         else if (ins->isconstq() && IsGpReg(r)) {
-            if (!ins->getArIndex()) {
-                ins->markAsClear();
-            }
+            ins->clearReg();
             // unsafe to use xor r,r for zero because it changes cc's
             asm_quad(r, ins->imm64());
         }
         else {
             int d = findMemFor(ins);
             if (IsFpReg(r)) {
-                NanoAssert(ins->isQuad());
+                NanoAssert(ins->isI64() || ins->isF64());
                 // load 64bits into XMM.  don't know if double or int64, assume double.
                 MOVSDRM(r, d, FP);
-            } else if (ins->isQuad()) {
+            } else if (ins->isI64() || ins->isF64()) {
                 NanoAssert(IsGpReg(r));
                 MOVQRM(r, d, FP);
             } else {
                 NanoAssert(ins->isI32());
                 NanoAssert(IsGpReg(r));
                 MOVLRM(r, d, FP);
             }
         }
@@ -1314,7 +1333,7 @@ namespace nanojit
     void Assembler::asm_cond(LIns *ins) {
         LOpcode op = ins->opcode();
         // unlike x86-32, with a rex prefix we can use any GP register as an 8bit target
-        Register r = prepResultReg(ins, GpRegs);
+        Register r = deprecated_prepResultReg(ins, GpRegs);
         // SETcc only sets low 8 bits, so extend
         MOVZX8(r, r);
         switch (op) {

@@ -1352,7 +1371,7 @@ namespace nanojit
         releaseRegisters();
         assignSavedRegs();
         LIns *value = ins->oprnd1();
-        Register r = ins->isop(LIR_ret) ? RAX : XMM0;
+        Register r = ins->isop(LIR_fret) ? XMM0 : RAX;
         findSpecificRegFor(value, r);
     }

@@ -1374,34 +1393,31 @@ namespace nanojit
         dr = ins->disp();
         LIns *base = ins->oprnd1();
         rb = getBaseReg(base, dr, BaseRegs);
-        if (ins->isUnusedOrHasUnknownReg() || !(allow & rmask(ins->getReg()))) {
-            rr = prepResultReg(ins, allow & ~rmask(rb));
+        if (!ins->isInRegMask(allow)) {
+            rr = deprecated_prepResultReg(ins, allow & ~rmask(rb));
         } else {
             // keep already assigned register
             rr = ins->getReg();
             NanoAssert(allow & rmask(rr));
-            freeRsrcOf(ins, false);
+            deprecated_freeRsrcOf(ins, false);
         }
     }

     void Assembler::asm_load64(LIns *ins) {

         Register rr, rb;
         int32_t dr;
         switch (ins->opcode()) {
         case LIR_ldq:
         case LIR_ldqc:
             regalloc_load(ins, GpRegs, rr, dr, rb);
             NanoAssert(IsGpReg(rr));
             MOVQRM(rr, dr, rb);     // general 64bit load, 32bit const displacement
             break;
         case LIR_ldf:
         case LIR_ldfc:
-            regalloc_load(ins, GpRegs, rr, dr, rb);
-            if (IsGpReg(rr)) {
-                // general 64bit load, 32bit const displacement
-                MOVQRM(rr, dr, rb);
-            } else {
-                NanoAssert(IsFpReg(rr));
-                // load 64bits into XMM.  don't know if double or int64, assume double.
-                MOVSDRM(rr, dr, rb);
-            }
+            regalloc_load(ins, FpRegs, rr, dr, rb);
+            NanoAssert(IsFpReg(rr));
+            MOVSDRM(rr, dr, rb);    // load 64bits into XMM
             break;
         case LIR_ld32f:
         case LIR_ldc32f:

@@ -1418,7 +1434,7 @@ namespace nanojit
     }

     void Assembler::asm_load32(LIns *ins) {
-        NanoAssert(!ins->isQuad());
+        NanoAssert(ins->isI32());
         Register r, b;
         int32_t d;
         regalloc_load(ins, GpRegs, r, d, b);

@@ -1451,61 +1467,29 @@ namespace nanojit
     }

     void Assembler::asm_store64(LOpcode op, LIns *value, int d, LIns *base) {
-        NanoAssert(value->isQuad());
-
-        Register b = getBaseReg(base, d, BaseRegs);
-        Register r;
-
-        // if we have to choose a register, use a GPR, but not the base reg
-        if (value->isUnusedOrHasUnknownReg()) {
-            RegisterMask allow;
-            // If op is LIR_st32f and we have no reg, prefer FPR over GPR: saves an instruction later,
-            // and the value is almost certainly going to operated on as FP later anyway.
-            // XXX: isFloat doesn't cover float/fmod!  see bug 520208.
-            if (op == LIR_st32f || value->isFloat() || value->isop(LIR_float) || value->isop(LIR_fmod)) {
-                allow = FpRegs;
-            } else {
-                allow = GpRegs;
-            }
-            r = findRegFor(value, allow & ~rmask(b));
-        } else {
-            r = value->getReg();
-        }
+        NanoAssert(value->isI64() || value->isF64());

         switch (op) {
-        case LIR_stqi:
-        case LIR_stfi:
-        {
-            if (IsGpReg(r)) {
-                // gpr store
-                MOVQMR(r, d, b);
-            }
-            else {
-                // xmm store
-                MOVSDMR(r, d, b);
-            }
+        case LIR_stqi: {
+            Register r, b;
+            getBaseReg2(GpRegs, value, r, BaseRegs, base, b, d);
+            MOVQMR(r, d, b);    // gpr store
             break;
         }
-        case LIR_st32f:
-        {
-            // need a scratch FPR reg
+        case LIR_stfi: {
+            Register b = getBaseReg(base, d, BaseRegs);
+            Register r = findRegFor(value, FpRegs);
+            MOVSDMR(r, d, b);   // xmm store
+            break;
+        }
+        case LIR_st32f: {
+            Register b = getBaseReg(base, d, BaseRegs);
+            Register r = findRegFor(value, FpRegs);
             Register t = registerAllocTmp(FpRegs & ~rmask(r));

-            // store
-            MOVSSMR(t, d, b);
-
-            // cvt to single-precision
-            if (IsGpReg(r))
-            {
-                CVTSD2SS(t, t);
-                MOVQXR(t, r);   // xmm <- gpr: use movq xmm, r/m64 (66 REX.W 0F 6E /r)
-            }
-            else
-            {
-                NanoAssert(IsFpReg(r));
-                CVTSD2SS(t, r);
-            }
-            XORPS(t);   // break dependency chains
+            MOVSSMR(t, d, b);   // store
+            CVTSD2SS(t, r);     // cvt to single-precision
+            XORPS(t);           // break dependency chains
             break;
         }
         default:
@@ -1516,14 +1500,11 @@ namespace nanojit

     void Assembler::asm_store32(LOpcode op, LIns *value, int d, LIns *base) {

-        // quirk of x86-64: reg cannot appear to be ah/bh/ch/dh
-        // for single-byte stores with REX prefix
-        const RegisterMask SrcRegs =
-            (op == LIR_stb) ?
-            (GpRegs & ~(1<<RSP | 1<<RBP | 1<<RSI | 1<<RDI)) :
-            GpRegs;
+        // Quirk of x86-64: reg cannot appear to be ah/bh/ch/dh for
+        // single-byte stores with REX prefix.
+        const RegisterMask SrcRegs = (op == LIR_stb) ? SingleByteStoreRegs : GpRegs;

-        NanoAssert(!value->isQuad());
+        NanoAssert(value->isI32());
         Register b = getBaseReg(base, d, BaseRegs);
         Register r = findRegFor(value, SrcRegs & ~rmask(b));

@@ -1563,7 +1544,7 @@ namespace nanojit
     }

     void Assembler::asm_int(LIns *ins) {
-        Register r = prepResultReg(ins, GpRegs);
+        Register r = deprecated_prepResultReg(ins, GpRegs);
         int32_t v = ins->imm32();
         if (v == 0) {
             // special case for zero

@@ -1576,7 +1557,7 @@ namespace nanojit
     void Assembler::asm_quad(LIns *ins) {
         uint64_t v = ins->imm64();
         RegisterMask allow = v == 0 ? GpRegs|FpRegs : GpRegs;
-        Register r = prepResultReg(ins, allow);
+        Register r = deprecated_prepResultReg(ins, allow);
         if (v == 0) {
             if (IsGpReg(r)) {
                 // special case for zero

@@ -1591,7 +1572,7 @@ namespace nanojit
     }

     void Assembler::asm_qjoin(LIns*) {
-        TODO(asm_qjoin);
+        NanoAssert(0);  // qjoin shouldn't occur on non-SoftFloat platforms
     }

     void Assembler::asm_param(LIns *ins) {

@@ -1602,7 +1583,7 @@ namespace nanojit
         // first four or six args always in registers for x86_64 ABI
         if (a < (uint32_t)NumArgRegs) {
             // incoming arg in register
-            prepResultReg(ins, rmask(argRegs[a]));
+            deprecated_prepResultReg(ins, rmask(argRegs[a]));
         } else {
             // todo: support stack based args, arg 0 is at [FP+off] where off
             // is the # of regs to be pushed in genProlog()

@@ -1611,16 +1592,16 @@ namespace nanojit
         }
         else {
             // saved param
-            prepResultReg(ins, rmask(savedRegs[a]));
+            deprecated_prepResultReg(ins, rmask(savedRegs[a]));
         }
     }

     // register allocation for 2-address style unary ops of the form R = (op) R
     void Assembler::regalloc_unary(LIns *ins, RegisterMask allow, Register &rr, Register &ra) {
         LIns *a = ins->oprnd1();
-        rr = prepResultReg(ins, allow);
+        rr = deprecated_prepResultReg(ins, allow);
         // if this is last use of a in reg, we can re-use result reg
-        if (a->isUnusedOrHasUnknownReg()) {
+        if (!a->isInReg()) {
             ra = findSpecificRegForUnallocated(a, rr);
         } else {
             // 'a' already has a register assigned.  Caller must emit a copy

@@ -1649,7 +1630,7 @@ namespace nanojit
         // this is just hideous - can't use RIP-relative load, can't use
        // absolute-address load, and cant move imm64 const to XMM.
        // so do it all in a GPR.  hrmph.
-        rr = prepResultReg(ins, GpRegs);
+        rr = deprecated_prepResultReg(ins, GpRegs);
        ra = findRegFor(ins->oprnd1(), GpRegs & ~rmask(rr));
        XORQRR(rr, ra);                 // xor rr, ra
        asm_quad(rr, negateMask[0]);    // mov rr, 0x8000000000000000

@@ -1657,14 +1638,11 @@ namespace nanojit
     }

     void Assembler::asm_qhi(LIns*) {
-        TODO(asm_qhi);
+        NanoAssert(0);  // qhi shouldn't occur on non-SoftFloat platforms
     }

-    void Assembler::asm_qlo(LIns *ins) {
-        Register rr, ra;
-        regalloc_unary(ins, GpRegs, rr, ra);
-        NanoAssert(IsGpReg(ra));
-        MOVLR(rr, ra);  // 32bit mov zeros the upper 32bits of the target
+    void Assembler::asm_qlo(LIns *) {
+        NanoAssert(0);  // qlo shouldn't occur on non-SoftFloat platforms
     }

     void Assembler::asm_spill(Register rr, int d, bool /*pop*/, bool quad) {

@@ -1855,8 +1833,8 @@ namespace nanojit
 #endif
     }

-    RegisterMask Assembler::hint(LIns *, RegisterMask allow) {
-        return allow;
+    RegisterMask Assembler::hint(LIns* /*ins*/) {
+        return 0;
     }

     void Assembler::nativePageSetup() {

@@ -1865,9 +1843,6 @@ namespace nanojit
             codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
             IF_PEDANTIC( pedanticTop = _nIns; )
         }
-        if (!_nExitIns) {
-            codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
-        }
     }

     void Assembler::nativePageReset()

@@ -1910,6 +1885,9 @@ namespace nanojit
     }

     void Assembler::swapCodeChunks() {
+        if (!_nExitIns) {
+            codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
+        }
         SWAP(NIns*, _nIns, _nExitIns);
         SWAP(NIns*, codeStart, exitStart);
         SWAP(NIns*, codeEnd, exitEnd);

@@ -62,6 +62,7 @@ namespace nanojit
 #define NJ_ALIGN_STACK 16
 #define NJ_JTBL_SUPPORTED 1
 #define NJ_EXPANDED_LOADSTORE_SUPPORTED 1
+#define NJ_F2I_SUPPORTED 1

 enum Register {
     RAX = 0, // 1st int return, # of sse varargs

@@ -99,9 +100,12 @@ namespace nanojit
     XMM15 = 31, // scratch

     FP = RBP,
-    UnknownReg = 32,

     FirstReg = RAX,
-    LastReg = XMM15
+    LastReg = XMM15,
+
+    deprecated_UnknownReg = 32, // XXX: remove eventually, see bug 538924
+    UnspecifiedReg = 32
 };

 /*

@@ -193,6 +197,7 @@ namespace nanojit
     X64_cvtsq2sd= 0xC02A0F48F2000005LL, // convert int64 to double r = (double) b
     X64_cvtss2sd= 0xC05A0F40F3000005LL, // convert float to double r = (double) b
     X64_cvtsd2ss= 0xC05A0F40F2000005LL, // convert double to float r = (float) b
+    X64_cvtsd2si= 0xC02D0F40F2000005LL, // convert double to int32 r = (int32) b
     X64_divsd   = 0xC05E0F40F2000005LL, // divide scalar double r /= b
     X64_mulsd   = 0xC0590F40F2000005LL, // multiply scalar double r *= b
     X64_addsd   = 0xC0580F40F2000005LL, // add scalar double r += b

@@ -329,6 +334,10 @@ namespace nanojit
     static const int NumSavedRegs = 5; // rbx, r12-15
     static const int NumArgRegs = 6;
 #endif
+    // Warning: when talking about single byte registers, RSP/RBP/RSI/RDI are
+    // actually synonyms for AH/CH/DH/BH.  So this value means "any
+    // single-byte GpReg except AH/CH/DH/BH".
+    static const int SingleByteStoreRegs = GpRegs & ~(1<<RSP | 1<<RBP | 1<<RSI | 1<<RDI);

     static inline bool IsFpReg(Register r) {
         return ((1<<r) & FpRegs) != 0;
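
The new SingleByteStoreRegs mask encodes an x86 quirk: in a byte-sized operand, register encodings 4-7 name AH/CH/DH/BH unless a REX prefix reinterprets them, so a single-byte store must simply avoid the RSP/RBP/RSI/RDI slots. A sketch of how such a mask is assembled (the register numbers below follow the hardware encoding, which the header's enum is assumed to mirror):

    #include <cstdint>

    enum Register { RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI };
    typedef uint32_t RegisterMask;
    static inline RegisterMask rmask(Register r) { return 1u << r; }

    const RegisterMask GpRegs = 0xFFu;  // sketch: just the low 8 GPRs
    const RegisterMask SingleByteStoreRegs =
        GpRegs & ~(rmask(RSP) | rmask(RBP) | rmask(RSI) | rmask(RDI));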
@@ -469,6 +478,7 @@ namespace nanojit
     void CVTSI2SD(Register l, Register r);\
     void CVTSS2SD(Register l, Register r);\
     void CVTSD2SS(Register l, Register r);\
+    void CVTSD2SI(Register l, Register r);\
     void UCOMISD(Register l, Register r);\
     void MOVQRX(Register l, Register r);\
     void MOVQXR(Register l, Register r);\
@@ -93,13 +93,13 @@ namespace nanojit
           : "%eax", "%esi", "%ecx", "%edx"
        );
 #elif defined __SUNPRO_C || defined __SUNPRO_CC
-       asm("xchg %%esi, %%ebx\n"
+       asm("push %%ebx\n"
            "mov $0x01, %%eax\n"
            "cpuid\n"
-           "xchg %%esi, %%ebx\n"
+           "pop %%ebx\n"
            : "=d" (features)
            : /* We have no inputs */
-           : "%eax", "%ecx", "esi"
+           : "%eax", "%ecx"
        );
 #endif
        return (features & (1<<26)) != 0;
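
The Sun Studio branch previously parked EBX in ESI around CPUID; the fix saves and restores EBX with push/pop instead, and drops the bogus "esi" clobber. CPUID always clobbers EBX, which the compiler may reserve as the PIC register, so it has to be preserved without tying up another register. For comparison, a hedged sketch of the same SSE2 probe written against GCC/Clang's <cpuid.h> helper, which handles the EBX save internally; nothing here is nanojit API:

    #include <cpuid.h>
    #include <cstdio>

    static bool hasSSE2() {
        unsigned eax, ebx, ecx, edx;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return false;                 // CPUID leaf 1 unsupported
        return (edx & (1u << 26)) != 0;   // EDX bit 26 = SSE2, as checked above
    }

    int main() { printf("sse2: %d\n", hasSSE2()); }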
@@ -200,7 +200,7 @@ namespace nanojit
     void Assembler::asm_call(LInsp ins)
     {
         Register retReg = ( ins->isop(LIR_fcall) ? FST0 : retRegs[0] );
-        prepResultReg(ins, rmask(retReg));
+        deprecated_prepResultReg(ins, rmask(retReg));

         // Do this after we've handled the call result, so we don't
         // force the call result to be spilled unnecessarily.

@@ -291,7 +291,7 @@ namespace nanojit
         {
             uint32_t j = argc-i-1;
             ArgSize sz = sizes[j];
-            Register r = UnknownReg;
+            Register r = UnspecifiedReg;
             if (n < max_regs && sz != ARGSIZE_F) {
                 r = argRegs[n++]; // tell asm_arg what reg to use
             }

@@ -312,7 +312,7 @@ namespace nanojit
     {
         Register r;
         RegAlloc &regs = _allocator;
-    #ifdef WIN32
+    #ifdef _MSC_VER
         _asm
         {
             mov ecx, regs

@@ -320,19 +320,13 @@ namespace nanojit
             btr RegAlloc::free[ecx], eax    // free &= ~rmask(i)
             mov r, eax
         }
-    #elif defined __SUNPRO_C || defined __SUNPRO_CC
-        asm(
-            "bsf    %1, %%edi\n\t"
-            "btr    %%edi, (%2)\n\t"
-            "movl   %%edi, %0\n\t"
-            : "=a"(r) : "d"(set), "c"(&regs.free) : "%edi", "memory" );
 #else
         asm(
             "bsf    %1, %%eax\n\t"
             "btr    %%eax, %2\n\t"
             "movl   %%eax, %0\n\t"
             : "=m"(r) : "m"(set), "m"(regs.free) : "%eax", "memory" );
-    #endif /* WIN32 */
+    #endif /* _MSC_VER */
         return r;
     }
@@ -357,36 +351,39 @@ namespace nanojit
             NanoAssertMsg(0, "Unknown branch type in nPatchBranch");
     }

-    RegisterMask Assembler::hint(LIns* i, RegisterMask allow)
+    RegisterMask Assembler::hint(LIns* ins)
     {
-        uint32_t op = i->opcode();
-        int prefer = allow;
+        uint32_t op = ins->opcode();
+        int prefer = 0;

         if (op == LIR_icall) {
-            prefer &= rmask(retRegs[0]);
+            prefer = rmask(retRegs[0]);
         }
         else if (op == LIR_fcall) {
-            prefer &= rmask(FST0);
+            prefer = rmask(FST0);
         }
         else if (op == LIR_param) {
-            if (i->paramKind() == 0) {
+            uint8_t arg = ins->paramArg();
+            if (ins->paramKind() == 0) {
                 uint32_t max_regs = max_abi_regs[_thisfrag->lirbuf->abi];
-                if (i->paramArg() < max_regs)
-                    prefer &= rmask(argRegs[i->paramArg()]);
+                if (arg < max_regs)
+                    prefer = rmask(argRegs[arg]);
             } else {
-                if (i->paramArg() < NumSavedRegs)
-                    prefer &= rmask(savedRegs[i->paramArg()]);
+                if (arg < NumSavedRegs)
+                    prefer = rmask(savedRegs[arg]);
             }
         }
-        else if (op == LIR_callh || (op == LIR_rsh && i->oprnd1()->opcode()==LIR_callh)) {
-            prefer &= rmask(retRegs[1]);
+        else if (op == LIR_callh || (op == LIR_rsh && ins->oprnd1()->opcode()==LIR_callh)) {
+            prefer = rmask(retRegs[1]);
         }
-        else if (i->isCmp()) {
-            prefer &= AllowableFlagRegs;
+        else if (ins->isCmp()) {
+            prefer = AllowableFlagRegs;
         }
-        else if (i->isconst()) {
-            prefer &= ScratchRegs;
+        else if (ins->isconst()) {
+            prefer = ScratchRegs;
         }
-        return (_allocator.free & prefer) ? prefer : allow;
+
+        return prefer;
     }

     void Assembler::asm_qjoin(LIns *ins)
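
After this rewrite hint() returns a bare preference mask (0 meaning no preference) instead of filtering the caller's allow set, and it no longer peeks at _allocator.free; that combination logic moves to the caller. A sketch of what the calling side now has to do, with invented names since the actual call site is outside this diff:

    #include <cstdint>
    typedef uint32_t RegisterMask;

    // prefer = hint(ins); allow = the instruction's legal register set;
    // free = the allocator's currently free registers.
    RegisterMask chooseSearchSet(RegisterMask allow, RegisterMask prefer,
                                 RegisterMask free) {
        RegisterMask hinted = allow & prefer & free;
        return hinted ? hinted : allow;   // fall back to the full legal set
    }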
@@ -396,8 +393,7 @@ namespace nanojit
         LIns* lo = ins->oprnd1();
         LIns* hi = ins->oprnd2();

-        Register rr = ins->getReg();
-        if (isKnownReg(rr) && (rmask(rr) & FpRegs))
+        if (ins->isInRegMask(FpRegs))
             evict(ins);

         if (hi->isconst())

@@ -421,7 +417,7 @@ namespace nanojit
             ST(FP, d, r);
         }

-        freeRsrcOf(ins, false); // if we had a reg in use, emit a ST to flush it to mem
+        deprecated_freeRsrcOf(ins, false); // if we had a reg in use, emit a ST to flush it to mem
     }

     // WARNING: the code generated by this function must not affect the

@@ -436,14 +432,16 @@ namespace nanojit
             // The value of a LIR_alloc instruction is the address of the
             // stack allocation.  We can rematerialize that from the record we
             // have of where the allocation lies in the stack.
-            NanoAssert(ins->getArIndex());  // must have stack slots allocated
-            LEA(r, disp(ins), FP);
+            NanoAssert(ins->isInAr());      // must have stack slots allocated
+            LEA(r, arDisp(ins), FP);

         } else if (ins->isconst()) {
             asm_int(r, ins->imm32(), /*canClobberCCs*/false);
-            if (!ins->getArIndex()) {
-                ins->markAsClear();
-            }
+            ins->clearReg();

+        } else if (ins->isconstq()) {
+            asm_quad(r, ins->imm64(), ins->imm64f(), /*canClobberCCs*/false);
+            ins->clearReg();

         } else if (ins->isop(LIR_param) && ins->paramKind() == 0 &&
             (arg = ins->paramArg()) >= (abi_regcount = max_abi_regs[_thisfrag->lirbuf->abi])) {

@@ -459,9 +457,7 @@ namespace nanojit
             //
             int d = (arg - abi_regcount) * sizeof(intptr_t) + 8;
             LD(r, d, FP);
-            if (!ins->getArIndex()) {
-                ins->markAsClear();
-            }
+            ins->clearReg();

         } else {
             int d = findMemFor(ins);

@@ -504,17 +500,13 @@ namespace nanojit
             GpRegs;

         Register ra, rb;
-        if (base->isop(LIR_alloc)) {
-            rb = FP;
-            dr += findMemFor(base);
-            ra = findRegFor(value, SrcRegs);
-        } else if (base->isconst()) {
+        if (base->isconst()) {
             // absolute address
-            rb = UnknownReg;
+            rb = UnspecifiedReg;
             dr += base->imm32();
             ra = findRegFor(value, SrcRegs);
         } else {
-            findRegFor2(SrcRegs, value, ra, base, rb);
+            getBaseReg2(SrcRegs, value, ra, GpRegs, base, rb, dr);
         }
         switch (op) {
         case LIR_stb:

@@ -538,18 +530,13 @@ namespace nanojit
         (void)quad;
         if (d)
         {
-            // save to spill location
-            if (rmask(rr) & FpRegs)
-            {
-                if (rmask(rr) & XmmRegs) {
-                    SSE_STQ(d, FP, rr);
-                } else {
-                    FSTQ((pop?1:0), d, FP);
-                }
-            }
-            else
-            {
+            if (rmask(rr) & GpRegs) {
                 ST(FP, d, rr);
+            } else if (rmask(rr) & XmmRegs) {
+                SSE_STQ(d, FP, rr);
+            } else {
+                NanoAssert(rmask(rr) & x87Regs);
+                FSTQ((pop?1:0), d, FP);
             }
         }
         else if (pop && (rmask(rr) & x87Regs))

@@ -565,11 +552,15 @@ namespace nanojit

         LIns* base = ins->oprnd1();
         int db = ins->disp();
-        Register rr = ins->getReg();

-        if (isKnownReg(rr) && rmask(rr) & XmmRegs)
+        Register rr = UnspecifiedReg;   // init to shut GCC up
+        bool inReg = ins->isInReg();
+        if (inReg)
+            rr = ins->getReg();
+
+        if (inReg && (rmask(rr) & XmmRegs))
         {
-            freeRsrcOf(ins, false);
+            deprecated_freeRsrcOf(ins, false);
             Register rb = getBaseReg(base, db, GpRegs);
             switch (ins->opcode()) {
                 case LIR_ldf:

@@ -589,8 +580,10 @@ namespace nanojit
         }
         else
         {
-            int dr = disp(ins);
+            bool inAr = ins->isInAr();
+            int dr = 0;
+            if (inAr)
+                dr = arDisp(ins);
             Register rb;
             if (base->isop(LIR_alloc)) {
                 rb = FP;

@@ -598,16 +591,16 @@ namespace nanojit
             } else {
                 rb = findRegFor(base, GpRegs);
             }
-            ins->setReg(UnknownReg);
+            ins->clearReg();

             switch (ins->opcode()) {
                 case LIR_ldf:
                 case LIR_ldfc:
                     // don't use an fpu reg to simply load & store the value.
-                    if (dr)
+                    if (inAr)
                         asm_mmq(FP, dr, rb, db);
-                    freeRsrcOf(ins, false);
-                    if (isKnownReg(rr))
+                    deprecated_freeRsrcOf(ins, false);
+                    if (inReg)
                     {
                         NanoAssert(rmask(rr)&x87Regs);
                         _allocator.retire(rr);

@@ -616,22 +609,22 @@ namespace nanojit
                     break;
                 case LIR_ld32f:
                 case LIR_ldc32f:
-                    freeRsrcOf(ins, false);
-                    if (isKnownReg(rr))
+                    deprecated_freeRsrcOf(ins, false);
+                    if (inReg)
                     {
                         NanoAssert(rmask(rr)&x87Regs);
                         _allocator.retire(rr);
                         // Be sure to shadow the value onto our local area if there's space for it,
                         // but don't pop the FP stack, we expect the register to stay valid.
-                        if (dr)
-                            FSTQ(0,dr, FP);
+                        if (inAr)
+                            FSTQ(0, dr, FP);
                         FLD32(db, rb);
                     }
                     else
                     {
                         // We need to use fpu to expand 32->64, can't use asm_mmq...
                         // just load-and-store-with-pop.
-                        NanoAssert(dr != 0);
+                        NanoAssert(inAr);
                         FSTPQ(dr, FP);
                         FLD32(db, rb);
                     }

@@ -650,7 +643,7 @@ namespace nanojit
         Register rb = getBaseReg(base, dr, GpRegs);

         if (op == LIR_st32f) {
-            bool pop = value->isUnusedOrHasUnknownReg();
+            bool pop = !value->isInReg();
             Register rv = ( pop
                           ? findRegFor(value, config.sse2 ? XmmRegs : FpRegs)
                           : value->getReg() );

@@ -692,7 +685,7 @@ namespace nanojit

         } else {
             NanoAssert(!value->isop(LIR_ldq) && !value->isop(LIR_ldqc));
-            bool pop = value->isUnusedOrHasUnknownReg();
+            bool pop = !value->isInReg();
             Register rv = ( pop
                           ? findRegFor(value, config.sse2 ? XmmRegs : FpRegs)
                           : value->getReg() );
@@ -836,10 +829,7 @@ namespace nanojit
         LInsp lhs = cond->oprnd1();
         LInsp rhs = cond->oprnd2();

-        NanoAssert((!lhs->isQuad() && !rhs->isQuad()) || (lhs->isQuad() && rhs->isQuad()));
-
-        // Not supported yet.
-        NanoAssert(!lhs->isQuad() && !rhs->isQuad());
+        NanoAssert(lhs->isI32() && rhs->isI32());

         // Ready to issue the compare.
         if (rhs->isconst()) {

@@ -855,7 +845,7 @@ namespace nanojit

         } else {
             Register ra, rb;
-            findRegFor2(GpRegs, lhs, ra, rhs, rb);
+            findRegFor2(GpRegs, lhs, ra, GpRegs, rhs, rb);
             CMP(ra, rb);
         }
     }

@@ -962,7 +952,7 @@ namespace nanojit

         bool isConstRhs;
         RegisterMask allow = GpRegs;
-        Register rb = UnknownReg;
+        Register rb = UnspecifiedReg;

         switch (op) {
         case LIR_div:

@@ -1002,7 +992,7 @@ namespace nanojit
         Register rr = prepareResultReg(ins, allow);

         // If 'lhs' isn't in a register, it can be clobbered by 'ins'.
-        Register ra = lhs->isUnusedOrHasUnknownReg() ? rr : lhs->getReg();
+        Register ra = !lhs->isInReg() ? rr : lhs->getReg();

         if (!isConstRhs) {
             if (lhs == rhs)

@@ -1050,7 +1040,7 @@ namespace nanojit
             MR(rr, ra);

         freeResourcesOf(ins);
-        if (lhs->isUnusedOrHasUnknownReg()) {
+        if (!lhs->isInReg()) {
             NanoAssert(ra == rr);
             findSpecificRegForUnallocated(lhs, ra);
         }

@@ -1073,7 +1063,7 @@ namespace nanojit

         Register rDivR = findRegFor(divR, (GpRegs & ~(rmask(EAX)|rmask(EDX))));

-        Register rDivL = divL->isUnusedOrHasUnknownReg() ? EAX : divL->getReg();
+        Register rDivL = !divL->isInReg() ? EAX : divL->getReg();

         DIV(rDivR);
         CDQ(); // sign-extend EAX into EDX:EAX

@@ -1083,7 +1073,7 @@ namespace nanojit

         freeResourcesOf(mod);
         freeResourcesOf(div);
-        if (divL->isUnusedOrHasUnknownReg()) {
+        if (!divL->isInReg()) {
             NanoAssert(rDivL == EAX);
             findSpecificRegForUnallocated(divL, EAX);
         }

@@ -1114,7 +1104,7 @@ namespace nanojit
         Register rr = prepareResultReg(ins, GpRegs);

         // If 'lhs' isn't in a register, it can be clobbered by 'ins'.
-        Register ra = lhs->isUnusedOrHasUnknownReg() ? rr : lhs->getReg();
+        Register ra = !lhs->isInReg() ? rr : lhs->getReg();

         if (op == LIR_not)
             NOT(rr);

@@ -1125,7 +1115,7 @@ namespace nanojit
             MR(rr, ra);

         freeResourcesOf(ins);
-        if (lhs->isUnusedOrHasUnknownReg()) {
+        if (!lhs->isInReg()) {
             NanoAssert(ra == rr);
             findSpecificRegForUnallocated(lhs, ra);
         }

@@ -1136,7 +1126,7 @@ namespace nanojit
         LOpcode op = ins->opcode();
         LIns* base = ins->oprnd1();
         int32_t d = ins->disp();
-        Register rr = prepResultReg(ins, GpRegs);
+        Register rr = deprecated_prepResultReg(ins, GpRegs);

         if (base->isconst()) {
             intptr_t addr = base->imm32();

@@ -1188,12 +1178,12 @@ namespace nanojit
         /* Does LHS have a register yet? If not, re-use the result reg.
         * @todo -- If LHS is const, we could eliminate a register use.
         */
-        Register rleft = ( lhs->isUnusedOrHasUnknownReg()
+        Register rleft = ( !lhs->isInReg()
                          ? findSpecificRegForUnallocated(lhs, rr)
                          : lhs->getReg() );

         /* Does RHS have a register yet? If not, try to re-use the result reg. */
-        Register rright = ( rr != rleft && rhs->isUnusedOrHasUnknownReg()
+        Register rright = ( rr != rleft && !rhs->isInReg()
                           ? findSpecificRegForUnallocated(rhs, rr)
                           : findRegFor(rhs, GpRegs & ~(rmask(rleft))) );

@@ -1260,9 +1250,9 @@ namespace nanojit
         LIns* iffalse = ins->oprnd3();

         NanoAssert(condval->isCmp());
-        NanoAssert(op == LIR_qcmov || (!iftrue->isQuad() && !iffalse->isQuad()));
+        NanoAssert(op == LIR_cmov && iftrue->isI32() && iffalse->isI32());

-        const Register rr = prepResultReg(ins, GpRegs);
+        const Register rr = deprecated_prepResultReg(ins, GpRegs);

         // this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
         // (This is true on Intel, is it true on all architectures?)

@@ -1292,10 +1282,20 @@ namespace nanojit

     void Assembler::asm_qhi(LInsp ins)
     {
-        Register rr = prepResultReg(ins, GpRegs);
+        Register rr = deprecated_prepResultReg(ins, GpRegs);
         LIns *q = ins->oprnd1();
-        int d = findMemFor(q);
-        LD(rr, d+4, FP);
+        if (q->isconstq())
+        {
+            // This should only be possible if ExprFilter isn't in use,
+            // as it will fold qhi(qconst()) properly... still, if it's
+            // disabled, we need this for proper behavior
+            LDi(rr, q->imm64_1());
+        }
+        else
+        {
+            int d = findMemFor(q);
+            LD(rr, d+4, FP);
+        }
     }

     void Assembler::asm_param(LInsp ins)

@@ -1308,17 +1308,17 @@ namespace nanojit
             uint32_t abi_regcount = max_abi_regs[abi];
             if (a < abi_regcount) {
                 // Incoming arg in register.
-                prepResultReg(ins, rmask(argRegs[a]));
+                deprecated_prepResultReg(ins, rmask(argRegs[a]));
             } else {
                 // Incoming arg is on stack, and EBP points nearby (see genPrologue()).
-                Register r = prepResultReg(ins, GpRegs);
+                Register r = deprecated_prepResultReg(ins, GpRegs);
                 int d = (a - abi_regcount) * sizeof(intptr_t) + 8;
                 LD(r, d, FP);
             }
         }
         else {
             // saved param
-            prepResultReg(ins, rmask(savedRegs[a]));
+            deprecated_prepResultReg(ins, rmask(savedRegs[a]));
         }
     }
@@ -1339,80 +1339,51 @@ namespace nanojit
             LDi(r, val);
     }

-    void Assembler::asm_quad(LInsp ins)
+    void Assembler::asm_quad(Register r, uint64_t q, double d, bool canClobberCCs)
     {
-        Register rr = ins->getReg();
-
-        // Quads require non-standard handling. Except for rematerializable
-        // values like small integers, we have to do the following to put a
-        // 64-bit immediate in an XMM register:
-        //
-        //   store imm64_0 to address A:    mov -40(ebp), 0
-        //   store imm64_1 to address A+4:  mov -36(ebp), 1074266112
-        //   load address A into XMM reg:   movq xmm2, -40(ebp)
-        //
-        // In other words, putting an immediate in an XMM reg requires loading
-        // it from memory.  This is unlike most other LIR operations which can
-        // be computed directly in a register.
-        //
-        // If we handled this like other cases, when 'ins' is in memory rather
-        // than a register (ie. isKnownReg(rr) is false) we would generate
-        // code to compute it and immediately spill (store) it.  But that
-        // would be stupid in this case, because computing it involved loading
-        // it!  (Via the 'movq' in the example.)  So instead we avoid the
-        // spill and also the load, and just put it directly into the spill
-        // slots.  That's why we don't use prepareResultReg() here like
-        // everywhere else.
-
-        if (isKnownReg(rr)) {
-            NanoAssert((rmask(rr) & FpRegs) != 0);
-
-            // @todo -- add special-cases for 0 and 1
-            const double d = ins->imm64f();
-            const uint64_t q = ins->imm64();
-            if (rmask(rr) & XmmRegs) {
-                if (q == 0.0) {
-                    // test (int64)0 since -0.0 == 0.0
-                    SSE_XORPDr(rr, rr);
-                } else if (d == 1.0) {
-                    // 1.0 is extremely frequent and worth special-casing!
-                    // Nb: k_ONE is static because we actually load its value
-                    // at runtime.
-                    static const double k_ONE = 1.0;
-                    LDSDm(rr, &k_ONE);
-                } else if (d && d == (int)d) {
-                    // Can it fit in 32bits?  If so, use cvtsi2sd which is faster.
-                    Register rt = registerAllocTmp(GpRegs);
-                    SSE_CVTSI2SD(rr, rt);
-                    SSE_XORPDr(rr,rr);  // zero rr to ensure no dependency stalls
-                    asm_int(rt, (int)d, /*canClobberCCs*/true);
-                } else {
-                    findMemFor(ins);    // get stack space into which we can generate the value
-                    const int d = disp(ins);
-                    SSE_LDQ(rr, d, FP); // load it into a register
-                }
-            } else {
-                if (q == 0.0) {
-                    // test (int64)0 since -0.0 == 0.0
-                    FLDZ();
-                } else if (d == 1.0) {
-                    FLD1();
-                } else {
-                    int d = findMemFor(ins);
-                    FLDQ(d,FP);
-                }
-            }
-        }
-
-        // @todo, if we used xorpd, ldsd, fldz, fld1 above, we don't need mem here
-        // [that's only true if we also rematerialize them in asm_restore();
-        //  otherwise we need to store the value in memory so it can be
-        //  restored later on]
-        int d = disp(ins);
-        if (d)
-        {
-            STi(FP,d+4,ins->imm64_1());
-            STi(FP,d,  ins->imm64_0());
-        }
+        // Quads require non-standard handling. There is no load-64-bit-immediate
+        // instruction on i386, so in the general case, we must load it from memory.
+        // This is unlike most other LIR operations which can be computed directly
+        // in a register.  We can special-case 0.0 and various other small ints
+        // (1.0 on x87, any int32_t value on SSE2), but for all other values, we
+        // allocate an 8-byte chunk via dataAlloc and load from there.  Note that
+        // this implies that quads never require spill area, since they will always
+        // be rematerialized from const data (or inline instructions in the special cases).
+
+        if (rmask(r) & XmmRegs) {
+            if (q == 0) {
+                // test (int64)0 since -0.0 == 0.0
+                SSE_XORPDr(r, r);
+            } else if (d && d == (int)d && canClobberCCs) {
+                // can fit in 32bits? then use cvt which is faster
+                Register tr = registerAllocTmp(GpRegs);
+                SSE_CVTSI2SD(r, tr);
+                SSE_XORPDr(r, r);   // zero r to ensure no dependency stalls
+                asm_int(tr, (int)d, canClobberCCs);
+            } else {
+                const uint64_t* p = findQuadConstant(q);
+                LDSDm(r, (const double*)p);
+            }
+        } else {
+            NanoAssert(r == FST0);
+            if (q == 0) {
+                // test (int64)0 since -0.0 == 0.0
+                FLDZ();
+            } else if (d == 1.0) {
+                FLD1();
+            } else {
+                const uint64_t* p = findQuadConstant(q);
+                FLDQdm((const double*)p);
+            }
+        }
+    }
+
+    void Assembler::asm_quad(LInsp ins)
+    {
+        if (ins->isInReg()) {
+            Register rr = ins->getReg();
+            NanoAssert(rmask(rr) & FpRegs);
+            asm_quad(rr, ins->imm64(), ins->imm64f(), /*canClobberCCs*/true);
+        }

         freeResourcesOf(ins);
@@ -1424,23 +1395,34 @@ namespace nanojit

         if (!config.sse2)
         {
-            Register rr = prepResultReg(ins, GpRegs);
-            int d = findMemFor(q);
-            LD(rr, d, FP);
+            Register rr = deprecated_prepResultReg(ins, GpRegs);
+            if (q->isconstq())
+            {
+                // This should only be possible if ExprFilter isn't in use,
+                // as it will fold qlo(qconst()) properly... still, if it's
+                // disabled, we need this for proper behavior
+                LDi(rr, q->imm64_0());
+            }
+            else
+            {
+                int d = findMemFor(q);
+                LD(rr, d, FP);
+            }
         }
         else
         {
-            Register rr = ins->getReg();
-            if (!isKnownReg(rr)) {
+            if (ins->isInReg()) {
+                Register rr = ins->getReg();
+                deprecated_freeRsrcOf(ins, false);
+                Register qr = findRegFor(q, XmmRegs);
+                SSE_MOVD(rr, qr);
+            } else {
                 // store quad in spill loc
-                int d = disp(ins);
-                freeRsrcOf(ins, false);
+                NanoAssert(ins->isInAr());
+                int d = arDisp(ins);
+                deprecated_freeRsrcOf(ins, false);
                 Register qr = findRegFor(q, XmmRegs);
                 SSE_MOVDm(d, FP, qr);
-            } else {
-                freeRsrcOf(ins, false);
-                Register qr = findRegFor(q, XmmRegs);
-                SSE_MOVD(rr,qr);
             }
         }
     }
@@ -1472,11 +1454,11 @@ namespace nanojit

         // If 'lhs' isn't in a register, it can be clobbered by 'ins'.
         Register ra;
-        if (lhs->isUnusedOrHasUnknownReg()) {
+        if (!lhs->isInReg()) {
             ra = rr;
         } else if (!(rmask(lhs->getReg()) & XmmRegs)) {
             // We need to evict lhs from x87Regs, which then puts us in
-            // the same situation as the isUnusedOrHasUnknownReg() case.
+            // the same situation as the !isInReg() case.
             evict(lhs);
             ra = rr;
         } else {

@@ -1489,7 +1471,7 @@ namespace nanojit
             SSE_MOVSD(rr, ra);

         freeResourcesOf(ins);
-        if (lhs->isUnusedOrHasUnknownReg()) {
+        if (!lhs->isInReg()) {
             NanoAssert(ra == rr);
             findSpecificRegForUnallocated(lhs, ra);
         }

@@ -1498,12 +1480,12 @@ namespace nanojit
         verbose_only( Register rr = ) prepareResultReg(ins, x87Regs);
         NanoAssert(FST0 == rr);

-        NanoAssert(lhs->isUnusedOrHasUnknownReg() || FST0 == lhs->getReg());
+        NanoAssert(!lhs->isInReg() || FST0 == lhs->getReg());

         FCHS();

         freeResourcesOf(ins);
-        if (lhs->isUnusedOrHasUnknownReg())
+        if (!lhs->isInReg())
             findSpecificRegForUnallocated(lhs, FST0);
     }
 }

@@ -1516,14 +1498,21 @@ namespace nanojit
         if (sz == ARGSIZE_Q)
         {
             // ref arg - use lea
-            if (isKnownReg(r))
-            {
+            if (r != UnspecifiedReg) {
                 NanoAssert(rmask(r) & FpRegs);
-
                 // arg in specific reg
-                int da = findMemFor(ins);
-
-                LEA(r, da, FP);
+                if (ins->isconstq())
+                {
+                    const uint64_t* p = findQuadConstant(ins->imm64());
+                    LDi(r, uint32_t(p));
+                }
+                else
+                {
+                    int da = findMemFor(ins);
+                    LEA(r, da, FP);
+                }
             }
             else
             {

@@ -1532,22 +1521,20 @@ namespace nanojit
         }
         else if (sz == ARGSIZE_I || sz == ARGSIZE_U)
         {
-            if (isKnownReg(r)) {
+            if (r != UnspecifiedReg) {
                 if (ins->isconst()) {
                     // Rematerialize the constant.
                     asm_int(r, ins->imm32(), /*canClobberCCs*/true);
-                } else if (ins->isUsed()) {
-                    if (!ins->hasKnownReg()) {
-                        int d = disp(ins);
-                        NanoAssert(d != 0);
-                        if (ins->isop(LIR_alloc)) {
-                            LEA(r, d, FP);
-                        } else {
-                            LD(r, d, FP);
-                        }
-                    } else {
-                        if (r != ins->getReg())
-                            MR(r, ins->getReg());
+                } else if (ins->isInReg()) {
+                    if (r != ins->getReg())
+                        MR(r, ins->getReg());
+                } else if (ins->isInAr()) {
+                    int d = arDisp(ins);
+                    NanoAssert(d != 0);
+                    if (ins->isop(LIR_alloc)) {
+                        LEA(r, d, FP);
+                    } else {
+                        LD(r, d, FP);
                     }

                 } else {

@@ -1582,13 +1569,14 @@ namespace nanojit
             Register ra = findRegFor(ins, GpRegs);
             PUSHr(ra);
         }
-        else if (!ins->hasKnownReg())
+        else if (ins->isInReg())
         {
-            PUSHm(disp(ins), FP);
+            PUSHr(ins->getReg());
         }
         else
         {
-            PUSHr(ins->getReg());
+            NanoAssert(ins->isInAr());
+            PUSHm(arDisp(ins), FP);
         }
     }

@@ -1602,7 +1590,7 @@ namespace nanojit
         }
         else {
             Register ra;
-            if (ins->isUnusedOrHasUnknownReg() || ins->isop(LIR_alloc))
+            if (!ins->isInReg() || ins->isop(LIR_alloc))
                 ra = findRegFor(ins, GpRegs & (~SavedRegs));
             else
                 ra = ins->getReg();

@@ -1614,7 +1602,7 @@ namespace nanojit

     void Assembler::asm_farg(LInsp ins, int32_t& stkd)
     {
-        NanoAssert(ins->isQuad());
+        NanoAssert(ins->isF64());
         Register r = findRegFor(ins, FpRegs);
         if (rmask(r) & XmmRegs) {
             SSE_STQ(stkd, SP, r);
@@ -1650,17 +1638,17 @@ namespace nanojit
         LIns *rhs = ins->oprnd2();

         RegisterMask allow = XmmRegs;
-        Register rb = UnknownReg;
+        Register rb = UnspecifiedReg;
         if (lhs != rhs) {
             rb = findRegFor(rhs,allow);
             allow &= ~rmask(rb);
         }

-        Register rr = prepResultReg(ins, allow);
+        Register rr = deprecated_prepResultReg(ins, allow);
         Register ra;

         // if this is last use of lhs in reg, we can re-use result reg
-        if (lhs->isUnusedOrHasUnknownReg()) {
+        if (!lhs->isInReg()) {
             ra = findSpecificRegForUnallocated(lhs, rr);
         } else if ((rmask(lhs->getReg()) & XmmRegs) == 0) {
             // We need this case on AMD64, because it's possible that

@@ -1693,28 +1681,53 @@ namespace nanojit
         // if you only have one fpu reg.  use divr/subr.
         LIns* rhs = ins->oprnd1();
         LIns* lhs = ins->oprnd2();
-        Register rr = prepResultReg(ins, rmask(FST0));
+        Register rr = deprecated_prepResultReg(ins, rmask(FST0));

-        // make sure rhs is in memory
-        int db = findMemFor(rhs);
+        if (rhs->isconstq())
+        {
+            const uint64_t* p = findQuadConstant(rhs->imm64());

-        // lhs into reg, prefer same reg as result
+            // lhs into reg, prefer same reg as result

-        // last use of lhs in reg, can reuse rr
-        // else, lhs already has a different reg assigned
-        if (lhs->isUnusedOrHasUnknownReg())
-            findSpecificRegForUnallocated(lhs, rr);
+            // last use of lhs in reg, can reuse rr
+            // else, lhs already has a different reg assigned
+            if (!lhs->isInReg())
+                findSpecificRegForUnallocated(lhs, rr);

-        NanoAssert(lhs->getReg()==FST0);
-        // assume that the lhs is in ST(0) and rhs is on stack
-        if (op == LIR_fadd)
-            { FADD(db, FP); }
-        else if (op == LIR_fsub)
-            { FSUBR(db, FP); }
-        else if (op == LIR_fmul)
-            { FMUL(db, FP); }
-        else if (op == LIR_fdiv)
-            { FDIVR(db, FP); }
+            NanoAssert(lhs->getReg()==FST0);
+            // assume that the lhs is in ST(0) and rhs is on stack
+            if (op == LIR_fadd)
+                { FADDdm((const double*)p); }
+            else if (op == LIR_fsub)
+                { FSUBRdm((const double*)p); }
+            else if (op == LIR_fmul)
+                { FMULdm((const double*)p); }
+            else if (op == LIR_fdiv)
+                { FDIVRdm((const double*)p); }
+        }
+        else
+        {
+            // make sure rhs is in memory
+            int db = findMemFor(rhs);
+
+            // lhs into reg, prefer same reg as result
+
+            // last use of lhs in reg, can reuse rr
+            // else, lhs already has a different reg assigned
+            if (!lhs->isInReg())
+                findSpecificRegForUnallocated(lhs, rr);
+
+            NanoAssert(lhs->getReg()==FST0);
+            // assume that the lhs is in ST(0) and rhs is on stack
+            if (op == LIR_fadd)
+                { FADD(db, FP); }
+            else if (op == LIR_fsub)
+                { FSUBR(db, FP); }
+            else if (op == LIR_fmul)
+                { FMUL(db, FP); }
+            else if (op == LIR_fdiv)
+                { FDIVR(db, FP); }
+        }
     }
@@ -1767,8 +1780,8 @@ namespace nanojit
         SSE_CVTSI2SD(rr, rt);
         SSE_XORPDr(rr,rr);  // zero rr to ensure no dependency stalls

-        Register ra;
-        if (lhs->isUsed() && (ra = lhs->getReg(), isKnownReg(ra)) && (rmask(ra) & GpRegs)) {
+        if (lhs->isInRegMask(GpRegs)) {
+            Register ra = lhs->getReg();
             LEA(rt, 0x80000000, ra);

         } else {

@@ -1790,6 +1803,26 @@ namespace nanojit
         freeResourcesOf(ins);
     }

+    void Assembler::asm_f2i(LInsp ins)
+    {
+        LIns *lhs = ins->oprnd1();
+
+        if (config.sse2) {
+            Register rr = prepareResultReg(ins, GpRegs);
+            Register ra = findRegFor(lhs, XmmRegs);
+            SSE_CVTSD2SI(rr, ra);
+        } else {
+            int pop = !lhs->isInReg();
+            findSpecificRegFor(lhs, FST0);
+            if (ins->isInReg())
+                evict(ins);
+            int d = findMemFor(ins);
+            FIST((pop?1:0), d, FP);
+        }
+
+        freeResourcesOf(ins);
+    }
+
     void Assembler::asm_nongp_copy(Register rd, Register rs)
     {
         if ((rmask(rd) & XmmRegs) && (rmask(rs) & XmmRegs)) {
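
A detail worth noting about the new asm_f2i: both paths (cvtsd2si under SSE2, fist on x87) convert using the processor's prevailing rounding mode, round-to-nearest by default, rather than truncating the way a C cast does. In portable C++ the difference looks like:

    #include <cmath>
    #include <cstdio>

    int main() {
        double d = 2.7;
        // (int)d truncates; std::lrint honors the current rounding mode.
        printf("%d %ld\n", (int)d, std::lrint(d));  // 2 vs 3
    }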
||||
void Assembler::asm_nongp_copy(Register rd, Register rs)
|
||||
{
|
||||
if ((rmask(rd) & XmmRegs) && (rmask(rs) & XmmRegs)) {
|
||||
|
@ -1855,7 +1888,7 @@ namespace nanojit
|
|||
NanoAssert(condop >= LIR_feq && condop <= LIR_fge);
|
||||
LIns* lhs = cond->oprnd1();
|
||||
LIns* rhs = cond->oprnd2();
|
||||
NanoAssert(lhs->isQuad() && rhs->isQuad());
|
||||
NanoAssert(lhs->isF64() && rhs->isF64());
|
||||
|
||||
if (config.sse2) {
|
||||
// First, we convert (a < b) into (b > a), and (a <= b) into (b >= a).
|
||||
|
@ -1893,7 +1926,7 @@ namespace nanojit
|
|||
|
||||
evictIfActive(EAX);
|
||||
Register ra, rb;
|
||||
findRegFor2(XmmRegs, lhs, ra, rhs, rb);
|
||||
findRegFor2(XmmRegs, lhs, ra, XmmRegs, rhs, rb);
|
||||
|
||||
TEST_AH(mask);
|
||||
LAHF();
|
||||
|
@ -1917,7 +1950,7 @@ namespace nanojit
|
|||
// LESS_THAN 001 SETAE/JAE fails
|
||||
|
||||
Register ra, rb;
|
||||
findRegFor2(XmmRegs, lhs, ra, rhs, rb);
|
||||
findRegFor2(XmmRegs, lhs, ra, XmmRegs, rhs, rb);
|
||||
SSE_UCOMISD(ra, rb);
|
||||
}
|
||||
|
||||
|
@ -1981,7 +2014,7 @@ namespace nanojit
|
|||
}
|
||||
|
||||
evictIfActive(EAX);
|
||||
int pop = lhs->isUnusedOrHasUnknownReg();
|
||||
int pop = !lhs->isInReg();
|
||||
findSpecificRegFor(lhs, FST0);
|
||||
|
||||
if (lhs == rhs) {
|
||||
|
@ -1994,10 +2027,18 @@ namespace nanojit
|
|||
FCOMP();
|
||||
FLDr(FST0); // DUP
|
||||
} else {
|
||||
int d = findMemFor(rhs);
|
||||
TEST_AH(mask);
|
||||
FNSTSW_AX(); // requires EAX to be free
|
||||
FCOM((pop?1:0), d, FP);
|
||||
if (rhs->isconstq())
|
||||
{
|
||||
const uint64_t* p = findQuadConstant(rhs->imm64());
|
||||
FCOMdm((pop?1:0), (const double*)p);
|
||||
}
|
||||
else
|
||||
{
|
||||
int d = findMemFor(rhs);
|
||||
FCOM((pop?1:0), d, FP);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
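
Several i386 hunks above (asm_arg, the x87 binary ops, fcmp) now call findQuadConstant(...) and the new ...dm instruction forms instead of spilling the double through findMemFor: 64-bit float constants are pooled once and addressed directly. The pool implementation itself is not part of this diff; a toy stand-in just to convey the interface it implies:

    #include <cstdint>
    #include <map>

    // Return a stable address holding the 8-byte pattern q, reusing an
    // existing entry when possible.  Entries deliberately live forever,
    // as the real dataAlloc'd constants do.
    const uint64_t* findQuadConstant(uint64_t q) {
        static std::map<uint64_t, const uint64_t*> pool;
        auto it = pool.find(q);
        if (it == pool.end())
            it = pool.emplace(q, new uint64_t(q)).first;
        return it->second;
    }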
@@ -2019,8 +2060,6 @@ namespace nanojit
         NanoAssert(!_inExit);
         if (!_nIns)
             codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
-        if (!_nExitIns)
-            codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
     }

     // enough room for n bytes

@@ -2049,17 +2088,23 @@ namespace nanojit
         if (ins->isop(LIR_ret)) {
             findSpecificRegFor(val, retRegs[0]);
         } else {
             NanoAssert(ins->isop(LIR_fret));
             findSpecificRegFor(val, FST0);
             fpu_pop();
         }
     }

+    void Assembler::asm_q2i(LIns *) {
+        NanoAssert(0);  // q2i shouldn't occur on 32-bit platforms
+    }
+
     void Assembler::asm_promote(LIns *) {
         // i2q or u2q
-        TODO(asm_promote);
+        NanoAssert(0);  // i2q and u2q shouldn't occur on 32-bit platforms
     }

     void Assembler::swapCodeChunks() {
+        if (!_nExitIns)
+            codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
         SWAP(NIns*, _nIns, _nExitIns);
         SWAP(NIns*, codeStart, exitStart);
         SWAP(NIns*, codeEnd, exitEnd);
@@ -97,6 +97,8 @@ namespace nanojit
 #define NJ_MAX_PARAMETERS                  1
 #define NJ_JTBL_SUPPORTED                  1
 #define NJ_EXPANDED_LOADSTORE_SUPPORTED    1
+#define NJ_USES_QUAD_CONSTANTS             1
+#define NJ_F2I_SUPPORTED                   1
 
 // Preserve a 16-byte stack alignment, to support the use of
 // SSE instructions like MOVDQA (if not by Tamarin itself,
@@ -141,7 +143,8 @@ namespace nanojit
 
     FirstReg = 0,
     LastReg = 16,
-    UnknownReg = 17
+    deprecated_UnknownReg = 17,     // XXX: remove eventually, see bug 538924
+    UnspecifiedReg = 17
 }
 Register;
 
@@ -184,7 +187,8 @@ namespace nanojit
     NIns* asm_fbranch(bool, LIns*, NIns*);\
     void asm_cmp(LIns *cond); \
     void asm_div_mod(LIns *cond); \
-    void asm_load(int d, Register r);
+    void asm_load(int d, Register r); \
+    void asm_quad(Register r, uint64_t q, double d, bool canClobberCCs);
 
 #define IMM8(i) \
     _nIns -= 1; \
@@ -220,8 +224,8 @@ namespace nanojit
 
 // underrunProtect(6) is necessary for worst-case
 #define MODRMm(r,d,b) \
-        NanoAssert(unsigned(r)<8 && ((b)==UnknownReg || unsigned(b)<8)); \
-        if ((b) == UnknownReg) {\
+        NanoAssert(unsigned(r)<8 && ((b)==UnspecifiedReg || unsigned(b)<8)); \
+        if ((b) == UnspecifiedReg) {\
             IMM32(d);\
             *(--_nIns) = (uint8_t) (0<<6 | (r)<<3 | 5);\
         } else if ((b) == ESP) { \
@@ -519,17 +523,17 @@ namespace nanojit
     count_st();\
     NanoAssert(((unsigned)reg)<4); \
     ALUm(0x88,reg,disp,base); \
-    asm_output("mov8 %d(%s),%s",disp,base==UnknownReg?"0":gpn(base),gpn(reg)); } while(0)
+    asm_output("mov8 %d(%s),%s",disp,base==UnspecifiedReg?"0":gpn(base),gpn(reg)); } while(0)
 
 #define ST16(base,disp,reg) do { \
     count_st();\
     ALUm16(0x89,reg,disp,base); \
-    asm_output("mov16 %d(%s),%s",disp,base==UnknownReg?"0":gpn(base),gpn(reg)); } while(0)
+    asm_output("mov16 %d(%s),%s",disp,base==UnspecifiedReg?"0":gpn(base),gpn(reg)); } while(0)
 
 #define ST(base,disp,reg) do { \
     count_st();\
     ALUm(0x89,reg,disp,base); \
-    asm_output("mov %d(%s),%s",disp,base==UnknownReg?"0":gpn(base),gpn(reg)); } while(0)
+    asm_output("mov %d(%s),%s",disp,base==UnspecifiedReg?"0":gpn(base),gpn(reg)); } while(0)
 
 #define ST8i(base,disp,imm) do { \
     count_st();\
@@ -722,7 +726,7 @@ namespace nanojit
     *(--_nIns) = 0x10;\
     *(--_nIns) = 0x0f;\
     *(--_nIns) = 0xf2;\
-    asm_output("movsd %s,(#%p) // =%f",gpn(r),(void*)daddr,*daddr); \
+    asm_output("movsd %s,(%p) // =%f",gpn(r),(void*)daddr,*daddr); \
     } while(0)
 
 #define STSD(d,b,r)do { \
@@ -761,6 +765,12 @@ namespace nanojit
     asm_output("cvtsi2sd %s,%s",gpn(xr),gpn(gr)); \
     } while(0)
 
+#define SSE_CVTSD2SI(gr,xr) do{ \
+    count_fpu();\
+    SSE(0xf20f2d, (gr)&7, (xr)&7); \
+    asm_output("cvtsd2si %s,%s",gpn(gr),gpn(xr)); \
+    } while(0)
+
 #define SSE_CVTSD2SS(xr,gr) do{ \
     count_fpu();\
     SSE(0xf20f5a, (xr)&7, (gr)&7); \
@@ -892,6 +902,11 @@ namespace nanojit
     MODRMm((uint8_t)(o), d, b); \
     *(--_nIns) = (uint8_t)((o)>>8)
 
+#define FPUdm(o, m) \
+    underrunProtect(6); \
+    MODRMdm((uint8_t)(o), m); \
+    *(--_nIns) = (uint8_t)((o)>>8)
+
 #define TEST_AH(i) do { \
     count_alu();\
     underrunProtect(3); \
@@ -914,26 +929,39 @@ namespace nanojit
 #define FCHS()   do { count_fpu(); FPUc(0xd9e0); asm_output("fchs"); } while(0)
 #define FLD1()   do { count_fpu(); FPUc(0xd9e8); asm_output("fld1"); fpu_push(); } while(0)
 #define FLDZ()   do { count_fpu(); FPUc(0xd9ee); asm_output("fldz"); fpu_push(); } while(0)
-#define FFREE(r) do { count_fpu(); FPU(0xddc0, r); asm_output("ffree %s",fpn(r)); } while(0)
+#define FFREE(r) do { count_fpu(); FPU(0xddc0, r); asm_output("ffree %s",gpn(r)); } while(0)
 #define FST32(p,d,b) do { count_stq(); FPUm(0xd902|(p), d, b); asm_output("fst%s32 %d(%s)",((p)?"p":""),d,gpn(b)); if (p) fpu_pop(); } while(0)
 #define FSTQ(p,d,b) do { count_stq(); FPUm(0xdd02|(p), d, b); asm_output("fst%sq %d(%s)",((p)?"p":""),d,gpn(b)); if (p) fpu_pop(); } while(0)
 #define FSTPQ(d,b) FSTQ(1,d,b)
 #define FCOM(p,d,b) do { count_fpuld(); FPUm(0xdc02|(p), d, b); asm_output("fcom%s %d(%s)",((p)?"p":""),d,gpn(b)); if (p) fpu_pop(); } while(0)
+#define FCOMdm(p,m) do { const double* const dm = m; \
+                         count_fpuld(); FPUdm(0xdc02|(p), dm); asm_output("fcom%s (%p)",((p)?"p":""),(void*)dm); if (p) fpu_pop(); } while(0)
 #define FLD32(d,b) do { count_ldq(); FPUm(0xd900, d, b); asm_output("fld32 %d(%s)",d,gpn(b)); fpu_push();} while(0)
 #define FLDQ(d,b) do { count_ldq(); FPUm(0xdd00, d, b); asm_output("fldq %d(%s)",d,gpn(b)); fpu_push();} while(0)
+#define FLDQdm(m) do { const double* const dm = m; \
+                       count_ldq(); FPUdm(0xdd00, dm); asm_output("fldq (%p)",(void*)dm); fpu_push();} while(0)
 #define FILDQ(d,b) do { count_fpuld(); FPUm(0xdf05, d, b); asm_output("fildq %d(%s)",d,gpn(b)); fpu_push(); } while(0)
 #define FILD(d,b) do { count_fpuld(); FPUm(0xdb00, d, b); asm_output("fild %d(%s)",d,gpn(b)); fpu_push(); } while(0)
+#define FIST(p,d,b) do { count_fpu(); FPUm(0xdb02|(p), d, b); asm_output("fist%s %d(%s)",((p)?"p":""),d,gpn(b)); if(p) fpu_pop(); } while(0)
 #define FADD(d,b) do { count_fpu(); FPUm(0xdc00, d, b); asm_output("fadd %d(%s)",d,gpn(b)); } while(0)
+#define FADDdm(m) do { const double* const dm = m; \
+                       count_ldq(); FPUdm(0xdc00, dm); asm_output("fadd (%p)",(void*)dm); } while(0)
 #define FSUB(d,b) do { count_fpu(); FPUm(0xdc04, d, b); asm_output("fsub %d(%s)",d,gpn(b)); } while(0)
 #define FSUBR(d,b) do { count_fpu(); FPUm(0xdc05, d, b); asm_output("fsubr %d(%s)",d,gpn(b)); } while(0)
+#define FSUBRdm(m) do { const double* const dm = m; \
+                        count_ldq(); FPUdm(0xdc05, dm); asm_output("fsubr (%p)",(void*)dm); } while(0)
 #define FMUL(d,b) do { count_fpu(); FPUm(0xdc01, d, b); asm_output("fmul %d(%s)",d,gpn(b)); } while(0)
+#define FMULdm(m) do { const double* const dm = m; \
+                       count_ldq(); FPUdm(0xdc01, dm); asm_output("fmul (%p)",(void*)dm); } while(0)
 #define FDIV(d,b) do { count_fpu(); FPUm(0xdc06, d, b); asm_output("fdiv %d(%s)",d,gpn(b)); } while(0)
 #define FDIVR(d,b) do { count_fpu(); FPUm(0xdc07, d, b); asm_output("fdivr %d(%s)",d,gpn(b)); } while(0)
+#define FDIVRdm(m) do { const double* const dm = m; \
+                        count_ldq(); FPUdm(0xdc07, dm); asm_output("fdivr (%p)",(void*)dm); } while(0)
 #define FINCSTP() do { count_fpu(); FPUc(0xd9f7); asm_output("fincstp"); } while(0)
-#define FSTP(r) do { count_fpu(); FPU(0xddd8, r&7); asm_output("fstp %s",fpn(r)); fpu_pop();} while(0)
+#define FSTP(r) do { count_fpu(); FPU(0xddd8, r&7); asm_output("fstp %s",gpn(r)); fpu_pop();} while(0)
 #define FCOMP() do { count_fpu(); FPUc(0xD8D9); asm_output("fcomp"); fpu_pop();} while(0)
 #define FCOMPP() do { count_fpu(); FPUc(0xDED9); asm_output("fcompp"); fpu_pop();fpu_pop();} while(0)
-#define FLDr(r) do { count_ldq(); FPU(0xd9c0,r); asm_output("fld %s",fpn(r)); fpu_push(); } while(0)
+#define FLDr(r) do { count_ldq(); FPU(0xd9c0,r); asm_output("fld %s",gpn(r)); fpu_push(); } while(0)
 #define EMMS() do { count_fpu(); FPUc(0x0f77); asm_output("emms"); } while (0)
 
 // standard direct call
@@ -956,6 +984,5 @@ namespace nanojit
     debug_only(if ((c->_argtypes & ARGSIZE_MASK_ANY)==ARGSIZE_F) fpu_push();)\
     } while (0)
 
-
 }
 #endif // __nanojit_Nativei386__
@@ -55,7 +55,7 @@ namespace nanojit
 
     bool RegAlloc::isConsistent(Register r, LIns* i) const
     {
-        NanoAssert(r != UnknownReg);
+        NanoAssert(r != deprecated_UnknownReg);
         return (isFree(r)  && !getActive(r)     && !i) ||
                (!isFree(r) &&  getActive(r)== i && i );
     }
@@ -44,11 +44,6 @@
 
 namespace nanojit
 {
-    inline RegisterMask rmask(Register r)
-    {
-        return RegisterMask(1) << r;
-    }
-
     class RegAlloc
     {
     public:
@@ -64,7 +59,7 @@ namespace nanojit
 
         bool isFree(Register r) const
         {
-            NanoAssert(r != UnknownReg);
+            NanoAssert(r != deprecated_UnknownReg);
            return (free & rmask(r)) != 0;
        }
 
@@ -84,7 +79,7 @@ namespace nanojit
        {
            //  Count++;
            NanoAssert(v);
-           NanoAssert(r != UnknownReg);
+           NanoAssert(r != deprecated_UnknownReg);
            NanoAssert(active[r] == NULL);
            active[r] = v;
            useActive(r);
@@ -92,7 +87,7 @@ namespace nanojit
 
        void useActive(Register r)
        {
-           NanoAssert(r != UnknownReg);
+           NanoAssert(r != deprecated_UnknownReg);
            NanoAssert(active[r] != NULL);
            usepri[r] = priority++;
        }
@@ -100,7 +95,7 @@ namespace nanojit
        void removeActive(Register r)
        {
            //registerReleaseCount++;
-           NanoAssert(r != UnknownReg);
+           NanoAssert(r != deprecated_UnknownReg);
            NanoAssert(active[r] != NULL);
 
            // remove the given register from the active list
@@ -109,19 +104,19 @@ namespace nanojit
 
        void retire(Register r)
        {
-           NanoAssert(r != UnknownReg);
+           NanoAssert(r != deprecated_UnknownReg);
            NanoAssert(active[r] != NULL);
            active[r] = NULL;
            free |= rmask(r);
        }
 
        int32_t getPriority(Register r) {
-           NanoAssert(r != UnknownReg && active[r]);
+           NanoAssert(r != deprecated_UnknownReg && active[r]);
            return usepri[r];
        }
 
        LIns* getActive(Register r) const {
-           NanoAssert(r != UnknownReg);
+           NanoAssert(r != deprecated_UnknownReg);
            return active[r];
        }
 
@@ -171,7 +166,7 @@ namespace nanojit
        // * And vice versa: an LIns with an in-use reservation that
        //   names R must be named by 'active[R]'.
        //
-       // * If an LIns's reservation names 'UnknownReg' then LIns
+       // * If an LIns's reservation names 'deprecated_UnknownReg' then LIns
        //   should not be in 'active'.
        //
        LIns* active[LastReg + 1];  // active[r] = LIns that defines r
@@ -89,11 +89,13 @@ void*
 nanojit::CodeAlloc::allocCodeChunk(size_t nbytes) {
     void * buffer;
     posix_memalign(&buffer, 4096, nbytes);
+    VMPI_setPageProtection(buffer, nbytes, true /* exec */, true /* write */);
     return buffer;
 }
 
 void
 nanojit::CodeAlloc::freeCodeChunk(void *p, size_t nbytes) {
+    VMPI_setPageProtection(p, nbytes, false /* exec */, true /* write */);
     ::free(p);
 }
 
@@ -155,13 +157,28 @@ nanojit::CodeAlloc::freeCodeChunk(void *p, size_t nbytes) {
 
 void*
 nanojit::CodeAlloc::allocCodeChunk(size_t nbytes) {
-    return valloc(nbytes);
+    void* mem = valloc(nbytes);
+    VMPI_setPageProtection(mem, nbytes, true /* exec */, true /* write */);
+    return mem;
 }
 
 void
 nanojit::CodeAlloc::freeCodeChunk(void *p, size_t nbytes) {
+    VMPI_setPageProtection(p, nbytes, false /* exec */, true /* write */);
     ::free(p);
 }
 
 #endif // WIN32
 
+// All of the allocCodeChunk/freeCodeChunk implementations above allocate
+// code memory as RWX and then free it, so the explicit page protection api's
+// below are no-ops.
+
+void
+nanojit::CodeAlloc::markCodeChunkWrite(void*, size_t)
+{}
+
+void
+nanojit::CodeAlloc::markCodeChunkExec(void*, size_t)
+{}
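The hunks above switch these POSIX allocators to RWX code pages. A minimal sketch of that pattern, assuming VMPI_setPageProtection behaves like POSIX mprotect (the VMPI sources are not part of this diff, and the *Sketch names are hypothetical, not identifiers from the tree):

    #include <stdlib.h>
    #include <sys/mman.h>

    void* allocCodeChunkSketch(size_t nbytes) {
        void* buffer = nullptr;
        // Page-aligned so the protection change covers whole pages.
        if (posix_memalign(&buffer, 4096, nbytes) != 0)
            return nullptr;
        // Readable, writable and executable: the JIT emits into the chunk
        // and runs it in place, which is why markCodeChunkWrite/Exec above
        // can be no-ops.
        mprotect(buffer, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC);
        return buffer;
    }

    void freeCodeChunkSketch(void* p, size_t nbytes) {
        // Drop execute permission before returning the pages to malloc.
        mprotect(p, nbytes, PROT_READ | PROT_WRITE);
        free(p);
    }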
@@ -38,15 +38,6 @@
 
 #include "VMPI.h"
 
-#ifdef AVMPLUS_ARM
-#define ARM_ARCH   AvmCore::config.arch
-#define ARM_VFP    AvmCore::config.vfp
-#define ARM_THUMB2 AvmCore::config.thumb2
-#else
-#define ARM_VFP    1
-#define ARM_THUMB2 1
-#endif
-
 #if !defined(AVMPLUS_LITTLE_ENDIAN) && !defined(AVMPLUS_BIG_ENDIAN)
 #ifdef IS_BIG_ENDIAN
 #define AVMPLUS_BIG_ENDIAN
@@ -111,7 +102,7 @@ __declspec(naked) static inline __int64 rdtsc()
         ret;
     }
 }
-#elif defined(__SUNPRO_C) || defined (__SUNPRO_CC)
+#elif defined(SOLARIS)
 static inline unsigned long long rdtsc(void)
 {
     unsigned long long int x;
@@ -129,21 +120,12 @@ static __inline__ unsigned long long rdtsc(void)
 
 #elif defined(__x86_64__)
 
-#if defined(__SUNPRO_C) || defined (__SUNPRO_CC)
-static inline uint64_t rdtsc(void)
-{
-    unsigned hi, lo;
-    asm volatile ("rdtsc" : "=a"(lo), "=d"(hi));
-    return ( (uint64_t)lo)|( ((uint64_t)hi)<<32 );
-}
-#else
 static __inline__ uint64_t rdtsc(void)
 {
     unsigned hi, lo;
     __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
     return ( (uint64_t)lo)|( ((uint64_t)hi)<<32 );
 }
-#endif
 
 #elif defined(_MSC_VER) && defined(_M_AMD64)
 
@@ -232,32 +214,32 @@ namespace avmplus {
 #if defined (AVMPLUS_ARM)
         // Whether or not to generate VFP instructions.
 # if defined (NJ_FORCE_SOFTFLOAT)
-        static const bool vfp = false;
+        static const bool arm_vfp = false;
 # else
-        bool vfp;
+        bool arm_vfp;
 # endif
 
         // The ARM architecture version.
 # if defined (NJ_FORCE_ARM_ARCH_VERSION)
-        static const unsigned int arch = NJ_FORCE_ARM_ARCH_VERSION;
+        static const unsigned int arm_arch = NJ_FORCE_ARM_ARCH_VERSION;
 # else
-        unsigned int arch;
+        unsigned int arm_arch;
 # endif
 
         // Support for Thumb, even if it isn't used by nanojit. This is used to
         // determine whether or not to generate interworking branches.
 # if defined (NJ_FORCE_NO_ARM_THUMB)
-        static const bool thumb = false;
+        static const bool arm_thumb = false;
 # else
-        bool thumb;
+        bool arm_thumb;
 # endif
 
         // Support for Thumb2, even if it isn't used by nanojit. This is used to
         // determine whether or not to use some of the ARMv6T2 instructions.
 # if defined (NJ_FORCE_NO_ARM_THUMB2)
-        static const bool thumb2 = false;
+        static const bool arm_thumb2 = false;
 # else
-        bool thumb2;
+        bool arm_thumb2;
 # endif
 
 #endif
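As a usage note, every rdtsc() variant above returns the full 64-bit timestamp counter, so elapsed cycles can be measured the same way on all platforms. A minimal sketch, where do_work is a hypothetical placeholder for the code being timed:

    uint64_t start = rdtsc();
    do_work();                          // placeholder
    uint64_t cycles = rdtsc() - start;  // elapsed timestamp-counter ticks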
@@ -70,21 +70,6 @@
 #define UNLESS_64BIT(...) __VA_ARGS__
 #endif
 
-// set ARM_VFP constant if not already set
-#if !defined(ARM_VFP)
-#ifdef AVMPLUS_ARM
-#if defined(NJ_ARM_VFP)
-#define ARM_VFP 1
-#else
-#define ARM_VFP 0
-#endif
-#else
-// some LIR features should test VFP on ARM,
-// but can be set to "always on" on non-ARM
-#define ARM_VFP 1
-#endif
-#endif
-
 // Embed no-op macros that let Valgrind work with the JIT.
 #ifdef MOZ_VALGRIND
 # define JS_VALGRIND
@@ -619,6 +619,13 @@ MapContextOptionNameToFlag(JSContext* cx, const char* name)
 
 extern JSClass global_class;
 
+#if defined(JS_TRACER) && defined(DEBUG)
+namespace js {
+    extern struct JSClass jitstats_class;
+    void InitJITStatsClass(JSContext *cx, JSObject *glob);
+}
+#endif
+
 static int
 ProcessArgs(JSContext *cx, JSObject *obj, char **argv, int argc)
 {
@@ -728,11 +735,9 @@ ProcessArgs(JSContext *cx, JSObject *obj, char **argv, int argc)
             enableJit = !enableJit;
             JS_ToggleOptions(cx, JSOPTION_JIT);
 #if defined(JS_TRACER) && defined(DEBUG)
-            extern struct JSClass jitstats_class;
-            extern void js_InitJITStatsClass(JSContext *cx, JSObject *glob);
-            js_InitJITStatsClass(cx, JS_GetGlobalObject(cx));
+            js::InitJITStatsClass(cx, JS_GetGlobalObject(cx));
             JS_DefineObject(cx, JS_GetGlobalObject(cx), "tracemonkey",
-                            &jitstats_class, NULL, 0);
+                            &js::jitstats_class, NULL, 0);
 #endif
             break;
@@ -11,7 +11,7 @@ script regress-316885-01.js
 script regress-316885-02.js
 script regress-316885-03.js
 skip-if(!xulRuntime.shell) script regress-319980-01.js # slow
-skip-if(!xulRuntime.shell&&!isDebugBuild) script regress-324278.js # slow
+skip script regress-324278.js # slow, obsoleted by 98409 fix
 script regress-331719.js
 skip script regress-338653.js # slow, killed on x86_64
 script regress-341877-01.js
@@ -51,20 +51,15 @@ printStatus (summary);
 var N = 100*1000;
 
 function build(N) {
-    // We used to exploit the fact that regexp literals were shared between
-    // function invocations, but ES5 fixes this design flaw, so we have to
-    // make a regexp for each new function f, and store it as a property of f.
-    // Thus we build the following chain:
-    //
-    // chainTop: function->regexp->function->regexp....->null
-    //
+    // Exploit the fact that (in ES3), regexp literals are shared between
+    // function invocations. Thus we build the following chain:
+    // chainTop: function->regexp->function->regexp....->null
     // to check how GC would deal with this chain.
 
     var chainTop = null;
     for (var i = 0; i != N; ++i) {
-        var f = Function('some_arg'+i, ' return some_arg'+i+'.re;');
-        var re = /test/;
-        f.re = re;
+        var f = Function('some_arg'+i, ' return /test/;');
+        var re = f();
         re.previous = chainTop;
         chainTop = f;
     }
@@ -73,7 +68,7 @@ function build(N) {
 
 function check(chainTop, N) {
     for (var i = 0; i != N; ++i) {
-        var re = chainTop(chainTop);
+        var re = chainTop();
         chainTop = re.previous;
     }
     if (chainTop !== null)
@@ -58,16 +58,16 @@ function test()
         TestPassCount++;
     } else {
         TestFailCount++;
-    }
 
-    var ex = new Error;
-    print ("=== FAILED ===");
-    print (ex.stack);
-    if (thrown) {
-        print (" threw exception:");
-        print (thrown);
-    }
-    print ("==============");
-}
+        var ex = new Error;
+        print ("=== FAILED ===");
+        print (ex.stack);
+        if (thrown) {
+            print (" threw exception:");
+            print (thrown);
+        }
+        print ("==============");
+    }
+}
 
 function checkThrows(fun, todo) {
@@ -180,7 +180,7 @@ function test()
 
     print ("done");
 
-    checkSuccess(TestFailCount, 0, "typed array test failures");
+    reportCompare(0, TestFailCount, "typed array test failures");
 
     exitFunc ('test');
 }
@@ -0,0 +1,2 @@
+url-prefix ../../jsreftest.html?test=js1_8_5/regress/
+script regress-533876.js
@@ -0,0 +1,20 @@
+/*
+ * Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/licenses/publicdomain/
+ * Contributors: Gary Kwong and Jason Orendorff
+ */
+gTestfile = 'regress-533876';
+
+var x = [0];
+eval();
+
+x.__proto__ = this;  // x has non-dictionary scope
+try {
+    DIE;
+} catch(e) {
+}
+
+delete eval;  // force dictionary scope for global
+gc();
+var f = eval("function () { return /x/; }");
+x.watch('x', f);  // clone property from global to x, including SPROP_IN_DICTIONARY flag
@@ -0,0 +1 @@
+gTestsubsuite='regress';
@@ -55,7 +55,7 @@ The general format in EBNF is:
     flag      ::= "slow" | "allow-oom"
 
     attribute ::= name ":" value
-    name      ::= "TMFLAGS"
+    name      ::= "TMFLAGS" | "error"
     value     ::= <string>
 
 The metaline may appear anywhere in the first line of the file: this allows it
@@ -65,6 +65,10 @@ The meaning of the items:
 
     slow       Test runs slowly. Do not run if the --no-slow option is given.
     allow-oom  If the test runs out of memory, it counts as passing.
+    valgrind   Run test under valgrind.
+
+    error      The test should be considered to pass iff it throws the
+               given JS exception.
     TMFLAGS    Set the environment variable TMFLAGS to the given value.
 
 * END
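For illustration, a test's first line using the grammar above might look like the following; the "|jit-test|" prefix is an assumption about the harness's metaline marker, and the specific flag and attribute values here are made up:

    // |jit-test| slow; error: InternalError; TMFLAGS: full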
@@ -35,3 +35,7 @@ if (HAVE_TM) {
 } else {
     checkStats = function() {};
 }
+
+var appendToActual = function(s) {
+    actual += s + ',';
+}
@@ -0,0 +1,18 @@
+actual = '';
+expected = '5,4,3,2,1,X,5,4,3,2,1,Y,5,4,3,2,1,';
+
+function f() {
+    for (var i = 0; i < 5; ++i) {
+        var args = arguments;
+        appendToActual(args[i]);
+    }
+}
+
+f(5, 4, 3, 2, 1);
+appendToActual("X");
+f(5, 4, 3, 2, 1);
+appendToActual("Y");
+f(5, 4, 3, 2, 1);
+
+
+assertEq(actual, expected)
@@ -0,0 +1,23 @@
+actual = '';
+expected = 'true,';
+
+var isNotEmpty = function (args, i) {
+    var o = args[i];
+    if (!(o && o.length)) {
+        return false;
+    }
+    return true;
+};
+
+var f = function(obj) {
+    for (var i = 0; i < arguments.length; i++) {
+        if (!isNotEmpty(arguments, i))
+            return false;
+    }
+    return true;
+}
+
+appendToActual(f([1], [1], [1], "asdf", [1]));
+
+
+assertEq(actual, expected)
@@ -0,0 +1,23 @@
+actual = '';
+expected = 'true,';
+
+function isNotEmpty(args, i) {
+    var o = args[i];
+    if (!(o && o.length)) {
+        return false;
+    }
+    return true;
+};
+
+function f(obj) {
+    for (var i = 0; i < arguments.length; i++) {
+        if (!isNotEmpty(arguments, i))
+            return false;
+    }
+    return true;
+}
+
+appendToActual(f([1], [1], [1], "asdf", [1]));
+
+
+assertEq(actual, expected)
@@ -0,0 +1,17 @@
+actual = '';
+expected = 'true,';
+
+var isNotEmpty = function (obj) {
+    for (var i = 0; i < arguments.length; i++) {
+        var o = arguments[i];
+        if (!(o && o.length)) {
+            return false;
+        }
+    }
+    return true;
+};
+
+appendToActual(isNotEmpty([1], [1], [1], "asdf", [1]));
+
+
+assertEq(actual, expected)