Robert Sayre 2010-02-05 10:54:18 -05:00
Parents: 29906e8f35 9a213027b0
Commit: 3ba6119c11
30 files changed: 2867 additions and 130 deletions

View file

@@ -1508,6 +1508,8 @@ static JSStdName standard_class_names[] = {
     {js_InitTypedArrayClasses, EAGER_CLASS_ATOM(Int32Array), NULL},
     {js_InitTypedArrayClasses, EAGER_CLASS_ATOM(Uint32Array), NULL},
     {js_InitTypedArrayClasses, EAGER_CLASS_ATOM(Float32Array), NULL},
+    {js_InitTypedArrayClasses, EAGER_CLASS_ATOM(Float64Array), NULL},
+    {js_InitTypedArrayClasses, EAGER_CLASS_ATOM(Uint8ClampedArray), NULL},
     {NULL, 0, NULL, NULL}
 };

View file

@@ -576,4 +576,7 @@ JS_DECLARE_CALLINFO(js_String_p_charCodeAt0_int)
 JS_DECLARE_CALLINFO(js_String_p_charCodeAt_double_int)
 JS_DECLARE_CALLINFO(js_String_p_charCodeAt_int_int)
 
+/* Defined in jstypedarray.cpp. */
+JS_DECLARE_CALLINFO(js_TypedArray_uint8_clamp_double)
+
 #endif /* jsbuiltins_h___ */

View file

@@ -1294,6 +1294,12 @@ js_DropAllEmptyScopeLocks(JSContext *cx, JSScope *scope)
     if (CX_THREAD_IS_RUNNING_GC(cx))
         return;
 
+    /*
+     * The title cannot be owned at this point by another cx on this or
+     * another thread as that would imply a missing JS_LOCK_OBJ call.
+     */
+    JS_ASSERT(!scope->title.ownercx);
+
     LOGIT(&scope->title, '0');
     scope->title.u.count = 0;
     ThinUnlock(&scope->title.lock, CX_THINLOCK_ID(cx));

View file

@@ -3069,8 +3069,20 @@ js_NonEmptyObject(JSContext* cx, JSObject* proto)
 {
     JS_ASSERT(!(js_ObjectClass.flags & JSCLASS_HAS_PRIVATE));
     JSObject *obj = js_NewObjectWithClassProto(cx, &js_ObjectClass, proto, JSVAL_VOID);
-    if (obj && !js_GetMutableScope(cx, obj))
-        obj = NULL;
+    if (!obj)
+        return NULL;
+    JS_LOCK_OBJ(cx, obj);
+    JSScope *scope = js_GetMutableScope(cx, obj);
+    if (!scope) {
+        JS_UNLOCK_OBJ(cx, obj);
+        return NULL;
+    }
+
+    /*
+     * See comments in the JSOP_NEWINIT case of jsops.cpp for why we cannot
+     * assume that cx owns the scope and skip the unlock call.
+     */
+    JS_UNLOCK_SCOPE(cx, scope);
     return obj;
 }

View file

@@ -115,6 +115,8 @@ JS_PROTO(Uint16Array, 33, js_InitTypedArrayClasses)
 JS_PROTO(Int32Array, 34, js_InitTypedArrayClasses)
 JS_PROTO(Uint32Array, 35, js_InitTypedArrayClasses)
 JS_PROTO(Float32Array, 36, js_InitTypedArrayClasses)
+JS_PROTO(Float64Array, 37, js_InitTypedArrayClasses)
+JS_PROTO(Uint8ClampedArray, 38, js_InitTypedArrayClasses)
 
 #undef SCRIPT_INIT
 #undef XML_INIT

View file

@@ -7355,39 +7355,11 @@ CheckForSSE2()
 #if defined(_MSC_VER) && defined(WINCE)
 
 // these come in from jswince.asm
-extern "C" int js_arm_try_thumb_op();
-extern "C" int js_arm_try_armv6t2_op();
 extern "C" int js_arm_try_armv5_op();
 extern "C" int js_arm_try_armv6_op();
 extern "C" int js_arm_try_armv7_op();
 extern "C" int js_arm_try_vfp_op();
 
-static bool
-arm_check_thumb()
-{
-    bool ret = false;
-    __try {
-        js_arm_try_thumb_op();
-        ret = true;
-    } __except(GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
-        ret = false;
-    }
-    return ret;
-}
-
-static bool
-arm_check_thumb2()
-{
-    bool ret = false;
-    __try {
-        js_arm_try_armv6t2_op();
-        ret = true;
-    } __except(GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
-        ret = false;
-    }
-    return ret;
-}
-
 static unsigned int
 arm_check_arch()
 {
@@ -7450,7 +7422,6 @@ enable_debugger_exceptions()
 
 // Assume ARMv4 by default.
 static unsigned int arm_arch = 4;
-static bool arm_has_thumb = false;
 static bool arm_has_vfp = false;
 static bool arm_has_neon = false;
 static bool arm_has_iwmmxt = false;
@@ -7471,7 +7442,6 @@ arm_read_auxv()
             hwcap = strtoul(getenv("ARM_FORCE_HWCAP"), NULL, 0);
         // hardcode these values to avoid depending on specific versions
         // of the hwcap header, e.g. HWCAP_NEON
-        arm_has_thumb = (hwcap & 4) != 0;
         arm_has_vfp = (hwcap & 64) != 0;
         arm_has_iwmmxt = (hwcap & 512) != 0;
         // this flag is only present on kernel 2.6.29
@@ -7512,27 +7482,6 @@ arm_read_auxv()
     arm_tests_initialized = true;
 }
 
-static bool
-arm_check_thumb()
-{
-    if (!arm_tests_initialized)
-        arm_read_auxv();
-
-    return arm_has_thumb;
-}
-
-static bool
-arm_check_thumb2()
-{
-    if (!arm_tests_initialized)
-        arm_read_auxv();
-
-    // ARMv6T2 also supports Thumb2, but Linux doesn't provide an easy way to test for this as
-    // there is no associated bit in auxv. ARMv7 always supports Thumb2, and future architectures
-    // are assumed to be backwards-compatible.
-    return (arm_arch >= 7);
-}
-
 static unsigned int
 arm_check_arch()
 {
@@ -7553,10 +7502,6 @@ arm_check_vfp()
 
 #else
 #warning Not sure how to check for architecture variant on your platform. Assuming ARMv4.
-static bool
-arm_check_thumb() { return false; }
-static bool
-arm_check_thumb2() { return false; }
 static unsigned int
 arm_check_arch() { return 4; }
 static bool
@@ -7619,27 +7564,17 @@ InitJIT(TraceMonitor *tm)
         disable_debugger_exceptions();
 
         bool arm_vfp = arm_check_vfp();
-        bool arm_thumb = arm_check_thumb();
-        bool arm_thumb2 = arm_check_thumb2();
         unsigned int arm_arch = arm_check_arch();
 
         enable_debugger_exceptions();
 
         avmplus::AvmCore::config.arm_vfp = arm_vfp;
         avmplus::AvmCore::config.soft_float = !arm_vfp;
-        avmplus::AvmCore::config.arm_thumb = arm_thumb;
-        avmplus::AvmCore::config.arm_thumb2 = arm_thumb2;
         avmplus::AvmCore::config.arm_arch = arm_arch;
 
         // Sanity-check the configuration detection.
         //  * We don't understand architectures prior to ARMv4.
         JS_ASSERT(arm_arch >= 4);
-        //  * All architectures support Thumb with the possible exception of ARMv4.
-        JS_ASSERT((arm_thumb) || (arm_arch == 4));
-        //  * Only ARMv6T2 and ARMv7(+) support Thumb2, but ARMv6 does not.
-        JS_ASSERT((arm_thumb2) || (arm_arch <= 6));
-        //  * All architectures that support Thumb2 also support Thumb.
-        JS_ASSERT((arm_thumb2 && arm_thumb) || (!arm_thumb2));
 #endif
         did_we_check_processor_features = true;
     }
@@ -12282,11 +12217,25 @@ TraceRecorder::setElem(int lval_spindex, int idx_spindex, int v_spindex)
     if (isNumber(v)) {
         if (isPromoteInt(v_ins) &&
-            tarray->type != js::TypedArray::TYPE_FLOAT32) {
+            tarray->type != js::TypedArray::TYPE_FLOAT32 &&
+            tarray->type != js::TypedArray::TYPE_FLOAT64) {
             LIns *v_ins_int = demote(lir, v_ins);
+
+            if (tarray->type == js::TypedArray::TYPE_UINT8_CLAMPED) {
+                /* Wrap v_ins_int in some magic to clamp it */
+                v_ins_int = lir->ins_choose(lir->ins2i(LIR_lt, v_ins_int, 0),
+                                            lir->insImm(0),
+                                            lir->ins_choose(lir->ins2i(LIR_gt, v_ins_int, 0xff),
+                                                            lir->insImm(0xff),
+                                                            v_ins_int,
+                                                            avmplus::AvmCore::use_cmov()),
+                                            avmplus::AvmCore::use_cmov());
+            }
+
             switch (tarray->type) {
               case js::TypedArray::TYPE_INT8:
               case js::TypedArray::TYPE_UINT8:
+              case js::TypedArray::TYPE_UINT8_CLAMPED:
                 addr_ins = lir->ins2(LIR_piadd, data_ins, pidx_ins);
                 lir->insStore(LIR_stb, v_ins_int, addr_ins, 0);
                 break;
@@ -12300,6 +12249,8 @@ TraceRecorder::setElem(int lval_spindex, int idx_spindex, int v_spindex)
                 addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 2));
                 lir->insStore(LIR_sti, v_ins_int, addr_ins, 0);
                 break;
+              case js::TypedArray::TYPE_FLOAT32:
+              case js::TypedArray::TYPE_FLOAT64:
              default:
                 JS_NOT_REACHED("Unknown typed array in tracer");
             }
@@ -12324,6 +12275,13 @@ TraceRecorder::setElem(int lval_spindex, int idx_spindex, int v_spindex)
                 addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 2));
                 lir->insStore(LIR_st32f, v_ins, addr_ins, 0);
                 break;
+              case js::TypedArray::TYPE_FLOAT64:
+                addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 3));
+                lir->insStore(LIR_stfi, v_ins, addr_ins, 0);
+                break;
+              case js::TypedArray::TYPE_UINT8_CLAMPED:
+                addr_ins = lir->ins2(LIR_piadd, data_ins, pidx_ins);
+                lir->insStore(LIR_stb, lir->insCall(&js_TypedArray_uint8_clamp_double_ci, &v_ins), addr_ins, 0);
+                break;
              default:
                 JS_NOT_REACHED("Unknown typed array type in tracer");
             }
@@ -13290,6 +13248,7 @@ TraceRecorder::typedArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_
         v_ins = lir->ins1(LIR_i2f, lir->insLoad(LIR_ldsb, addr_ins, 0));
         break;
       case js::TypedArray::TYPE_UINT8:
+      case js::TypedArray::TYPE_UINT8_CLAMPED:
         addr_ins = lir->ins2(LIR_piadd, data_ins, pidx_ins);
         v_ins = lir->ins1(LIR_u2f, lir->insLoad(LIR_ldzb, addr_ins, 0));
         break;
@@ -13313,6 +13272,10 @@ TraceRecorder::typedArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_
         addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 2));
         v_ins = lir->insLoad(LIR_ld32f, addr_ins, 0);
         break;
+      case js::TypedArray::TYPE_FLOAT64:
+        addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 3));
+        v_ins = lir->insLoad(LIR_ldf, addr_ins, 0);
+        break;
      default:
         JS_NOT_REACHED("Unknown typed array type in tracer");
     }

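The ins_choose chain added above is a branchless clamp: when an integer is stored into a Uint8ClampedArray on trace, the recorder selects among 0, 0xff, and the value itself, using conditional moves where avmplus::AvmCore::use_cmov() reports support. In plain C++ the emitted computation is equivalent to this sketch (the helper name is mine, not from the tree):

    #include <stdint.h>

    // Equivalent of ins_choose(v < 0, 0, ins_choose(v > 0xff, 0xff, v)):
    static int32_t clamp_int_to_uint8(int32_t v) {
        return v < 0 ? 0 : (v > 0xff ? 0xff : v);
    }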
View file

@@ -304,6 +304,115 @@ TypedArray::obj_setAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty
     return false;
 }
 
+/* Helper clamped uint8 type */
+
+int32 JS_FASTCALL
+js_TypedArray_uint8_clamp_double(const double x)
+{
+    // Not < so that NaN coerces to 0
+    if (!(x >= 0))
+        return 0;
+
+    if (x > 255)
+        return 255;
+
+    jsdouble toTruncate = x + 0.5;
+    JSUint8 y = JSUint8(toTruncate);
+
+    /*
+     * now y is rounded to nearest, ties rounded up.  We want
+     * rounded to nearest ties to even, so check whether we had a
+     * tie.
+     */
+    if (y == toTruncate) {
+        /*
+         * It was a tie (since adding 0.5 gave us the exact integer
+         * we want).  Since we rounded up, we either already have an
+         * even number or we have an odd number but the number we
+         * want is one less.  So just unconditionally masking out the
+         * ones bit should do the trick to get us the value we
+         * want.
+         */
+        return (y & ~1);
+    }
+
+    return y;
+}
+
+JS_DEFINE_CALLINFO_1(extern, INT32, js_TypedArray_uint8_clamp_double, DOUBLE, 1, 1)
+
+struct uint8_clamped {
+    uint8 val;
+
+    uint8_clamped() { }
+    uint8_clamped(const uint8_clamped& other) : val(other.val) { }
+
+    // invoke our assignment helpers for constructor conversion
+    uint8_clamped(uint8 x)    { *this = x; }
+    uint8_clamped(uint16 x)   { *this = x; }
+    uint8_clamped(uint32 x)   { *this = x; }
+    uint8_clamped(int8 x)     { *this = x; }
+    uint8_clamped(int16 x)    { *this = x; }
+    uint8_clamped(int32 x)    { *this = x; }
+    uint8_clamped(jsdouble x) { *this = x; }
+
+    inline uint8_clamped& operator= (const uint8_clamped& x) {
+        val = x.val;
+        return *this;
+    }
+
+    inline uint8_clamped& operator= (uint8 x) {
+        val = x;
+        return *this;
+    }
+
+    inline uint8_clamped& operator= (uint16 x) {
+        val = (x > 255) ? 255 : uint8(x);
+        return *this;
+    }
+
+    inline uint8_clamped& operator= (uint32 x) {
+        val = (x > 255) ? 255 : uint8(x);
+        return *this;
+    }
+
+    inline uint8_clamped& operator= (int8 x) {
+        val = (x >= 0) ? uint8(x) : 0;
+        return *this;
+    }
+
+    inline uint8_clamped& operator= (int16 x) {
+        val = (x >= 0)
+              ? ((x < 255)
+                 ? uint8(x)
+                 : 255)
+              : 0;
+        return *this;
+    }
+
+    inline uint8_clamped& operator= (int32 x) {
+        val = (x >= 0)
+              ? ((x < 255)
+                 ? uint8(x)
+                 : 255)
+              : 0;
+        return *this;
+    }
+
+    inline uint8_clamped& operator= (const jsdouble x) {
+        val = js_TypedArray_uint8_clamp_double(x);
+        return *this;
+    }
+
+    inline operator uint8() const {
+        return val;
+    }
+};
+
+/* Make sure the compiler isn't doing some funky stuff */
+JS_STATIC_ASSERT(sizeof(uint8_clamped) == 1);
 
 template<typename NativeType> static inline const int TypeIDOfType();
 template<> inline const int TypeIDOfType<int8>() { return TypedArray::TYPE_INT8; }
 template<> inline const int TypeIDOfType<uint8>() { return TypedArray::TYPE_UINT8; }
@@ -312,6 +421,8 @@ template<> inline const int TypeIDOfType<uint16>() { return TypedArray::TYPE_UIN
 template<> inline const int TypeIDOfType<int32>() { return TypedArray::TYPE_INT32; }
 template<> inline const int TypeIDOfType<uint32>() { return TypedArray::TYPE_UINT32; }
 template<> inline const int TypeIDOfType<float>() { return TypedArray::TYPE_FLOAT32; }
+template<> inline const int TypeIDOfType<double>() { return TypedArray::TYPE_FLOAT64; }
+template<> inline const int TypeIDOfType<uint8_clamped>() { return TypedArray::TYPE_UINT8_CLAMPED; }
 
 template<typename NativeType> class TypedArrayTemplate;
 
@@ -322,6 +433,8 @@ typedef TypedArrayTemplate<uint16> Uint16Array;
 typedef TypedArrayTemplate<int32> Int32Array;
 typedef TypedArrayTemplate<uint32> Uint32Array;
 typedef TypedArrayTemplate<float> Float32Array;
+typedef TypedArrayTemplate<double> Float64Array;
+typedef TypedArrayTemplate<uint8_clamped> Uint8ClampedArray;
 
 template<typename NativeType>
 class TypedArrayTemplate
@@ -860,7 +973,8 @@ class TypedArrayTemplate
                 *dest++ = NativeType(*src++);
             break;
           }
-          case TypedArray::TYPE_UINT8: {
+          case TypedArray::TYPE_UINT8:
+          case TypedArray::TYPE_UINT8_CLAMPED: {
             uint8 *src = static_cast<uint8*>(tarray->data);
             for (uintN i = 0; i < length; ++i)
                 *dest++ = NativeType(*src++);
@@ -896,6 +1010,12 @@ class TypedArrayTemplate
                 *dest++ = NativeType(*src++);
             break;
           }
+          case TypedArray::TYPE_FLOAT64: {
+            double *src = static_cast<double*>(tarray->data);
+            for (uintN i = 0; i < length; ++i)
+                *dest++ = NativeType(*src++);
+            break;
+          }
          default:
             JS_NOT_REACHED("copyFrom with a TypedArray of unknown type");
             break;
@@ -994,6 +1114,15 @@ TypedArrayTemplate<float>::copyIndexToValue(JSContext *cx, uint32 index, jsval *
         *vp = JSVAL_VOID;
 }
 
+template<>
+void
+TypedArrayTemplate<double>::copyIndexToValue(JSContext *cx, uint32 index, jsval *vp)
+{
+    double val = getIndex(index);
+    if (!js_NewWeaklyRootedNumber(cx, jsdouble(val), vp))
+        *vp = JSVAL_VOID;
+}
+
 /***
  *** JS impl
  ***/
@@ -1108,6 +1237,8 @@ IMPL_TYPED_ARRAY_STATICS(Uint16Array);
 IMPL_TYPED_ARRAY_STATICS(Int32Array);
 IMPL_TYPED_ARRAY_STATICS(Uint32Array);
 IMPL_TYPED_ARRAY_STATICS(Float32Array);
+IMPL_TYPED_ARRAY_STATICS(Float64Array);
+IMPL_TYPED_ARRAY_STATICS(Uint8ClampedArray);
 
 JSClass TypedArray::fastClasses[TYPE_MAX] = {
     IMPL_TYPED_ARRAY_FAST_CLASS(Int8Array),
@@ -1116,7 +1247,9 @@ JSClass TypedArray::fastClasses[TYPE_MAX] = {
     IMPL_TYPED_ARRAY_FAST_CLASS(Uint16Array),
     IMPL_TYPED_ARRAY_FAST_CLASS(Int32Array),
     IMPL_TYPED_ARRAY_FAST_CLASS(Uint32Array),
-    IMPL_TYPED_ARRAY_FAST_CLASS(Float32Array)
+    IMPL_TYPED_ARRAY_FAST_CLASS(Float32Array),
+    IMPL_TYPED_ARRAY_FAST_CLASS(Float64Array),
+    IMPL_TYPED_ARRAY_FAST_CLASS(Uint8ClampedArray)
 };
 
 JSClass TypedArray::slowClasses[TYPE_MAX] = {
@@ -1126,7 +1259,9 @@ JSClass TypedArray::slowClasses[TYPE_MAX] = {
     IMPL_TYPED_ARRAY_SLOW_CLASS(Uint16Array),
     IMPL_TYPED_ARRAY_SLOW_CLASS(Int32Array),
     IMPL_TYPED_ARRAY_SLOW_CLASS(Uint32Array),
-    IMPL_TYPED_ARRAY_SLOW_CLASS(Float32Array)
+    IMPL_TYPED_ARRAY_SLOW_CLASS(Float32Array),
+    IMPL_TYPED_ARRAY_SLOW_CLASS(Float64Array),
+    IMPL_TYPED_ARRAY_SLOW_CLASS(Uint8ClampedArray)
 };
 
 JS_FRIEND_API(JSObject *)
@@ -1148,6 +1283,8 @@ js_InitTypedArrayClasses(JSContext *cx, JSObject *obj)
     INIT_TYPED_ARRAY_CLASS(Int32Array,TYPE_INT32);
     INIT_TYPED_ARRAY_CLASS(Uint32Array,TYPE_UINT32);
     INIT_TYPED_ARRAY_CLASS(Float32Array,TYPE_FLOAT32);
+    INIT_TYPED_ARRAY_CLASS(Float64Array,TYPE_FLOAT64);
+    INIT_TYPED_ARRAY_CLASS(Uint8ClampedArray,TYPE_UINT8_CLAMPED);
 
     proto = js_InitClass(cx, obj, NULL, &ArrayBuffer::jsclass,
                          ArrayBuffer::class_constructor, 1,
@@ -1213,6 +1350,12 @@ TypedArrayConstruct(JSContext *cx, jsint atype, uintN argc, jsval *argv, jsval *
       case TypedArray::TYPE_FLOAT32:
         return Float32Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
 
+      case TypedArray::TYPE_FLOAT64:
+        return Float64Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
+
+      case TypedArray::TYPE_UINT8_CLAMPED:
+        return Uint8ClampedArray::class_constructor(cx, cx->globalObject, argc, argv, rv);
+
      default:
         JS_NOT_REACHED("shouldn't have gotten here");
         return false;

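js_TypedArray_uint8_clamp_double above implements the Canvas-style clamping conversion: NaN maps to 0, the value is clamped to [0, 255], and rounding is to nearest with ties going to the even neighbour (adding 0.5 rounds ties up, and the y & ~1 mask then steps an odd tie result back down). A self-contained sketch with worked examples, restating the assumed semantics in a standalone helper rather than quoting the tree:

    #include <cassert>
    #include <limits>
    #include <stdint.h>

    static int clamp_double_to_uint8(double x) {
        if (!(x >= 0))          // NaN fails the comparison, so NaN -> 0
            return 0;
        if (x > 255)
            return 255;
        double t = x + 0.5;
        uint8_t y = uint8_t(t); // truncation: nearest, ties rounded up
        if (y == t)             // exact tie: step odd results down to even
            return y & ~1;
        return y;
    }

    int main() {
        assert(clamp_double_to_uint8(1.5) == 2);       // tie, 2 is even
        assert(clamp_double_to_uint8(2.5) == 2);       // tie, rounds down to even
        assert(clamp_double_to_uint8(-123.723) == 0);  // clamped low
        assert(clamp_double_to_uint8(512.0) == 255);   // clamped high
        assert(clamp_double_to_uint8(std::numeric_limits<double>::quiet_NaN()) == 0);
        return 0;
    }

The same expectations (512 to 255, -123.723 to 0) appear in the js1_8_5 extensions test later in this commit.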
View file

@@ -101,6 +101,14 @@ struct JS_FRIEND_API(TypedArray) {
         TYPE_INT32,
         TYPE_UINT32,
         TYPE_FLOAT32,
+        TYPE_FLOAT64,
+
+        /*
+         * Special type that's a uint8, but assignments are clamped to 0 .. 255.
+         * Treat the raw data type as a uint8.
+         */
+        TYPE_UINT8_CLAMPED,
+
         TYPE_MAX
     };

View file

@@ -766,9 +766,11 @@ FragmentAssembler::endFragment()
     if (mParent.mAssm.error() != nanojit::None) {
         cerr << "error during assembly: ";
         switch (mParent.mAssm.error()) {
+          case nanojit::ConditionalBranchTooFar: cerr << "ConditionalBranchTooFar"; break;
           case nanojit::StackFull: cerr << "StackFull"; break;
           case nanojit::UnknownBranch: cerr << "UnknownBranch"; break;
           case nanojit::None: cerr << "None"; break;
+          default: NanoAssert(0); break;
         }
         cerr << endl;
         std::exit(1);
@@ -2169,9 +2171,6 @@ processCmdLine(int argc, char **argv, CmdLineOptions& opts)
     avmplus::AvmCore::config.arm_arch = arm_arch;
     avmplus::AvmCore::config.arm_vfp = arm_vfp;
     avmplus::AvmCore::config.soft_float = !arm_vfp;
-    // This doesn't allow us to test ARMv6T2 (which also supports Thumb2), but this shouldn't
-    // really matter here.
-    avmplus::AvmCore::config.arm_thumb2 = (arm_arch >= 7);
 #endif
 }

View file

@@ -1 +1 @@
-f4ece4c13545709edbd5b8f856ec39f155223892
+42624af8095495e387d20c7e474dcc602694b300

View file

@@ -494,7 +494,7 @@ namespace nanojit
             evict(ins);
             r = registerAlloc(ins, allow, hint(ins));
         } else
-#elif defined(NANOJIT_PPC)
+#elif defined(NANOJIT_PPC) || defined(NANOJIT_MIPS)
         if (((rmask(r)&GpRegs) && !(allow&GpRegs)) ||
             ((rmask(r)&FpRegs) && !(allow&FpRegs)))
         {
@@ -1092,7 +1092,7 @@ namespace nanojit
         NanoAssert(!_inExit);
         // save used parts of current block on fragment's code list, free the rest
-#ifdef NANOJIT_ARM
+#if defined(NANOJIT_ARM) || defined(NANOJIT_MIPS)
         // [codeStart, _nSlot) ... gap ... [_nIns, codeEnd)
         if (_nExitIns) {
             _codeAlloc.addRemainder(codeList, exitStart, exitEnd, _nExitSlot, _nExitIns);

View file

@@ -193,6 +193,7 @@ namespace nanojit
             None = 0
             ,StackFull
             ,UnknownBranch
+            ,ConditionalBranchTooFar
         };
 
         typedef SeqBuilder<NIns*> NInsList;

View file

@@ -269,6 +269,11 @@ namespace nanojit
 extern "C" void __clear_cache(char *BEG, char *END);
 #endif
 
+#if defined(AVMPLUS_UNIX) && defined(NANOJIT_MIPS)
+#include <asm/cachectl.h>
+extern "C" int cacheflush(char *addr, int nbytes, int cache);
+#endif
+
 #ifdef AVMPLUS_SPARC
 #ifdef __linux__  // bugzilla 502369
 void sync_instruction_memory(caddr_t v, u_int len)
@@ -329,6 +334,12 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
         sync_instruction_memory((char*)start, len);
     }
 
+#elif defined(AVMPLUS_UNIX) && defined(NANOJIT_MIPS)
+    void CodeAlloc::flushICache(void *start, size_t len) {
+        // FIXME Use synci on MIPS32R2
+        cacheflush((char *)start, len, BCACHE);
+    }
+
 #elif defined AVMPLUS_UNIX
     #ifdef ANDROID
     void CodeAlloc::flushICache(void *start, size_t len) {

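The MIPS branch above mirrors what the other ports do here: the JIT writes instructions through the data cache, so the instruction cache must be invalidated for that range before the code runs. BCACHE asks the Linux cacheflush(2) syscall to handle both caches. A usage sketch with a hypothetical caller, for illustration only:

    #include <asm/cachectl.h>
    extern "C" int cacheflush(char *addr, int nbytes, int cache);

    // After emitting JIT code into [start, start+len), publish it to
    // instruction fetch before jumping into it.
    static void publish_code(char *start, int len) {
        cacheflush(start, len, BCACHE);  // write back D-cache, invalidate I-cache
    }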
View file

@@ -2290,11 +2290,6 @@ namespace nanojit
         }
     }
 
-#define HOWTO_DEBUG \
-    " One way to debug this: change the failing NanoAssertMsgf(0, ...) call to a\n" \
-    " printf(...) call and rerun with verbose output. If you're lucky, this error\n" \
-    " message will appear before the block containing the erroneous instruction.\n\n"
-
     void ValidateWriter::typeCheckArgs(LOpcode op, int nArgs, LTy formals[], LIns* args[])
     {
         // Type-check the arguments.
@@ -2309,15 +2304,10 @@ namespace nanojit
                 // to be caught by test suites whereas error messages may not
                 // be.
                 NanoAssertMsgf(0,
-                    "\n\n"
-                    "LIR type error (%s):\n"
-                    "  in instruction with opcode: %s\n"
-                    "  in argument %d with opcode: %s\n"
-                    "  argument has type %s, expected %s\n"
-                    HOWTO_DEBUG,
-                    _whereInPipeline,
-                    lirNames[op],
-                    i+1, lirNames[args[i]->opcode()],
+                    "LIR type error (%s): arg %d of '%s' is '%s' "
+                    "which has type %s (expected %s)",
+                    _whereInPipeline, i+1, lirNames[op],
+                    lirNames[args[i]->opcode()],
                     type2string(actual), type2string(formal));
             }
         }
@@ -2327,27 +2317,16 @@ namespace nanojit
                                          LIns* arg, const char* shouldBeDesc)
     {
         NanoAssertMsgf(0,
-            "\n\n"
-            "  LIR structure error (%s):\n"
-            "    in instruction with opcode: %s\n"
-            "    %s %d has opcode: %s\n"
-            "    it should be: %s\n"
-            HOWTO_DEBUG,
-            _whereInPipeline,
-            lirNames[op],
-            argDesc, argN, lirNames[arg->opcode()],
-            shouldBeDesc);
+            "LIR structure error (%s): %s %d of '%s' is '%s' (expected %s)",
+            _whereInPipeline, argDesc, argN,
+            lirNames[op], lirNames[arg->opcode()], shouldBeDesc);
     }
 
     void ValidateWriter::errorPlatformShouldBe(LOpcode op, int nBits)
     {
         NanoAssertMsgf(0,
-            "\n\n"
-            "  LIR structure error (%s):\n"
-            "    %s should only occur on %d-bit platforms\n"
-            HOWTO_DEBUG,
-            _whereInPipeline,
-            lirNames[op], nBits);
+            "LIR platform error (%s): '%s' should only occur on %d-bit platforms",
+            _whereInPipeline, lirNames[op], nBits);
     }
 
     void ValidateWriter::checkLInsIsACondOrConst(LOpcode op, int argN, LIns* ins)

View file

@@ -64,6 +64,8 @@
     #include "NativeSparc.h"
 #elif defined(NANOJIT_X64)
     #include "NativeX64.h"
+#elif defined(NANOJIT_MIPS)
+    #include "NativeMIPS.h"
 #else
     #error "unknown nanojit architecture"
 #endif

View file

@@ -1947,7 +1947,11 @@ Assembler::asm_ld_imm(Register d, int32_t imm, bool chk /* = true */)
 //   immediate. If this isn't possible, load it from memory.
 //    - We cannot use MOV(W|T) on cores older than the introduction of
 //      Thumb-2 or if the target register is the PC.
-if (config.arm_thumb2 && (d != PC)) {
+//
+// (Note that we use Thumb-2 if arm_arch is ARMv7 or later; the only earlier
+// ARM core that provided Thumb-2 is ARMv6T2/ARM1156, which is a real-time
+// core that nanojit is unlikely to ever target.)
+if (config.arm_arch >= 7 && (d != PC)) {
     // ARMv6T2 and above have MOVW and MOVT.
     uint32_t high_h = (uint32_t)imm >> 16;
     uint32_t low_h = imm & 0xffff;
@@ -2630,13 +2634,12 @@ Assembler::asm_load32(LInsp ins)
 void
 Assembler::asm_cmov(LInsp ins)
 {
-    LOpcode op = ins->opcode();
     LIns* condval = ins->oprnd1();
     LIns* iftrue = ins->oprnd2();
     LIns* iffalse = ins->oprnd3();
 
     NanoAssert(condval->isCmp());
-    NanoAssert(op == LIR_cmov && iftrue->isI32() && iffalse->isI32());
+    NanoAssert(ins->opcode() == LIR_cmov && iftrue->isI32() && iffalse->isI32());
 
     const Register rr = deprecated_prepResultReg(ins, GpRegs);

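The rewritten test above keys MOVW/MOVT availability off arm_arch >= 7 instead of the removed arm_thumb2 flag. The pair loads any 32-bit immediate in two instructions: MOVW writes the low halfword and zeroes the rest, then MOVT overwrites the top halfword. A sketch of the split that high_h/low_h compute (illustrative values, not from the tree):

    #include <cassert>
    #include <stdint.h>

    int main() {
        uint32_t imm = 0xDEADBEEF;
        uint32_t low_h = imm & 0xffff;   // MOVW rd, #0xBEEF  (zero-extends)
        uint32_t high_h = imm >> 16;     // MOVT rd, #0xDEAD  (sets upper half)
        assert(low_h == 0xBEEF && high_h == 0xDEAD);
        return 0;
    }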
File diff not shown because it is too large.

js/src/nanojit/NativeMIPS.h (new file, 636 lines)

View file

@@ -0,0 +1,636 @@
/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is [Open Source Virtual Machine].
*
* The Initial Developer of the Original Code is
* MIPS Technologies Inc
* Portions created by the Initial Developer are Copyright (C) 2009
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Chris Dearman <chris@mips.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef __nanojit_NativeMIPS__
#define __nanojit_NativeMIPS__
#include "../vprof/vprof.h"
#ifdef PERFM
#define DOPROF
#endif
#define count_instr() _nvprof("mips", 1)
#define count_mov() do { _nvprof("mips-mov", 1); count_instr(); } while (0)
#define count_jmp() do { _nvprof("mips-jmp", 1); count_instr(); } while (0)
#define count_prolog() do { _nvprof("mips-prolog", 1); count_instr(); } while (0)
#define count_alu() do { _nvprof("mips-alu", 1); count_instr(); } while (0)
#define count_misc() do { _nvprof("mips-misc", 1); count_instr(); } while (0)
#define count_fpu() do { _nvprof("mips-fpu", 1); count_instr(); } while (0)
#define count_br() do { _nvprof("mips-br", 1); count_instr(); } while (0)
namespace nanojit
{
// Req: NJ_MAX_STACK_ENTRY is number of instructions to hold in LIR stack
#if 0
// FIXME: Inconsistent use in signed/unsigned expressions makes this generate errors
static const uint32_t NJ_MAX_STACK_ENTRY = 256;
#else
#define NJ_MAX_STACK_ENTRY 256
#endif
static const int NJ_ALIGN_STACK = 8;
typedef uint32_t NIns; // REQ: Instruction count
typedef uint64_t RegisterMask; // REQ: Large enough to hold LastReg-FirstReg bits
#define _rmask_(r) (1LL<<(r))
typedef enum { // REQ: Register identifiers
// Register numbers for Native code generator
ZERO = 0, AT = 1, V0 = 2, V1 = 3, A0 = 4, A1 = 5, A2 = 6, A3 = 7,
T0 = 8, T1 = 9, T2 = 10, T3 = 11, T4 = 12, T5 = 13, T6 = 14, T7 = 15,
S0 = 16, S1 = 17, S2 = 18, S3 = 19, S4 = 20, S5 = 21, S6 = 22, S7 = 23,
T8 = 24, T9 = 25, K0 = 26, K1 = 27, GP = 28, SP = 29, FP = 30, RA = 31,
F0 = 32, F1 = 33, F2 = 34, F3 = 35, F4 = 36, F5 = 37, F6 = 38, F7 = 39,
F8 = 40, F9 = 41, F10 = 42, F11 = 43, F12 = 44, F13 = 45, F14 = 46, F15 = 47,
F16 = 48, F17 = 49, F18 = 50, F19 = 51, F20 = 52, F21 = 53, F22 = 54, F23 = 55,
F24 = 56, F25 = 57, F26 = 58, F27 = 59, F28 = 60, F29 = 61, F30 = 62, F31 = 63,
// FP register aliases
FV0 = F0, FV1 = F2,
FA0 = F12, FA1 = F14,
FT0 = F4, FT1 = F6, FT2 = F8, FT3 = F10, FT4 = F16, FT5 = F18,
FS0 = F20, FS1 = F22, FS2 = F24, FS3 = F26, FS4 = F28, FS5 = F30,
// Wellknown register names used by code generator
FirstReg = ZERO,
LastReg = F31,
deprecated_UnknownReg = 127
} Register;
// REQ: register names
verbose_only(extern const char* regNames[];)
// REQ: Bytes of icache to flush after Assembler::patch
const size_t LARGEST_BRANCH_PATCH = 2 * sizeof(NIns);
// REQ: largest value passed to underrunProtect
static const int LARGEST_UNDERRUN_PROT = 32;
// REQ: Number of callee saved registers
#ifdef FPCALLEESAVED
static const int NumSavedRegs = 14;
#else
static const int NumSavedRegs = 8;
#endif
// REQ: Callee saved registers
const RegisterMask SavedRegs =
#ifdef FPCALLEESAVED
_rmask_(FS0) | _rmask_(FS1) | _rmask_(FS2) |
_rmask_(FS3) | _rmask_(FS4) | _rmask_(FS5) |
#endif
_rmask_(S0) | _rmask_(S1) | _rmask_(S2) | _rmask_(S3) |
_rmask_(S4) | _rmask_(S5) | _rmask_(S6) | _rmask_(S7);
// REQ: General purpose registers
static const RegisterMask GpRegs =
_rmask_(V0) | _rmask_(V1) |
_rmask_(A0) | _rmask_(A1) | _rmask_(A2) | _rmask_(A3) |
_rmask_(S0) | _rmask_(S1) | _rmask_(S2) | _rmask_(S3) |
_rmask_(S4) | _rmask_(S5) | _rmask_(S6) | _rmask_(S7) |
_rmask_(T0) | _rmask_(T1) | _rmask_(T2) | _rmask_(T3) |
_rmask_(T4) | _rmask_(T5) | _rmask_(T6) | _rmask_(T7) |
_rmask_(T8) | _rmask_(T9);
// REQ: Floating point registers
static const RegisterMask FpRegs =
#ifdef FPCALLEESAVED
_rmask_(FS0) | _rmask_(FS1) | _rmask_(FS2) |
_rmask_(FS3) | _rmask_(FS4) | _rmask_(FS5) |
#endif
_rmask_(FV0) | _rmask_(FV1) |
_rmask_(FA0) | _rmask_(FA1) |
_rmask_(FT0) | _rmask_(FT1) | _rmask_(FT2) |
_rmask_(FT3) | _rmask_(FT4) | _rmask_(FT5);
static const RegisterMask AllowableFlagRegs = GpRegs; // REQ: Registers that can hold flag results FIXME
static inline bool IsFpReg(Register r)
{
return (_rmask_(r) & FpRegs) != 0;
}
static inline bool IsGpReg(Register r)
{
return (_rmask_(r) & GpRegs) != 0;
}
#define GPR(r) ((r)&31)
#define FPR(r) ((r)&31)
// REQ: Platform specific declarations to include in Stats structure
#define DECLARE_PLATFORM_STATS()
// REQ: Platform specific declarations to include in Assembler class
#define DECLARE_PLATFORM_ASSEMBLER() \
const static Register argRegs[4]; \
const static Register retRegs[2]; \
void nativePageSetup(void); \
void nativePageReset(void); \
void underrunProtect(int bytes); \
NIns *_nSlot; \
NIns *_nExitSlot; \
int max_out_args; \
Register ovreg; \
\
void asm_ldst(int op, Register r, int offset, Register b); \
void asm_ldst64(bool store, Register fr, int offset, Register b); \
void asm_store_imm64(LIns *value, int dr, Register rbase); \
void asm_li32(Register r, int32_t imm); \
void asm_li_d(Register fr, int32_t msw, int32_t lsw); \
void asm_li(Register r, int32_t imm); \
void asm_j(NIns*, bool bdelay); \
NIns *asm_branch_far(bool, LIns*, NIns*); \
NIns *asm_branch_near(bool, LIns*, NIns*); \
void asm_cmp(LOpcode condop, LIns *a, LIns *b, Register cr); \
void asm_move(Register d, Register s); \
void asm_regarg(ArgSize sz, LInsp p, Register r); \
void asm_stkarg(LInsp arg, int stkd); \
void asm_arg(ArgSize sz, LInsp arg, Register& r, Register& fr, int& stkd); \
void asm_arg_64(LInsp arg, Register& r, Register& fr, int& stkd) ;
// REQ: Platform specific declarations to include in RegAlloc class
#define DECLARE_PLATFORM_REGALLOC()
// REQ:
#define swapptrs() do { \
NIns* _tins = _nIns; _nIns = _nExitIns; _nExitIns = _tins; \
NIns* _nslot = _nSlot; _nSlot = _nExitSlot; _nExitSlot = _nslot; \
} while (0)
#define TODO(x) do { verbose_only(avmplus::AvmLog(#x);) NanoAssertMsgf(false, "%s", #x); } while (0)
#ifdef MIPSDEBUG
#define TAG(fmt, ...) do { debug_only(verbose_outputf(" # MIPS: " fmt, ##__VA_ARGS__);) } while (0)
#else
#define TAG(fmt, ...) do { } while (0)
#endif
#define EMIT(ins, fmt, ...) do { \
underrunProtect(4); \
*(--_nIns) = (NIns) (ins); \
debug_only(codegenBreak(_nIns);) \
asm_output(fmt, ##__VA_ARGS__); \
} while (0)
// Emit code in trampoline/literal area
// Assume that underrunProtect has already been called
// This is a bit hacky...
#define TRAMP(ins, fmt, ...) do { \
verbose_only( \
NIns *save_nIns = _nIns; _nIns = _nSlot; \
) \
*_nSlot = (NIns)ins; \
debug_only(codegenBreak(_nSlot);) \
_nSlot++; \
verbose_only(setOutputForEOL("<= trampoline");) \
asm_output(fmt, ##__VA_ARGS__); \
verbose_only( \
_nIns = save_nIns; \
) \
} while (0)
#define MR(d, s) asm_move(d, s)
// underrun guarantees that there is always room to insert a jump
#define JMP(t) asm_j(t, true)
// Opcodes: bits 31..26
#define OP_SPECIAL 0x00
#define OP_REGIMM 0x01
#define OP_J 0x02
#define OP_JAL 0x03
#define OP_BEQ 0x04
#define OP_BNE 0x05
#define OP_BLEZ 0x06
#define OP_BGTZ 0x07
#define OP_ADDIU 0x09
#define OP_SLTIU 0x0b
#define OP_ANDI 0x0c
#define OP_ORI 0x0d
#define OP_XORI 0x0e
#define OP_LUI 0x0f
#define OP_COP1 0x11
#define OP_COP1X 0x13
#define OP_SPECIAL2 0x1c
#define OP_LB 0x20
#define OP_LH 0x21
#define OP_LW 0x23
#define OP_LBU 0x24
#define OP_LHU 0x25
#define OP_SB 0x28
#define OP_SH 0x29
#define OP_SW 0x2b
#define OP_LWC1 0x31
#define OP_LDC1 0x35
#define OP_SWC1 0x39
#define OP_SDC1 0x3d
// REGIMM: bits 20..16
#define REGIMM_BLTZ 0x00
#define REGIMM_BGEZ 0x01
// COP1: bits 25..21
#define COP1_ADD 0x00
#define COP1_SUB 0x01
#define COP1_MUL 0x02
#define COP1_DIV 0x03
#define COP1_MOV 0x06
#define COP1_NEG 0x07
#define COP1_BC 0x08
#define COP1_TRUNCW 0x0d
#define COP1_CVTD 0x21
// COP1X: bits 5..0
#define COP1X_LDXC1 0x01
#define COP1X_SDXC1 0x09
// SPECIAL: bits 5..0
#define SPECIAL_SLL 0x00
#define SPECIAL_MOVCI 0x01
#define SPECIAL_SRL 0x02
#define SPECIAL_SRA 0x03
#define SPECIAL_SLLV 0x04
#define SPECIAL_SRLV 0x06
#define SPECIAL_SRAV 0x07
#define SPECIAL_JR 0x08
#define SPECIAL_JALR 0x09
#define SPECIAL_MOVN 0x0b
#define SPECIAL_MFHI 0x10
#define SPECIAL_MFLO 0x12
#define SPECIAL_MULT 0x18
#define SPECIAL_ADDU 0x21
#define SPECIAL_SUBU 0x23
#define SPECIAL_AND 0x24
#define SPECIAL_OR 0x25
#define SPECIAL_XOR 0x26
#define SPECIAL_NOR 0x27
#define SPECIAL_SLT 0x2a
#define SPECIAL_SLTU 0x2b
// SPECIAL2: bits 5..0
#define SPECIAL2_MUL 0x02
// FORMAT: bits 25..21
#define FMT_S 0x10
#define FMT_D 0x11
#define FMT_W 0x14
#define FMT_L 0x15
#define FMT_PS 0x16
// CONDITION: bits 4..0
#define COND_F 0x0
#define COND_UN 0x1
#define COND_EQ 0x2
#define COND_UEQ 0x3
#define COND_OLT 0x4
#define COND_ULT 0x5
#define COND_OLE 0x6
#define COND_ULE 0x7
#define COND_SF 0x8
#define COND_NGLE 0x9
#define COND_SEQ 0xa
#define COND_NGL 0xb
#define COND_LT 0xc
#define COND_NGE 0xd
#define COND_LE 0xe
#define COND_NGT 0xf
// Helper definitions to encode different classes of MIPS instructions
// Parameters are in instruction order
#define R_FORMAT(op, rs, rt, rd, re, func) \
(((op)<<26)|(GPR(rs)<<21)|(GPR(rt)<<16)|(GPR(rd)<<11)|((re)<<6)|(func))
#define I_FORMAT(op, rs, rt, simm) \
(((op)<<26)|(GPR(rs)<<21)|(GPR(rt)<<16)|((simm)&0xffff))
#define J_FORMAT(op, index) \
(((op)<<26)|(index))
#define U_FORMAT(op, rs, rt, uimm) \
(((op)<<26)|(GPR(rs)<<21)|(GPR(rt)<<16)|((uimm)&0xffff))
#define F_FORMAT(op, ffmt, ft, fs, fd, func) \
(((op)<<26)|((ffmt)<<21)|(FPR(ft)<<16)|(FPR(fs)<<11)|(FPR(fd)<<6)|(func))
#define oname(op) Assembler::oname[op]
#define cname(cond) Assembler::cname[cond]
#define fname(ffmt) Assembler::fname[ffmt]
#define fpn(fr) gpn(fr)
#define BOFFSET(targ) (uint32_t(targ - (_nIns+1)))
#define LDST(op, rt, offset, base) \
do { count_misc(); EMIT(I_FORMAT(op, base, rt, offset), \
"%s %s, %d(%s)", oname[op], gpn(rt), offset, gpn(base)); } while (0)
#define BX(op, rs, rt, targ) \
do { count_br(); EMIT(I_FORMAT(op, rs, rt, BOFFSET(targ)), \
"%s %s, %s, %p", oname[op], gpn(rt), gpn(rs), targ); } while (0)
// MIPS instructions
// Parameters are in "assembler" order
#define ADDIU(rt, rs, simm) \
do { count_alu(); EMIT(I_FORMAT(OP_ADDIU, rs, rt, simm), \
"addiu %s, %s, %d", gpn(rt), gpn(rs), simm); } while (0)
#define trampADDIU(rt, rs, simm) \
do { count_alu(); TRAMP(I_FORMAT(OP_ADDIU, rs, rt, simm), \
"addiu %s, %s, %d", gpn(rt), gpn(rs), simm); } while (0)
#define ADDU(rd, rs, rt) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, rs, rt, rd, 0, SPECIAL_ADDU), \
"addu %s, %s, %s", gpn(rd), gpn(rs), gpn(rt)); } while (0)
#define AND(rd, rs, rt) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, rs, rt, rd, 0, SPECIAL_AND), \
"and %s, %s, %s", gpn(rd), gpn(rs), gpn(rt)); } while (0)
#define ANDI(rt, rs, uimm) \
do { count_alu(); EMIT(U_FORMAT(OP_ANDI, rs, rt, uimm), \
"andi %s, %s, 0x%x", gpn(rt), gpn(rs), ((uimm)&0xffff)); } while (0)
#define BC1F(targ) \
do { count_br(); EMIT(I_FORMAT(OP_COP1, COP1_BC, 0, BOFFSET(targ)), \
"bc1f %p", targ); } while (0)
#define BC1T(targ) \
do { count_br(); EMIT(I_FORMAT(OP_COP1, COP1_BC, 1, BOFFSET(targ)), \
"bc1t %p", targ); } while (0)
#define B(targ) BX(OP_BEQ, ZERO, ZERO, targ)
#define BEQ(rs, rt, targ) BX(OP_BEQ, rs, rt, targ)
#define BNE(rs, rt, targ) BX(OP_BNE, rs, rt, targ)
#define BLEZ(rs, targ) BX(OP_BLEZ, rs, ZERO, targ)
#define BGTZ(rs, targ) BX(OP_BGTZ, rs, ZERO, targ)
#define BGEZ(rs, targ) BX(OP_REGIMM, rs, REGIMM_BGEZ, targ)
#define BLTZ(rs, targ) BX(OP_REGIMM, rs, REGIMM_BLTZ, targ)
#define JINDEX(dest) ((uint32_t(dest)>>2)&0x03ffffff)
#define J(dest) \
do { count_jmp(); EMIT(J_FORMAT(OP_J, JINDEX(dest)), \
"j %p", dest); } while (0)
#define trampJ(dest) \
do { count_jmp(); TRAMP(J_FORMAT(OP_J, (uint32_t(dest)>>2)&0x3fffffff), \
"j %p", dest); } while (0)
#define JAL(dest) \
do { count_jmp(); EMIT(J_FORMAT(OP_JAL, ((dest)>>2)&0x3fffffff), \
"jal 0x%x", uint32_t(dest)); } while (0)
#define JALR(rs) \
do { count_jmp(); EMIT(R_FORMAT(OP_SPECIAL, rs, 0, RA, 0, SPECIAL_JALR), \
"jalr %s", gpn(rs)); } while (0)
#define JR(rs) \
do { count_jmp(); EMIT(R_FORMAT(OP_SPECIAL, rs, 0, 0, 0, SPECIAL_JR), \
"jr %s", gpn(rs)); } while (0)
#define trampJR(rs) \
do { count_jmp(); TRAMP(R_FORMAT(OP_SPECIAL, rs, 0, 0, 0, SPECIAL_JR), \
"jr %s", gpn(rs)); } while (0)
#define LB(rt, offset, base) \
LDST(OP_LB, rt, offset, base)
#define LH(rt, offset, base) \
LDST(OP_LH, rt, offset, base)
#define LUI(rt, uimm) \
do { count_alu(); EMIT(U_FORMAT(OP_LUI, 0, rt, uimm), \
"lui %s, 0x%x", gpn(rt), ((uimm)&0xffff)); } while (0)
#define LW(rt, offset, base) \
LDST(OP_LW, rt, offset, base)
#define MFHI(rd) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, 0, 0, rd, 0, SPECIAL_MFHI), \
"mfhi %s", gpn(rd)); } while (0)
#define MFLO(rd) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, 0, 0, rd, 0, SPECIAL_MFLO), \
"mflo %s", gpn(rd)); } while (0)
#define MUL(rd, rs, rt) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL2, rs, rt, rd, 0, SPECIAL2_MUL), \
"mul %s, %s, %s", gpn(rd), gpn(rs), gpn(rt)); } while (0)
#define MULT(rs, rt) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, rs, rt, 0, 0, SPECIAL_MULT), \
"mult %s, %s", gpn(rs), gpn(rt)); } while (0)
#define MOVE(rd, rs) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, rs, ZERO, rd, 0, SPECIAL_ADDU), \
"move %s, %s", gpn(rd), gpn(rs)); } while (0)
#define MOVN(rd, rs, rt) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, rs, rt, rd, 0, SPECIAL_MOVN), \
"movn %s, %s, %s", gpn(rd), gpn(rs), gpn(rt)); } while (0)
#define NEGU(rd, rt) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, ZERO, rt, rd, 0, SPECIAL_SUBU), \
"negu %s, %s", gpn(rd), gpn(rt)); } while (0)
#define NOP() \
do { count_misc(); EMIT(R_FORMAT(OP_SPECIAL, 0, 0, 0, 0, SPECIAL_SLL), \
"nop"); } while (0)
#define trampNOP() \
do { count_misc(); TRAMP(R_FORMAT(OP_SPECIAL, 0, 0, 0, 0, SPECIAL_SLL), \
"nop"); } while (0)
#define NOR(rd, rs, rt) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, rs, rt, rd, 0, SPECIAL_NOR), \
"nor %s, %s, %s", gpn(rd), gpn(rs), gpn(rt)); } while (0)
#define NOT(rd, rs) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, rs, ZERO, rd, 0, SPECIAL_NOR), \
"not %s, %s", gpn(rd), gpn(rs)); } while (0)
#define OR(rd, rs, rt) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, rs, rt, rd, 0, SPECIAL_OR), \
"or %s, %s, %s", gpn(rd), gpn(rs), gpn(rt)); } while (0)
#define ORI(rt, rs, uimm) \
do { count_alu(); EMIT(U_FORMAT(OP_ORI, rs, rt, uimm), \
"ori %s, %s, 0x%x", gpn(rt), gpn(rs), ((uimm)&0xffff)); } while (0)
#define SLTIU(rt, rs, simm) \
do { count_alu(); EMIT(I_FORMAT(OP_SLTIU, rs, rt, simm), \
"sltiu %s, %s, %d", gpn(rt), gpn(rs), simm); } while (0)
#define SLT(rd, rs, rt) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, rs, rt, rd, 0, SPECIAL_SLT), \
"slt %s, %s, %s", gpn(rd), gpn(rs), gpn(rt)); } while (0)
#define SLTU(rd, rs, rt) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, rs, rt, rd, 0, SPECIAL_SLTU), \
"sltu %s, %s, %s", gpn(rd), gpn(rs), gpn(rt)); } while (0)
#define SLL(rd, rt, sa) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, 0, rt, rd, sa, SPECIAL_SLL), \
"sll %s, %s, %d", gpn(rd), gpn(rt), sa); } while (0)
#define SLLV(rd, rt, rs) \
do { count_misc(); EMIT(R_FORMAT(OP_SPECIAL, rs, rt, rd, 0, SPECIAL_SLLV), \
"sllv %s, %s, %s", gpn(rd), gpn(rt), gpn(rs)); } while (0)
#define SRA(rd, rt, sa) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, 0, rt, rd, sa, SPECIAL_SRA), \
"sra %s, %s, %d", gpn(rd), gpn(rt), sa); } while (0)
#define SRAV(rd, rt, rs) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, rs, rt, rd, 0, SPECIAL_SRAV), \
"srav %s, %s, %s", gpn(rd), gpn(rt), gpn(rs)); } while (0)
#define SRL(rd, rt, sa) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, 0, rt, rd, sa, SPECIAL_SRL), \
"srl %s, %s, %d", gpn(rd), gpn(rt), sa); } while (0)
#define SRLV(rd, rt, rs) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, rs, rt, rd, 0, SPECIAL_SRLV), \
"srlv %s, %s, %s", gpn(rd), gpn(rt), gpn(rs)); } while (0)
#define SUBU(rd, rs, rt) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, rs, rt, rd, 0, SPECIAL_SUBU), \
"subu %s, %s, %s", gpn(rd), gpn(rs), gpn(rt)); } while (0)
#define SW(rt, offset, base) \
LDST(OP_SW, rt, offset, base)
#define XOR(rd, rs, rt) \
do { count_alu(); EMIT(R_FORMAT(OP_SPECIAL, rs, rt, rd, 0, SPECIAL_XOR), \
"xor %s, %s, %s", gpn(rd), gpn(rs), gpn(rt)); } while (0)
#define XORI(rt, rs, uimm) \
do { count_alu(); EMIT(U_FORMAT(OP_XORI, rs, rt, uimm), \
"xori %s, %s, 0x%x", gpn(rt), gpn(rs), ((uimm)&0xffff)); } while (0)
/* FPU instructions */
#ifdef NJ_SOFTFLOAT
#if !defined(__mips_soft_float) || __mips_soft_float != 1
#error NJ_SOFTFLOAT defined but not compiled with -msoft-float
#endif
#define LWC1(ft, offset, base) NanoAssertMsg(0, "softfloat LWC1")
#define SWC1(ft, offset, base) NanoAssertMsg(0, "softfloat SWC1")
#define LDC1(ft, offset, base) NanoAssertMsg(0, "softfloat LDC1")
#define SDC1(ft, offset, base) NanoAssertMsg(0, "softfloat SDC1")
#define LDXC1(fd, index, base) NanoAssertMsg(0, "softfloat LDXC1")
#define SDXC1(fs, index, base) NanoAssertMsg(0, "softfloat SDXC1")
#define MFC1(rt, fs) NanoAssertMsg(0, "softfloat MFC1")
#define MTC1(rt, fs) NanoAssertMsg(0, "softfloat MTC1")
#define MOVF(rt, fs, cc) NanoAssertMsg(0, "softfloat MOVF")
#define CVT_D_W(fd, fs) NanoAssertMsg(0, "softfloat CVT_D_W")
#define C_EQ_D(fs, ft) NanoAssertMsg(0, "softfloat C_EQ_D")
#define C_LE_D(fs, ft) NanoAssertMsg(0, "softfloat C_LE_D")
#define C_LT_D(fs, ft) NanoAssertMsg(0, "softfloat C_LT_D")
#define ADD_D(fd, fs, ft) NanoAssertMsg(0, "softfloat ADD_D")
#define DIV_D(fd, fs, ft) NanoAssertMsg(0, "softfloat DIV_D")
#define MOV_D(fd, fs) NanoAssertMsg(0, "softfloat MOV_D")
#define MUL_D(fd, fs, ft) NanoAssertMsg(0, "softfloat MUL_D")
#define NEG_D(fd, fs) NanoAssertMsg(0, "softfloat NEG_D")
#define SUB_D(fd, fs, ft) NanoAssertMsg(0, "softfloat SUB_D")
#define TRUNC_W_D(fd,fs) NanoAssertMsg(0, "softfloat TRUNC_W_D")
#else
#if defined(__mips_soft_float) && __mips_soft_float != 0
#error compiled with -msoft-float but NJ_SOFTFLOAT not defined
#endif
#define FOP_FMT2(ffmt, fd, fs, func, name) \
do { count_fpu(); EMIT(F_FORMAT(OP_COP1, ffmt, 0, fs, fd, func), \
"%s.%s %s, %s", name, fname[ffmt], fpn(fd), fpn(fs)); } while (0)
#define FOP_FMT3(ffmt, fd, fs, ft, func, name) \
do { count_fpu(); EMIT(F_FORMAT(OP_COP1, ffmt, ft, fs, fd, func), \
"%s.%s %s, %s, %s", name, fname[ffmt], fpn(fd), fpn(fs), fpn(ft)); } while (0)
#define C_COND_FMT(cond, ffmt, fs, ft) \
do { count_fpu(); EMIT(F_FORMAT(OP_COP1, ffmt, ft, fs, 0, 0x30|(cond)), \
"c.%s.%s %s, %s", cname[cond], fname[ffmt], fpn(fs), fpn(ft)); } while (0)
#define MFC1(rt, fs) \
do { count_fpu(); EMIT(F_FORMAT(OP_COP1, 0, rt, fs, 0, 0), \
"mfc1 %s, %s", gpn(rt), fpn(fs)); } while (0)
#define MTC1(rt, fs) \
do { count_fpu(); EMIT(F_FORMAT(OP_COP1, 4, rt, fs, 0, 0), \
"mtc1 %s, %s", gpn(rt), fpn(fs)); } while (0)
#define MOVF(rd, rs, cc) \
do { count_fpu(); EMIT(R_FORMAT(OP_SPECIAL, rs, (cc)<<2, rd, 0, SPECIAL_MOVCI), \
"movf %s, %s, $fcc%d", gpn(rd), gpn(rs), cc); } while (0)
#define CVT_D_W(fd, fs) \
do { count_fpu(); EMIT(F_FORMAT(OP_COP1, FMT_W, 0, fs, fd, COP1_CVTD), \
"cvt.d.w %s, %s", fpn(fd), fpn(fs)); } while (0)
#define TRUNC_W_D(fd, fs) \
do { count_fpu(); EMIT(F_FORMAT(OP_COP1, FMT_D, 0, fs, fd, COP1_TRUNCW), \
"trunc.w.d %s, %s", fpn(fd), fpn(fs)); } while (0)
#define LWC1(ft, offset, base) LDST(OP_LWC1, ft, offset, base)
#define SWC1(ft, offset, base) LDST(OP_SWC1, ft, offset, base)
#define LDC1(ft, offset, base) LDST(OP_LDC1, ft, offset, base)
#define SDC1(ft, offset, base) LDST(OP_SDC1, ft, offset, base)
#define LDXC1(fd, index, base) \
do { count_fpu(); EMIT(R_FORMAT(OP_COP1X, base, index, 0, fd, COP1X_LDXC1), \
"ldxc1 %s, %s(%s)", fpn(fd), gpn(index), gpn(base)); } while (0)
#define SDXC1(fs, index, base) \
do { count_fpu(); EMIT(R_FORMAT(OP_COP1X, base, index, fs, 0, COP1X_SDXC1), \
"sdxc1 %s, %s(%s)", fpn(fs), gpn(index), gpn(base)); } while (0)
#define C_EQ_D(fs, ft) C_COND_FMT(COND_EQ, FMT_D, fs, ft)
#define C_LE_D(fs, ft) C_COND_FMT(COND_LE, FMT_D, fs, ft)
#define C_LT_D(fs, ft) C_COND_FMT(COND_LT, FMT_D, fs, ft)
#define ADD_D(fd, fs, ft) FOP_FMT3(FMT_D, fd, fs, ft, COP1_ADD, "add")
#define DIV_D(fd, fs, ft) FOP_FMT3(FMT_D, fd, fs, ft, COP1_DIV, "div")
#define MOV_D(fd, fs) FOP_FMT2(FMT_D, fd, fs, COP1_MOV, "mov")
#define MUL_D(fd, fs, ft) FOP_FMT3(FMT_D, fd, fs, ft, COP1_MUL, "mul")
#define NEG_D(fd, fs) FOP_FMT2(FMT_D, fd, fs, COP1_NEG, "neg")
#define SUB_D(fd, fs, ft) FOP_FMT3(FMT_D, fd, fs, ft, COP1_SUB, "sub")
#endif
}
#endif // __nanojit_NativeMIPS__

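The *_FORMAT macros above pack the classic fixed MIPS fields: opcode in bits 31..26, then rs, rt, rd, the shift amount, and the function code. A worked encoding under those definitions (standalone sketch, not code from the tree): ADDU is R-format with op = SPECIAL (0) and func = 0x21, so addu v0, a0, a1 (rd=2, rs=4, rt=5) assembles to 0x00851021.

    #include <cassert>
    #include <stdint.h>

    static uint32_t r_format(uint32_t op, uint32_t rs, uint32_t rt,
                             uint32_t rd, uint32_t re, uint32_t func) {
        return (op << 26) | (rs << 21) | (rt << 16) | (rd << 11) | (re << 6) | func;
    }

    int main() {
        // addu v0, a0, a1 -> SPECIAL rs=a0(4) rt=a1(5) rd=v0(2) sa=0 func=0x21
        assert(r_format(0x00, 4, 5, 2, 0, 0x21) == 0x00851021);
        return 0;
    }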
View file

@@ -223,6 +223,16 @@ namespace nanojit
         emit(op | uint64_t(uint32_t(offset))<<32);
     }
 
+    void Assembler::emit_target64(size_t underrun, uint64_t op, NIns* target) {
+        NanoAssert(underrun >= 16);
+        underrunProtect(underrun); // must do this before calculating offset
+        // Nb: at this point in time, _nIns points to the most recently
+        // written instruction, ie. the jump's successor.
+        ((uint64_t*)_nIns)[-1] = (uint64_t) target;
+        _nIns -= 8;
+        emit(op);
+    }
+
     // 3-register modrm32+sib form
     void Assembler::emitrxb(uint64_t op, Register r, Register x, Register b) {
         emit(rexrxb(mod_rxb(op, r, x, b), r, x, b));
@@ -514,6 +524,7 @@ namespace nanojit
     void Assembler::JMP8( S n, NIns* t)  { emit_target8(n, X64_jmp8,t); asm_output("jmp %p", t); }
     void Assembler::JMP32(S n, NIns* t)  { emit_target32(n,X64_jmp, t); asm_output("jmp %p", t); }
+    void Assembler::JMP64(S n, NIns* t)  { emit_target64(n,X64_jmpi, t); asm_output("jmp %p", t); }
     void Assembler::JMPX(R indexreg, NIns** table)  { emitrxb_imm(X64_jmpx, (R)0, indexreg, (Register)5, (int32_t)(uintptr_t)table); asm_output("jmpq [%s*8 + %p]", RQ(indexreg), (void*)table); }
@@ -605,7 +616,7 @@ namespace nanojit
                 JMP32(8, target);
             }
         } else {
-            TODO(jmp64);
+            JMP64(16, target);
         }
     }
@@ -1077,6 +1088,10 @@ namespace nanojit
     }
 
     NIns* Assembler::asm_branch(bool onFalse, LIns *cond, NIns *target) {
+        if (target && !isTargetWithinS32(target)) {
+            setError(ConditionalBranchTooFar);
+            NanoAssert(0);
+        }
         NanoAssert(cond->isCond());
         LOpcode condop = cond->opcode();
         if (condop >= LIR_feq && condop <= LIR_fge)
@@ -1714,6 +1729,11 @@ namespace nanojit
         } else if (patch[0] == 0x0F && (patch[1] & 0xF0) == 0x80) {
             // jcc disp32
             next = patch+6;
+        } else if ((patch[0] == 0xFF) && (patch[1] == 0x25)) {
+            // jmp 64bit target
+            next = patch+6;
+            ((int64_t*)next)[0] = int64_t(target);
+            return;
         } else {
             next = 0;
             TODO(unknown_patch);

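X64_jmpi (defined in NativeX64.h below) encodes the bytes FF 25 00 00 00 00, i.e. jmp qword ptr [rip+0]: an indirect jump through the 8 bytes immediately following the instruction. emit_target64 therefore plants the 64-bit target first and the 6-byte opcode in front of it, so patch() only has to rewrite that trailing 8-byte slot — the FF 25 case above. Conditional branches have no such slot, which is why asm_branch refuses targets outside rel32 (±2 GB) reach with ConditionalBranchTooFar. A forward-emitting layout sketch (hypothetical helper, not the tree's backward-writing emitter):

    #include <stdint.h>
    #include <string.h>

    // Build the 14-byte far-jump sequence: jmp [rip+0], then the target.
    static void emit_jmp64(uint8_t *code, uint64_t target) {
        static const uint8_t jmp_rip0[6] = { 0xFF, 0x25, 0x00, 0x00, 0x00, 0x00 };
        memcpy(code, jmp_rip0, sizeof jmp_rip0);   // jmp qword ptr [rip+0]
        memcpy(code + 6, &target, sizeof target);  // 8-byte absolute target
    }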
View file

@@ -205,6 +205,7 @@ namespace nanojit
         X64_imul  = 0xC0AF0F4000000004LL, // 32bit signed mul r *= b
         X64_imuli = 0xC069400000000003LL, // 32bit signed mul r = b * imm32
         X64_imul8 = 0x00C06B4000000004LL, // 32bit signed mul r = b * imm8
+        X64_jmpi  = 0x0000000025FF0006LL, // jump *0(rip)
         X64_jmp   = 0x00000000E9000005LL, // jump near rel32
         X64_jmp8  = 0x00EB000000000002LL, // jump near rel8
         X64_jo    = 0x00000000800F0006LL, // jump near if overflow
@@ -367,6 +368,7 @@ namespace nanojit
         void emit8(uint64_t op, int64_t val);\
         void emit_target8(size_t underrun, uint64_t op, NIns* target);\
         void emit_target32(size_t underrun, uint64_t op, NIns* target);\
+        void emit_target64(size_t underrun, uint64_t op, NIns* target); \
         void emitrr(uint64_t op, Register r, Register b);\
         void emitrxb(uint64_t op, Register r, Register x, Register b);\
         void emitxb(uint64_t op, Register x, Register b) { emitrxb(op, (Register)0, x, b); }\
@@ -528,6 +530,7 @@ namespace nanojit
         void MOVSSRM(Register r, int d, Register b);\
         void JMP8(size_t n, NIns* t);\
         void JMP32(size_t n, NIns* t);\
+        void JMP64(size_t n, NIns* t);\
         void JMPX(Register indexreg, NIns** table);\
         void JMPXB(Register indexreg, Register tablereg);\
         void JO(size_t n, NIns* t);\

View file

@@ -226,22 +226,6 @@ namespace avmplus {
         unsigned int arm_arch;
     # endif
 
-        // Support for Thumb, even if it isn't used by nanojit. This is used to
-        // determine whether or not to generate interworking branches.
-    # if defined (NJ_FORCE_NO_ARM_THUMB)
-        static const bool arm_thumb = false;
-    # else
-        bool arm_thumb;
-    # endif
-
-        // Support for Thumb2, even if it isn't used by nanojit. This is used to
-        // determine whether or not to use some of the ARMv6T2 instructions.
-    # if defined (NJ_FORCE_NO_ARM_THUMB2)
-        static const bool arm_thumb2 = false;
-    # else
-        bool arm_thumb2;
-    # endif
-
 #endif
 
 #if defined (NJ_FORCE_SOFTFLOAT)

View file

@@ -61,6 +61,10 @@ ifeq (sparc,$(TARGET_CPU))
 nanojit_cpu_cxxsrc := NativeSparc.cpp
 endif
 
+ifeq (mips,$(TARGET_CPU))
+nanojit_cpu_cxxsrc := NativeMIPS.cpp
+endif
+
 avmplus_CXXSRCS := $(avmplus_CXXSRCS) \
     $(curdir)/Allocator.cpp \
     $(curdir)/Assembler.cpp \

View file

@@ -54,6 +54,8 @@
     #define NANOJIT_SPARC
 #elif defined AVMPLUS_AMD64
     #define NANOJIT_X64
+#elif defined AVMPLUS_MIPS
+    #define NANOJIT_MIPS
 #else
     #error "unknown nanojit architecture"
 #endif

View file

@@ -68,6 +68,7 @@ TEST_FILES = \
     js1_7/ \
     js1_8/ \
     js1_8_1/ \
+    js1_8_5/ \
     $(NULL)
 
 PKG_STAGE = $(DIST)/test-package-stage

View file

@@ -239,3 +239,4 @@ script regress-96284-002.js
 script scope-001.js
 script toLocaleFormat-01.js
 fails-if(xulRuntime.OS=="WINNT") script toLocaleFormat-02.js
+script regress-543839.js

Просмотреть файл

@@ -0,0 +1,33 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/*
* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/licenses/publicdomain/
* Contributor: Igor Bukanov
*/
var gTestfile = 'regress-543839.js';
//-----------------------------------------------------------------------------
var BUGNUMBER = 543839;
var summary = 'js_GetMutableScope caller must lock the object';
var actual;
var expect = 1;
printBugNumber(BUGNUMBER);
printStatus (summary);
jit(true);
function test()
{
jit(true);
for (var i = 0; i != 100; ++i)
var tmp = { a: 1 };
return 1;
}
test();
test();
test();
actual = evalcx("test()", this);
reportCompare(expect, actual, summary);

View file

@@ -81,7 +81,7 @@ function test()
         check(function() thrown, todo);
     }
 
-    var buf;
+    var buf, buf2;
 
     buf = new ArrayBuffer(100);
     check(function() buf);
@@ -132,6 +132,9 @@ function test()
     check(function() (new Int32Array(buf)).length == 1);
     check(function() (new Uint32Array(buf)).length == 1);
     check(function() (new Float32Array(buf)).length == 1);
+    checkThrows(function() (new Float64Array(buf)));
+    buf2 = new ArrayBuffer(8);
+    check(function() (new Float64Array(buf2)).length == 1);
 
     buf = new ArrayBuffer(5);
     check(function() buf);
@@ -178,9 +181,22 @@ function test()
     checkThrows(function() a[-10] = 0);
     check(function() (a[0] = "10") && (a[0] == 10));
 
+    // check Uint8ClampedArray, which is an extension to this extension
+    a = new Uint8ClampedArray(4);
+    a[0] = 128;
+    a[1] = 512;
+    a[2] = -123.723;
+    a[3] = "foopy";
+
+    check(function() a[0] == 128);
+    check(function() a[1] == 255);
+    check(function() a[2] == 0);
+    check(function() a[3] == 0);
+
     print ("done");
 
-    reportCompare(0, TestFailCount, "typed array test failures");
+    reportCompare(0, TestFailCount, "typed array tests");
 
     exitFunc ('test');
 }

View file

@@ -0,0 +1,2 @@
include regress/jstests.list
include extensions/jstests.list

View file

@@ -1,3 +1,3 @@
 url-prefix ../../jsreftest.html?test=js1_8_5/regress/
-script regress-533876.js
+fails script regress-533876.js
 script regress-541455.js

View file

@@ -13,3 +13,4 @@ include js1_6/jstests.list
 include js1_7/jstests.list
 include js1_8/jstests.list
 include js1_8_1/jstests.list
+include js1_8_5/jstests.list