Backed out 2 changesets (bug 1416723) for failures in dom/serviceworkers/test/test_serviceworker_interfaces.html on a CLOSED TREE

Backed out changeset b2242216d11b (bug 1416723)
Backed out changeset bfaf82051dfd (bug 1416723)
This commit is contained in:
shindli 2018-07-26 14:23:43 +03:00
Родитель f36ee2923a
Коммит 9319e91d10
189 изменённых файлов: 29973 добавлений и 1591 удалений

Просмотреть файл

@ -48,6 +48,12 @@
#define IF_BDATA(real,imaginary) imaginary
#endif
#ifdef ENABLE_SIMD
# define IF_SIMD(real,imaginary) real
#else
# define IF_SIMD(real,imaginary) imaginary
#endif
#ifdef ENABLE_SHARED_ARRAY_BUFFER
#define IF_SAB(real,imaginary) real
#else
@ -100,6 +106,7 @@ IF_SAB(real,imaginary)(SharedArrayBuffer, InitViaClassSpec, OCLASP(SharedA
IF_INTL(real,imaginary) (Intl, InitIntlClass, CLASP(Intl)) \
IF_BDATA(real,imaginary)(TypedObject, InitTypedObjectModuleObject, OCLASP(TypedObjectModule)) \
real(Reflect, InitReflect, nullptr) \
IF_SIMD(real,imaginary)(SIMD, InitSimdClass, OCLASP(Simd)) \
real(WeakSet, InitWeakSetClass, OCLASP(WeakSet)) \
real(TypedArray, InitViaClassSpec, &js::TypedArrayObject::sharedTypedArrayPrototypeClass) \
IF_SAB(real,imaginary)(Atomics, InitAtomicsClass, OCLASP(Atomics)) \

1644
js/src/builtin/SIMD.cpp Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

298
js/src/builtin/SIMD.h Normal file
Просмотреть файл

@ -0,0 +1,298 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef builtin_SIMD_h
#define builtin_SIMD_h
#include "jsapi.h"
#include "NamespaceImports.h"
#include "builtin/SIMDConstants.h"
#include "jit/IonTypes.h"
#include "js/Conversions.h"
/*
* JS SIMD functions.
* Spec matching polyfill:
* https://github.com/tc39/ecmascript_simd/blob/master/src/ecmascript_simd.js
*/
namespace js {
class GlobalObject;
// These classes implement the concept containing the following constraints:
// - requires typename Elem: this is the scalar lane type, stored in each lane
// of the SIMD vector.
// - requires static const unsigned lanes: this is the number of lanes (length)
// of the SIMD vector.
// - requires static const SimdType type: this is the SimdType enum value
// corresponding to the SIMD type.
// - requires static bool Cast(JSContext*, JS::HandleValue, Elem*): casts a
// given Value to the current scalar lane type and saves it in the Elem
// out-param.
// - requires static Value ToValue(Elem): returns a Value of the right type
// containing the given value.
//
// This concept is used in the templates above to define the functions
// associated to a given type and in their implementations, to avoid code
// redundancy.
// Lane descriptor for SIMD.Float32x4: four 32-bit IEEE-754 float lanes.
struct Float32x4 {
    typedef float Elem;
    static const unsigned lanes = 4;
    static const SimdType type = SimdType::Float32x4;
    // Coerce |v| to a number, then narrow the result to float precision.
    static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
        double asDouble;
        if (!ToNumber(cx, v, &asDouble))
            return false;
        *out = static_cast<float>(asDouble);
        return true;
    }
    // Box a lane as a JS number, canonicalizing any NaN payload.
    static Value ToValue(Elem value) {
        return DoubleValue(JS::CanonicalizeNaN(value));
    }
};
// Lane descriptor for SIMD.Float64x2: two 64-bit IEEE-754 double lanes.
struct Float64x2 {
typedef double Elem;
static const unsigned lanes = 2;
static const SimdType type = SimdType::Float64x2;
// Coerce |v| to a double lane value via ToNumber.
static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
return ToNumber(cx, v, out);
}
// Box a lane as a JS number, canonicalizing any NaN payload.
static Value ToValue(Elem value) {
return DoubleValue(JS::CanonicalizeNaN(value));
}
};
// Lane descriptor for SIMD.Int8x16: sixteen signed 8-bit integer lanes.
struct Int8x16 {
typedef int8_t Elem;
static const unsigned lanes = 16;
static const SimdType type = SimdType::Int8x16;
// Coerce |v| to an int8 lane value using the ToInt8 conversion.
static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
return ToInt8(cx, v, out);
}
static Value ToValue(Elem value) {
return NumberValue(value);
}
};
// Lane descriptor for SIMD.Int16x8: eight signed 16-bit integer lanes.
struct Int16x8 {
typedef int16_t Elem;
static const unsigned lanes = 8;
static const SimdType type = SimdType::Int16x8;
// Coerce |v| to an int16 lane value using the ToInt16 conversion.
static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
return ToInt16(cx, v, out);
}
static Value ToValue(Elem value) {
return NumberValue(value);
}
};
// Lane descriptor for SIMD.Int32x4: four signed 32-bit integer lanes.
struct Int32x4 {
typedef int32_t Elem;
static const unsigned lanes = 4;
static const SimdType type = SimdType::Int32x4;
// Coerce |v| to an int32 lane value using the ToInt32 conversion.
static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
return ToInt32(cx, v, out);
}
static Value ToValue(Elem value) {
return NumberValue(value);
}
};
// Lane descriptor for SIMD.Uint8x16: sixteen unsigned 8-bit integer lanes.
struct Uint8x16 {
typedef uint8_t Elem;
static const unsigned lanes = 16;
static const SimdType type = SimdType::Uint8x16;
// Coerce |v| to a uint8 lane value using the ToUint8 conversion.
static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
return ToUint8(cx, v, out);
}
static Value ToValue(Elem value) {
return NumberValue(value);
}
};
// Lane descriptor for SIMD.Uint16x8: eight unsigned 16-bit integer lanes.
struct Uint16x8 {
typedef uint16_t Elem;
static const unsigned lanes = 8;
static const SimdType type = SimdType::Uint16x8;
// Coerce |v| to a uint16 lane value using the ToUint16 conversion.
static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
return ToUint16(cx, v, out);
}
static Value ToValue(Elem value) {
return NumberValue(value);
}
};
// Lane descriptor for SIMD.Uint32x4: four unsigned 32-bit integer lanes.
struct Uint32x4 {
typedef uint32_t Elem;
static const unsigned lanes = 4;
static const SimdType type = SimdType::Uint32x4;
// Coerce |v| to a uint32 lane value using the ToUint32 conversion.
static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
return ToUint32(cx, v, out);
}
static Value ToValue(Elem value) {
return NumberValue(value);
}
};
// Lane descriptor for SIMD.Bool8x16: sixteen boolean lanes stored as int8.
struct Bool8x16 {
typedef int8_t Elem;
static const unsigned lanes = 16;
static const SimdType type = SimdType::Bool8x16;
// ToBoolean(|v|): true maps to -1 (all bits set), false to 0.
static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
*out = ToBoolean(v) ? -1 : 0;
return true;
}
static Value ToValue(Elem value) {
return BooleanValue(value);
}
};
// Lane descriptor for SIMD.Bool16x8: eight boolean lanes stored as int16.
struct Bool16x8 {
typedef int16_t Elem;
static const unsigned lanes = 8;
static const SimdType type = SimdType::Bool16x8;
// ToBoolean(|v|): true maps to -1 (all bits set), false to 0.
static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
*out = ToBoolean(v) ? -1 : 0;
return true;
}
static Value ToValue(Elem value) {
return BooleanValue(value);
}
};
// Lane descriptor for SIMD.Bool32x4: four boolean lanes stored as int32.
struct Bool32x4 {
typedef int32_t Elem;
static const unsigned lanes = 4;
static const SimdType type = SimdType::Bool32x4;
// ToBoolean(|v|): true maps to -1 (all bits set), false to 0.
static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
*out = ToBoolean(v) ? -1 : 0;
return true;
}
static Value ToValue(Elem value) {
return BooleanValue(value);
}
};
// Lane descriptor for SIMD.Bool64x2: two boolean lanes stored as int64.
struct Bool64x2 {
typedef int64_t Elem;
static const unsigned lanes = 2;
static const SimdType type = SimdType::Bool64x2;
// ToBoolean(|v|): true maps to -1 (all bits set), false to 0.
static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
*out = ToBoolean(v) ? -1 : 0;
return true;
}
static Value ToValue(Elem value) {
return BooleanValue(value);
}
};
// Get the well known name of the SIMD.* object corresponding to type.
PropertyName* SimdTypeToName(const JSAtomState& atoms, SimdType type);
// Check if name is the well known name of a SIMD type.
// Returns true and sets *type iff name is known.
bool IsSimdTypeName(const JSAtomState& atoms, const PropertyName* name, SimdType* type);
// Human-readable name of a SimdType, for diagnostics.
const char* SimdTypeToString(SimdType type);
// Create a SIMD object of lane-descriptor type V from |data|, which must
// point at V::lanes elements of V::Elem.
template<typename V>
JSObject* CreateSimd(JSContext* cx, const typename V::Elem* data);
// True iff |v| is a SIMD object whose type matches descriptor V.
template<typename V>
bool IsVectorObject(HandleValue v);
// Convert |v| to a jit::SimdConstant of descriptor V's type.
template<typename V>
MOZ_MUST_USE bool ToSimdConstant(JSContext* cx, HandleValue v, jit::SimdConstant* out);
// Create and initialize the global SIMD namespace object.
JSObject*
InitSimdClass(JSContext* cx, Handle<GlobalObject*> global);
namespace jit {
// JIT metadata for the extractLane self-hosted intrinsics.
extern const JSJitInfo JitInfo_SimdInt32x4_extractLane;
extern const JSJitInfo JitInfo_SimdFloat32x4_extractLane;
} // namespace jit
// Declare the native implementations of every SIMD operation. For each SIMD
// type T and each entry Name of T's *_FUNCTION_LIST (see SIMDConstants.h),
// this declares: MOZ_MUST_USE bool simd_<t>_<Name>(JSContext*, unsigned, Value*).
// The definitions live in SIMD.cpp.
#define DECLARE_SIMD_FLOAT32X4_FUNCTION(Name, Func, Operands) \
extern MOZ_MUST_USE bool \
simd_float32x4_##Name(JSContext* cx, unsigned argc, Value* vp);
FLOAT32X4_FUNCTION_LIST(DECLARE_SIMD_FLOAT32X4_FUNCTION)
#undef DECLARE_SIMD_FLOAT32X4_FUNCTION
#define DECLARE_SIMD_FLOAT64X2_FUNCTION(Name, Func, Operands) \
extern MOZ_MUST_USE bool \
simd_float64x2_##Name(JSContext* cx, unsigned argc, Value* vp);
FLOAT64X2_FUNCTION_LIST(DECLARE_SIMD_FLOAT64X2_FUNCTION)
#undef DECLARE_SIMD_FLOAT64X2_FUNCTION
#define DECLARE_SIMD_INT8X16_FUNCTION(Name, Func, Operands) \
extern MOZ_MUST_USE bool \
simd_int8x16_##Name(JSContext* cx, unsigned argc, Value* vp);
INT8X16_FUNCTION_LIST(DECLARE_SIMD_INT8X16_FUNCTION)
#undef DECLARE_SIMD_INT8X16_FUNCTION
#define DECLARE_SIMD_INT16X8_FUNCTION(Name, Func, Operands) \
extern MOZ_MUST_USE bool \
simd_int16x8_##Name(JSContext* cx, unsigned argc, Value* vp);
INT16X8_FUNCTION_LIST(DECLARE_SIMD_INT16X8_FUNCTION)
#undef DECLARE_SIMD_INT16X8_FUNCTION
#define DECLARE_SIMD_INT32X4_FUNCTION(Name, Func, Operands) \
extern MOZ_MUST_USE bool \
simd_int32x4_##Name(JSContext* cx, unsigned argc, Value* vp);
INT32X4_FUNCTION_LIST(DECLARE_SIMD_INT32X4_FUNCTION)
#undef DECLARE_SIMD_INT32X4_FUNCTION
#define DECLARE_SIMD_UINT8X16_FUNCTION(Name, Func, Operands) \
extern MOZ_MUST_USE bool \
simd_uint8x16_##Name(JSContext* cx, unsigned argc, Value* vp);
UINT8X16_FUNCTION_LIST(DECLARE_SIMD_UINT8X16_FUNCTION)
#undef DECLARE_SIMD_UINT8X16_FUNCTION
#define DECLARE_SIMD_UINT16X8_FUNCTION(Name, Func, Operands) \
extern MOZ_MUST_USE bool \
simd_uint16x8_##Name(JSContext* cx, unsigned argc, Value* vp);
UINT16X8_FUNCTION_LIST(DECLARE_SIMD_UINT16X8_FUNCTION)
#undef DECLARE_SIMD_UINT16X8_FUNCTION
#define DECLARE_SIMD_UINT32X4_FUNCTION(Name, Func, Operands) \
extern MOZ_MUST_USE bool \
simd_uint32x4_##Name(JSContext* cx, unsigned argc, Value* vp);
UINT32X4_FUNCTION_LIST(DECLARE_SIMD_UINT32X4_FUNCTION)
#undef DECLARE_SIMD_UINT32X4_FUNCTION
#define DECLARE_SIMD_BOOL8X16_FUNCTION(Name, Func, Operands) \
extern MOZ_MUST_USE bool \
simd_bool8x16_##Name(JSContext* cx, unsigned argc, Value* vp);
BOOL8X16_FUNCTION_LIST(DECLARE_SIMD_BOOL8X16_FUNCTION)
#undef DECLARE_SIMD_BOOL8X16_FUNCTION
#define DECLARE_SIMD_BOOL16X8_FUNCTION(Name, Func, Operands) \
extern MOZ_MUST_USE bool \
simd_bool16x8_##Name(JSContext* cx, unsigned argc, Value* vp);
BOOL16X8_FUNCTION_LIST(DECLARE_SIMD_BOOL16X8_FUNCTION)
#undef DECLARE_SIMD_BOOL16X8_FUNCTION
#define DECLARE_SIMD_BOOL32X4_FUNCTION(Name, Func, Operands) \
extern MOZ_MUST_USE bool \
simd_bool32x4_##Name(JSContext* cx, unsigned argc, Value* vp);
BOOL32X4_FUNCTION_LIST(DECLARE_SIMD_BOOL32X4_FUNCTION)
#undef DECLARE_SIMD_BOOL32X4_FUNCTION
#define DECLARE_SIMD_BOOL64X2_FUNCTION(Name, Func, Operands) \
extern MOZ_MUST_USE bool \
simd_bool64x2_##Name(JSContext* cx, unsigned argc, Value* vp);
BOOL64X2_FUNCTION_LIST(DECLARE_SIMD_BOOL64X2_FUNCTION)
#undef DECLARE_SIMD_BOOL64X2_FUNCTION
} /* namespace js */
#endif /* builtin_SIMD_h */

Просмотреть файл

@ -0,0 +1,941 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef builtin_SIMDConstants_h
#define builtin_SIMDConstants_h
#include "mozilla/Assertions.h"
#include "builtin/TypedObjectConstants.h"
// Bool8x16.
// Unary operations (JS nargs == 1).
#define BOOL8X16_UNARY_FUNCTION_LIST(V) \
V(not, (UnaryFunc<Bool8x16, LogicalNot, Bool8x16>), 1) \
V(check, (UnaryFunc<Bool8x16, Identity, Bool8x16>), 1) \
V(splat, (FuncSplat<Bool8x16>), 1) \
V(allTrue, (AllTrue<Bool8x16>), 1) \
V(anyTrue, (AnyTrue<Bool8x16>), 1)
// Binary operations (JS nargs == 2). The last entry must NOT end with a
// line-continuation backslash, or the next #define would be spliced into
// this macro's replacement list.
#define BOOL8X16_BINARY_FUNCTION_LIST(V) \
V(extractLane, (ExtractLane<Bool8x16>), 2) \
V(and, (BinaryFunc<Bool8x16, And, Bool8x16>), 2) \
V(or, (BinaryFunc<Bool8x16, Or, Bool8x16>), 2) \
V(xor, (BinaryFunc<Bool8x16, Xor, Bool8x16>), 2)
// Ternary operations (JS nargs == 3).
#define BOOL8X16_TERNARY_FUNCTION_LIST(V) \
V(replaceLane, (ReplaceLane<Bool8x16>), 3)
// All Bool8x16 operations.
#define BOOL8X16_FUNCTION_LIST(V) \
BOOL8X16_UNARY_FUNCTION_LIST(V) \
BOOL8X16_BINARY_FUNCTION_LIST(V) \
BOOL8X16_TERNARY_FUNCTION_LIST(V)
// Bool16x8.
// Unary operations (JS nargs == 1).
#define BOOL16X8_UNARY_FUNCTION_LIST(V) \
V(not, (UnaryFunc<Bool16x8, LogicalNot, Bool16x8>), 1) \
V(check, (UnaryFunc<Bool16x8, Identity, Bool16x8>), 1) \
V(splat, (FuncSplat<Bool16x8>), 1) \
V(allTrue, (AllTrue<Bool16x8>), 1) \
V(anyTrue, (AnyTrue<Bool16x8>), 1)
// Binary operations (JS nargs == 2). The last entry must NOT end with a
// line-continuation backslash, or the next #define would be spliced into
// this macro's replacement list.
#define BOOL16X8_BINARY_FUNCTION_LIST(V) \
V(extractLane, (ExtractLane<Bool16x8>), 2) \
V(and, (BinaryFunc<Bool16x8, And, Bool16x8>), 2) \
V(or, (BinaryFunc<Bool16x8, Or, Bool16x8>), 2) \
V(xor, (BinaryFunc<Bool16x8, Xor, Bool16x8>), 2)
// Ternary operations (JS nargs == 3).
#define BOOL16X8_TERNARY_FUNCTION_LIST(V) \
V(replaceLane, (ReplaceLane<Bool16x8>), 3)
// All Bool16x8 operations.
#define BOOL16X8_FUNCTION_LIST(V) \
BOOL16X8_UNARY_FUNCTION_LIST(V) \
BOOL16X8_BINARY_FUNCTION_LIST(V) \
BOOL16X8_TERNARY_FUNCTION_LIST(V)
// Bool32x4.
// Unary operations (JS nargs == 1).
#define BOOL32X4_UNARY_FUNCTION_LIST(V) \
V(not, (UnaryFunc<Bool32x4, LogicalNot, Bool32x4>), 1) \
V(check, (UnaryFunc<Bool32x4, Identity, Bool32x4>), 1) \
V(splat, (FuncSplat<Bool32x4>), 1) \
V(allTrue, (AllTrue<Bool32x4>), 1) \
V(anyTrue, (AnyTrue<Bool32x4>), 1)
// Binary operations (JS nargs == 2). The last entry must NOT end with a
// line-continuation backslash, or the next #define would be spliced into
// this macro's replacement list.
#define BOOL32X4_BINARY_FUNCTION_LIST(V) \
V(extractLane, (ExtractLane<Bool32x4>), 2) \
V(and, (BinaryFunc<Bool32x4, And, Bool32x4>), 2) \
V(or, (BinaryFunc<Bool32x4, Or, Bool32x4>), 2) \
V(xor, (BinaryFunc<Bool32x4, Xor, Bool32x4>), 2)
// Ternary operations (JS nargs == 3).
#define BOOL32X4_TERNARY_FUNCTION_LIST(V) \
V(replaceLane, (ReplaceLane<Bool32x4>), 3)
// All Bool32x4 operations.
#define BOOL32X4_FUNCTION_LIST(V) \
BOOL32X4_UNARY_FUNCTION_LIST(V) \
BOOL32X4_BINARY_FUNCTION_LIST(V) \
BOOL32X4_TERNARY_FUNCTION_LIST(V)
// Bool64x2.
// Unary operations (JS nargs == 1).
#define BOOL64X2_UNARY_FUNCTION_LIST(V) \
V(not, (UnaryFunc<Bool64x2, LogicalNot, Bool64x2>), 1) \
V(check, (UnaryFunc<Bool64x2, Identity, Bool64x2>), 1) \
V(splat, (FuncSplat<Bool64x2>), 1) \
V(allTrue, (AllTrue<Bool64x2>), 1) \
V(anyTrue, (AnyTrue<Bool64x2>), 1)
// Binary operations (JS nargs == 2). The last entry must NOT end with a
// line-continuation backslash, or the next #define would be spliced into
// this macro's replacement list.
#define BOOL64X2_BINARY_FUNCTION_LIST(V) \
V(extractLane, (ExtractLane<Bool64x2>), 2) \
V(and, (BinaryFunc<Bool64x2, And, Bool64x2>), 2) \
V(or, (BinaryFunc<Bool64x2, Or, Bool64x2>), 2) \
V(xor, (BinaryFunc<Bool64x2, Xor, Bool64x2>), 2)
// Ternary operations (JS nargs == 3).
#define BOOL64X2_TERNARY_FUNCTION_LIST(V) \
V(replaceLane, (ReplaceLane<Bool64x2>), 3)
// All Bool64x2 operations.
#define BOOL64X2_FUNCTION_LIST(V) \
BOOL64X2_UNARY_FUNCTION_LIST(V) \
BOOL64X2_BINARY_FUNCTION_LIST(V) \
BOOL64X2_TERNARY_FUNCTION_LIST(V)
// Float32x4.
// Each V(name, implementation, nargs) entry below defines one JS method on
// the SIMD.Float32x4 namespace object; entry order is the definition order.
// Unary operations (JS nargs == 1).
#define FLOAT32X4_UNARY_FUNCTION_LIST(V) \
V(abs, (UnaryFunc<Float32x4, Abs, Float32x4>), 1) \
V(check, (UnaryFunc<Float32x4, Identity, Float32x4>), 1) \
V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Float32x4>), 1) \
V(fromInt8x16Bits, (FuncConvertBits<Int8x16, Float32x4>), 1) \
V(fromInt16x8Bits, (FuncConvertBits<Int16x8, Float32x4>), 1) \
V(fromInt32x4, (FuncConvert<Int32x4, Float32x4>), 1) \
V(fromInt32x4Bits, (FuncConvertBits<Int32x4, Float32x4>), 1) \
V(fromUint8x16Bits, (FuncConvertBits<Uint8x16, Float32x4>), 1) \
V(fromUint16x8Bits, (FuncConvertBits<Uint16x8, Float32x4>), 1) \
V(fromUint32x4, (FuncConvert<Uint32x4, Float32x4>), 1) \
V(fromUint32x4Bits, (FuncConvertBits<Uint32x4, Float32x4>), 1) \
V(neg, (UnaryFunc<Float32x4, Neg, Float32x4>), 1) \
V(reciprocalApproximation, (UnaryFunc<Float32x4, RecApprox, Float32x4>), 1) \
V(reciprocalSqrtApproximation, (UnaryFunc<Float32x4, RecSqrtApprox, Float32x4>), 1) \
V(splat, (FuncSplat<Float32x4>), 1) \
V(sqrt, (UnaryFunc<Float32x4, Sqrt, Float32x4>), 1)
// Binary operations (JS nargs == 2).
#define FLOAT32X4_BINARY_FUNCTION_LIST(V) \
V(add, (BinaryFunc<Float32x4, Add, Float32x4>), 2) \
V(div, (BinaryFunc<Float32x4, Div, Float32x4>), 2) \
V(equal, (CompareFunc<Float32x4, Equal, Bool32x4>), 2) \
V(extractLane, (ExtractLane<Float32x4>), 2) \
V(greaterThan, (CompareFunc<Float32x4, GreaterThan, Bool32x4>), 2) \
V(greaterThanOrEqual, (CompareFunc<Float32x4, GreaterThanOrEqual, Bool32x4>), 2) \
V(lessThan, (CompareFunc<Float32x4, LessThan, Bool32x4>), 2) \
V(lessThanOrEqual, (CompareFunc<Float32x4, LessThanOrEqual, Bool32x4>), 2) \
V(load, (Load<Float32x4, 4>), 2) \
V(load3, (Load<Float32x4, 3>), 2) \
V(load2, (Load<Float32x4, 2>), 2) \
V(load1, (Load<Float32x4, 1>), 2) \
V(max, (BinaryFunc<Float32x4, Maximum, Float32x4>), 2) \
V(maxNum, (BinaryFunc<Float32x4, MaxNum, Float32x4>), 2) \
V(min, (BinaryFunc<Float32x4, Minimum, Float32x4>), 2) \
V(minNum, (BinaryFunc<Float32x4, MinNum, Float32x4>), 2) \
V(mul, (BinaryFunc<Float32x4, Mul, Float32x4>), 2) \
V(notEqual, (CompareFunc<Float32x4, NotEqual, Bool32x4>), 2) \
V(sub, (BinaryFunc<Float32x4, Sub, Float32x4>), 2)
// Ternary operations (JS nargs == 3).
#define FLOAT32X4_TERNARY_FUNCTION_LIST(V) \
V(replaceLane, (ReplaceLane<Float32x4>), 3) \
V(select, (Select<Float32x4, Bool32x4>), 3) \
V(store, (Store<Float32x4, 4>), 3) \
V(store3, (Store<Float32x4, 3>), 3) \
V(store2, (Store<Float32x4, 2>), 3) \
V(store1, (Store<Float32x4, 1>), 3)
// Swizzle/shuffle: the vector argument(s) plus one lane index per lane.
#define FLOAT32X4_SHUFFLE_FUNCTION_LIST(V) \
V(swizzle, Swizzle<Float32x4>, 5) \
V(shuffle, Shuffle<Float32x4>, 6)
// All Float32x4 operations.
#define FLOAT32X4_FUNCTION_LIST(V) \
FLOAT32X4_UNARY_FUNCTION_LIST(V) \
FLOAT32X4_BINARY_FUNCTION_LIST(V) \
FLOAT32X4_TERNARY_FUNCTION_LIST(V) \
FLOAT32X4_SHUFFLE_FUNCTION_LIST(V)
// Float64x2.
// Each V(name, implementation, nargs) entry defines one JS method on the
// SIMD.Float64x2 namespace object.
// Unary operations (JS nargs == 1).
#define FLOAT64X2_UNARY_FUNCTION_LIST(V) \
V(abs, (UnaryFunc<Float64x2, Abs, Float64x2>), 1) \
V(check, (UnaryFunc<Float64x2, Identity, Float64x2>), 1) \
V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Float64x2>), 1) \
V(fromInt8x16Bits, (FuncConvertBits<Int8x16, Float64x2>), 1) \
V(fromInt16x8Bits, (FuncConvertBits<Int16x8, Float64x2>), 1) \
V(fromInt32x4Bits, (FuncConvertBits<Int32x4, Float64x2>), 1) \
V(fromUint8x16Bits, (FuncConvertBits<Uint8x16, Float64x2>), 1) \
V(fromUint16x8Bits, (FuncConvertBits<Uint16x8, Float64x2>), 1) \
V(fromUint32x4Bits, (FuncConvertBits<Uint32x4, Float64x2>), 1) \
V(neg, (UnaryFunc<Float64x2, Neg, Float64x2>), 1) \
V(reciprocalApproximation, (UnaryFunc<Float64x2, RecApprox, Float64x2>), 1) \
V(reciprocalSqrtApproximation, (UnaryFunc<Float64x2, RecSqrtApprox, Float64x2>), 1) \
V(splat, (FuncSplat<Float64x2>), 1) \
V(sqrt, (UnaryFunc<Float64x2, Sqrt, Float64x2>), 1)
// Binary operations (JS nargs == 2).
#define FLOAT64X2_BINARY_FUNCTION_LIST(V) \
V(add, (BinaryFunc<Float64x2, Add, Float64x2>), 2) \
V(div, (BinaryFunc<Float64x2, Div, Float64x2>), 2) \
V(equal, (CompareFunc<Float64x2, Equal, Bool64x2>), 2) \
V(extractLane, (ExtractLane<Float64x2>), 2) \
V(greaterThan, (CompareFunc<Float64x2, GreaterThan, Bool64x2>), 2) \
V(greaterThanOrEqual, (CompareFunc<Float64x2, GreaterThanOrEqual, Bool64x2>), 2) \
V(lessThan, (CompareFunc<Float64x2, LessThan, Bool64x2>), 2) \
V(lessThanOrEqual, (CompareFunc<Float64x2, LessThanOrEqual, Bool64x2>), 2) \
V(load, (Load<Float64x2, 2>), 2) \
V(load1, (Load<Float64x2, 1>), 2) \
V(max, (BinaryFunc<Float64x2, Maximum, Float64x2>), 2) \
V(maxNum, (BinaryFunc<Float64x2, MaxNum, Float64x2>), 2) \
V(min, (BinaryFunc<Float64x2, Minimum, Float64x2>), 2) \
V(minNum, (BinaryFunc<Float64x2, MinNum, Float64x2>), 2) \
V(mul, (BinaryFunc<Float64x2, Mul, Float64x2>), 2) \
V(notEqual, (CompareFunc<Float64x2, NotEqual, Bool64x2>), 2) \
V(sub, (BinaryFunc<Float64x2, Sub, Float64x2>), 2)
// Ternary operations (JS nargs == 3).
#define FLOAT64X2_TERNARY_FUNCTION_LIST(V) \
V(replaceLane, (ReplaceLane<Float64x2>), 3) \
V(select, (Select<Float64x2, Bool64x2>), 3) \
V(store, (Store<Float64x2, 2>), 3) \
V(store1, (Store<Float64x2, 1>), 3)
// Swizzle/shuffle: the vector argument(s) plus one lane index per lane.
#define FLOAT64X2_SHUFFLE_FUNCTION_LIST(V) \
V(swizzle, Swizzle<Float64x2>, 3) \
V(shuffle, Shuffle<Float64x2>, 4)
// All Float64x2 operations.
#define FLOAT64X2_FUNCTION_LIST(V) \
FLOAT64X2_UNARY_FUNCTION_LIST(V) \
FLOAT64X2_BINARY_FUNCTION_LIST(V) \
FLOAT64X2_TERNARY_FUNCTION_LIST(V) \
FLOAT64X2_SHUFFLE_FUNCTION_LIST(V)
// Int8x16.
// Each V(name, implementation, nargs) entry defines one JS method on the
// SIMD.Int8x16 namespace object.
// Unary operations (JS nargs == 1).
#define INT8X16_UNARY_FUNCTION_LIST(V) \
V(check, (UnaryFunc<Int8x16, Identity, Int8x16>), 1) \
V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Int8x16>), 1) \
V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Int8x16>), 1) \
V(fromInt16x8Bits, (FuncConvertBits<Int16x8, Int8x16>), 1) \
V(fromInt32x4Bits, (FuncConvertBits<Int32x4, Int8x16>), 1) \
V(fromUint8x16Bits, (FuncConvertBits<Uint8x16, Int8x16>), 1) \
V(fromUint16x8Bits, (FuncConvertBits<Uint16x8, Int8x16>), 1) \
V(fromUint32x4Bits, (FuncConvertBits<Uint32x4, Int8x16>), 1) \
V(neg, (UnaryFunc<Int8x16, Neg, Int8x16>), 1) \
V(not, (UnaryFunc<Int8x16, Not, Int8x16>), 1) \
V(splat, (FuncSplat<Int8x16>), 1)
// Binary operations (JS nargs == 2); signed type, so shiftRightByScalar is
// an arithmetic shift.
#define INT8X16_BINARY_FUNCTION_LIST(V) \
V(add, (BinaryFunc<Int8x16, Add, Int8x16>), 2) \
V(addSaturate, (BinaryFunc<Int8x16, AddSaturate, Int8x16>), 2) \
V(and, (BinaryFunc<Int8x16, And, Int8x16>), 2) \
V(equal, (CompareFunc<Int8x16, Equal, Bool8x16>), 2) \
V(extractLane, (ExtractLane<Int8x16>), 2) \
V(greaterThan, (CompareFunc<Int8x16, GreaterThan, Bool8x16>), 2) \
V(greaterThanOrEqual, (CompareFunc<Int8x16, GreaterThanOrEqual, Bool8x16>), 2) \
V(lessThan, (CompareFunc<Int8x16, LessThan, Bool8x16>), 2) \
V(lessThanOrEqual, (CompareFunc<Int8x16, LessThanOrEqual, Bool8x16>), 2) \
V(load, (Load<Int8x16, 16>), 2) \
V(mul, (BinaryFunc<Int8x16, Mul, Int8x16>), 2) \
V(notEqual, (CompareFunc<Int8x16, NotEqual, Bool8x16>), 2) \
V(or, (BinaryFunc<Int8x16, Or, Int8x16>), 2) \
V(sub, (BinaryFunc<Int8x16, Sub, Int8x16>), 2) \
V(subSaturate, (BinaryFunc<Int8x16, SubSaturate, Int8x16>), 2) \
V(shiftLeftByScalar, (BinaryScalar<Int8x16, ShiftLeft>), 2) \
V(shiftRightByScalar, (BinaryScalar<Int8x16, ShiftRightArithmetic>), 2) \
V(xor, (BinaryFunc<Int8x16, Xor, Int8x16>), 2)
// Ternary operations (JS nargs == 3).
#define INT8X16_TERNARY_FUNCTION_LIST(V) \
V(replaceLane, (ReplaceLane<Int8x16>), 3) \
V(select, (Select<Int8x16, Bool8x16>), 3) \
V(store, (Store<Int8x16, 16>), 3)
// Swizzle/shuffle: the vector argument(s) plus one lane index per lane.
#define INT8X16_SHUFFLE_FUNCTION_LIST(V) \
V(swizzle, Swizzle<Int8x16>, 17) \
V(shuffle, Shuffle<Int8x16>, 18)
// All Int8x16 operations.
#define INT8X16_FUNCTION_LIST(V) \
INT8X16_UNARY_FUNCTION_LIST(V) \
INT8X16_BINARY_FUNCTION_LIST(V) \
INT8X16_TERNARY_FUNCTION_LIST(V) \
INT8X16_SHUFFLE_FUNCTION_LIST(V)
// Uint8x16.
// Each V(name, implementation, nargs) entry defines one JS method on the
// SIMD.Uint8x16 namespace object.
// Unary operations (JS nargs == 1).
#define UINT8X16_UNARY_FUNCTION_LIST(V) \
V(check, (UnaryFunc<Uint8x16, Identity, Uint8x16>), 1) \
V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Uint8x16>), 1) \
V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Uint8x16>), 1) \
V(fromInt8x16Bits, (FuncConvertBits<Int8x16, Uint8x16>), 1) \
V(fromInt16x8Bits, (FuncConvertBits<Int16x8, Uint8x16>), 1) \
V(fromInt32x4Bits, (FuncConvertBits<Int32x4, Uint8x16>), 1) \
V(fromUint16x8Bits, (FuncConvertBits<Uint16x8, Uint8x16>), 1) \
V(fromUint32x4Bits, (FuncConvertBits<Uint32x4, Uint8x16>), 1) \
V(neg, (UnaryFunc<Uint8x16, Neg, Uint8x16>), 1) \
V(not, (UnaryFunc<Uint8x16, Not, Uint8x16>), 1) \
V(splat, (FuncSplat<Uint8x16>), 1)
// Binary operations (JS nargs == 2); unsigned type, so shiftRightByScalar
// is a logical shift.
#define UINT8X16_BINARY_FUNCTION_LIST(V) \
V(add, (BinaryFunc<Uint8x16, Add, Uint8x16>), 2) \
V(addSaturate, (BinaryFunc<Uint8x16, AddSaturate, Uint8x16>), 2) \
V(and, (BinaryFunc<Uint8x16, And, Uint8x16>), 2) \
V(equal, (CompareFunc<Uint8x16, Equal, Bool8x16>), 2) \
V(extractLane, (ExtractLane<Uint8x16>), 2) \
V(greaterThan, (CompareFunc<Uint8x16, GreaterThan, Bool8x16>), 2) \
V(greaterThanOrEqual, (CompareFunc<Uint8x16, GreaterThanOrEqual, Bool8x16>), 2) \
V(lessThan, (CompareFunc<Uint8x16, LessThan, Bool8x16>), 2) \
V(lessThanOrEqual, (CompareFunc<Uint8x16, LessThanOrEqual, Bool8x16>), 2) \
V(load, (Load<Uint8x16, 16>), 2) \
V(mul, (BinaryFunc<Uint8x16, Mul, Uint8x16>), 2) \
V(notEqual, (CompareFunc<Uint8x16, NotEqual, Bool8x16>), 2) \
V(or, (BinaryFunc<Uint8x16, Or, Uint8x16>), 2) \
V(sub, (BinaryFunc<Uint8x16, Sub, Uint8x16>), 2) \
V(subSaturate, (BinaryFunc<Uint8x16, SubSaturate, Uint8x16>), 2) \
V(shiftLeftByScalar, (BinaryScalar<Uint8x16, ShiftLeft>), 2) \
V(shiftRightByScalar, (BinaryScalar<Uint8x16, ShiftRightLogical>), 2) \
V(xor, (BinaryFunc<Uint8x16, Xor, Uint8x16>), 2)
// Ternary operations (JS nargs == 3).
#define UINT8X16_TERNARY_FUNCTION_LIST(V) \
V(replaceLane, (ReplaceLane<Uint8x16>), 3) \
V(select, (Select<Uint8x16, Bool8x16>), 3) \
V(store, (Store<Uint8x16, 16>), 3)
// Swizzle/shuffle: the vector argument(s) plus one lane index per lane.
#define UINT8X16_SHUFFLE_FUNCTION_LIST(V) \
V(swizzle, Swizzle<Uint8x16>, 17) \
V(shuffle, Shuffle<Uint8x16>, 18)
// All Uint8x16 operations.
#define UINT8X16_FUNCTION_LIST(V) \
UINT8X16_UNARY_FUNCTION_LIST(V) \
UINT8X16_BINARY_FUNCTION_LIST(V) \
UINT8X16_TERNARY_FUNCTION_LIST(V) \
UINT8X16_SHUFFLE_FUNCTION_LIST(V)
// Int16x8.
// Each V(name, implementation, nargs) entry defines one JS method on the
// SIMD.Int16x8 namespace object.
// Unary operations (JS nargs == 1).
#define INT16X8_UNARY_FUNCTION_LIST(V) \
V(check, (UnaryFunc<Int16x8, Identity, Int16x8>), 1) \
V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Int16x8>), 1) \
V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Int16x8>), 1) \
V(fromInt8x16Bits, (FuncConvertBits<Int8x16, Int16x8>), 1) \
V(fromInt32x4Bits, (FuncConvertBits<Int32x4, Int16x8>), 1) \
V(fromUint8x16Bits, (FuncConvertBits<Uint8x16, Int16x8>), 1) \
V(fromUint16x8Bits, (FuncConvertBits<Uint16x8, Int16x8>), 1) \
V(fromUint32x4Bits, (FuncConvertBits<Uint32x4, Int16x8>), 1) \
V(neg, (UnaryFunc<Int16x8, Neg, Int16x8>), 1) \
V(not, (UnaryFunc<Int16x8, Not, Int16x8>), 1) \
V(splat, (FuncSplat<Int16x8>), 1)
// Binary operations (JS nargs == 2); signed type, so shiftRightByScalar is
// an arithmetic shift.
#define INT16X8_BINARY_FUNCTION_LIST(V) \
V(add, (BinaryFunc<Int16x8, Add, Int16x8>), 2) \
V(addSaturate, (BinaryFunc<Int16x8, AddSaturate, Int16x8>), 2) \
V(and, (BinaryFunc<Int16x8, And, Int16x8>), 2) \
V(equal, (CompareFunc<Int16x8, Equal, Bool16x8>), 2) \
V(extractLane, (ExtractLane<Int16x8>), 2) \
V(greaterThan, (CompareFunc<Int16x8, GreaterThan, Bool16x8>), 2) \
V(greaterThanOrEqual, (CompareFunc<Int16x8, GreaterThanOrEqual, Bool16x8>), 2) \
V(lessThan, (CompareFunc<Int16x8, LessThan, Bool16x8>), 2) \
V(lessThanOrEqual, (CompareFunc<Int16x8, LessThanOrEqual, Bool16x8>), 2) \
V(load, (Load<Int16x8, 8>), 2) \
V(mul, (BinaryFunc<Int16x8, Mul, Int16x8>), 2) \
V(notEqual, (CompareFunc<Int16x8, NotEqual, Bool16x8>), 2) \
V(or, (BinaryFunc<Int16x8, Or, Int16x8>), 2) \
V(sub, (BinaryFunc<Int16x8, Sub, Int16x8>), 2) \
V(subSaturate, (BinaryFunc<Int16x8, SubSaturate, Int16x8>), 2) \
V(shiftLeftByScalar, (BinaryScalar<Int16x8, ShiftLeft>), 2) \
V(shiftRightByScalar, (BinaryScalar<Int16x8, ShiftRightArithmetic>), 2) \
V(xor, (BinaryFunc<Int16x8, Xor, Int16x8>), 2)
// Ternary operations (JS nargs == 3).
#define INT16X8_TERNARY_FUNCTION_LIST(V) \
V(replaceLane, (ReplaceLane<Int16x8>), 3) \
V(select, (Select<Int16x8, Bool16x8>), 3) \
V(store, (Store<Int16x8, 8>), 3)
// Swizzle/shuffle: the vector argument(s) plus one lane index per lane.
#define INT16X8_SHUFFLE_FUNCTION_LIST(V) \
V(swizzle, Swizzle<Int16x8>, 9) \
V(shuffle, Shuffle<Int16x8>, 10)
// All Int16x8 operations.
#define INT16X8_FUNCTION_LIST(V) \
INT16X8_UNARY_FUNCTION_LIST(V) \
INT16X8_BINARY_FUNCTION_LIST(V) \
INT16X8_TERNARY_FUNCTION_LIST(V) \
INT16X8_SHUFFLE_FUNCTION_LIST(V)
// Uint16x8.
// Each V(name, implementation, nargs) entry defines one JS method on the
// SIMD.Uint16x8 namespace object.
// Unary operations (JS nargs == 1).
#define UINT16X8_UNARY_FUNCTION_LIST(V) \
V(check, (UnaryFunc<Uint16x8, Identity, Uint16x8>), 1) \
V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Uint16x8>), 1) \
V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Uint16x8>), 1) \
V(fromInt8x16Bits, (FuncConvertBits<Int8x16, Uint16x8>), 1) \
V(fromInt16x8Bits, (FuncConvertBits<Int16x8, Uint16x8>), 1) \
V(fromInt32x4Bits, (FuncConvertBits<Int32x4, Uint16x8>), 1) \
V(fromUint8x16Bits, (FuncConvertBits<Uint8x16, Uint16x8>), 1) \
V(fromUint32x4Bits, (FuncConvertBits<Uint32x4, Uint16x8>), 1) \
V(neg, (UnaryFunc<Uint16x8, Neg, Uint16x8>), 1) \
V(not, (UnaryFunc<Uint16x8, Not, Uint16x8>), 1) \
V(splat, (FuncSplat<Uint16x8>), 1)
// Binary operations (JS nargs == 2); unsigned type, so shiftRightByScalar
// is a logical shift.
#define UINT16X8_BINARY_FUNCTION_LIST(V) \
V(add, (BinaryFunc<Uint16x8, Add, Uint16x8>), 2) \
V(addSaturate, (BinaryFunc<Uint16x8, AddSaturate, Uint16x8>), 2) \
V(and, (BinaryFunc<Uint16x8, And, Uint16x8>), 2) \
V(equal, (CompareFunc<Uint16x8, Equal, Bool16x8>), 2) \
V(extractLane, (ExtractLane<Uint16x8>), 2) \
V(greaterThan, (CompareFunc<Uint16x8, GreaterThan, Bool16x8>), 2) \
V(greaterThanOrEqual, (CompareFunc<Uint16x8, GreaterThanOrEqual, Bool16x8>), 2) \
V(lessThan, (CompareFunc<Uint16x8, LessThan, Bool16x8>), 2) \
V(lessThanOrEqual, (CompareFunc<Uint16x8, LessThanOrEqual, Bool16x8>), 2) \
V(load, (Load<Uint16x8, 8>), 2) \
V(mul, (BinaryFunc<Uint16x8, Mul, Uint16x8>), 2) \
V(notEqual, (CompareFunc<Uint16x8, NotEqual, Bool16x8>), 2) \
V(or, (BinaryFunc<Uint16x8, Or, Uint16x8>), 2) \
V(sub, (BinaryFunc<Uint16x8, Sub, Uint16x8>), 2) \
V(subSaturate, (BinaryFunc<Uint16x8, SubSaturate, Uint16x8>), 2) \
V(shiftLeftByScalar, (BinaryScalar<Uint16x8, ShiftLeft>), 2) \
V(shiftRightByScalar, (BinaryScalar<Uint16x8, ShiftRightLogical>), 2) \
V(xor, (BinaryFunc<Uint16x8, Xor, Uint16x8>), 2)
// Ternary operations (JS nargs == 3).
#define UINT16X8_TERNARY_FUNCTION_LIST(V) \
V(replaceLane, (ReplaceLane<Uint16x8>), 3) \
V(select, (Select<Uint16x8, Bool16x8>), 3) \
V(store, (Store<Uint16x8, 8>), 3)
// Swizzle/shuffle: the vector argument(s) plus one lane index per lane.
#define UINT16X8_SHUFFLE_FUNCTION_LIST(V) \
V(swizzle, Swizzle<Uint16x8>, 9) \
V(shuffle, Shuffle<Uint16x8>, 10)
// All Uint16x8 operations.
#define UINT16X8_FUNCTION_LIST(V) \
UINT16X8_UNARY_FUNCTION_LIST(V) \
UINT16X8_BINARY_FUNCTION_LIST(V) \
UINT16X8_TERNARY_FUNCTION_LIST(V) \
UINT16X8_SHUFFLE_FUNCTION_LIST(V)
// Int32x4.
// Each V(name, implementation, nargs) entry defines one JS method on the
// SIMD.Int32x4 namespace object.
// Unary operations (JS nargs == 1). Note splat takes one scalar argument
// (nargs 1), matching every other type's splat entry.
#define INT32X4_UNARY_FUNCTION_LIST(V) \
V(check, (UnaryFunc<Int32x4, Identity, Int32x4>), 1) \
V(fromFloat32x4, (FuncConvert<Float32x4, Int32x4>), 1) \
V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Int32x4>), 1) \
V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Int32x4>), 1) \
V(fromInt8x16Bits, (FuncConvertBits<Int8x16, Int32x4>), 1) \
V(fromInt16x8Bits, (FuncConvertBits<Int16x8, Int32x4>), 1) \
V(fromUint8x16Bits, (FuncConvertBits<Uint8x16, Int32x4>), 1) \
V(fromUint16x8Bits, (FuncConvertBits<Uint16x8, Int32x4>), 1) \
V(fromUint32x4Bits, (FuncConvertBits<Uint32x4, Int32x4>), 1) \
V(neg, (UnaryFunc<Int32x4, Neg, Int32x4>), 1) \
V(not, (UnaryFunc<Int32x4, Not, Int32x4>), 1) \
V(splat, (FuncSplat<Int32x4>), 1)
// Binary operations (JS nargs == 2); signed type, so shiftRightByScalar is
// an arithmetic shift.
#define INT32X4_BINARY_FUNCTION_LIST(V) \
V(add, (BinaryFunc<Int32x4, Add, Int32x4>), 2) \
V(and, (BinaryFunc<Int32x4, And, Int32x4>), 2) \
V(equal, (CompareFunc<Int32x4, Equal, Bool32x4>), 2) \
V(extractLane, (ExtractLane<Int32x4>), 2) \
V(greaterThan, (CompareFunc<Int32x4, GreaterThan, Bool32x4>), 2) \
V(greaterThanOrEqual, (CompareFunc<Int32x4, GreaterThanOrEqual, Bool32x4>), 2) \
V(lessThan, (CompareFunc<Int32x4, LessThan, Bool32x4>), 2) \
V(lessThanOrEqual, (CompareFunc<Int32x4, LessThanOrEqual, Bool32x4>), 2) \
V(load, (Load<Int32x4, 4>), 2) \
V(load3, (Load<Int32x4, 3>), 2) \
V(load2, (Load<Int32x4, 2>), 2) \
V(load1, (Load<Int32x4, 1>), 2) \
V(mul, (BinaryFunc<Int32x4, Mul, Int32x4>), 2) \
V(notEqual, (CompareFunc<Int32x4, NotEqual, Bool32x4>), 2) \
V(or, (BinaryFunc<Int32x4, Or, Int32x4>), 2) \
V(sub, (BinaryFunc<Int32x4, Sub, Int32x4>), 2) \
V(shiftLeftByScalar, (BinaryScalar<Int32x4, ShiftLeft>), 2) \
V(shiftRightByScalar, (BinaryScalar<Int32x4, ShiftRightArithmetic>), 2) \
V(xor, (BinaryFunc<Int32x4, Xor, Int32x4>), 2)
// Ternary operations (JS nargs == 3).
#define INT32X4_TERNARY_FUNCTION_LIST(V) \
V(replaceLane, (ReplaceLane<Int32x4>), 3) \
V(select, (Select<Int32x4, Bool32x4>), 3) \
V(store, (Store<Int32x4, 4>), 3) \
V(store3, (Store<Int32x4, 3>), 3) \
V(store2, (Store<Int32x4, 2>), 3) \
V(store1, (Store<Int32x4, 1>), 3)
// Swizzle/shuffle: the vector argument(s) plus one lane index per lane.
#define INT32X4_SHUFFLE_FUNCTION_LIST(V) \
V(swizzle, Swizzle<Int32x4>, 5) \
V(shuffle, Shuffle<Int32x4>, 6)
// All Int32x4 operations.
#define INT32X4_FUNCTION_LIST(V) \
INT32X4_UNARY_FUNCTION_LIST(V) \
INT32X4_BINARY_FUNCTION_LIST(V) \
INT32X4_TERNARY_FUNCTION_LIST(V) \
INT32X4_SHUFFLE_FUNCTION_LIST(V)
// Uint32x4.
// Each V(name, implementation, nargs) entry defines one JS method on the
// SIMD.Uint32x4 namespace object.
// Unary operations (JS nargs == 1). Note splat takes one scalar argument
// (nargs 1), matching every other type's splat entry.
#define UINT32X4_UNARY_FUNCTION_LIST(V) \
V(check, (UnaryFunc<Uint32x4, Identity, Uint32x4>), 1) \
V(fromFloat32x4, (FuncConvert<Float32x4, Uint32x4>), 1) \
V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Uint32x4>), 1) \
V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Uint32x4>), 1) \
V(fromInt8x16Bits, (FuncConvertBits<Int8x16, Uint32x4>), 1) \
V(fromInt16x8Bits, (FuncConvertBits<Int16x8, Uint32x4>), 1) \
V(fromInt32x4Bits, (FuncConvertBits<Int32x4, Uint32x4>), 1) \
V(fromUint8x16Bits, (FuncConvertBits<Uint8x16, Uint32x4>), 1) \
V(fromUint16x8Bits, (FuncConvertBits<Uint16x8, Uint32x4>), 1) \
V(neg, (UnaryFunc<Uint32x4, Neg, Uint32x4>), 1) \
V(not, (UnaryFunc<Uint32x4, Not, Uint32x4>), 1) \
V(splat, (FuncSplat<Uint32x4>), 1)
// Binary operations (JS nargs == 2); unsigned type, so shiftRightByScalar
// is a logical shift.
#define UINT32X4_BINARY_FUNCTION_LIST(V) \
V(add, (BinaryFunc<Uint32x4, Add, Uint32x4>), 2) \
V(and, (BinaryFunc<Uint32x4, And, Uint32x4>), 2) \
V(equal, (CompareFunc<Uint32x4, Equal, Bool32x4>), 2) \
V(extractLane, (ExtractLane<Uint32x4>), 2) \
V(greaterThan, (CompareFunc<Uint32x4, GreaterThan, Bool32x4>), 2) \
V(greaterThanOrEqual, (CompareFunc<Uint32x4, GreaterThanOrEqual, Bool32x4>), 2) \
V(lessThan, (CompareFunc<Uint32x4, LessThan, Bool32x4>), 2) \
V(lessThanOrEqual, (CompareFunc<Uint32x4, LessThanOrEqual, Bool32x4>), 2) \
V(load, (Load<Uint32x4, 4>), 2) \
V(load3, (Load<Uint32x4, 3>), 2) \
V(load2, (Load<Uint32x4, 2>), 2) \
V(load1, (Load<Uint32x4, 1>), 2) \
V(mul, (BinaryFunc<Uint32x4, Mul, Uint32x4>), 2) \
V(notEqual, (CompareFunc<Uint32x4, NotEqual, Bool32x4>), 2) \
V(or, (BinaryFunc<Uint32x4, Or, Uint32x4>), 2) \
V(sub, (BinaryFunc<Uint32x4, Sub, Uint32x4>), 2) \
V(shiftLeftByScalar, (BinaryScalar<Uint32x4, ShiftLeft>), 2) \
V(shiftRightByScalar, (BinaryScalar<Uint32x4, ShiftRightLogical>), 2) \
V(xor, (BinaryFunc<Uint32x4, Xor, Uint32x4>), 2)
// Ternary operations (JS nargs == 3).
#define UINT32X4_TERNARY_FUNCTION_LIST(V) \
V(replaceLane, (ReplaceLane<Uint32x4>), 3) \
V(select, (Select<Uint32x4, Bool32x4>), 3) \
V(store, (Store<Uint32x4, 4>), 3) \
V(store3, (Store<Uint32x4, 3>), 3) \
V(store2, (Store<Uint32x4, 2>), 3) \
V(store1, (Store<Uint32x4, 1>), 3)
// Swizzle/shuffle: the vector argument(s) plus one lane index per lane.
#define UINT32X4_SHUFFLE_FUNCTION_LIST(V) \
V(swizzle, Swizzle<Uint32x4>, 5) \
V(shuffle, Shuffle<Uint32x4>, 6)
// All Uint32x4 operations.
#define UINT32X4_FUNCTION_LIST(V) \
UINT32X4_UNARY_FUNCTION_LIST(V) \
UINT32X4_BINARY_FUNCTION_LIST(V) \
UINT32X4_TERNARY_FUNCTION_LIST(V) \
UINT32X4_SHUFFLE_FUNCTION_LIST(V)
/*
* The FOREACH macros below partition all of the SIMD operations into disjoint
* sets.
*/
// Operations available on all SIMD types. Mixed arity.
#define FOREACH_COMMON_SIMD_OP(_) \
_(extractLane) \
_(replaceLane) \
_(check) \
_(splat)
// Lanewise operations available on numeric SIMD types.
// Include lane-wise select here since it is not arithmetic and defined on
// numeric types too.
#define FOREACH_LANE_SIMD_OP(_) \
_(select) \
_(swizzle) \
_(shuffle)
// Memory operations available on numeric SIMD types.
#define FOREACH_MEMORY_SIMD_OP(_) \
_(load) \
_(store)
// Memory operations available on numeric X4 SIMD types.
#define FOREACH_MEMORY_X4_SIMD_OP(_) \
_(load1) \
_(load2) \
_(load3) \
_(store1) \
_(store2) \
_(store3)
// Unary operations on Bool vectors.
#define FOREACH_BOOL_SIMD_UNOP(_) \
_(allTrue) \
_(anyTrue)
// Unary bitwise SIMD operators defined on all integer and boolean SIMD types.
#define FOREACH_BITWISE_SIMD_UNOP(_) \
_(not)
// Binary bitwise SIMD operators defined on all integer and boolean SIMD types.
#define FOREACH_BITWISE_SIMD_BINOP(_) \
_(and) \
_(or) \
_(xor)
// Bitwise shifts defined on integer SIMD types.
#define FOREACH_SHIFT_SIMD_OP(_) \
_(shiftLeftByScalar) \
_(shiftRightByScalar)
// Unary arithmetic operators defined on numeric SIMD types.
#define FOREACH_NUMERIC_SIMD_UNOP(_) \
_(neg)
// Binary arithmetic operators defined on numeric SIMD types.
#define FOREACH_NUMERIC_SIMD_BINOP(_) \
_(add) \
_(sub) \
_(mul)
// Unary arithmetic operators defined on floating point SIMD types.
#define FOREACH_FLOAT_SIMD_UNOP(_) \
_(abs) \
_(sqrt) \
_(reciprocalApproximation) \
_(reciprocalSqrtApproximation)
// Binary arithmetic operators defined on floating point SIMD types.
#define FOREACH_FLOAT_SIMD_BINOP(_) \
_(div) \
_(max) \
_(min) \
_(maxNum) \
_(minNum)
// Binary operations on small integer (< 32 bits) vectors.
#define FOREACH_SMINT_SIMD_BINOP(_) \
_(addSaturate) \
_(subSaturate)
// Comparison operators defined on numeric SIMD types.
#define FOREACH_COMP_SIMD_OP(_) \
_(lessThan) \
_(lessThanOrEqual) \
_(equal) \
_(notEqual) \
_(greaterThan) \
_(greaterThanOrEqual)
/*
* All SIMD operations, excluding casts.
*/
#define FORALL_SIMD_NONCAST_OP(_) \
FOREACH_COMMON_SIMD_OP(_) \
FOREACH_LANE_SIMD_OP(_) \
FOREACH_MEMORY_SIMD_OP(_) \
FOREACH_MEMORY_X4_SIMD_OP(_) \
FOREACH_BOOL_SIMD_UNOP(_) \
FOREACH_BITWISE_SIMD_UNOP(_) \
FOREACH_BITWISE_SIMD_BINOP(_) \
FOREACH_SHIFT_SIMD_OP(_) \
FOREACH_NUMERIC_SIMD_UNOP(_) \
FOREACH_NUMERIC_SIMD_BINOP(_) \
FOREACH_FLOAT_SIMD_UNOP(_) \
FOREACH_FLOAT_SIMD_BINOP(_) \
FOREACH_SMINT_SIMD_BINOP(_) \
FOREACH_COMP_SIMD_OP(_)
/*
* All operations on integer SIMD types, excluding casts and
* FOREACH_MEMORY_X4_OP.
*/
#define FORALL_INT_SIMD_OP(_) \
FOREACH_COMMON_SIMD_OP(_) \
FOREACH_LANE_SIMD_OP(_) \
FOREACH_MEMORY_SIMD_OP(_) \
FOREACH_BITWISE_SIMD_UNOP(_) \
FOREACH_BITWISE_SIMD_BINOP(_) \
FOREACH_SHIFT_SIMD_OP(_) \
FOREACH_NUMERIC_SIMD_UNOP(_) \
FOREACH_NUMERIC_SIMD_BINOP(_) \
FOREACH_COMP_SIMD_OP(_)
/*
* All operations on floating point SIMD types, excluding casts and
* FOREACH_MEMORY_X4_OP.
*/
#define FORALL_FLOAT_SIMD_OP(_) \
FOREACH_COMMON_SIMD_OP(_) \
FOREACH_LANE_SIMD_OP(_) \
FOREACH_MEMORY_SIMD_OP(_) \
FOREACH_NUMERIC_SIMD_UNOP(_) \
FOREACH_NUMERIC_SIMD_BINOP(_) \
FOREACH_FLOAT_SIMD_UNOP(_) \
FOREACH_FLOAT_SIMD_BINOP(_) \
FOREACH_COMP_SIMD_OP(_)
/*
* All operations on Bool SIMD types.
*
* These types don't have casts, so no need to specialize.
*/
#define FORALL_BOOL_SIMD_OP(_) \
FOREACH_COMMON_SIMD_OP(_) \
FOREACH_BOOL_SIMD_UNOP(_) \
FOREACH_BITWISE_SIMD_UNOP(_) \
FOREACH_BITWISE_SIMD_BINOP(_)
/*
* The sets of cast operations are listed per type below.
*
* These sets are not disjoint.
*/
#define FOREACH_INT8X16_SIMD_CAST(_) \
_(fromFloat32x4Bits) \
_(fromFloat64x2Bits) \
_(fromInt16x8Bits) \
_(fromInt32x4Bits)
#define FOREACH_INT16X8_SIMD_CAST(_) \
_(fromFloat32x4Bits) \
_(fromFloat64x2Bits) \
_(fromInt8x16Bits) \
_(fromInt32x4Bits)
#define FOREACH_INT32X4_SIMD_CAST(_) \
_(fromFloat32x4) \
_(fromFloat32x4Bits) \
_(fromFloat64x2Bits) \
_(fromInt8x16Bits) \
_(fromInt16x8Bits)
#define FOREACH_FLOAT32X4_SIMD_CAST(_)\
_(fromFloat64x2Bits) \
_(fromInt8x16Bits) \
_(fromInt16x8Bits) \
_(fromInt32x4) \
_(fromInt32x4Bits)
#define FOREACH_FLOAT64X2_SIMD_CAST(_)\
_(fromFloat32x4Bits) \
_(fromInt8x16Bits) \
_(fromInt16x8Bits) \
_(fromInt32x4Bits)
// All operations on Int32x4.
#define FORALL_INT32X4_SIMD_OP(_) \
FORALL_INT_SIMD_OP(_) \
FOREACH_MEMORY_X4_SIMD_OP(_) \
FOREACH_INT32X4_SIMD_CAST(_)
// All operations on Float32X4
#define FORALL_FLOAT32X4_SIMD_OP(_) \
FORALL_FLOAT_SIMD_OP(_) \
FOREACH_MEMORY_X4_SIMD_OP(_) \
FOREACH_FLOAT32X4_SIMD_CAST(_)
/*
* All SIMD operations assuming only 32x4 types exist.
* This is used in the current asm.js impl.
*/
#define FORALL_SIMD_ASMJS_OP(_) \
FORALL_SIMD_NONCAST_OP(_) \
_(fromFloat32x4) \
_(fromFloat32x4Bits) \
_(fromInt8x16Bits) \
_(fromInt16x8Bits) \
_(fromInt32x4) \
_(fromInt32x4Bits) \
_(fromUint8x16Bits) \
_(fromUint16x8Bits) \
_(fromUint32x4) \
_(fromUint32x4Bits)
// All operations on Int8x16 or Uint8x16 in the asm.js world.
// Note: this does not include conversions and casts to/from Uint8x16 because
// this list is shared between Int8x16 and Uint8x16.
#define FORALL_INT8X16_ASMJS_OP(_) \
FORALL_INT_SIMD_OP(_) \
FOREACH_SMINT_SIMD_BINOP(_) \
_(fromInt16x8Bits) \
_(fromInt32x4Bits) \
_(fromFloat32x4Bits)
// All operations on Int16x8 or Uint16x8 in the asm.js world.
// Note: this does not include conversions and casts to/from Uint16x8 because
// this list is shared between Int16x8 and Uint16x8.
#define FORALL_INT16X8_ASMJS_OP(_) \
FORALL_INT_SIMD_OP(_) \
FOREACH_SMINT_SIMD_BINOP(_) \
_(fromInt8x16Bits) \
_(fromInt32x4Bits) \
_(fromFloat32x4Bits)
// All operations on Int32x4 or Uint32x4 in the asm.js world.
// Note: this does not include conversions and casts to/from Uint32x4 because
// this list is shared between Int32x4 and Uint32x4.
#define FORALL_INT32X4_ASMJS_OP(_) \
FORALL_INT_SIMD_OP(_) \
FOREACH_MEMORY_X4_SIMD_OP(_) \
_(fromInt8x16Bits) \
_(fromInt16x8Bits) \
_(fromFloat32x4) \
_(fromFloat32x4Bits)
// All operations on Float32X4 in the asm.js world.
#define FORALL_FLOAT32X4_ASMJS_OP(_) \
FORALL_FLOAT_SIMD_OP(_) \
FOREACH_MEMORY_X4_SIMD_OP(_) \
_(fromInt8x16Bits) \
_(fromInt16x8Bits) \
_(fromInt32x4Bits) \
_(fromInt32x4) \
_(fromUint32x4)
namespace js {
// Complete set of SIMD types.
// It must be kept in sync with the enumeration of values in
// TypedObjectConstants.h; in particular we need to ensure that Count is
// appropriately set with respect to the number of actual types.
enum class SimdType {
    Int8x16 = JS_SIMDTYPEREPR_INT8X16,
    Int16x8 = JS_SIMDTYPEREPR_INT16X8,
    Int32x4 = JS_SIMDTYPEREPR_INT32X4,
    Uint8x16 = JS_SIMDTYPEREPR_UINT8X16,
    Uint16x8 = JS_SIMDTYPEREPR_UINT16X8,
    Uint32x4 = JS_SIMDTYPEREPR_UINT32X4,
    Float32x4 = JS_SIMDTYPEREPR_FLOAT32X4,
    Float64x2 = JS_SIMDTYPEREPR_FLOAT64X2,
    Bool8x16 = JS_SIMDTYPEREPR_BOOL8X16,
    Bool16x8 = JS_SIMDTYPEREPR_BOOL16X8,
    Bool32x4 = JS_SIMDTYPEREPR_BOOL32X4,
    Bool64x2 = JS_SIMDTYPEREPR_BOOL64X2,
    // Sentinel: number of SIMD types, not itself a type.
    Count
};
// The integer SIMD types have a lot of operations that do the exact same thing
// for signed and unsigned integer types. Sometimes it is simpler to treat
// signed and unsigned integer SIMD types as the same type, using a SimdSign to
// distinguish the few cases where there is a difference.
// See GetSimdSign() below for the mapping from SimdType to SimdSign.
enum class SimdSign {
    // Signedness is not applicable to this type. (i.e., Float or Bool).
    NotApplicable,
    // Treat as an unsigned integer with a range 0 .. 2^N-1.
    Unsigned,
    // Treat as a signed integer in two's complement encoding.
    Signed,
};
// Get the signedness of a SIMD type.
// Int* types are Signed, Uint* types are Unsigned, and every other type
// (Float*, Bool*) has no applicable sign.
inline SimdSign
GetSimdSign(SimdType t)
{
    if (t == SimdType::Int8x16 || t == SimdType::Int16x8 || t == SimdType::Int32x4)
        return SimdSign::Signed;
    if (t == SimdType::Uint8x16 || t == SimdType::Uint16x8 || t == SimdType::Uint32x4)
        return SimdSign::Unsigned;
    return SimdSign::NotApplicable;
}
// True iff `type` is one of the signed integer SIMD types (Int8x16, Int16x8,
// Int32x4).
inline bool
IsSignedIntSimdType(SimdType type)
{
    const SimdSign sign = GetSimdSign(type);
    return sign == SimdSign::Signed;
}
// Get the boolean SIMD type with the same shape as t.
//
// This is the result type of a comparison operation, and it can also be used to
// identify the geometry of a SIMD type.
inline SimdType
GetBooleanSimdType(SimdType t)
{
    // Group types by lane geometry; Bool types map to themselves.
    if (t == SimdType::Int8x16 || t == SimdType::Uint8x16 || t == SimdType::Bool8x16)
        return SimdType::Bool8x16;
    if (t == SimdType::Int16x8 || t == SimdType::Uint16x8 || t == SimdType::Bool16x8)
        return SimdType::Bool16x8;
    if (t == SimdType::Int32x4 || t == SimdType::Uint32x4 || t == SimdType::Float32x4 ||
        t == SimdType::Bool32x4)
    {
        return SimdType::Bool32x4;
    }
    if (t == SimdType::Float64x2 || t == SimdType::Bool64x2)
        return SimdType::Bool64x2;
    MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Bad SIMD type");
}
// Get the number of lanes in a SIMD type.
// All types are 128-bit vectors, so the lane count is 16 bytes divided by the
// lane width.
inline unsigned
GetSimdLanes(SimdType t)
{
    if (t == SimdType::Int8x16 || t == SimdType::Uint8x16 || t == SimdType::Bool8x16)
        return 16;
    if (t == SimdType::Int16x8 || t == SimdType::Uint16x8 || t == SimdType::Bool16x8)
        return 8;
    if (t == SimdType::Int32x4 || t == SimdType::Uint32x4 || t == SimdType::Float32x4 ||
        t == SimdType::Bool32x4)
    {
        return 4;
    }
    if (t == SimdType::Float64x2 || t == SimdType::Bool64x2)
        return 2;
    MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Bad SIMD type");
}
// Complete set of SIMD operations.
//
// No SIMD types implement all of these operations.
//
// C++ defines keywords and/or/xor/not, so prepend Fn_ to all named functions to
// avoid clashes.
//
// Note: because of a gcc < v4.8's compiler bug, uint8_t can't be used as the
// storage class here. See bug 1243810. See also
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=64037 .
enum class SimdOperation {
    // The constructor call. No Fn_ prefix here.
    Constructor,
    // All the operations, except for casts.
    // DEFOP expands each operation name `x` into an `Fn_x` enumerator.
#define DEFOP(x) Fn_##x,
    FORALL_SIMD_NONCAST_OP(DEFOP)
#undef DEFOP
    // Int <-> Float conversions.
    Fn_fromInt32x4,
    Fn_fromUint32x4,
    Fn_fromFloat32x4,
    // Bitcasts. One for each type with a memory representation.
    Fn_fromInt8x16Bits,
    Fn_fromInt16x8Bits,
    Fn_fromInt32x4Bits,
    Fn_fromUint8x16Bits,
    Fn_fromUint16x8Bits,
    Fn_fromUint32x4Bits,
    Fn_fromFloat32x4Bits,
    Fn_fromFloat64x2Bits,
    // Sentinel for iteration: must equal the final enumerator above.
    Last = Fn_fromFloat64x2Bits
};
} // namespace js
#endif /* builtin_SIMD_h */

Просмотреть файл

@ -3980,7 +3980,12 @@ static bool
IsSimdAvailable(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
args.rval().set(BooleanValue(cx->jitSupportsSimd()));
#if defined(JS_CODEGEN_NONE) || !defined(ENABLE_SIMD)
bool available = false;
#else
bool available = cx->jitSupportsSimd();
#endif
args.rval().set(BooleanValue(available));
return true;
}

Просмотреть файл

@ -11,6 +11,7 @@
#include "jsutil.h"
#include "builtin/SIMDConstants.h"
#include "gc/Marking.h"
#include "js/Vector.h"
#include "util/StringBuffer.h"
@ -243,6 +244,10 @@ ScalarTypeDescr::typeName(Type type)
JS_FOR_EACH_SCALAR_TYPE_REPR(NUMERIC_TYPE_TO_STRING)
#undef NUMERIC_TYPE_TO_STRING
case Scalar::Int64:
case Scalar::Float32x4:
case Scalar::Int8x16:
case Scalar::Int16x8:
case Scalar::Int32x4:
case Scalar::MaxTypedArrayViewType:
break;
}
@ -280,6 +285,10 @@ ScalarTypeDescr::call(JSContext* cx, unsigned argc, Value* vp)
JS_FOR_EACH_SCALAR_TYPE_REPR(SCALARTYPE_CALL)
#undef SCALARTYPE_CALL
case Scalar::Int64:
case Scalar::Float32x4:
case Scalar::Int8x16:
case Scalar::Int16x8:
case Scalar::Int32x4:
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH();
}
@ -392,6 +401,50 @@ js::ReferenceTypeDescr::call(JSContext* cx, unsigned argc, Value* vp)
MOZ_CRASH("Unhandled Reference type");
}
/***************************************************************************
* SIMD type objects
*
* Note: these are partially defined in SIMD.cpp
*/
// Return the SIMD type described by this descriptor, as stored in the
// JS_DESCR_SLOT_TYPE reserved slot.
SimdType
SimdTypeDescr::type() const {
    uint32_t t = uint32_t(getReservedSlot(JS_DESCR_SLOT_TYPE).toInt32());
    MOZ_ASSERT(t < uint32_t(SimdType::Count));
    return SimdType(t);
}
// Byte size of a SIMD value of type t. Every SimdType enumerated above
// describes a 128-bit (16-byte) vector.
uint32_t
SimdTypeDescr::size(SimdType t)
{
    MOZ_ASSERT(unsigned(t) < unsigned(SimdType::Count));
    if (unsigned(t) < unsigned(SimdType::Count))
        return 16;
    MOZ_CRASH("unexpected SIMD type");
}
// SIMD values are aligned to their full size (16 bytes; see size() above).
uint32_t
SimdTypeDescr::alignment(SimdType t)
{
    MOZ_ASSERT(unsigned(t) < unsigned(SimdType::Count));
    return size(t);
}
/***************************************************************************
* ArrayMetaTypeDescr class
*/
@ -1613,6 +1666,7 @@ TypeDescr::hasProperty(const JSAtomState& names, jsid id)
switch (kind()) {
case type::Scalar:
case type::Reference:
case type::Simd:
return false;
case type::Array:
@ -1685,6 +1739,7 @@ TypedObject::obj_hasProperty(JSContext* cx, HandleObject obj, HandleId id, bool*
switch (typedObj->typeDescr().kind()) {
case type::Scalar:
case type::Reference:
case type::Simd:
break;
case type::Array: {
@ -1736,6 +1791,9 @@ TypedObject::obj_getProperty(JSContext* cx, HandleObject obj, HandleValue receiv
case type::Reference:
break;
case type::Simd:
break;
case type::Array:
if (JSID_IS_ATOM(id, cx->names().length)) {
if (!typedObj->isAttached()) {
@ -1782,6 +1840,7 @@ TypedObject::obj_getElement(JSContext* cx, HandleObject obj, HandleValue receive
switch (descr->kind()) {
case type::Scalar:
case type::Reference:
case type::Simd:
case type::Struct:
break;
@ -1827,6 +1886,9 @@ TypedObject::obj_setProperty(JSContext* cx, HandleObject obj, HandleId id, Handl
case type::Reference:
break;
case type::Simd:
break;
case type::Array: {
if (JSID_IS_ATOM(id, cx->names().length)) {
if (receiver.isObject() && obj == &receiver.toObject()) {
@ -1894,6 +1956,7 @@ TypedObject::obj_getOwnPropertyDescriptor(JSContext* cx, HandleObject obj, Handl
switch (descr->kind()) {
case type::Scalar:
case type::Reference:
case type::Simd:
break;
case type::Array:
@ -1947,6 +2010,7 @@ IsOwnId(JSContext* cx, HandleObject obj, HandleId id)
switch (typedObj->typeDescr().kind()) {
case type::Scalar:
case type::Reference:
case type::Simd:
return false;
case type::Array:
@ -1985,7 +2049,8 @@ TypedObject::obj_newEnumerate(JSContext* cx, HandleObject obj, AutoIdVector& pro
RootedId id(cx);
switch (descr->kind()) {
case type::Scalar:
case type::Reference: {
case type::Reference:
case type::Simd: {
// Nothing to enumerate.
break;
}
@ -2470,6 +2535,22 @@ js::GetTypedObjectModule(JSContext* cx, unsigned argc, Value* vp)
return true;
}
// Intrinsic: GetSimdTypeDescr(simdTypeRepr)
//
// Returns the SIMD type descriptor identified by `simdTypeRepr`, which must be
// one of the JS_SIMDTYPEREPR_* constants (i.e. a SimdType enum value).
bool
js::GetSimdTypeDescr(JSContext* cx, unsigned argc, Value* vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    MOZ_ASSERT(args.length() == 1);
    MOZ_ASSERT(args[0].isInt32());
    // One of the JS_SIMDTYPEREPR_* constants / a SimdType enum value.
    // getOrCreateSimdTypeDescr() will do the range check.
    int32_t simdTypeRepr = args[0].toInt32();
    Rooted<GlobalObject*> global(cx, cx->global());
    MOZ_ASSERT(global);
    // NOTE(review): if getOrCreateSimdTypeDescr() can return null (e.g. OOM),
    // the dereference below would crash — confirm callers guarantee success.
    auto* obj = GlobalObject::getOrCreateSimdTypeDescr(cx, global, SimdType(simdTypeRepr));
    args.rval().setObject(*obj);
    return true;
}
#define JS_STORE_SCALAR_CLASS_IMPL(_constant, T, _name) \
bool \
js::StoreScalar##T::Func(JSContext* cx, unsigned argc, Value* vp) \
@ -2660,6 +2741,7 @@ visitReferences(TypeDescr& descr,
switch (descr.kind()) {
case type::Scalar:
case type::Simd:
return;
case type::Reference:

Просмотреть файл

@ -121,6 +121,7 @@ namespace type {
enum Kind {
Scalar = JS_TYPEREPR_SCALAR_KIND,
Reference = JS_TYPEREPR_REFERENCE_KIND,
Simd = JS_TYPEREPR_SIMD_KIND,
Struct = JS_TYPEREPR_STRUCT_KIND,
Array = JS_TYPEREPR_ARRAY_KIND
};
@ -132,6 +133,7 @@ enum Kind {
class SimpleTypeDescr;
class ComplexTypeDescr;
class SimdTypeDescr;
class StructTypeDescr;
class TypedProto;
@ -253,6 +255,14 @@ class ScalarTypeDescr : public SimpleTypeDescr
"TypedObjectConstants.h must be consistent with Scalar::Type");
static_assert(Scalar::Uint8Clamped == JS_SCALARTYPEREPR_UINT8_CLAMPED,
"TypedObjectConstants.h must be consistent with Scalar::Type");
static_assert(Scalar::Float32x4 == JS_SCALARTYPEREPR_FLOAT32X4,
"TypedObjectConstants.h must be consistent with Scalar::Type");
static_assert(Scalar::Int8x16 == JS_SCALARTYPEREPR_INT8X16,
"TypedObjectConstants.h must be consistent with Scalar::Type");
static_assert(Scalar::Int16x8 == JS_SCALARTYPEREPR_INT16X8,
"TypedObjectConstants.h must be consistent with Scalar::Type");
static_assert(Scalar::Int32x4 == JS_SCALARTYPEREPR_INT32X4,
"TypedObjectConstants.h must be consistent with Scalar::Type");
return Type(getReservedSlot(JS_DESCR_SLOT_TYPE).toInt32());
}
@ -330,6 +340,25 @@ class ComplexTypeDescr : public TypeDescr
}
};
enum class SimdType;
/*
 * SIMD Type descriptors.
 */
class SimdTypeDescr : public ComplexTypeDescr
{
  public:
    static const type::Kind Kind = type::Simd;
    static const bool Opaque = false;
    static const Class class_;
    // Byte size of a value of SIMD type t.
    static uint32_t size(SimdType t);
    // Required alignment of a value of SIMD type t.
    static uint32_t alignment(SimdType t);
    // Native called when the descriptor is invoked as a function.
    static MOZ_MUST_USE bool call(JSContext* cx, unsigned argc, Value* vp);
    static bool is(const Value& v);
    // The SimdType this descriptor describes (from a reserved slot).
    SimdType type() const;
};
bool IsTypedObjectClass(const Class* clasp); // Defined below
bool IsTypedObjectArray(JSObject& obj);
@ -765,6 +794,16 @@ class InlineOpaqueTypedObject : public InlineTypedObject
static const Class class_;
};
// Class for the global SIMD object.
class SimdObject : public NativeObject
{
  public:
    static const Class class_;
    // SIMD.toString implementation.
    static MOZ_MUST_USE bool toString(JSContext* cx, unsigned int argc, Value* vp);
    // Lazy resolve hook: instantiates SIMD.* type properties on demand.
    static MOZ_MUST_USE bool resolve(JSContext* cx, JS::HandleObject obj, JS::HandleId,
                                     bool* resolved);
};
/*
* Usage: NewOpaqueTypedObject(typeObj)
*
@ -862,6 +901,16 @@ MOZ_MUST_USE bool ClampToUint8(JSContext* cx, unsigned argc, Value* vp);
*/
MOZ_MUST_USE bool GetTypedObjectModule(JSContext* cx, unsigned argc, Value* vp);
/*
* Usage: GetSimdTypeDescr(simdTypeRepr)
*
* Returns one of the SIMD type objects, identified by `simdTypeRepr` which must
* be one of the JS_SIMDTYPEREPR_* constants.
*
* The SIMD pseudo-module must have been initialized for this to be safe.
*/
MOZ_MUST_USE bool GetSimdTypeDescr(JSContext* cx, unsigned argc, Value* vp);
/*
* Usage: Store_int8(targetDatum, targetOffset, value)
* ...
@ -996,7 +1045,8 @@ inline bool
IsComplexTypeDescrClass(const Class* clasp)
{
return clasp == &StructTypeDescr::class_ ||
clasp == &ArrayTypeDescr::class_;
clasp == &ArrayTypeDescr::class_ ||
clasp == &SimdTypeDescr::class_;
}
inline bool

Просмотреть файл

@ -53,6 +53,9 @@ function TypedObjectGet(descr, typedObj, offset) {
case JS_TYPEREPR_REFERENCE_KIND:
return TypedObjectGetReference(descr, typedObj, offset);
case JS_TYPEREPR_SIMD_KIND:
return TypedObjectGetSimd(descr, typedObj, offset);
case JS_TYPEREPR_ARRAY_KIND:
case JS_TYPEREPR_STRUCT_KIND:
return TypedObjectGetDerived(descr, typedObj, offset);
@ -134,6 +137,144 @@ function TypedObjectGetReference(descr, typedObj, offset) {
return undefined;
}
// Loads the SIMD value of kind `descr` stored at byte `offset` inside
// `typedObj`: each lane is read with the matching typed Load_* intrinsic and
// the lanes are passed to the SIMD type's constructor.
function TypedObjectGetSimd(descr, typedObj, offset) {
  var type = DESCR_TYPE(descr);
  var simdTypeDescr = GetSimdTypeDescr(type);
  switch (type) {
  case JS_SIMDTYPEREPR_FLOAT32X4:
    var x = Load_float32(typedObj, offset + 0);
    var y = Load_float32(typedObj, offset + 4);
    var z = Load_float32(typedObj, offset + 8);
    var w = Load_float32(typedObj, offset + 12);
    return simdTypeDescr(x, y, z, w);

  case JS_SIMDTYPEREPR_FLOAT64X2:
    var x = Load_float64(typedObj, offset + 0);
    var y = Load_float64(typedObj, offset + 8);
    return simdTypeDescr(x, y);

  case JS_SIMDTYPEREPR_INT8X16:
    var s0 = Load_int8(typedObj, offset + 0);
    var s1 = Load_int8(typedObj, offset + 1);
    var s2 = Load_int8(typedObj, offset + 2);
    var s3 = Load_int8(typedObj, offset + 3);
    var s4 = Load_int8(typedObj, offset + 4);
    var s5 = Load_int8(typedObj, offset + 5);
    var s6 = Load_int8(typedObj, offset + 6);
    var s7 = Load_int8(typedObj, offset + 7);
    var s8 = Load_int8(typedObj, offset + 8);
    var s9 = Load_int8(typedObj, offset + 9);
    var s10 = Load_int8(typedObj, offset + 10);
    var s11 = Load_int8(typedObj, offset + 11);
    var s12 = Load_int8(typedObj, offset + 12);
    var s13 = Load_int8(typedObj, offset + 13);
    var s14 = Load_int8(typedObj, offset + 14);
    var s15 = Load_int8(typedObj, offset + 15);
    return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15);

  case JS_SIMDTYPEREPR_INT16X8:
    var s0 = Load_int16(typedObj, offset + 0);
    var s1 = Load_int16(typedObj, offset + 2);
    var s2 = Load_int16(typedObj, offset + 4);
    var s3 = Load_int16(typedObj, offset + 6);
    var s4 = Load_int16(typedObj, offset + 8);
    var s5 = Load_int16(typedObj, offset + 10);
    var s6 = Load_int16(typedObj, offset + 12);
    var s7 = Load_int16(typedObj, offset + 14);
    return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7);

  case JS_SIMDTYPEREPR_INT32X4:
    var x = Load_int32(typedObj, offset + 0);
    var y = Load_int32(typedObj, offset + 4);
    var z = Load_int32(typedObj, offset + 8);
    var w = Load_int32(typedObj, offset + 12);
    return simdTypeDescr(x, y, z, w);

  case JS_SIMDTYPEREPR_UINT8X16:
    var s0 = Load_uint8(typedObj, offset + 0);
    var s1 = Load_uint8(typedObj, offset + 1);
    var s2 = Load_uint8(typedObj, offset + 2);
    var s3 = Load_uint8(typedObj, offset + 3);
    var s4 = Load_uint8(typedObj, offset + 4);
    var s5 = Load_uint8(typedObj, offset + 5);
    var s6 = Load_uint8(typedObj, offset + 6);
    var s7 = Load_uint8(typedObj, offset + 7);
    var s8 = Load_uint8(typedObj, offset + 8);
    var s9 = Load_uint8(typedObj, offset + 9);
    var s10 = Load_uint8(typedObj, offset + 10);
    var s11 = Load_uint8(typedObj, offset + 11);
    var s12 = Load_uint8(typedObj, offset + 12);
    var s13 = Load_uint8(typedObj, offset + 13);
    var s14 = Load_uint8(typedObj, offset + 14);
    var s15 = Load_uint8(typedObj, offset + 15);
    return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15);

  case JS_SIMDTYPEREPR_UINT16X8:
    var s0 = Load_uint16(typedObj, offset + 0);
    var s1 = Load_uint16(typedObj, offset + 2);
    var s2 = Load_uint16(typedObj, offset + 4);
    var s3 = Load_uint16(typedObj, offset + 6);
    var s4 = Load_uint16(typedObj, offset + 8);
    var s5 = Load_uint16(typedObj, offset + 10);
    var s6 = Load_uint16(typedObj, offset + 12);
    var s7 = Load_uint16(typedObj, offset + 14);
    return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7);

  case JS_SIMDTYPEREPR_UINT32X4:
    var x = Load_uint32(typedObj, offset + 0);
    var y = Load_uint32(typedObj, offset + 4);
    var z = Load_uint32(typedObj, offset + 8);
    var w = Load_uint32(typedObj, offset + 12);
    return simdTypeDescr(x, y, z, w);

  // Bool lanes are stored as integers of the lane width.
  case JS_SIMDTYPEREPR_BOOL8X16:
    var s0 = Load_int8(typedObj, offset + 0);
    var s1 = Load_int8(typedObj, offset + 1);
    var s2 = Load_int8(typedObj, offset + 2);
    var s3 = Load_int8(typedObj, offset + 3);
    var s4 = Load_int8(typedObj, offset + 4);
    var s5 = Load_int8(typedObj, offset + 5);
    var s6 = Load_int8(typedObj, offset + 6);
    var s7 = Load_int8(typedObj, offset + 7);
    var s8 = Load_int8(typedObj, offset + 8);
    var s9 = Load_int8(typedObj, offset + 9);
    var s10 = Load_int8(typedObj, offset + 10);
    var s11 = Load_int8(typedObj, offset + 11);
    var s12 = Load_int8(typedObj, offset + 12);
    var s13 = Load_int8(typedObj, offset + 13);
    var s14 = Load_int8(typedObj, offset + 14);
    var s15 = Load_int8(typedObj, offset + 15);
    return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15);

  case JS_SIMDTYPEREPR_BOOL16X8:
    var s0 = Load_int16(typedObj, offset + 0);
    var s1 = Load_int16(typedObj, offset + 2);
    var s2 = Load_int16(typedObj, offset + 4);
    var s3 = Load_int16(typedObj, offset + 6);
    var s4 = Load_int16(typedObj, offset + 8);
    var s5 = Load_int16(typedObj, offset + 10);
    var s6 = Load_int16(typedObj, offset + 12);
    var s7 = Load_int16(typedObj, offset + 14);
    return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7);

  case JS_SIMDTYPEREPR_BOOL32X4:
    var x = Load_int32(typedObj, offset + 0);
    var y = Load_int32(typedObj, offset + 4);
    var z = Load_int32(typedObj, offset + 8);
    var w = Load_int32(typedObj, offset + 12);
    return simdTypeDescr(x, y, z, w);

  // Bool64x2 lanes are 8 bytes wide; only the low int32 of each lane is read.
  case JS_SIMDTYPEREPR_BOOL64X2:
    var x = Load_int32(typedObj, offset + 0);
    var y = Load_int32(typedObj, offset + 8);
    return simdTypeDescr(x, y);

  }

  assert(false, "Unhandled SIMD type: " + type);
  return undefined;
}
///////////////////////////////////////////////////////////////////////////
// Setting values
//
@ -155,6 +296,10 @@ function TypedObjectSet(descr, typedObj, offset, name, fromValue) {
TypedObjectSetReference(descr, typedObj, offset, name, fromValue);
return;
case JS_TYPEREPR_SIMD_KIND:
TypedObjectSetSimd(descr, typedObj, offset, fromValue);
return;
case JS_TYPEREPR_ARRAY_KIND:
var length = DESCR_ARRAY_LENGTH(descr);
if (TypedObjectSetArray(descr, length, typedObj, offset, fromValue))
@ -269,6 +414,106 @@ function TypedObjectSetReference(descr, typedObj, offset, name, fromValue) {
}
// Stores the SIMD value `fromValue` into `typedObj` at byte `offset`.
// `fromValue` must be a typed object whose descriptor is equivalent to
// `descr`; otherwise a TypeError is thrown.
function TypedObjectSetSimd(descr, typedObj, offset, fromValue) {
  if (!IsObject(fromValue) || !ObjectIsTypedObject(fromValue))
    ThrowTypeError(JSMSG_CANT_CONVERT_TO,
                   typeof(fromValue),
                   DESCR_STRING_REPR(descr));

  if (!DescrsEquiv(descr, TypedObjectTypeDescr(fromValue)))
    ThrowTypeError(JSMSG_CANT_CONVERT_TO,
                   typeof(fromValue),
                   DESCR_STRING_REPR(descr));

  var type = DESCR_TYPE(descr);
  switch (type) {
  case JS_SIMDTYPEREPR_FLOAT32X4:
    Store_float32(typedObj, offset + 0, Load_float32(fromValue, 0));
    Store_float32(typedObj, offset + 4, Load_float32(fromValue, 4));
    Store_float32(typedObj, offset + 8, Load_float32(fromValue, 8));
    Store_float32(typedObj, offset + 12, Load_float32(fromValue, 12));
    break;
  case JS_SIMDTYPEREPR_FLOAT64X2:
    Store_float64(typedObj, offset + 0, Load_float64(fromValue, 0));
    Store_float64(typedObj, offset + 8, Load_float64(fromValue, 8));
    break;
  // Int and Bool types of the same lane width share the byte-for-byte copy.
  case JS_SIMDTYPEREPR_INT8X16:
  case JS_SIMDTYPEREPR_BOOL8X16:
    Store_int8(typedObj, offset + 0, Load_int8(fromValue, 0));
    Store_int8(typedObj, offset + 1, Load_int8(fromValue, 1));
    Store_int8(typedObj, offset + 2, Load_int8(fromValue, 2));
    Store_int8(typedObj, offset + 3, Load_int8(fromValue, 3));
    Store_int8(typedObj, offset + 4, Load_int8(fromValue, 4));
    Store_int8(typedObj, offset + 5, Load_int8(fromValue, 5));
    Store_int8(typedObj, offset + 6, Load_int8(fromValue, 6));
    Store_int8(typedObj, offset + 7, Load_int8(fromValue, 7));
    Store_int8(typedObj, offset + 8, Load_int8(fromValue, 8));
    Store_int8(typedObj, offset + 9, Load_int8(fromValue, 9));
    Store_int8(typedObj, offset + 10, Load_int8(fromValue, 10));
    Store_int8(typedObj, offset + 11, Load_int8(fromValue, 11));
    Store_int8(typedObj, offset + 12, Load_int8(fromValue, 12));
    Store_int8(typedObj, offset + 13, Load_int8(fromValue, 13));
    Store_int8(typedObj, offset + 14, Load_int8(fromValue, 14));
    Store_int8(typedObj, offset + 15, Load_int8(fromValue, 15));
    break;
  case JS_SIMDTYPEREPR_INT16X8:
  case JS_SIMDTYPEREPR_BOOL16X8:
    Store_int16(typedObj, offset + 0, Load_int16(fromValue, 0));
    Store_int16(typedObj, offset + 2, Load_int16(fromValue, 2));
    Store_int16(typedObj, offset + 4, Load_int16(fromValue, 4));
    Store_int16(typedObj, offset + 6, Load_int16(fromValue, 6));
    Store_int16(typedObj, offset + 8, Load_int16(fromValue, 8));
    Store_int16(typedObj, offset + 10, Load_int16(fromValue, 10));
    Store_int16(typedObj, offset + 12, Load_int16(fromValue, 12));
    Store_int16(typedObj, offset + 14, Load_int16(fromValue, 14));
    break;
  // Bool64x2 shares the 4 x int32 copy: it moves all 16 bytes.
  case JS_SIMDTYPEREPR_INT32X4:
  case JS_SIMDTYPEREPR_BOOL32X4:
  case JS_SIMDTYPEREPR_BOOL64X2:
    Store_int32(typedObj, offset + 0, Load_int32(fromValue, 0));
    Store_int32(typedObj, offset + 4, Load_int32(fromValue, 4));
    Store_int32(typedObj, offset + 8, Load_int32(fromValue, 8));
    Store_int32(typedObj, offset + 12, Load_int32(fromValue, 12));
    break;
  case JS_SIMDTYPEREPR_UINT8X16:
    Store_uint8(typedObj, offset + 0, Load_uint8(fromValue, 0));
    Store_uint8(typedObj, offset + 1, Load_uint8(fromValue, 1));
    Store_uint8(typedObj, offset + 2, Load_uint8(fromValue, 2));
    Store_uint8(typedObj, offset + 3, Load_uint8(fromValue, 3));
    Store_uint8(typedObj, offset + 4, Load_uint8(fromValue, 4));
    Store_uint8(typedObj, offset + 5, Load_uint8(fromValue, 5));
    Store_uint8(typedObj, offset + 6, Load_uint8(fromValue, 6));
    Store_uint8(typedObj, offset + 7, Load_uint8(fromValue, 7));
    Store_uint8(typedObj, offset + 8, Load_uint8(fromValue, 8));
    Store_uint8(typedObj, offset + 9, Load_uint8(fromValue, 9));
    Store_uint8(typedObj, offset + 10, Load_uint8(fromValue, 10));
    Store_uint8(typedObj, offset + 11, Load_uint8(fromValue, 11));
    Store_uint8(typedObj, offset + 12, Load_uint8(fromValue, 12));
    Store_uint8(typedObj, offset + 13, Load_uint8(fromValue, 13));
    Store_uint8(typedObj, offset + 14, Load_uint8(fromValue, 14));
    Store_uint8(typedObj, offset + 15, Load_uint8(fromValue, 15));
    break;
  case JS_SIMDTYPEREPR_UINT16X8:
    Store_uint16(typedObj, offset + 0, Load_uint16(fromValue, 0));
    Store_uint16(typedObj, offset + 2, Load_uint16(fromValue, 2));
    Store_uint16(typedObj, offset + 4, Load_uint16(fromValue, 4));
    Store_uint16(typedObj, offset + 6, Load_uint16(fromValue, 6));
    Store_uint16(typedObj, offset + 8, Load_uint16(fromValue, 8));
    Store_uint16(typedObj, offset + 10, Load_uint16(fromValue, 10));
    Store_uint16(typedObj, offset + 12, Load_uint16(fromValue, 12));
    Store_uint16(typedObj, offset + 14, Load_uint16(fromValue, 14));
    break;
  case JS_SIMDTYPEREPR_UINT32X4:
    Store_uint32(typedObj, offset + 0, Load_uint32(fromValue, 0));
    Store_uint32(typedObj, offset + 4, Load_uint32(fromValue, 4));
    Store_uint32(typedObj, offset + 8, Load_uint32(fromValue, 8));
    Store_uint32(typedObj, offset + 12, Load_uint32(fromValue, 12));
    break;
  default:
    assert(false, "Unhandled Simd type: " + type);
  }
}
///////////////////////////////////////////////////////////////////////////
// C++ Wrappers
//
@ -384,6 +629,241 @@ function TypedObjectArrayRedimension(newArrayType) {
return NewDerivedTypedObject(newArrayType, this, 0);
}
///////////////////////////////////////////////////////////////////////////
// SIMD
// Maps a JS_SIMDTYPEREPR_* type constant to the SIMD type's name (e.g.
// "Int32x4"). Returns undefined (after asserting) for an unknown constant.
function SimdProtoString(type) {
  switch (type) {
  case JS_SIMDTYPEREPR_INT8X16:
    return "Int8x16";
  case JS_SIMDTYPEREPR_INT16X8:
    return "Int16x8";
  case JS_SIMDTYPEREPR_INT32X4:
    return "Int32x4";
  case JS_SIMDTYPEREPR_UINT8X16:
    return "Uint8x16";
  case JS_SIMDTYPEREPR_UINT16X8:
    return "Uint16x8";
  case JS_SIMDTYPEREPR_UINT32X4:
    return "Uint32x4";
  case JS_SIMDTYPEREPR_FLOAT32X4:
    return "Float32x4";
  case JS_SIMDTYPEREPR_FLOAT64X2:
    return "Float64x2";
  case JS_SIMDTYPEREPR_BOOL8X16:
    return "Bool8x16";
  case JS_SIMDTYPEREPR_BOOL16X8:
    return "Bool16x8";
  case JS_SIMDTYPEREPR_BOOL32X4:
    return "Bool32x4";
  case JS_SIMDTYPEREPR_BOOL64X2:
    return "Bool64x2";
  }

  assert(false, "Unhandled type constant");
  return undefined;
}
// Maps a JS_SIMDTYPEREPR_* type constant to the number of lanes in that
// 128-bit SIMD type. Returns undefined (after asserting) for an unknown
// constant.
//
// Fix: the Uint* type constants were missing, so e.g. Uint32x4 fell through to
// the assert and returned undefined even though SimdProtoString above and the
// SimdType enum handle all twelve types. The Uint cases share the lane count
// of the Int type of the same width.
function SimdTypeToLength(type) {
  switch (type) {
  case JS_SIMDTYPEREPR_INT8X16:
  case JS_SIMDTYPEREPR_UINT8X16:
  case JS_SIMDTYPEREPR_BOOL8X16:
    return 16;
  case JS_SIMDTYPEREPR_INT16X8:
  case JS_SIMDTYPEREPR_UINT16X8:
  case JS_SIMDTYPEREPR_BOOL16X8:
    return 8;
  case JS_SIMDTYPEREPR_INT32X4:
  case JS_SIMDTYPEREPR_UINT32X4:
  case JS_SIMDTYPEREPR_FLOAT32X4:
  case JS_SIMDTYPEREPR_BOOL32X4:
    return 4;
  case JS_SIMDTYPEREPR_FLOAT64X2:
  case JS_SIMDTYPEREPR_BOOL64X2:
    return 2;
  }

  assert(false, "Unhandled type constant");
  return undefined;
}
// This implements SIMD.*.prototype.valueOf().
// Once we have proper value semantics for SIMD types, this function should just
// perform a type check and return this.
// For now, throw a TypeError unconditionally since valueOf() was probably
// called from ToNumber() which is supposed to throw when attempting to convert
// a SIMD value to a number.
function SimdValueOf() {
  // Both guards verify `this` really is a SIMD typed object before throwing
  // the dedicated "can't convert SIMD to number" error.
  if (!IsObject(this) || !ObjectIsTypedObject(this))
    ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD", "valueOf", typeof this);

  var descr = TypedObjectTypeDescr(this);

  if (DESCR_KIND(descr) != JS_TYPEREPR_SIMD_KIND)
    ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD", "valueOf", typeof this);

  // Unconditional: SIMD values cannot currently be converted to numbers.
  ThrowTypeError(JSMSG_SIMD_TO_NUMBER);
}
// SIMD.*.prototype.toSource(): render a SIMD value as "SIMD.Type(lanes...)".
// Throws a TypeError when the receiver is not a SIMD typed object.
function SimdToSource() {
    if (IsObject(this) && ObjectIsTypedObject(this)) {
        var descr = TypedObjectTypeDescr(this);
        if (DESCR_KIND(descr) === JS_TYPEREPR_SIMD_KIND)
            return SimdFormatString(descr, this);
    }
    ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD.*", "toSource", typeof this);
}
// SIMD.*.prototype.toString(): same formatting as toSource().
// Throws a TypeError when the receiver is not a SIMD typed object.
function SimdToString() {
    var isTypedObject = IsObject(this) && ObjectIsTypedObject(this);
    var descr = isTypedObject ? TypedObjectTypeDescr(this) : null;
    if (!isTypedObject || DESCR_KIND(descr) != JS_TYPEREPR_SIMD_KIND)
        ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD.*", "toString", typeof this);
    return SimdFormatString(descr, this);
}
// Build the "SIMD.<Type>(<lane0>, <lane1>, ...)" string for a SIMD typed
// object. `descr` is the object's type descriptor, `typedObj` the value.
// Each case extracts every lane with the type-specific std_SIMD_*_extractLane
// intrinsic (called via callFunction so user code cannot intercept it) and
// interpolates the lanes into a template string.
function SimdFormatString(descr, typedObj) {
  var typerepr = DESCR_TYPE(descr);
  var protoString = SimdProtoString(typerepr);
  switch (typerepr) {
      // 16 x 8-bit signed integer lanes.
      case JS_SIMDTYPEREPR_INT8X16: {
          var s1 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 0);
          var s2 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 1);
          var s3 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 2);
          var s4 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 3);
          var s5 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 4);
          var s6 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 5);
          var s7 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 6);
          var s8 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 7);
          var s9 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 8);
          var s10 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 9);
          var s11 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 10);
          var s12 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 11);
          var s13 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 12);
          var s14 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 13);
          var s15 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 14);
          var s16 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 15);
          return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8}, ${s9}, ${s10}, ${s11}, ${s12}, ${s13}, ${s14}, ${s15}, ${s16})`;
      }
      // 8 x 16-bit signed integer lanes.
      case JS_SIMDTYPEREPR_INT16X8: {
          var s1 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 0);
          var s2 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 1);
          var s3 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 2);
          var s4 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 3);
          var s5 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 4);
          var s6 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 5);
          var s7 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 6);
          var s8 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 7);
          return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8})`;
      }
      // 4 x 32-bit signed integer lanes.
      case JS_SIMDTYPEREPR_INT32X4: {
          var x = callFunction(std_SIMD_Int32x4_extractLane, null, typedObj, 0);
          var y = callFunction(std_SIMD_Int32x4_extractLane, null, typedObj, 1);
          var z = callFunction(std_SIMD_Int32x4_extractLane, null, typedObj, 2);
          var w = callFunction(std_SIMD_Int32x4_extractLane, null, typedObj, 3);
          return `SIMD.${protoString}(${x}, ${y}, ${z}, ${w})`;
      }
      // 16 x 8-bit unsigned integer lanes.
      case JS_SIMDTYPEREPR_UINT8X16: {
          var s1 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 0);
          var s2 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 1);
          var s3 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 2);
          var s4 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 3);
          var s5 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 4);
          var s6 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 5);
          var s7 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 6);
          var s8 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 7);
          var s9 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 8);
          var s10 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 9);
          var s11 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 10);
          var s12 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 11);
          var s13 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 12);
          var s14 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 13);
          var s15 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 14);
          var s16 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 15);
          return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8}, ${s9}, ${s10}, ${s11}, ${s12}, ${s13}, ${s14}, ${s15}, ${s16})`;
      }
      // 8 x 16-bit unsigned integer lanes.
      case JS_SIMDTYPEREPR_UINT16X8: {
          var s1 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 0);
          var s2 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 1);
          var s3 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 2);
          var s4 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 3);
          var s5 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 4);
          var s6 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 5);
          var s7 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 6);
          var s8 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 7);
          return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8})`;
      }
      // 4 x 32-bit unsigned integer lanes.
      case JS_SIMDTYPEREPR_UINT32X4: {
          var x = callFunction(std_SIMD_Uint32x4_extractLane, null, typedObj, 0);
          var y = callFunction(std_SIMD_Uint32x4_extractLane, null, typedObj, 1);
          var z = callFunction(std_SIMD_Uint32x4_extractLane, null, typedObj, 2);
          var w = callFunction(std_SIMD_Uint32x4_extractLane, null, typedObj, 3);
          return `SIMD.${protoString}(${x}, ${y}, ${z}, ${w})`;
      }
      // 4 x 32-bit float lanes.
      case JS_SIMDTYPEREPR_FLOAT32X4: {
          var x = callFunction(std_SIMD_Float32x4_extractLane, null, typedObj, 0);
          var y = callFunction(std_SIMD_Float32x4_extractLane, null, typedObj, 1);
          var z = callFunction(std_SIMD_Float32x4_extractLane, null, typedObj, 2);
          var w = callFunction(std_SIMD_Float32x4_extractLane, null, typedObj, 3);
          return `SIMD.${protoString}(${x}, ${y}, ${z}, ${w})`;
      }
      // 2 x 64-bit float lanes.
      case JS_SIMDTYPEREPR_FLOAT64X2: {
          var x = callFunction(std_SIMD_Float64x2_extractLane, null, typedObj, 0);
          var y = callFunction(std_SIMD_Float64x2_extractLane, null, typedObj, 1);
          return `SIMD.${protoString}(${x}, ${y})`;
      }
      // 16 x boolean lanes.
      case JS_SIMDTYPEREPR_BOOL8X16: {
          var s1 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 0);
          var s2 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 1);
          var s3 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 2);
          var s4 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 3);
          var s5 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 4);
          var s6 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 5);
          var s7 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 6);
          var s8 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 7);
          var s9 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 8);
          var s10 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 9);
          var s11 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 10);
          var s12 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 11);
          var s13 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 12);
          var s14 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 13);
          var s15 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 14);
          var s16 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 15);
          return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8}, ${s9}, ${s10}, ${s11}, ${s12}, ${s13}, ${s14}, ${s15}, ${s16})`;
      }
      // 8 x boolean lanes.
      case JS_SIMDTYPEREPR_BOOL16X8: {
          var s1 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 0);
          var s2 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 1);
          var s3 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 2);
          var s4 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 3);
          var s5 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 4);
          var s6 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 5);
          var s7 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 6);
          var s8 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 7);
          return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8})`;
      }
      // 4 x boolean lanes.
      case JS_SIMDTYPEREPR_BOOL32X4: {
          var x = callFunction(std_SIMD_Bool32x4_extractLane, null, typedObj, 0);
          var y = callFunction(std_SIMD_Bool32x4_extractLane, null, typedObj, 1);
          var z = callFunction(std_SIMD_Bool32x4_extractLane, null, typedObj, 2);
          var w = callFunction(std_SIMD_Bool32x4_extractLane, null, typedObj, 3);
          return `SIMD.${protoString}(${x}, ${y}, ${z}, ${w})`;
      }
      // 2 x boolean lanes.
      case JS_SIMDTYPEREPR_BOOL64X2: {
          var x = callFunction(std_SIMD_Bool64x2_extractLane, null, typedObj, 0);
          var y = callFunction(std_SIMD_Bool64x2_extractLane, null, typedObj, 1);
          return `SIMD.${protoString}(${x}, ${y})`;
      }
  }
  // Unreachable for valid descriptors; "?" is a defensive fallback.
  assert(false, "unexpected SIMD kind");
  return "?";
}
///////////////////////////////////////////////////////////////////////////
// Miscellaneous

Просмотреть файл

@ -53,7 +53,7 @@
#define JS_DESCR_SLOT_ARRAYPROTO 6 // Lazily created prototype for arrays
#define JS_DESCR_SLOT_TRACE_LIST 7 // List of references for use in tracing
// Slots on scalars, references
// Slots on scalars, references, and SIMD objects
#define JS_DESCR_SLOT_TYPE 8 // Type code
// Slots on array descriptors
@ -75,6 +75,7 @@
#define JS_TYPEREPR_REFERENCE_KIND 2
#define JS_TYPEREPR_STRUCT_KIND 3
#define JS_TYPEREPR_ARRAY_KIND 4
#define JS_TYPEREPR_SIMD_KIND 5
// These constants are for use exclusively in JS code. In C++ code,
// prefer Scalar::Int8 etc, which allows you to write a switch which will
@ -88,6 +89,10 @@
#define JS_SCALARTYPEREPR_FLOAT32 6
#define JS_SCALARTYPEREPR_FLOAT64 7
#define JS_SCALARTYPEREPR_UINT8_CLAMPED 8
#define JS_SCALARTYPEREPR_FLOAT32X4 11
#define JS_SCALARTYPEREPR_INT8X16 12
#define JS_SCALARTYPEREPR_INT16X8 13
#define JS_SCALARTYPEREPR_INT32X4 14
// These constants are for use exclusively in JS code. In C++ code,
// prefer ReferenceTypeRepresentation::TYPE_ANY etc, which allows
@ -97,4 +102,20 @@
#define JS_REFERENCETYPEREPR_OBJECT 1
#define JS_REFERENCETYPEREPR_STRING 2
// These constants are for use exclusively in JS code. In C++ code, prefer
// SimdType::Int32x4 etc, since that allows you to write a switch which will
// receive a warning if you omit a case.
#define JS_SIMDTYPEREPR_INT8X16 0
#define JS_SIMDTYPEREPR_INT16X8 1
#define JS_SIMDTYPEREPR_INT32X4 2
#define JS_SIMDTYPEREPR_UINT8X16 3
#define JS_SIMDTYPEREPR_UINT16X8 4
#define JS_SIMDTYPEREPR_UINT32X4 5
#define JS_SIMDTYPEREPR_FLOAT32X4 6
#define JS_SIMDTYPEREPR_FLOAT64X2 7
#define JS_SIMDTYPEREPR_BOOL8X16 8
#define JS_SIMDTYPEREPR_BOOL16X8 9
#define JS_SIMDTYPEREPR_BOOL32X4 10
#define JS_SIMDTYPEREPR_BOOL64X2 11
#endif

Просмотреть файл

@ -1,3 +1,4 @@
SIMD/nursery-overflow.js
asm.js/testBug1117235.js
asm.js/testParallelCompile.js
auto-regress/bug653395.js

Просмотреть файл

@ -152,6 +152,11 @@ Failed to do range check of element access on a typed object.
### AccessNotDense
### AccessNotSimdObject
The observed type of the target of the property access doesn't guarantee
that it is a SIMD object.
### AccessNotTypedObject
The observed type of the target of the property access doesn't guarantee
@ -217,6 +222,15 @@ the keys have never been observed to be a String, Symbol, or Int32.
IonMonkey only generates inline caches for element accesses which are
either on dense objects (e.g. dense Arrays), or Typed Arrays.
### NoSimdJitSupport
Optimization failed because SIMD JIT support was not enabled.
### SimdTypeNotOptimized
The type observed as being retrieved from this property access did not
match an optimizable type.
### HasCommonInliningPath
Inlining was abandoned because the inlining call path was repeated. A

Просмотреть файл

@ -253,6 +253,9 @@ def main(argv):
# This part is equivalent to:
# skip-if = coverage
if os.getenv('GCOV_PREFIX') is not None:
# GCOV errors.
options.exclude += [os.path.join('asm.js', 'testSIMD.js')] # Bug 1347245
# JSVM errors.
options.exclude += [os.path.join('basic', 'functionnames.js')] # Bug 1369783
options.exclude += [os.path.join('debug', 'Debugger-findScripts-23.js')]

109
js/src/jit-test/lib/simd.js Normal file
Просмотреть файл

@ -0,0 +1,109 @@
if (!this.hasOwnProperty("SIMD"))
quit();
// Apply a binary boolean operator lane-wise to two 4-lane boolean vectors.
// Returns a plain 4-element array of the results (the scalar reference model
// that tests compare JIT-generated SIMD results against).
function booleanBinaryX4(op, v, w) {
    var lhs = simdToArray(v);
    var rhs = simdToArray(w);
    var result = [];
    for (var lane = 0; lane < 4; lane++)
        result.push(op(lhs[lane], rhs[lane]));
    return result;
}
// Scalar reference model for lane-wise binary arithmetic: fround both input
// vectors' lanes, apply `op` pairwise, and fround the results again to mimic
// single-precision SIMD rounding.
function binaryX(op, v, w) {
    var lhs = simdToArray(v).map(Math.fround);
    var rhs = simdToArray(w).map(Math.fround);
    var out = [];
    for (var lane = 0; lane < lhs.length; lane++)
        out.push(op(lhs[lane], rhs[lane]));
    return out.map(Math.fround);
}
// Scalar reference model for lane-wise unary ops on 4-lane vectors:
// coerce each input lane, apply `op`, then coerce each result the same way.
function unaryX4(op, v, coerceFunc) {
    var lanes = simdToArray(v).map(coerceFunc);
    var out = [];
    for (var lane = 0; lane < 4; lane++)
        out.push(op(lanes[lane]));
    return out.map(coerceFunc);
}
// Assert a ~ b: either both are NaN, or they differ by less than 1e-3.
function assertNear(a, b) {
    var bothNaN = a != a && b != b;
    var close = Math.abs(a - b) < 0.001;
    assertEq(bothNaN || close, true);
}
// Map a SIMD value back to its constructor (SIMD.Int32x4, ...) by matching
// its prototype. Throws for values that are not one of the known SIMD types.
// Note: Float64x2/Bool64x2 are intentionally absent, as in the original.
function GetType(v) {
    var proto = Object.getPrototypeOf(v);
    var candidates = [
        SIMD.Int8x16, SIMD.Int16x8, SIMD.Int32x4,
        SIMD.Uint8x16, SIMD.Uint16x8, SIMD.Uint32x4,
        SIMD.Float32x4,
        SIMD.Bool8x16, SIMD.Bool16x8, SIMD.Bool32x4,
    ];
    for (var i = 0; i < candidates.length; i++) {
        if (candidates[i].prototype === proto)
            return candidates[i];
    }
    throw "unexpected SIMD type";
}
// Lane count of a SIMD constructor: 16, 8 or 4 depending on lane width.
function GetLength(t) {
    switch (t) {
      case SIMD.Int8x16:
      case SIMD.Uint8x16:
      case SIMD.Bool8x16:
        return 16;
      case SIMD.Int16x8:
      case SIMD.Uint16x8:
      case SIMD.Bool16x8:
        return 8;
      case SIMD.Int32x4:
      case SIMD.Uint32x4:
      case SIMD.Float32x4:
      case SIMD.Bool32x4:
        return 4;
    }
    throw "unexpected SIMD type";
}
// Assert two SIMD vectors have the same type and identical lanes.
function assertEqVec(v, w) {
    var T = GetType(v);
    assertEq(GetType(w), T);
    var extract = T.extractLane;
    var lanes = GetLength(T);
    for (var lane = 0; lane < lanes; lane++)
        assertEq(extract(v, lane), extract(w, lane));
}
// Assert a SIMD vector's lanes equal the elements of a plain array `w`
// (and that the array has exactly one element per lane).
function assertEqVecArr(v, w) {
    var T = GetType(v);
    var extract = T.extractLane;
    var lanes = GetLength(T);
    assertEq(w.length, lanes);
    for (var lane = 0; lane < lanes; lane++)
        assertEq(extract(v, lane), w[lane]);
}
// Compare the first four lanes of `vec` against `arr`. An optional custom
// comparison function (e.g. assertNear) may be passed as a third argument;
// otherwise exact assertEq is used.
function assertEqX4(vec, arr, ...opts) {
    var useCustom = opts.length == 1 && typeof opts[0] !== 'undefined';
    var check = useCustom ? opts[0] : assertEq;
    var T = GetType(vec);
    for (var lane = 0; lane < 4; lane++)
        check(T.extractLane(vec, lane), arr[lane]);
}
// Expand a SIMD vector into a plain array with one element per lane.
function simdToArray(vec) {
    var T = GetType(vec);
    var lanes = GetLength(T);
    var out = [];
    for (var lane = 0; lane < lanes; lane++)
        out[lane] = T.extractLane(vec, lane);
    return out;
}

Просмотреть файл

@ -0,0 +1,38 @@
// jit-test: exercise SIMD.BoolNxM allTrue()/anyTrue() across all boolean
// widths, looping enough for Ion compilation to kick in.
load(libdir + 'simd.js');

setJitCompilerOption("ion.warmup.trigger", 50);

// Starting from an all-true splat, clearing any single lane must flip
// allTrue() to false; restoring the lane must flip it back to true.
function all(B, n) {
    var a = B.splat(true);
    for (var i = 0; i < n; i++) {
        var b = B.replaceLane(a, i, false);
        assertEq(B.allTrue(b), false);
        var c = B.replaceLane(b, i, true);
        assertEq(B.allTrue(c), true);
    }
}

// Mirror of all(): starting from all-false, a single true lane must toggle
// anyTrue().
function any(B, n) {
    var a = B.splat(false);
    for (var i = 0; i < n; i++) {
        var b = B.replaceLane(a, i, true);
        assertEq(B.anyTrue(b), true);
        var c = B.replaceLane(b, i, false);
        assertEq(B.anyTrue(c), false);
    }
}

// 200 iterations so the warm-up trigger (50) promotes these to Ion.
function f() {
    for (var j = 0; j < 200; j++) {
        all(SIMD.Bool64x2, 2)
        any(SIMD.Bool64x2, 2)
        all(SIMD.Bool32x4, 4)
        any(SIMD.Bool32x4, 4)
        all(SIMD.Bool16x8, 8)
        any(SIMD.Bool16x8, 8)
        all(SIMD.Bool8x16, 16)
        any(SIMD.Bool8x16, 16)
    }
}

f()

Просмотреть файл

@ -0,0 +1,30 @@
// jit-test: lane-wise add/sub/mul for Float32x4, Int32x4 and Int8x16,
// compared against the scalar reference model binaryX() from lib/simd.js.
load(libdir + 'simd.js');

setJitCompilerOption("ion.warmup.trigger", 50);

function f() {
    var i1 = SIMD.Int32x4(1, 2, 3, 4);
    var i2 = SIMD.Int32x4(4, 3, 2, 1);
    var f1 = SIMD.Float32x4(1, 2, 3, 4);
    var f2 = SIMD.Float32x4(4, 3, 2, 1);
    // Only 12 of 16 lanes given; remaining lanes default-fill (presumably 0
    // per the constructor's missing-argument behavior).
    var i8_1 = SIMD.Int8x16(1, 2, 3, 4, 20, 30, 40, 50, 100, 115, 120, 125);
    var i8_2 = SIMD.Int8x16(4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9);
    for (var i = 0; i < 150; i++) {
        assertEqX4(SIMD.Float32x4.add(f1, f2), binaryX((x, y) => x + y, f1, f2));
        assertEqX4(SIMD.Float32x4.sub(f1, f2), binaryX((x, y) => x - y, f1, f2));
        assertEqX4(SIMD.Float32x4.mul(f1, f2), binaryX((x, y) => x * y, f1, f2));
        assertEqX4(SIMD.Int32x4.add(i1, i2), binaryX((x, y) => x + y, i1, i2));
        assertEqX4(SIMD.Int32x4.sub(i1, i2), binaryX((x, y) => x - y, i1, i2));
        assertEqX4(SIMD.Int32x4.mul(i1, i2), binaryX((x, y) => x * y, i1, i2));
        // "<< 24 >> 24" wraps scalar results to signed 8-bit range.
        assertEqX4(SIMD.Int8x16.add(i8_1, i8_2), binaryX((x, y) => (x + y) << 24 >> 24, i8_1, i8_2));
        assertEqX4(SIMD.Int8x16.sub(i8_1, i8_2), binaryX((x, y) => (x - y) << 24 >> 24, i8_1, i8_2));
        assertEqX4(SIMD.Int8x16.mul(i8_1, i8_2), binaryX((x, y) => (x * y) << 24 >> 24, i8_1, i8_2));
    }
}

f();

Просмотреть файл

@ -0,0 +1,15 @@
// jit-test: Bool32x4 and/or/xor vs. the scalar booleanBinaryX4() model,
// looping until the function has been compiled by Ion (inIon()).
load(libdir + "simd.js");

setJitCompilerOption("ion.warmup.trigger", 50);

function f() {
    var b1 = SIMD.Bool32x4(true, false, true, false);
    var b2 = SIMD.Bool32x4(true, true, true, true);
    do {
        assertEqX4(SIMD.Bool32x4.and(b1, b2), booleanBinaryX4((x, y) => x && y, b1, b2));
        assertEqX4(SIMD.Bool32x4.or(b1, b2), booleanBinaryX4((x, y) => x || y, b1, b2));
        // xor of booleans is inequality.
        assertEqX4(SIMD.Bool32x4.xor(b1, b2), booleanBinaryX4((x, y) => x != y, b1, b2));
    } while (!inIon());
}

f();

Просмотреть файл

@ -0,0 +1,65 @@
// jit-test: constant folding into the Bool32x4 constructor and splat().
// Verifies ToBoolean() truthiness is honored for every value category
// (undefined, null, strings, numbers, -0, NaN, objects, symbols, and the
// spec's [[IsHTMLDDA]] document.all-like object, which is falsy).
load(libdir + "simd.js");

setJitCompilerOption("ion.warmup.trigger", 50);

// Test constant folding into the Bool32x4 constructor.
// Verify that we get the truthiness right, c.f. the ECMA ToBoolean() function.
function f1() {
    var B = SIMD.Bool32x4;
    var S = SIMD.Bool32x4.splat;
    return [
        B(false, false, false, true),
        B(true),
        B(undefined, null, "", "x"),
        B({}, 0, 1, -0.0),
        B(NaN, -NaN, Symbol(), createIsHTMLDDA()),
        S(false),
        S(true),
        S(undefined),
        S(null),
        S(""),
        S("x"),
        S(0),
        S(1),
        S({}),
        S(-0.0),
        S(NaN),
        S(Symbol()),
        S(createIsHTMLDDA())
    ];
}

// Run hot so Ion constant-folds f1(), then check every folded vector.
function f() {
    for (var i = 0; i < 100; i++) {
        var a = f1()
        assertEqX4(a[0], [false, false, false, true]);
        assertEqX4(a[1], [true, false, false, false]);
        assertEqX4(a[2], [false, false, false, true]);
        assertEqX4(a[3], [true, false, true, false]);
        assertEqX4(a[4], [false, false, true, false]);

        // Splats.
        assertEqX4(a[5], [false, false, false, false]);
        assertEqX4(a[6], [true, true, true, true]);
        assertEqX4(a[7], [false, false, false, false]);
        assertEqX4(a[8], [false, false, false, false]);
        assertEqX4(a[9], [false, false, false, false]);
        assertEqX4(a[10], [true, true, true, true]);
        assertEqX4(a[11], [false, false, false, false]);
        assertEqX4(a[12], [true, true, true, true]);
        assertEqX4(a[13], [true, true, true, true]);
        assertEqX4(a[14], [false, false, false, false]);
        assertEqX4(a[15], [false, false, false, false]);
        assertEqX4(a[16], [true, true, true, true]);
        assertEqX4(a[17], [false, false, false, false]);
    }
}

f();

Просмотреть файл

@ -0,0 +1,11 @@
// Fuzz-derived regression test: reading .length off a SIMD value after the
// same code path saw a huge Array must not crash or misbehave.
if (typeof TypedObject === "undefined" || typeof SIMD === 'undefined')
    quit();

var Int32x4 = SIMD.Int32x4;
// 4294967295 wraps to -1 in a signed 32-bit lane.
var a = Int32x4((4294967295), 200, 300, 400);

addCase( new Array(Math.pow(2,12)) );
for ( var arg = "", i = 0; i < Math.pow(2,12); i++ ) {}
addCase( a );

// Polymorphic .length access: Array first, then a SIMD typed object.
function addCase(object) {
    object.length
}

Просмотреть файл

@ -0,0 +1,31 @@
// Regression tests: (1) Int32x4.fromFloat32x4 inside deep-ish recursion under
// low JIT warm-up thresholds; (2) constructing a TypedObject array of
// Float32x4, including under run-once evaluation.
if (!this.hasOwnProperty("SIMD"))
    quit();

setJitCompilerOption("baseline.warmup.trigger", 10);
setJitCompilerOption("ion.warmup.trigger", 30);

// Recurse 40 times, converting on every level, so the conversion is exercised
// in interpreter, baseline, and Ion tiers.
function test_1(i) {
    if (i >= 40)
        return;
    var a = SIMD.Float32x4(1.1, 2.2, 3.3, 4.6);
    SIMD.Int32x4.fromFloat32x4(a);
    test_1(i + 1);
}
test_1(0);

var Float32x4 = SIMD.Float32x4;
function test_2() {
    var Array = Float32x4.array(3);
    var array = new Array([
        Float32x4(1, 2, 3, 4),
        Float32x4(5, 6, 7, 8),
        Float32x4(9, 10, 11, 12)
    ]);
    if (typeof reportCompare === "function")
        reportCompare(true, true);
}
test_2();
// Re-run under isRunOnce to cover the run-once script path.
evaluate("test_2(); test_2();", {
    isRunOnce: true,
});

Просмотреть файл

@ -0,0 +1,9 @@
// Smoke test: constructing a Float64x2 twice must not crash.
if (!this.hasOwnProperty("SIMD"))
    quit();

var Float64x2 = SIMD.Float64x2;
function test() {
    var a = Float64x2(1, 2);
}
test();
test();

Просмотреть файл

@ -0,0 +1,15 @@
// Regression test: Int32x4.and with a default-constructed (all-zero) operand.
if (!this.hasOwnProperty("SIMD"))
    quit();

var Int32x4 = SIMD.Int32x4;
function test() {
    var a = Int32x4();                    // all lanes zero
    var b = Int32x4(10, 20, 30, 40);
    var c = SIMD.Int32x4.and(a, b);       // 0 & x == 0
    assertEq(Int32x4.extractLane(c, 0), 0);
    return 0;
}
test();
// NOTE: u is empty, so this loop body (which references an undefined `t`)
// never executes; it only exercises the parser/JIT on the dead code.
var u = [], v = [];
for (var j=0; j<u.length; ++j)
    v[test()] = t;

Просмотреть файл

@ -0,0 +1,10 @@
// Stress test: calling Float32x4().toSource() 10000 times so the call gets
// JIT-compiled; must not crash or change behavior across tiers.
if (typeof SIMD !== 'object')
    quit(0);

function test() {
    return SIMD.Float32x4().toSource();
}

var r = '';
for (var i = 0; i < 10000; i++)
    r = test();

Просмотреть файл

@ -0,0 +1,16 @@
// Fuzz-derived regression test: Int32x4.check() under gczeal(1) (GC on every
// allocation opportunity) in a very hot loop must not crash.
if (typeof SIMD !== 'object')
    quit(0);

// Shadows the lib/simd.js helper of the same name; deliberately does nothing
// observable (fuzzer-minimized shape).
function assertEqVec(v, w) {
    [0].forEach(i => v, w);
    function assertEqX4(...opts) {}
}

gczeal(1);
function f() {
    SIMD.Float32x4();
    var i1 = SIMD.Int32x4();
    for (j = 0; j < 100000; ++j, eval.eval)
        assertEqVec(SIMD.Int32x4.check(i1), i1);
}
f();

Просмотреть файл

@ -0,0 +1,9 @@
// Fuzz-derived regression test: calling a generic wrapper first with Int8x16
// and then with Int32x4 (polymorphic constructor call site) must not crash.
if (typeof SIMD === 'undefined')
    quit();

Int8x16 = SIMD.Int8x16;
var Int32x4 = SIMD.Int32x4;
function testSwizzleForType(type) { return type(); }
testSwizzleForType(Int8x16);
function testSwizzleInt32x4() { return testSwizzleForType(Int32x4); }
testSwizzleInt32x4();

Просмотреть файл

@ -0,0 +1,9 @@
// Regression test: shiftLeftByScalar with a valueOf-carrying shift operand
// under incremental-GC zeal must not crash (the valueOf call can GC).
if (typeof gczeal === 'undefined' || typeof SIMD === 'undefined') {
    quit();
}

gczeal(9, 2);
var Int8x16 = SIMD.Int8x16;
var v = Int8x16();
var good = { valueOf: () => 21 };
Int8x16.shiftLeftByScalar(v, good);

Просмотреть файл

@ -0,0 +1,12 @@
// Regression test: replaceLane with a valueOf-carrying lane value under
// GC zeal mode 14 must not crash (the valueOf call can GC).
if (typeof gczeal === 'undefined' || typeof SIMD === 'undefined') {
    quit();
}

gczeal(14,2);
var Float32x4 = SIMD.Float32x4;
function test() {
    var v = Float32x4(1,2,3,4);
    var good = {valueOf: () => 42};
    Float32x4.replaceLane(v, 0, good);
}
test();

Просмотреть файл

@ -0,0 +1,25 @@
// jit-test: SIMD.Int32x4.load at an out-of-bounds offset must throw a
// RangeError every time, including when compiled eagerly by Ion
// (warmup.trigger = 0). The asm.js module just forces the buffer into
// asm.js-linked state first.
load(libdir + 'simd.js');

var ab = new ArrayBuffer(64 * 1024);
var arr = new Uint8Array(ab);

(function(glob, imp, b) {
    "use asm";
    var arr = new glob.Uint8Array(b);
    return {}
})(this, null, ab);

function testSimdX4() {
    for (var i = 10; i --> 0;) {
        var caught;
        try {
            // 65534 + 16 bytes > 65536: always out of bounds.
            v = SIMD.Int32x4.load(arr, 65534);
        } catch (e) {
            caught = e;
        }
        assertEq(caught instanceof RangeError, true);
    }
}

setJitCompilerOption('ion.warmup.trigger', 0);
testSimdX4();

Просмотреть файл

@ -0,0 +1,10 @@
/*
 * Any copyright is dedicated to the Public Domain.
 * http://creativecommons.org/licenses/publicdomain/
 */
// Smoke test: creating a length-1 TypedObject array type of Float32x4
// must not crash.
if (!this.hasOwnProperty("TypedObject") || !this.hasOwnProperty("SIMD"))
    quit();

var Float32x4 = SIMD.Float32x4;
Float32x4.array(1);

Просмотреть файл

@ -0,0 +1,25 @@
// jit-test: SIMD.*.check() must pass through values of the right type and
// throw a TypeError when handed a wrong-typed value — here triggered only on
// iteration 149, after Ion compilation, to exercise the bailout path.
load(libdir + 'simd.js');

setJitCompilerOption("ion.warmup.trigger", 50);

function f() {
    var f1 = SIMD.Float32x4(1, 2, 3, 4);
    var i1 = SIMD.Int32x4(1, 2, -3, 4);
    var b1 = SIMD.Bool32x4(true, true, false, true);
    var i = 0;
    try {
        for (; i < 150; i++) {
            if (i > 148)
                i1 = f1;    // wrong type: Int32x4.check(i1) must now throw
            assertEqVec(SIMD.Int32x4.check(i1), i1);
            assertEqVec(SIMD.Float32x4.check(f1), f1);
            assertEqVec(SIMD.Bool32x4.check(b1), b1);
        }
    } catch (ex) {
        assertEq(i, 149);
        assertEq(ex instanceof TypeError, true);
    }
}

f();

Просмотреть файл

@ -0,0 +1,39 @@
// jit-test: lane-wise comparison operators for Int32x4 (signed), Uint32x4
// (unsigned — note -3 >>> 0 is huge, 0x80000000 is large) and Float32x4
// (NaN compares false with everything except notEqual).
load(libdir + 'simd.js');

setJitCompilerOption("ion.warmup.trigger", 50);

function f() {
    var f1 = SIMD.Float32x4(1, 2, 3, 4);
    var f2 = SIMD.Float32x4(NaN, Infinity, 3.14, -0);

    var i1 = SIMD.Int32x4(1, 2, -3, 4);
    var i2 = SIMD.Int32x4(1, -2, 3, 0);

    var u1 = SIMD.Uint32x4(1, 2, -3, 4);
    var u2 = SIMD.Uint32x4(1, -2, 3, 0x80000000);

    for (var i = 0; i < 150; i++) {
        assertEqX4(SIMD.Int32x4.lessThan(i1, i2), [false, false, true, false]);
        assertEqX4(SIMD.Int32x4.lessThanOrEqual(i1, i2), [true, false, true, false]);
        assertEqX4(SIMD.Int32x4.equal(i1, i2), [true, false, false, false]);
        assertEqX4(SIMD.Int32x4.notEqual(i1, i2), [false, true, true, true]);
        assertEqX4(SIMD.Int32x4.greaterThan(i1, i2), [false, true, false, true]);
        assertEqX4(SIMD.Int32x4.greaterThanOrEqual(i1, i2), [true, true, false, true]);

        assertEqX4(SIMD.Uint32x4.lessThan(u1, u2), [false, true, false, true]);
        assertEqX4(SIMD.Uint32x4.lessThanOrEqual(u1, u2), [true, true, false, true]);
        assertEqX4(SIMD.Uint32x4.equal(u1, u2), [true, false, false, false]);
        assertEqX4(SIMD.Uint32x4.notEqual(u1, u2), [false, true, true, true]);
        assertEqX4(SIMD.Uint32x4.greaterThan(u1, u2), [false, false, true, false]);
        assertEqX4(SIMD.Uint32x4.greaterThanOrEqual(u1, u2), [true, false, true, false]);

        assertEqX4(SIMD.Float32x4.lessThan(f1, f2), [false, true, true, false]);
        assertEqX4(SIMD.Float32x4.lessThanOrEqual(f1, f2), [false, true, true, false]);
        assertEqX4(SIMD.Float32x4.equal(f1, f2), [false, false, false, false]);
        assertEqX4(SIMD.Float32x4.notEqual(f1, f2), [true, true, true, true]);
        assertEqX4(SIMD.Float32x4.greaterThan(f1, f2), [false, false, false, true]);
        assertEqX4(SIMD.Float32x4.greaterThanOrEqual(f1, f2), [false, false, false, true]);
    }
}

f();

Просмотреть файл

@ -0,0 +1,70 @@
// jit-test: inlining of SIMD operations through small helper functions,
// modeled as 4-wide complex-number arithmetic on Int32x4 "re"/"im" pairs.
// Repeatedly rotating by i and scaling must produce exactly predictable
// results (c4norm of a unit vector is 1 lane-wise).
load(libdir + 'simd.js');

if (typeof SIMD === "undefined")
    quit();

setJitCompilerOption("baseline.warmup.trigger", 10);
setJitCompilerOption("ion.warmup.trigger", 90);
var max = 100; // Make have the warm-up counter high enough to
               // consider inlining functions.

var f4 = SIMD.Int32x4; // :TODO: Support Float32x4 arith.
var f4add = f4.add;
var f4sub = f4.sub;
var f4mul = f4.mul;

// (re1 + i*im1) * (re2 + i*im2), lane-wise.
function c4mul(z1, z2) {
    var { re: re1, im: im1 } = z1;
    var { re: re2, im: im2 } = z2;
    var rere = f4mul(re1, re2);
    var reim = f4mul(re1, im2);
    var imre = f4mul(im1, re2);
    var imim = f4mul(im1, im2);
    return { re: f4sub(rere, imim), im: f4add(reim, imre) };
}

// Complex conjugate (negate the imaginary part).
function c4inv(z) {
    var { re: re, im: im } = z;
    var minus = f4(-1, -1, -1, -1);
    return { re: re, im: f4mul(im, minus) };
}

function c4inv_inplace(z) {
    var res = c4inv(z);
    z.re = res.re;
    z.im = res.im;
}

// z * conj(z): squared magnitude lands in the real part.
function c4norm(z) {
    var { re: re, im: im } = c4mul(z, c4inv(z));
    return re;
}

function c4scale(z, s) {
    var { re: re, im: im } = z;
    var f4s = f4(s, s, s, s);
    return { re: f4mul(re, f4s), im: f4mul(im, f4s) };
}

var rotate90 = { re: f4(0, 0, 0, 0), im: f4(1, 1, 1, 1) };
var cardinals = { re: f4(1, 0, -1, 0), im: f4(0, 1, 0, -1) };

// Four quarter-turns with conjugation every other step plus doubling.
function test(dots) {
    for (var j = 0; j < 4; j++) {
        dots = c4mul(rotate90, dots);
        if (j % 2 == 0) // Magic !
            c4inv_inplace(dots);
        dots = c4scale(dots, 2);
    }
    return dots;
}

assertEqX4(c4norm(cardinals), simdToArray(f4.splat(1)));
var cardinals16 = c4scale(cardinals, 16);

for (var i = 0; i < max; i++) {
    var res = test(cardinals);
    assertEqX4(c4norm(res), simdToArray(f4.splat(16 * 16)));
    assertEqX4(res.re, simdToArray(cardinals16.re));
    assertEqX4(res.im, simdToArray(cardinals16.im));
}

Просмотреть файл

@ -0,0 +1,68 @@
// jit-test: Float32x4 <-> Int32x4 value and bitwise conversions, including
// the range-check bailout when fromFloat32x4 sees an unrepresentable value
// (the hardware conversion yields 0x80000000 for out-of-range inputs).
load(libdir + 'simd.js');

setJitCompilerOption("ion.warmup.trigger", 30);

// Bit-level reinterpretation helpers via overlapped typed arrays.
var cast = (function() {
    var i32 = new Int32Array(1);
    var f32 = new Float32Array(i32.buffer);
    return {
        fromInt32Bits(x) {
            i32[0] = x;
            return f32[0];
        },

        fromFloat32Bits(x) {
            f32[0] = x;
            return i32[0];
        }
    }
})();

function f() {
    // No bailout here.
    var f4 = SIMD.Float32x4(1, 2, 3, 4);
    var i4 = SIMD.Int32x4(1, 2, 3, 4);
    var BitOrZero = (x) => x | 0;
    for (var i = 0; i < 150; i++) {
        assertEqX4(SIMD.Float32x4.fromInt32x4(i4), unaryX4(BitOrZero, f4, Math.fround));
        assertEqX4(SIMD.Float32x4.fromInt32x4Bits(i4), unaryX4(cast.fromInt32Bits, f4, Math.fround));
        assertEqX4(SIMD.Int32x4.fromFloat32x4(f4), unaryX4(Math.fround, i4, BitOrZero));
        assertEqX4(SIMD.Int32x4.fromFloat32x4Bits(f4), unaryX4(cast.fromFloat32Bits, i4, BitOrZero));
    }
}

function uglyDuckling(val) {
    // We bail out when i == 149 because the conversion will return
    // 0x80000000 and the input actually wasn't in bounds.
    val = Math.fround(val);
    for (var i = 0; i < 150; i++) {
        var caught = false;
        try {
            var v = SIMD.Float32x4(i < 149 ? 0 : val, 0, 0, 0)
            SIMD.Int32x4.fromFloat32x4(v);
        } catch(e) {
            assertEq(e instanceof RangeError, true);
            assertEq(i, 149);
            caught = true;
        }
        assertEq(i < 149 || caught, true);
    }
}

function dontBail() {
    // On x86, the conversion will return 0x80000000, which will imply that we
    // check the input values. However, we shouldn't bail out in this case.
    for (var i = 0; i < 150; i++) {
        var v = SIMD.Float32x4(i < 149 ? 0 : -Math.pow(2, 31), 0, 0, 0)
        SIMD.Int32x4.fromFloat32x4(v);
    }
}

f();

dontBail();
dontBail();

uglyDuckling(Math.pow(2, 31));
uglyDuckling(NaN);
uglyDuckling(-Math.pow(2, 32));

Просмотреть файл

@ -0,0 +1,33 @@
// jit-test: Float32x4 div/min/max/minNum/maxNum against scalar models.
// minNum/maxNum differ from Math.min/max in NaN handling: a NaN operand is
// ignored in favor of the numeric one.
load(libdir + 'simd.js');

setJitCompilerOption("ion.warmup.trigger", 50);

// Scalar model of maxNum: prefer the non-NaN operand.
function maxNum(x, y) {
    if (x != x)
        return y;
    if (y != y)
        return x;
    return Math.max(x, y);
}

// Scalar model of minNum: prefer the non-NaN operand.
function minNum(x, y) {
    if (x != x)
        return y;
    if (y != y)
        return x;
    return Math.min(x, y);
}

function f() {
    var f1 = SIMD.Float32x4(1, 2, 3, 4);
    var f2 = SIMD.Float32x4(4, 3, 2, 1);
    for (var i = 0; i < 150; i++) {
        assertEqX4(SIMD.Float32x4.div(f1, f2), binaryX((x, y) => x / y, f1, f2));
        assertEqX4(SIMD.Float32x4.min(f1, f2), binaryX(Math.min, f1, f2));
        assertEqX4(SIMD.Float32x4.max(f1, f2), binaryX(Math.max, f1, f2));
        assertEqX4(SIMD.Float32x4.minNum(f1, f2), binaryX(minNum, f1, f2));
        assertEqX4(SIMD.Float32x4.maxNum(f1, f2), binaryX(maxNum, f1, f2));
    }
}

f();

Просмотреть файл

@ -0,0 +1,48 @@
// jit-test: extractLane for Int32x4/Uint32x4/Float32x4/Bool32x4 plus
// Bool32x4 anyTrue/allTrue, run hot so Ion specializes the accessors.
load(libdir + 'simd.js');

setJitCompilerOption("ion.warmup.trigger", 50);

function f() {
    var i4 = SIMD.Int32x4(1, -2, 3, -4);
    // Unsigned lanes: -2 stores as 2^32-2, 0x88000000 stays > 2^31.
    var u4 = SIMD.Uint32x4(1, -2, 3, 0x88000000);
    var b4 = SIMD.Bool32x4(true, true, false, true);
    var bt4 = SIMD.Bool32x4(true, true, true, true);
    var bf4 = SIMD.Bool32x4(false, false, false, false);
    // Float lanes round to single precision; compare against fround.
    var v = Math.fround(13.37);
    var f4 = SIMD.Float32x4(13.37, NaN, Infinity, -0);

    for (var i = 0; i < 150; i++) {
        assertEq(SIMD.Int32x4.extractLane(i4, 0), 1);
        assertEq(SIMD.Int32x4.extractLane(i4, 1), -2);
        assertEq(SIMD.Int32x4.extractLane(i4, 2), 3);
        assertEq(SIMD.Int32x4.extractLane(i4, 3), -4);

        assertEq(SIMD.Uint32x4.extractLane(u4, 0), 1);
        assertEq(SIMD.Uint32x4.extractLane(u4, 1), -2 >>> 0);
        assertEq(SIMD.Uint32x4.extractLane(u4, 2), 3);
        assertEq(SIMD.Uint32x4.extractLane(u4, 3), 0x88000000);

        assertEq(SIMD.Float32x4.extractLane(f4, 0), v);
        assertEq(SIMD.Float32x4.extractLane(f4, 1), NaN);
        assertEq(SIMD.Float32x4.extractLane(f4, 2), Infinity);
        assertEq(SIMD.Float32x4.extractLane(f4, 3), -0);

        assertEq(SIMD.Bool32x4.extractLane(b4, 0), true);
        assertEq(SIMD.Bool32x4.extractLane(b4, 1), true);
        assertEq(SIMD.Bool32x4.extractLane(b4, 2), false);
        assertEq(SIMD.Bool32x4.extractLane(b4, 3), true);

        assertEq(SIMD.Bool32x4.anyTrue(b4), true);
        assertEq(SIMD.Bool32x4.allTrue(b4), false);
        assertEq(SIMD.Bool32x4.anyTrue(bt4), true);
        assertEq(SIMD.Bool32x4.allTrue(bt4), true);
        assertEq(SIMD.Bool32x4.anyTrue(bf4), false);
        assertEq(SIMD.Bool32x4.allTrue(bf4), false);
    }
}

f();

Просмотреть файл

@ -0,0 +1,81 @@
load(libdir + 'simd.js');
setJitCompilerOption("ion.warmup.trigger", 50);

// Check SIMD constructor semantics under Ion: lanes without an explicit
// argument are filled with the type's default (0 for integer lanes, NaN for
// Float32x4 lanes, false for boolean lanes), and surplus arguments are
// silently ignored.
function test(i) {
    assertEqX4(SIMD.Int32x4(), [0, 0, 0, 0]);
    assertEqX4(SIMD.Int32x4(i), [i, 0, 0, 0]);
    assertEqX4(SIMD.Int32x4(i, 1), [i, 1, 0, 0]);
    assertEqX4(SIMD.Int32x4(i, 1, 2), [i, 1, 2, 0]);
    assertEqX4(SIMD.Int32x4(i, 1, 2, 3), [i, 1, 2, 3]);
    assertEqX4(SIMD.Int32x4(i, 1, 2, 3, 4), [i, 1, 2, 3]);

    assertEqVecArr(SIMD.Int16x8(), [0, 0, 0, 0, 0, 0, 0, 0]);
    assertEqVecArr(SIMD.Int16x8(i), [i, 0, 0, 0, 0, 0, 0, 0]);
    assertEqVecArr(SIMD.Int16x8(i, 1), [i, 1, 0, 0, 0, 0, 0, 0]);
    assertEqVecArr(SIMD.Int16x8(i, 1, 2), [i, 1, 2, 0, 0, 0, 0, 0]);
    assertEqVecArr(SIMD.Int16x8(i, 1, 2, 3), [i, 1, 2, 3, 0, 0, 0, 0]);
    assertEqVecArr(SIMD.Int16x8(i, 1, 2, 3, 4), [i, 1, 2, 3, 4, 0, 0, 0]);
    assertEqVecArr(SIMD.Int16x8(i, 1, 2, 3, 4, 5, 6),
                   [i, 1, 2, 3, 4, 5, 6, 0]);

    // Only 0 or 32: keeps the Int8x16 lane values inside the int8 range.
    // (Was an implicit global `j = i & 32` — now properly declared.)
    var j = i & 32;
    assertEqVecArr(SIMD.Int8x16(), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
    assertEqVecArr(SIMD.Int8x16(j), [j, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
    assertEqVecArr(SIMD.Int8x16(j, 1), [j, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
    assertEqVecArr(SIMD.Int8x16(j, 1, 2), [j, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
    assertEqVecArr(SIMD.Int8x16(j, 1, 2, 3), [j, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
    assertEqVecArr(SIMD.Int8x16(j, 1, 2, 3, 4), [j, 1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
    assertEqVecArr(SIMD.Int8x16(j, 1, 2, 3, 4, 5, 6),
                   [j, 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
    assertEqVecArr(SIMD.Int8x16(j, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12),
                   [j, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 0, 0]);

    // Float32x4 defaults missing lanes to NaN, not 0.
    assertEqX4(SIMD.Float32x4(), [NaN, NaN, NaN, NaN]);
    assertEqX4(SIMD.Float32x4(i), [i, NaN, NaN, NaN]);
    assertEqX4(SIMD.Float32x4(i, 1), [i, 1, NaN, NaN]);
    assertEqX4(SIMD.Float32x4(i, 1, 2), [i, 1, 2, NaN]);
    assertEqX4(SIMD.Float32x4(i, 1, 2, 3), [i, 1, 2, 3 ]);
    assertEqX4(SIMD.Float32x4(i, 1, 2, 3, 4), [i, 1, 2, 3 ]);

    // Boolean vectors default missing lanes to false.
    var b = i % 2 > 0 ;
    assertEqX4(SIMD.Bool32x4(), [false, false, false, false]);
    assertEqX4(SIMD.Bool32x4(b), [b, false, false, false]);
    assertEqX4(SIMD.Bool32x4(b, true), [b, true, false, false]);
    assertEqX4(SIMD.Bool32x4(b, false, true), [b, false, true, false]);
    assertEqX4(SIMD.Bool32x4(b, false, true, true), [b, false, true, true ]);
    assertEqX4(SIMD.Bool32x4(b, false, true, true, true), [b, false, true, true ]);

    assertEqVecArr(SIMD.Bool16x8(),
                   [false, false, false, false, false, false, false, false]);
    assertEqVecArr(SIMD.Bool16x8(b),
                   [b, false, false, false, false, false, false, false]);
    assertEqVecArr(SIMD.Bool16x8(b, true),
                   [b, true, false, false, false, false, false, false]);
    assertEqVecArr(SIMD.Bool16x8(b, false, true),
                   [b, false, true, false, false, false, false, false]);
    assertEqVecArr(SIMD.Bool16x8(b, false, true, true),
                   [b, false, true, true, false, false, false, false]);
    assertEqVecArr(SIMD.Bool16x8(b, false, true, true, true),
                   [b, false, true, true, true, false, false, false]);
    assertEqVecArr(SIMD.Bool16x8(b, false, true, true, true, true),
                   [b, false, true, true, true, true, false, false]);

    assertEqVecArr(SIMD.Bool8x16(),
                   [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false]);
    assertEqVecArr(SIMD.Bool8x16(b),
                   [b, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false]);
    assertEqVecArr(SIMD.Bool8x16(b, true),
                   [b, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false]);
    assertEqVecArr(SIMD.Bool8x16(b, false, true),
                   [b, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false]);
    assertEqVecArr(SIMD.Bool8x16(b, false, true, true),
                   [b, false, true, true, false, false, false, false, false, false, false, false, false, false, false, false]);
    assertEqVecArr(SIMD.Bool8x16(b, false, true, true, true),
                   [b, false, true, true, true, false, false, false, false, false, false, false, false, false, false, false]);
    assertEqVecArr(SIMD.Bool8x16(b, false, true, true, true, true, false, true, true, true),
                   [b, false, true, true, true, true, false, true, true, true, false, false, false, false, false, false]);
}

// Enough iterations to tier up through Baseline into Ion.
for(var i=0; i<300; i++) {
    test(i);
}

Просмотреть файл

@ -0,0 +1,123 @@
// Test SIMD.Float32x4.load/load1/load2/load3 from typed arrays of every
// element width, plus the RangeError bailout for out-of-bounds indices.
load(libdir + 'simd.js');
setJitCompilerOption("ion.warmup.trigger", 40);
function f() {
// One 64-byte buffer, filled so f32[i] == i + 1, then aliased with every
// typed-array flavor. Load indices are in elements of the *viewed* array,
// so 16-bit views scale by << 1 and 8-bit views by << 2 relative to f32.
var f32 = new Float32Array(16);
for (var i = 0; i < 16; i++)
f32[i] = i + 1;
var f64 = new Float64Array(f32.buffer);
var i32 = new Int32Array(f32.buffer);
var u32 = new Uint32Array(f32.buffer);
var i16 = new Int16Array(f32.buffer);
var u16 = new Uint16Array(f32.buffer);
var i8 = new Int8Array(f32.buffer);
var u8 = new Uint8Array(f32.buffer);
// load reads four consecutive f32 lanes; the last cases read right up to
// the end of the buffer to exercise the bounds check.
function testLoad() {
assertEqX4(SIMD.Float32x4.load(f64, 0), [1,2,3,4]);
assertEqX4(SIMD.Float32x4.load(f32, 1), [2,3,4,5]);
assertEqX4(SIMD.Float32x4.load(i32, 2), [3,4,5,6]);
assertEqX4(SIMD.Float32x4.load(i16, 3 << 1), [4,5,6,7]);
assertEqX4(SIMD.Float32x4.load(u16, 4 << 1), [5,6,7,8]);
assertEqX4(SIMD.Float32x4.load(i8 , 5 << 2), [6,7,8,9]);
assertEqX4(SIMD.Float32x4.load(u8 , 6 << 2), [7,8,9,10]);
assertEqX4(SIMD.Float32x4.load(f64, (16 >> 1) - (4 >> 1)), [13,14,15,16]);
assertEqX4(SIMD.Float32x4.load(f32, 16 - 4), [13,14,15,16]);
assertEqX4(SIMD.Float32x4.load(i32, 16 - 4), [13,14,15,16]);
assertEqX4(SIMD.Float32x4.load(i16, (16 << 1) - (4 << 1)), [13,14,15,16]);
assertEqX4(SIMD.Float32x4.load(u16, (16 << 1) - (4 << 1)), [13,14,15,16]);
assertEqX4(SIMD.Float32x4.load(i8, (16 << 2) - (4 << 2)), [13,14,15,16]);
assertEqX4(SIMD.Float32x4.load(u8, (16 << 2) - (4 << 2)), [13,14,15,16]);
}
// load1 reads one lane and zero-fills the other three.
function testLoad1() {
assertEqX4(SIMD.Float32x4.load1(f64, 0), [1,0,0,0]);
assertEqX4(SIMD.Float32x4.load1(f32, 1), [2,0,0,0]);
assertEqX4(SIMD.Float32x4.load1(i32, 2), [3,0,0,0]);
assertEqX4(SIMD.Float32x4.load1(i16, 3 << 1), [4,0,0,0]);
assertEqX4(SIMD.Float32x4.load1(u16, 4 << 1), [5,0,0,0]);
assertEqX4(SIMD.Float32x4.load1(i8 , 5 << 2), [6,0,0,0]);
assertEqX4(SIMD.Float32x4.load1(u8 , 6 << 2), [7,0,0,0]);
assertEqX4(SIMD.Float32x4.load1(f64, (16 >> 1) - (4 >> 1)), [13,0,0,0]);
assertEqX4(SIMD.Float32x4.load1(f32, 16 - 4), [13,0,0,0]);
assertEqX4(SIMD.Float32x4.load1(i32, 16 - 4), [13,0,0,0]);
assertEqX4(SIMD.Float32x4.load1(i16, (16 << 1) - (4 << 1)), [13,0,0,0]);
assertEqX4(SIMD.Float32x4.load1(u16, (16 << 1) - (4 << 1)), [13,0,0,0]);
assertEqX4(SIMD.Float32x4.load1(i8, (16 << 2) - (4 << 2)), [13,0,0,0]);
assertEqX4(SIMD.Float32x4.load1(u8, (16 << 2) - (4 << 2)), [13,0,0,0]);
}
// load2 reads two lanes and zero-fills the other two.
function testLoad2() {
assertEqX4(SIMD.Float32x4.load2(f64, 0), [1,2,0,0]);
assertEqX4(SIMD.Float32x4.load2(f32, 1), [2,3,0,0]);
assertEqX4(SIMD.Float32x4.load2(i32, 2), [3,4,0,0]);
assertEqX4(SIMD.Float32x4.load2(i16, 3 << 1), [4,5,0,0]);
assertEqX4(SIMD.Float32x4.load2(u16, 4 << 1), [5,6,0,0]);
assertEqX4(SIMD.Float32x4.load2(i8 , 5 << 2), [6,7,0,0]);
assertEqX4(SIMD.Float32x4.load2(u8 , 6 << 2), [7,8,0,0]);
assertEqX4(SIMD.Float32x4.load2(f64, (16 >> 1) - (4 >> 1)), [13,14,0,0]);
assertEqX4(SIMD.Float32x4.load2(f32, 16 - 4), [13,14,0,0]);
assertEqX4(SIMD.Float32x4.load2(i32, 16 - 4), [13,14,0,0]);
assertEqX4(SIMD.Float32x4.load2(i16, (16 << 1) - (4 << 1)), [13,14,0,0]);
assertEqX4(SIMD.Float32x4.load2(u16, (16 << 1) - (4 << 1)), [13,14,0,0]);
assertEqX4(SIMD.Float32x4.load2(i8, (16 << 2) - (4 << 2)), [13,14,0,0]);
assertEqX4(SIMD.Float32x4.load2(u8, (16 << 2) - (4 << 2)), [13,14,0,0]);
}
// load3 reads three lanes and zero-fills the last.
function testLoad3() {
assertEqX4(SIMD.Float32x4.load3(f64, 0), [1,2,3,0]);
assertEqX4(SIMD.Float32x4.load3(f32, 1), [2,3,4,0]);
assertEqX4(SIMD.Float32x4.load3(i32, 2), [3,4,5,0]);
assertEqX4(SIMD.Float32x4.load3(i16, 3 << 1), [4,5,6,0]);
assertEqX4(SIMD.Float32x4.load3(u16, 4 << 1), [5,6,7,0]);
assertEqX4(SIMD.Float32x4.load3(i8 , 5 << 2), [6,7,8,0]);
assertEqX4(SIMD.Float32x4.load3(u8 , 6 << 2), [7,8,9,0]);
assertEqX4(SIMD.Float32x4.load3(f64, (16 >> 1) - (4 >> 1)), [13,14,15,0]);
assertEqX4(SIMD.Float32x4.load3(f32, 16 - 4), [13,14,15,0]);
assertEqX4(SIMD.Float32x4.load3(i32, 16 - 4), [13,14,15,0]);
assertEqX4(SIMD.Float32x4.load3(i16, (16 << 1) - (4 << 1)), [13,14,15,0]);
assertEqX4(SIMD.Float32x4.load3(u16, (16 << 1) - (4 << 1)), [13,14,15,0]);
assertEqX4(SIMD.Float32x4.load3(i8, (16 << 2) - (4 << 2)), [13,14,15,0]);
assertEqX4(SIMD.Float32x4.load3(u8, (16 << 2) - (4 << 2)), [13,14,15,0]);
}
for (var i = 0; i < 150; i++) {
testLoad();
testLoad1();
testLoad2();
testLoad3();
}
}
f();
// Warm up with in-bounds indices for 149 iterations, then hit the Ion code
// with an out-of-bounds index on the last one: it must throw a RangeError
// (bailing out of the compiled load) rather than reading out of bounds.
function testBailout(uglyDuckling) {
var f32 = new Float32Array(16);
for (var i = 0; i < 16; i++)
f32[i] = i + 1;
var i8 = new Int8Array(f32.buffer);
for (var i = 0; i < 150; i++) {
var caught = false;
try {
SIMD.Float32x4.load(i8, (i < 149) ? 0 : uglyDuckling);
} catch (e) {
print(e);
assertEq(e instanceof RangeError, true);
caught = true;
}
assertEq(i < 149 || caught, true);
}
}
print('Testing range checks...');
testBailout(-1);
testBailout(-15);
testBailout(12 * 4 + 1);

Просмотреть файл

@ -0,0 +1,29 @@
load(libdir + 'simd.js');

if (typeof SIMD === "undefined")
    quit();

setJitCompilerOption("baseline.warmup.trigger", 10);
setJitCompilerOption("ion.warmup.trigger", 30);

var i4 = SIMD.Int32x4;
var i4sub = SIMD.Int32x4.sub;

// Box a scalar into an Int32x4 with all four lanes equal to it.
function simdbox(i) {
    return i4(i, i, i, i);
}

// Allocate enough live boxed SIMD values to overflow the nursery, forcing
// minor GCs while everything is still reachable through the array.
function test() {
    var out = [];
    for (var n = 0; n < 100000; n++) {
        out.push(simdbox(n));
    }
    return out;
}

// Every value must have survived the nursery evictions intact.
var arr = test();
for (var i = 0; i < arr.length; i++)
    assertEqX4(arr[i], [i, i, i, i]);

Просмотреть файл

@ -0,0 +1,70 @@
// Check that SIMD box allocations sunk by Ion can be rebuilt by recover
// instructions when a bailout occurs.
load(libdir + 'simd.js');
if (!this.hasOwnProperty("SIMD"))
quit();
// This test case ensure that if we are able to optimize SIMD, then we can use
// recover instructions to get rid of the allocations. So, there is no value
// (and the test case would fail) if we are not able to inline SIMD
// constructors.
if (!isSimdAvailable())
quit();
setJitCompilerOption("baseline.warmup.trigger", 10);
setJitCompilerOption("ion.warmup.trigger", 20);
// This function is used to cause an invalidation after having removed a branch
// after DCE. This is made to check if we correctly recover an array
// allocation.
var uceFault = function (i) {
if (i > 98)
uceFault = function (i) { return true; };
return false;
};
// Check that we can correctly recover a boxed value.
// Each variant below gets its own clone of uceFault (via eval/uneval) so the
// invalidation of one function does not perturb the others.
var uceFault_simdBox_i4 = eval(uneval(uceFault).replace('uceFault', 'uceFault_simdBox_i4'));
function simdBox_i4(i) {
var a = SIMD.Int32x4(i, i, i, i);
if (uceFault_simdBox_i4(i) || uceFault_simdBox_i4(i))
assertEqX4(a, [i, i, i, i]);
assertRecoveredOnBailout(a, true);
return 0;
}
var uceFault_simdBox_u4 = eval(uneval(uceFault).replace('uceFault', 'uceFault_simdBox_u4'));
function simdBox_u4(i) {
// Lane values straddle the int32/uint32 boundary; >>> 0 normalizes the
// expected values to unsigned.
var a = SIMD.Uint32x4(i, 98 - i, i + 0x7ffffff0, i + 0xffffff00);
if (uceFault_simdBox_u4(i) || uceFault_simdBox_u4(i))
assertEqX4(a, [i, 98 - i, i + 0x7ffffff0, i + 0xffffff00].map(x => x >>> 0));
assertRecoveredOnBailout(a, true);
return 0;
}
var uceFault_simdBox_f4 = eval(uneval(uceFault).replace('uceFault', 'uceFault_simdBox_f4'));
function simdBox_f4(i) {
// Expected values must be rounded to float32 precision.
var a = SIMD.Float32x4(i, i + 0.1, i + 0.2, i + 0.3);
if (uceFault_simdBox_f4(i) || uceFault_simdBox_f4(i))
assertEqX4(a, [i, i + 0.1, i + 0.2, i + 0.3].map(Math.fround));
assertRecoveredOnBailout(a, true);
return 0;
}
var uceFault_simdBox_b4 = eval(uneval(uceFault).replace('uceFault', 'uceFault_simdBox_b4'));
function simdBox_b4(i) {
var val1 = i%2 === 0,
val2 = !val1;
var a = SIMD.Bool32x4(val1, val2, val1, val2);
if (uceFault_simdBox_b4(i) || uceFault_simdBox_b4(i))
assertEqX4(a, [val1, val2, val1, val2]);
assertRecoveredOnBailout(a, true);
return 0;
}
// i crosses 98, triggering the uceFault invalidation in each variant.
for (var i = 0; i < 100; i++) {
simdBox_i4(i);
simdBox_u4(i);
simdBox_f4(i);
simdBox_b4(i);
}

Просмотреть файл

@ -0,0 +1,181 @@
// Test SIMD replaceLane: correct results for every lane index, plus the
// TypeError/RangeError bailouts for wrong vector types and bad indices.
load(libdir + 'simd.js');
setJitCompilerOption("ion.warmup.trigger", 50);
// Happy path: replacing each lane leaves the other three untouched.
function f() {
var f4 = SIMD.Float32x4(1, 2, 3, 4);
var i4 = SIMD.Int32x4(1, 2, 3, 4);
var b4 = SIMD.Bool32x4(true, false, true, false);
for (var i = 0; i < 150; i++) {
assertEqX4(SIMD.Int32x4.replaceLane(i4, 0, 42), [42, 2, 3, 4]);
assertEqX4(SIMD.Int32x4.replaceLane(i4, 1, 42), [1, 42, 3, 4]);
assertEqX4(SIMD.Int32x4.replaceLane(i4, 2, 42), [1, 2, 42, 4]);
assertEqX4(SIMD.Int32x4.replaceLane(i4, 3, 42), [1, 2, 3, 42]);
assertEqX4(SIMD.Float32x4.replaceLane(f4, 0, 42), [42, 2, 3, 4]);
assertEqX4(SIMD.Float32x4.replaceLane(f4, 1, 42), [1, 42, 3, 4]);
assertEqX4(SIMD.Float32x4.replaceLane(f4, 2, 42), [1, 2, 42, 4]);
assertEqX4(SIMD.Float32x4.replaceLane(f4, 3, 42), [1, 2, 3, 42]);
assertEqX4(SIMD.Bool32x4.replaceLane(b4, 0, false), [false, false, true, false]);
assertEqX4(SIMD.Bool32x4.replaceLane(b4, 1, true), [true, true, true, false]);
assertEqX4(SIMD.Bool32x4.replaceLane(b4, 2, false), [true, false, false, false]);
assertEqX4(SIMD.Bool32x4.replaceLane(b4, 3, true), [true, false, true, true]);
}
}
f();
// Bailout paths: each loop below warms up Ion for 149 iterations with valid
// arguments, then on the 150th passes either a vector of the wrong type
// (expects TypeError) or an out-of-range / non-integer lane index (expects
// RangeError). The throw must only happen on the final iteration.
function e() {
var f4 = SIMD.Float32x4(1, 2, 3, 4);
var i4 = SIMD.Int32x4(1, 2, 3, 4);
var b4 = SIMD.Bool32x4(true, false, true, false);
for (let i = 0; i < 150; i++) {
let caught = false;
try {
let x = SIMD.Int32x4.replaceLane(i < 149 ? i4 : f4, 0, 42);
} catch(e) {
assertEq(e instanceof TypeError, true);
assertEq(i, 149);
caught = true;
}
assertEq(i < 149 || caught, true);
}
for (let i = 0; i < 150; i++) {
let caught = false;
try {
let x = SIMD.Int32x4.replaceLane(i < 149 ? i4 : b4, 0, 42);
} catch(e) {
assertEq(e instanceof TypeError, true);
assertEq(i, 149);
caught = true;
}
assertEq(i < 149 || caught, true);
}
for (let i = 0; i < 150; i++) {
let caught = false;
try {
let x = SIMD.Int32x4.replaceLane(i4, i < 149 ? 0 : 4, 42);
} catch(e) {
assertEq(e instanceof RangeError, true);
assertEq(i, 149);
caught = true;
}
assertEq(i < 149 || caught, true);
}
for (let i = 0; i < 150; i++) {
let caught = false;
try {
let x = SIMD.Int32x4.replaceLane(i4, i < 149 ? 0 : 1.1, 42);
} catch(e) {
assertEq(e instanceof RangeError, true);
assertEq(i, 149);
caught = true;
}
assertEq(i < 149 || caught, true);
}
for (let i = 0; i < 150; i++) {
let caught = false;
try {
let x = SIMD.Float32x4.replaceLane(i < 149 ? f4 : i4, 0, 42);
} catch(e) {
assertEq(e instanceof TypeError, true);
assertEq(i, 149);
caught = true;
}
assertEq(i < 149 || caught, true);
}
for (let i = 0; i < 150; i++) {
let caught = false;
try {
let x = SIMD.Float32x4.replaceLane(i < 149 ? f4 : b4, 0, 42);
} catch(e) {
assertEq(e instanceof TypeError, true);
assertEq(i, 149);
caught = true;
}
assertEq(i < 149 || caught, true);
}
for (let i = 0; i < 150; i++) {
let caught = false;
try {
let x = SIMD.Float32x4.replaceLane(f4, i < 149 ? 0 : 4, 42);
} catch(e) {
assertEq(e instanceof RangeError, true);
assertEq(i, 149);
caught = true;
}
assertEq(i < 149 || caught, true);
}
for (let i = 0; i < 150; i++) {
let caught = false;
try {
let x = SIMD.Float32x4.replaceLane(f4, i < 149 ? 0 : 1.1, 42);
} catch(e) {
assertEq(e instanceof RangeError, true);
assertEq(i, 149);
caught = true;
}
assertEq(i < 149 || caught, true);
}
for (let i = 0; i < 150; i++) {
let caught = false;
try {
let x = SIMD.Bool32x4.replaceLane(i < 149 ? b4 : i4, 0, true);
} catch(e) {
assertEq(e instanceof TypeError, true);
assertEq(i, 149);
caught = true;
}
assertEq(i < 149 || caught, true);
}
for (let i = 0; i < 150; i++) {
let caught = false;
try {
let x = SIMD.Bool32x4.replaceLane(i < 149 ? b4 : f4, 0, true);
} catch(e) {
assertEq(e instanceof TypeError, true);
assertEq(i, 149);
caught = true;
}
assertEq(i < 149 || caught, true);
}
for (let i = 0; i < 150; i++) {
let caught = false;
try {
let x = SIMD.Bool32x4.replaceLane(b4, i < 149 ? 0 : 4, true);
} catch(e) {
assertEq(e instanceof RangeError, true);
assertEq(i, 149);
caught = true;
}
assertEq(i < 149 || caught, true);
}
for (let i = 0; i < 150; i++) {
let caught = false;
try {
let x = SIMD.Bool32x4.replaceLane(b4, i < 149 ? 0 : 1.1, true);
} catch(e) {
assertEq(e instanceof RangeError, true);
assertEq(i, 149);
caught = true;
}
assertEq(i < 149 || caught, true);
}
}
e();

Просмотреть файл

@ -0,0 +1,37 @@
// Test saturating 8-bit lane arithmetic: addSaturate/subSaturate clamp to
// the int8/uint8 range instead of wrapping.
load(libdir + 'simd.js');
setJitCompilerOption("ion.warmup.trigger", 50);
const INT8_MIN = -128;
const INT8_MAX = 127;
const UINT8_MAX = 255;
// Scalar reference: clamp to the signed int8 range.
function sat8(x) {
if (x < INT8_MIN) return INT8_MIN;
if (x > INT8_MAX) return INT8_MAX;
return x;
}
// Scalar reference: clamp to the unsigned uint8 range.
function usat8(x) {
if (x < 0) return 0;
if (x > UINT8_MAX) return UINT8_MAX;
return x;
}
function f() {
// Only the first four of the sixteen lanes are given explicitly; the
// constructors zero-fill the rest. binaryX applies the scalar reference
// lane-wise to build the expected vector.
var i1 = SIMD.Int8x16(1, 100, 3, 4);
var i2 = SIMD.Int8x16(4, 30, 2, 1);
var u1 = SIMD.Uint8x16(1, 2, 3, 4);
var u2 = SIMD.Uint8x16(4, 3, 2, 1);
for (var i = 0; i < 150; i++) {
assertEqX4(SIMD.Int8x16.addSaturate(i1, i2), binaryX((x, y) => sat8(x + y), i1, i2));
assertEqX4(SIMD.Int8x16.subSaturate(i1, i2), binaryX((x, y) => sat8(x - y), i1, i2));
assertEqX4(SIMD.Uint8x16.addSaturate(u1, u2), binaryX((x, y) => usat8(x + y), u1, u2));
assertEqX4(SIMD.Uint8x16.subSaturate(u1, u2), binaryX((x, y) => usat8(x - y), u1, u2));
}
}
f();

Просмотреть файл

@ -0,0 +1,35 @@
load(libdir + 'simd.js');
setJitCompilerOption("ion.warmup.trigger", 50);

// Scalar reference implementation of SIMD select: lane k of the result comes
// from ifTrue when mask lane k is true, otherwise from ifFalse.
function select(type, mask, ifTrue, ifFalse) {
    var expected = [];
    for (var lane = 0; lane < 4; lane++) {
        var source = SIMD.Bool32x4.extractLane(mask, lane) ? ifTrue : ifFalse;
        expected.push(type.extractLane(source, lane));
    }
    return expected;
}

// Compare the native select against the scalar reference for both element
// types, with masks covering mixed true/false lanes (including NaN, Infinity
// and -0 payloads for the float case).
function f() {
    var f1 = SIMD.Float32x4(1, 2, 3, 4);
    var f2 = SIMD.Float32x4(NaN, Infinity, 3.14, -0);
    var i1 = SIMD.Int32x4(2, 3, 5, 8);
    var i2 = SIMD.Int32x4(13, 37, 24, 42);
    var TTFT = SIMD.Bool32x4(true, true, false, true);
    var TFTF = SIMD.Bool32x4(true, false, true, false);
    var mask = SIMD.Int32x4(0xdeadbeef, 0xbaadf00d, 0x00ff1ce, 0xdeadc0de);
    for (var iter = 0; iter < 150; iter++) {
        assertEqX4(SIMD.Float32x4.select(TTFT, f1, f2), select(SIMD.Float32x4, TTFT, f1, f2));
        assertEqX4(SIMD.Float32x4.select(TFTF, f1, f2), select(SIMD.Float32x4, TFTF, f1, f2));
        assertEqX4(SIMD.Int32x4.select(TFTF, i1, i2), select(SIMD.Int32x4, TFTF, i1, i2));
        assertEqX4(SIMD.Int32x4.select(TTFT, i1, i2), select(SIMD.Int32x4, TTFT, i1, i2));
    }
}
f();

Просмотреть файл

@ -0,0 +1,75 @@
load(libdir + 'simd.js');
setJitCompilerOption("ion.warmup.trigger", 50);

// Fix the first argument of a two-argument function, returning a unary one.
function curry(f, arg) { return f.bind(null, arg); }

// Scalar reference implementations of the SIMD shifts. Shift counts are
// masked to five bits (count &= 31), matching SIMD shiftLeft/RightByScalar.
function binaryLsh(count, v) { count &= 31; return (v << count) | 0; }
function lsh(count) { return curry(binaryLsh, count); }
function binaryRsh(count, v) { count &= 31; return (v >> count) | 0; }
function rsh(count) { return curry(binaryRsh, count); }
function binaryUlsh(count, v) { count &= 31; return (v << count) >>> 0; }
function ulsh(count) { return curry(binaryUlsh, count); }
function binaryUrsh(count, v) { count &= 31; return v >>> count; }
function ursh(count) { return curry(binaryUrsh, count); }

// Compare native per-lane shifts against the scalar references for both
// constant and variable shift counts, including out-of-range counts that
// exercise the & 31 masking. (The unused `var r; ... return r;` of the
// original has been dropped; f always returned undefined anyway.)
function f() {
    var v = SIMD.Int32x4(1, 2, -3, 4);
    var u = SIMD.Uint32x4(1, 0x55005500, -3, 0xaa00aa00);
    var a = [1, 2, -3, 4];
    var b = [1, 0x55005500, -3, 0xaa00aa00];
    var shifts = [-2, -1, 0, 1, 31, 32, 33];
    for (var i = 0; i < 150; i++) {
        // Constant shift counts
        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, -1), a.map(lsh(-1)));
        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 0), a.map(lsh(0)));
        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 1), a.map(lsh(1)));
        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 2), a.map(lsh(2)));
        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 31), a.map(lsh(31)));
        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 32), a.map(lsh(32)));
        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 33), a.map(lsh(33)));
        // rsh(31) == rsh(-1) because the count is masked to five bits.
        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, -1), a.map(rsh(31)));
        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 0), a.map(rsh(0)));
        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 1), a.map(rsh(1)));
        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 2), a.map(rsh(2)));
        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 31), a.map(rsh(31)));
        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 32), a.map(rsh(32)));
        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 33), a.map(rsh(33)));
        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, -1), b.map(ulsh(-1)));
        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 0), b.map(ulsh(0)));
        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 1), b.map(ulsh(1)));
        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 2), b.map(ulsh(2)));
        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 31), b.map(ulsh(31)));
        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 32), b.map(ulsh(32)));
        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 33), b.map(ulsh(33)));
        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, -1), b.map(ursh(-1)));
        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 0), b.map(ursh(0)));
        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 1), b.map(ursh(1)));
        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 2), b.map(ursh(2)));
        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 31), b.map(ursh(31)));
        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 32), b.map(ursh(32)));
        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 33), b.map(ursh(33)));
        // Non constant shift counts
        var c = shifts[i % shifts.length];
        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, c), a.map(lsh(c)));
        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, c), a.map(rsh(c)));
        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, c), b.map(ulsh(c)));
        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, c), b.map(ursh(c)));
    }
}
f();

Просмотреть файл

@ -0,0 +1,86 @@
// Test SIMD shuffle with both constant and variable lane indices, and the
// bailout behavior for invalid index values.
load(libdir + 'simd.js');
setJitCompilerOption("ion.warmup.trigger", 50);
function f() {
var i1 = SIMD.Int32x4(1, 2, 3, 4);
var i2 = SIMD.Int32x4(5, 6, 7, 8);
var leet = Math.fround(13.37);
var f1 = SIMD.Float32x4(-.5, -0, Infinity, leet);
var f2 = SIMD.Float32x4(42, .5, 23, -10);
// computes all rotations of a given array
function *gen(arr) {
var previous = arr.slice().splice(0, 4);
var i = 4;
for (var j = 0; j < 8; j++) {
yield previous.slice();
previous = previous.splice(1, previous.length - 1);
previous.push(arr[i]);
i = (i + 1) % arr.length;
}
}
// compI/compF hold the expected 4-lane windows for each starting rotation
// of the 8-lane concatenation of the two input vectors.
var compI = [];
var baseI = [];
for (var i = 0; i < 8; i++)
baseI.push(SIMD.Int32x4.extractLane(i < 4 ? i1 : i2, i % 4));
for (var k of gen(baseI))
compI.push(k);
var compF = [];
var baseF = [];
for (var i = 0; i < 8; i++)
baseF.push(SIMD.Float32x4.extractLane(i < 4 ? f1 : f2, i % 4));
for (var k of gen(baseF))
compF.push(k);
for (var i = 0; i < 150; i++) {
// Variable lanes
var r = SIMD.Float32x4.shuffle(f1, f2, i % 8, (i + 1) % 8, (i + 2) % 8, (i + 3) % 8);
assertEqX4(r, compF[i % 8]);
// Constant lanes
assertEqX4(SIMD.Float32x4.shuffle(f1, f2, 3, 2, 4, 5), [leet, Infinity, 42, .5]);
// Variable lanes
var r = SIMD.Int32x4.shuffle(i1, i2, i % 8, (i + 1) % 8, (i + 2) % 8, (i + 3) % 8);
assertEqX4(r, compI[i % 8]);
// Constant lanes
assertEqX4(SIMD.Int32x4.shuffle(i1, i2, 3, 2, 4, 5), [4, 3, 5, 6]);
}
}
// Warm up for 149 iterations with a valid first lane index, then pass
// uglyDuckling on the last one. Values that don't coerce to an integer in
// [0, 7] must throw (TypeError or RangeError); values that coerce to 0
// (false, null, " 0.0 ") must not.
function testBailouts(expectException, uglyDuckling) {
var i1 = SIMD.Int32x4(1, 2, 3, 4);
var i2 = SIMD.Int32x4(5, 6, 7, 8);
for (var i = 0; i < 150; i++) {
// Test bailouts
var value = i == 149 ? uglyDuckling : 0;
var caught = false;
try {
assertEqX4(SIMD.Int32x4.shuffle(i1, i2, value, 2, 4, 5), [1, 3, 5, 6]);
} catch(e) {
print(e);
caught = true;
assertEq(i, 149);
assertEq(e instanceof TypeError || e instanceof RangeError, true);
}
if (i == 149)
assertEq(caught, expectException);
}
}
f();
testBailouts(true, -1);
testBailouts(true, 8);
testBailouts(true, 2.5);
testBailouts(true, undefined);
testBailouts(true, {});
testBailouts(true, 'one');
testBailouts(false, false);
testBailouts(false, null);
testBailouts(false, " 0.0 ");

Просмотреть файл

@ -0,0 +1,15 @@
load(libdir + 'simd.js');
setJitCompilerOption("ion.warmup.trigger", 50);

// splat must broadcast its scalar argument into all four lanes, for integer,
// float and boolean vectors alike; loop enough times to reach Ion.
function f() {
    for (var iteration = 0; iteration < 150; iteration++) {
        assertEqX4(SIMD.Int32x4.splat(42), [42, 42, 42, 42]);
        assertEqX4(SIMD.Float32x4.splat(42), [42, 42, 42, 42]);
        assertEqX4(SIMD.Bool32x4.splat(true), [true, true, true, true]);
        assertEqX4(SIMD.Bool32x4.splat(false), [false, false, false, false]);
    }
}
f();

Просмотреть файл

@ -0,0 +1,143 @@
// Test SIMD.Float32x4.store/store1/store2/store3 into typed arrays of every
// element width, plus the RangeError bailout for out-of-bounds stores.
load(libdir + 'simd.js');
setJitCompilerOption("ion.warmup.trigger", 40);
function f() {
// One 64-byte buffer, f32[i] == i + 1, aliased with every typed-array
// flavor so each store path is exercised.
var f32 = new Float32Array(16);
for (var i = 0; i < 16; i++)
f32[i] = i + 1;
var f64 = new Float64Array(f32.buffer);
var i32 = new Int32Array(f32.buffer);
var u32 = new Uint32Array(f32.buffer);
var i16 = new Int16Array(f32.buffer);
var u16 = new Uint16Array(f32.buffer);
var i8 = new Int8Array(f32.buffer);
var u8 = new Uint8Array(f32.buffer);
var f4 = SIMD.Float32x4(42, 43, 44, 45);
// check(n): verify that a storeN wrote exactly the first n lanes of f4
// (42..45) and left the remaining elements at their original 1..4 values,
// then restore the buffer for the next store.
function check(n) {
assertEq(f32[0], 42);
assertEq(f32[1], n > 1 ? 43 : 2);
assertEq(f32[2], n > 2 ? 44 : 3);
assertEq(f32[3], n > 3 ? 45 : 4);
f32[0] = 1;
f32[1] = 2;
f32[2] = 3;
f32[3] = 4;
}
// store writes all four lanes.
function testStore() {
SIMD.Float32x4.store(f64, 0, f4);
check(4);
SIMD.Float32x4.store(f32, 0, f4);
check(4);
SIMD.Float32x4.store(i32, 0, f4);
check(4);
SIMD.Float32x4.store(u32, 0, f4);
check(4);
SIMD.Float32x4.store(i16, 0, f4);
check(4);
SIMD.Float32x4.store(u16, 0, f4);
check(4);
SIMD.Float32x4.store(i8, 0, f4);
check(4);
SIMD.Float32x4.store(u8, 0, f4);
check(4);
}
// store1 writes only lane 0.
function testStore1() {
SIMD.Float32x4.store1(f64, 0, f4);
check(1);
SIMD.Float32x4.store1(f32, 0, f4);
check(1);
SIMD.Float32x4.store1(i32, 0, f4);
check(1);
SIMD.Float32x4.store1(u32, 0, f4);
check(1);
SIMD.Float32x4.store1(i16, 0, f4);
check(1);
SIMD.Float32x4.store1(u16, 0, f4);
check(1);
SIMD.Float32x4.store1(i8, 0, f4);
check(1);
SIMD.Float32x4.store1(u8, 0, f4);
check(1);
}
// store2 writes lanes 0 and 1.
function testStore2() {
SIMD.Float32x4.store2(f64, 0, f4);
check(2);
SIMD.Float32x4.store2(f32, 0, f4);
check(2);
SIMD.Float32x4.store2(i32, 0, f4);
check(2);
SIMD.Float32x4.store2(u32, 0, f4);
check(2);
SIMD.Float32x4.store2(i16, 0, f4);
check(2);
SIMD.Float32x4.store2(u16, 0, f4);
check(2);
SIMD.Float32x4.store2(i8, 0, f4);
check(2);
SIMD.Float32x4.store2(u8, 0, f4);
check(2);
}
// store3 writes lanes 0, 1 and 2.
function testStore3() {
SIMD.Float32x4.store3(f64, 0, f4);
check(3);
SIMD.Float32x4.store3(f32, 0, f4);
check(3);
SIMD.Float32x4.store3(i32, 0, f4);
check(3);
SIMD.Float32x4.store3(u32, 0, f4);
check(3);
SIMD.Float32x4.store3(i16, 0, f4);
check(3);
SIMD.Float32x4.store3(u16, 0, f4);
check(3);
SIMD.Float32x4.store3(i8, 0, f4);
check(3);
SIMD.Float32x4.store3(u8, 0, f4);
check(3);
}
for (var i = 0; i < 150; i++) {
testStore();
testStore1();
testStore2();
testStore3();
}
}
f();
// Warm up with in-bounds stores for 149 iterations, then store one byte past
// the last valid starting index on the final one: the Ion code must bail out
// and throw a RangeError rather than writing out of bounds.
function testBailout(uglyDuckling) {
var f32 = new Float32Array(16);
for (var i = 0; i < 16; i++)
f32[i] = i + 1;
var i8 = new Int8Array(f32.buffer);
var f4 = SIMD.Float32x4(42, 43, 44, 45);
for (var i = 0; i < 150; i++) {
var caught = false;
try {
SIMD.Float32x4.store(i8, (i < 149) ? 0 : (16 << 2) - (4 << 2) + 1, f4);
} catch (e) {
print(e);
assertEq(e instanceof RangeError, true);
caught = true;
}
assertEq(i < 149 || caught, true);
}
}
print('Testing range checks...');
testBailout(-1);
testBailout(-15);
testBailout(12 * 4 + 1);

Просмотреть файл

@ -0,0 +1,104 @@
// Test SIMD swizzle with constant and variable lane indices, plus the
// bailout behavior for invalid index values.
if (!this.hasOwnProperty("SIMD"))
quit();
load(libdir + 'simd.js');
setJitCompilerOption("ion.warmup.trigger", 50);
function f() {
var i4 = SIMD.Int32x4(1, 2, 3, 4);
var leet = Math.fround(13.37);
var f4 = SIMD.Float32x4(-.5, -0, Infinity, leet);
// compI/compF list the expected results for the rotating variable-lane
// swizzles performed in the loop below.
var compI = [
[1,2,3,4],
[2,3,4,1],
[3,4,1,2],
[4,1,2,3]
];
var compF = [
[-.5, -0, Infinity, leet],
[-0, Infinity, leet, -.5],
[Infinity, leet, -.5, -0],
[leet, -.5, -0, Infinity]
];
for (var i = 0; i < 150; i++) {
// Variable lanes
var r = SIMD.Float32x4.swizzle(f4, i % 4, (i + 1) % 4, (i + 2) % 4, (i + 3) % 4);
assertEqX4(r, compF[i % 4]);
// Constant lanes
assertEqX4(SIMD.Float32x4.swizzle(f4, 3, 2, 1, 0), [leet, Infinity, -0, -.5]);
// Variable lanes
var r = SIMD.Int32x4.swizzle(i4, i % 4, (i + 1) % 4, (i + 2) % 4, (i + 3) % 4);
assertEqX4(r, compI[i % 4]);
// Constant lanes
assertEqX4(SIMD.Int32x4.swizzle(i4, 3, 2, 1, 0), [4, 3, 2, 1]);
}
}
// Warm up for 149 iterations with a valid first lane index, then pass
// uglyDuckling on the last one. Values that don't coerce to an integer in
// [0, 3] must throw; values that coerce to 0 (false, null, " 0.0 ") must not.
function testBailouts(expectException, uglyDuckling) {
var i4 = SIMD.Int32x4(1, 2, 3, 4);
for (var i = 0; i < 150; i++) {
// Test bailouts
var value = i == 149 ? uglyDuckling : 0;
var caught = false;
try {
assertEqX4(SIMD.Int32x4.swizzle(i4, value, 3, 2, 0), [1, 4, 3, 1]);
} catch(e) {
print(e);
caught = true;
assertEq(i, 149);
assertEq(e instanceof TypeError || e instanceof RangeError, true);
}
if (i == 149)
assertEq(caught, expectException);
}
}
function testInt32x4SwizzleBailout() {
// Test out-of-bounds non-constant indices. This is expected to throw.
var i4 = SIMD.Int32x4(1, 2, 3, 4);
for (var i = 0; i < 150; i++) {
assertEqX4(SIMD.Int32x4.swizzle(i4, i, 3, 2, 0), [i + 1, 4, 3, 1]);
}
}
f();
testBailouts(true, -1);
testBailouts(true, 4);
testBailouts(true, 2.5);
testBailouts(true, undefined);
testBailouts(true, {});
testBailouts(true, 'one');
testBailouts(false, false);
testBailouts(false, null);
testBailouts(false, " 0.0 ");
try {
testInt32x4SwizzleBailout();
throw 'not caught';
} catch(e) {
assertEq(e instanceof RangeError, true);
}
// A non-integer index appearing only on the very last iteration must still
// throw after 298 successful iterations (zappa records the last good one).
(function() {
var zappa = 0;
function testBailouts() {
var i4 = SIMD.Int32x4(1, 2, 3, 4);
for (var i = 0; i < 300; i++) {
var value = i == 299 ? 2.5 : 1;
SIMD.Int32x4.swizzle(i4, value, 3, 2, 0);
zappa = i;
}
}
try { testBailouts(); } catch (e) {}
assertEq(zappa, 298);
})();

Просмотреть файл

@ -0,0 +1,86 @@
load(libdir + 'simd.js');
setJitCompilerOption("ion.warmup.trigger", 30);

// Testing Uint32 <-> Float32 conversions.
// These conversions deserve special attention because SSE doesn't provide
// simple conversion instructions: the SIMD paths must agree exactly with the
// scalar typed-array conversion semantics.

// Convert an Uint32Array to a Float32Array using scalar conversions.
function cvt_utof_scalar(u32s, f32s) {
    assertEq(u32s.length, f32s.length);
    for (var i = 0; i < u32s.length; i++) {
        f32s[i] = u32s[i];
    }
}

// Convert an Uint32Array to a Float32Array using simd conversions.
function cvt_utof_simd(u32s, f32s) {
    assertEq(u32s.length, f32s.length);
    for (var i = 0; i < u32s.length; i += 4) {
        SIMD.Float32x4.store(f32s, i, SIMD.Float32x4.fromUint32x4(SIMD.Uint32x4.load(u32s, i)));
    }
}

// Convert a Float32Array to an Uint32Array using scalar conversions.
function cvt_ftou_scalar(f32s, u32s) {
    assertEq(f32s.length, u32s.length);
    for (var i = 0; i < f32s.length; i++) {
        u32s[i] = f32s[i];
    }
}

// Convert a Float32Array to an Uint32Array using simd conversions.
function cvt_ftou_simd(f32s, u32s) {
    assertEq(f32s.length, u32s.length);
    for (var i = 0; i < f32s.length; i += 4) {
        SIMD.Uint32x4.store(u32s, i, SIMD.Uint32x4.fromFloat32x4(SIMD.Float32x4.load(f32s, i)));
    }
}

// Element-wise equality of two typed arrays of the same length.
function check(a, b) {
    assertEq(a.length, b.length);
    for (var i = 0; i < a.length; i++) {
        assertEq(a[i], b[i]);
    }
}

// Uint32x4 --> Float32x4 tests.
// Inputs cover all four quadrants of the uint32 range, including values
// above INT32_MAX where a naive signed conversion would go wrong.
var src = new Uint32Array(8000);
var dst1 = new Float32Array(8000);
var dst2 = new Float32Array(8000);
for (var i = 0; i < 2000; i++) {
    src[i] = i;
    src[i + 2000] = 0x7fffffff - i;
    src[i + 4000] = 0x80000000 + i;
    src[i + 6000] = 0xffffffff - i;
}
for (var n = 0; n < 10; n++) {
    cvt_utof_scalar(src, dst1);
    cvt_utof_simd(src, dst2);
    check(dst1, dst2);
}

// Float32x4 --> Uint32x4 tests.
var fsrc = dst1;
var fdst1 = new Uint32Array(8000);
var fdst2 = new Uint32Array(8000);
// The 0xffffffff entries in fsrc round to 0x1.0p32f which throws.
// Go as high as 0x0.ffffffp32f.
for (var i = 0; i < 2000; i++) {
    fsrc[i + 6000] = 0xffffff7f - i;
}
// Truncation towards 0. (Semicolons added; the original relied on ASI.)
fsrc[1990] = -0.9;
fsrc[1991] = 0.9;
fsrc[1992] = 1.9;
for (var n = 0; n < 10; n++) {
    cvt_ftou_scalar(fsrc, fdst1);
    cvt_ftou_simd(fsrc, fdst2);
    check(fdst1, fdst2);
}

Просмотреть файл

@ -0,0 +1,35 @@
load(libdir + 'simd.js');
setJitCompilerOption("ion.warmup.trigger", 50);

// Flip every bit of a number's float32 representation, reading the result
// back as a float32 (uses a shared buffer viewed as both int32 and float32).
var notf = (function() {
    var asInt = new Int32Array(1);
    var asFloat = new Float32Array(asInt.buffer);
    return function(x) {
        asFloat[0] = x;
        asInt[0] = ~asInt[0];
        return asFloat[0];
    }
})();

// Compare each SIMD unary operation against its scalar reference applied
// lane-wise (float results rounded with Math.fround, int results | 0).
function f() {
    var f4 = SIMD.Float32x4(1, 2, 3, 4);
    var i4 = SIMD.Int32x4(1, 2, 3, 4);
    var b4 = SIMD.Bool32x4(true, false, true, false);
    var BitOrZero = (x) => x | 0;
    for (var round = 0; round < 150; round++) {
        assertEqX4(SIMD.Float32x4.neg(f4), unaryX4((x) => -x, f4, Math.fround));
        assertEqX4(SIMD.Float32x4.abs(f4), unaryX4(Math.abs, f4, Math.fround));
        assertEqX4(SIMD.Float32x4.sqrt(f4), unaryX4(Math.sqrt, f4, Math.fround));
        // Approximation instructions only need to be close, not exact.
        assertEqX4(SIMD.Float32x4.reciprocalApproximation(f4), unaryX4((x) => 1 / x, f4, Math.fround), assertNear);
        assertEqX4(SIMD.Float32x4.reciprocalSqrtApproximation(f4), unaryX4((x) => 1 / Math.sqrt(x), f4, Math.fround), assertNear);
        assertEqX4(SIMD.Int32x4.not(i4), unaryX4((x) => ~x, i4, BitOrZero));
        assertEqX4(SIMD.Int32x4.neg(i4), unaryX4((x) => -x, i4, BitOrZero));
        assertEqX4(SIMD.Bool32x4.not(b4), unaryX4((x) => !x, b4, (x) => x ));
    }
}
f();

Просмотреть файл

@ -0,0 +1,144 @@
load(libdir + 'simd.js');
setJitCompilerOption("baseline.warmup.trigger", 10);
setJitCompilerOption("ion.warmup.trigger", 30);
var max = 40, pivot = 35;
var i32x4 = SIMD.Int32x4;
var f32x4 = SIMD.Float32x4;
var i32x4Add = SIMD.Int32x4.add;
var FakeSIMDType = function (o) { this.x = o.x; this.y = o.y; this.z = o.z; this.w = o.w; };
if (this.hasOwnProperty("TypedObject")) {
var TO = TypedObject;
FakeSIMDType = new TO.StructType({ x: TO.int32, y: TO.int32, z: TO.int32, w: TO.int32 });
}
function simdunbox_bail_undef(i, lhs, rhs) {
return i32x4Add(lhs, rhs);
}
function simdunbox_bail_object(i, lhs, rhs) {
return i32x4Add(lhs, rhs);
}
function simdunbox_bail_typeobj(i, lhs, rhs) {
return i32x4Add(lhs, rhs);
}
function simdunbox_bail_badsimd(i, lhs, rhs) {
return i32x4Add(lhs, rhs);
}
var arr_undef = [ i32x4(0, 1, 1, 2), i32x4(1, 1, 2, 3) ];
var fail_undef = 0;
var arr_object = [ i32x4(0, 1, 1, 2), i32x4(1, 1, 2, 3) ];
var fail_object = 0;
var arr_typeobj = [ i32x4(0, 1, 1, 2), i32x4(1, 1, 2, 3) ];
var fail_typeobj = 0;
var arr_badsimd = [ i32x4(0, 1, 1, 2), i32x4(1, 1, 2, 3) ];
var fail_badsimd = 0;
for (var i = 0; i < max; i++) {
try {
arr_undef[i + 2] = simdunbox_bail_undef(i, arr_undef[i], arr_undef[i + 1]);
} catch (x) {
arr_undef[i + 2] = arr_undef[i - 1];
fail_undef++;
}
try {
arr_object[i + 2] = simdunbox_bail_object(i, arr_object[i], arr_object[i + 1]);
} catch (x) {
arr_object[i + 2] = arr_object[i - 1];
fail_object++;
}
try {
arr_typeobj[i + 2] = simdunbox_bail_typeobj(i, arr_typeobj[i], arr_typeobj[i + 1]);
} catch (x) {
arr_typeobj[i + 2] = arr_typeobj[i - 1];
fail_typeobj++;
}
try {
arr_badsimd[i + 2] = simdunbox_bail_badsimd(i, arr_badsimd[i], arr_badsimd[i + 1]);
} catch (x) {
arr_badsimd[i + 2] = arr_badsimd[i - 1];
fail_badsimd++;
}
if (i + 2 == pivot) {
arr_undef[pivot] = undefined;
arr_object[pivot] = { x: 0, y: 1, z: 2, w: 3 };
arr_typeobj[pivot] = new FakeSIMDType({ x: 0, y: 1, z: 2, w: 3 });
arr_badsimd[pivot] = f32x4(0, 1, 2, 3);
}
}
assertEq(fail_undef, 2);
assertEq(fail_object, 2);
assertEq(fail_typeobj, 2);
assertEq(fail_badsimd, 2);
// Assert that all SIMD values are correct.
// Compares the four lanes (x, y, z, w) of `real` against the array
// `expected`, using assertFunc (default: the shell's assertEq) per lane.
function assertEqX4(real, expected, assertFunc) {
    if (typeof assertFunc === 'undefined')
        assertFunc = assertEq;
    var lanes = ["x", "y", "z", "w"];
    for (var lane = 0; lane < 4; lane++)
        assertFunc(real[lanes[lane]], expected[lane]);
}
// Reference Fibonacci sequence, wrapped to int32 to match Int32x4.add.
var fib = [0, 1];
for (i = 0; i < max + 5; i++)
    fib[i+2] = (fib[i] + fib[i+1]) | 0;
// Check every result vector against its expected Fibonacci window.  Entry
// `pivot` holds a poison value (skipped), and entries after it continue from
// the element three steps back, re-seeded by the catch blocks above.
for (i = 0; i < max; i++) {
    if (i == pivot)
        continue;
    var ref = fib.slice(i < pivot ? i : i - 3);
    assertEqX4(arr_undef[i], ref);
    assertEqX4(arr_object[i], ref);
    assertEqX4(arr_typeobj[i], ref);
    assertEqX4(arr_badsimd[i], ref);
}
// Check that unbox operations aren't removed
(function() {
    // `add` is polymorphic: even iterations use Int32x4.add, odd iterations
    // Float32x4.add.  On the final iteration it receives a mismatched
    // operand, which must still be caught by the (non-eliminated) unbox.
    function add(i, v, w) {
        if (i % 2 == 0) {
            SIMD.Int32x4.add(v, w);
        } else {
            SIMD.Float32x4.add(v, w);
        }
    }
    var i = 0;
    var caught = false;
    var f4 = SIMD.Float32x4(1,2,3,4);
    var i4 = SIMD.Int32x4(1,2,3,4);
    try {
        for (; i < 200; i++) {
            if (i % 2 == 0) {
                add(i, i4, i4);
            } else if (i == 199) {
                // Wrong operand type: Float32x4.add is handed an Int32x4.
                add(i, i4, f4);
            } else {
                add(i, f4, f4);
            }
        }
    } catch(e) {
        print(e);
        assertEq(e instanceof TypeError, true);
        assertEq(i, 199);
        caught = true;
    }
    // Either we never reached the mismatch (no JIT) or it threw as expected.
    assertEq(i < 199 || caught, true);
})();

Просмотреть файл

@ -13,3 +13,41 @@ var v = asmLink(asmCompile('global', `
`), this)();
assertEq(v, NaN);
// SIMD support is build- and run-time conditional; skip quietly when absent.
if (!isSimdAvailable() || typeof SIMD === 'undefined') {
    quit(0);
}
// .1e+71 overflows float32 to Infinity when splatted, so the extracted lane
// is Infinity and Infinity / Infinity must come back to JS as NaN.
var v = asmLink(asmCompile('global', `
"use asm";
var frd = global.Math.fround;
var Float32x4 = global.SIMD.Float32x4;
var splat = Float32x4.splat;
var ext = Float32x4.extractLane;
function e() {
var v = Float32x4(0,0,0,0);
var x = frd(0.);
v = splat(.1e+71);
x = ext(v,0);
x = frd(x / x);
return +x;
}
return e;
`), this)();
assertEq(v, NaN);
// Bug 1130618: without GVN
setJitCompilerOption("ion.gvn.enable", 0);
// Same float32 overflow as above, but extracting the lane directly: the
// splatted .1e+71 must still round to Infinity with GVN disabled.
var v = asmLink(asmCompile('global', `
"use asm";
var Float32x4 = global.SIMD.Float32x4;
var splat = Float32x4.splat;
var ext = Float32x4.extractLane;
function e() {
return +ext(splat(.1e+71),0);
}
return e;
`), this)();
assertEq(v, Infinity);

Просмотреть файл

@ -0,0 +1,28 @@
load(libdir + "asm.js");
load(libdir + "asserts.js");
if (typeof newGlobal !== 'function' ||
    !isSimdAvailable() ||
    typeof SIMD === 'undefined')
{
    quit();
}
// Cross-compartment stdlib proxy whose SIMD property "disappears" on the
// second lookup: simdGet counts accesses, and the second
// getOwnPropertyDescriptor for "SIMD" returns an empty descriptor.  Linking
// must detect the inconsistent stdlib and fail.
var stdlib = new (newGlobal().Proxy)(this, new Proxy({
    simdGet: 0,
    getOwnPropertyDescriptor(t, pk) {
        if (pk === "SIMD" && this.simdGet++ === 1) {
            return {};
        }
        return Reflect.getOwnPropertyDescriptor(t, pk);
    }
}, {
    get(t, pk, r) {
        print("trap", pk);
        return Reflect.get(t, pk, r);
    }
}));
var m = asmCompile('stdlib', '"use asm"; var i4=stdlib.SIMD.Int32x4; var i4add=i4.add; return {}');
assertAsmLinkFail(m, stdlib);

Просмотреть файл

@ -0,0 +1,198 @@
/* -*- Mode: javascript; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 ; js-indent-level : 2 ; js-curly-indent-offset: 0 -*- */
/* vim: set ts=4 et sw=4 tw=80: */
// Author: Peter Jensen
load(libdir + "asm.js");
if (!isSimdAvailable() || typeof SIMD === 'undefined') {
    print("won't run tests as simd extensions aren't activated yet");
    quit(0);
}
// Flocking-birds benchmark.  Heap layout (float32 indices): positions in
// bufferF32[0 .. NUM_BIRDS), velocities in [NUM_BIRDS .. 2*NUM_BIRDS), and
// the acceleration table starting at NUM_BIRDS * 2.
const NUM_BIRDS = 30;
const NUM_UPDATES = 20;
const ACCEL_DATA_STEPS = 30;
var buffer = new ArrayBuffer(0x200000);
var bufferF32 = new Float32Array(buffer);
var actualBirds = 0;
// Reset the bird count and (re)fill the acceleration data table that the
// asm.js module reads from f32[NUM_BIRDS * 2 + ...].
function init() {
    actualBirds = 0;
    // Make it a power of two, for quick modulo wrapping.
    var accelDataValues = [10.0, 9.5, 9.0, 8.0, 7.0, 6.0, 5.5, 5.0, 5.0, 5.0, 5.5, 6.0, 7.0, 8.0, 9.0, 10.0];
    accelDataValues = accelDataValues.map(function(v) { return 50*v; });
    var accelDataValuesLength = accelDataValues.length;
    assertEq(accelDataValuesLength, 16); // Hard coded in the asm.js module
    // Fixed: declare the loop variable — the original `for (i = 0; ...)`
    // leaked an accidental global `i` onto the global object.
    for (var i = 0; i < accelDataValuesLength; i++)
        bufferF32[i + NUM_BIRDS * 2] = accelDataValues[i];
}
// Append one bird: position at index `actualBirds`, velocity NUM_BIRDS slots
// later.  Returns the new bird's index.
function addBird(pos, vel) {
    var index = actualBirds++;
    bufferF32[index] = pos;
    bufferF32[index + NUM_BIRDS] = vel;
    return index;
}
// FFI callback: lets the asm.js module query how many birds were added.
function getActualBirds() {
    return actualBirds;
}
// asm.js kernel source.  `update(timeDelta)` processes birds four at a time
// (one Float32x4 per group): it integrates position/velocity over `steps`
// sub-steps using the acceleration table, and reflects (negates) the
// velocity of any bird whose position exceeds getMaxPos.
var code = `
"use asm";
var toF = global.Math.fround;
var u8 = new global.Uint8Array(buffer);
var f32 = new global.Float32Array(buffer);
const maxBirds = 100000;
const maxBirdsx4 = 400000;
const maxBirdsx8 = 800000;
const accelMask = 0x3c;
const mk4 = 0x000ffff0;
const getMaxPos = 1000.0;
const getAccelDataSteps = imp.accelDataSteps | 0;
var getActualBirds = imp.getActualBirds;
var i4 = global.SIMD.Int32x4;
var f4 = global.SIMD.Float32x4;
var b4 = global.SIMD.Bool32x4;
var i4add = i4.add;
var i4and = i4.and;
var f4select = f4.select;
var f4add = f4.add;
var f4sub = f4.sub;
var f4mul = f4.mul;
var f4greaterThan = f4.greaterThan;
var f4splat = f4.splat;
var f4load = f4.load;
var f4store = f4.store;
var b4any = b4.anyTrue;
const zerox4 = f4(0.0,0.0,0.0,0.0);
function declareHeapSize() {
f32[0x0007ffff] = toF(0.0);
}
function update(timeDelta) {
timeDelta = toF(timeDelta);
// var steps = Math.ceil(timeDelta/accelData.interval);
var steps = 0;
var subTimeDelta = toF(0.0);
var actualBirds = 0;
var maxPos = toF(0.0);
var maxPosx4 = f4(0.0,0.0,0.0,0.0);
var subTimeDeltax4 = f4(0.0,0.0,0.0,0.0);
var subTimeDeltaSquaredx4 = f4(0.0,0.0,0.0,0.0);
var point5x4 = f4(0.5, 0.5, 0.5, 0.5);
var i = 0;
var len = 0;
var accelIndex = 0;
var newPosx4 = f4(0.0,0.0,0.0,0.0);
var newVelx4 = f4(0.0,0.0,0.0,0.0);
var accel = toF(0.0);
var accelx4 = f4(0.0,0.0,0.0,0.0);
var a = 0;
var posDeltax4 = f4(0.0,0.0,0.0,0.0);
var cmpx4 = b4(0,0,0,0);
var newVelTruex4 = f4(0.0,0.0,0.0,0.0);
steps = getAccelDataSteps | 0;
subTimeDelta = toF(toF(timeDelta / toF(steps | 0)) / toF(1000.0));
actualBirds = getActualBirds() | 0;
maxPos = toF(+getMaxPos);
maxPosx4 = f4splat(maxPos);
subTimeDeltax4 = f4splat(subTimeDelta);
subTimeDeltaSquaredx4 = f4mul(subTimeDeltax4, subTimeDeltax4);
len = ((actualBirds + 3) >> 2) << 4;
for (i = 0; (i | 0) < (len | 0); i = (i + 16) | 0) {
accelIndex = 0;
newPosx4 = f4load(u8, i & mk4);
newVelx4 = f4load(u8, (i & mk4) + maxBirdsx4);
for (a = 0; (a | 0) < (steps | 0); a = (a + 1) | 0) {
accel = toF(f32[(accelIndex & accelMask) + maxBirdsx8 >> 2]);
accelx4 = f4splat(accel);
accelIndex = (accelIndex + 4) | 0;
posDeltax4 = f4mul(point5x4, f4mul(accelx4, subTimeDeltaSquaredx4));
posDeltax4 = f4add(posDeltax4, f4mul(newVelx4, subTimeDeltax4));
newPosx4 = f4add(newPosx4, posDeltax4);
newVelx4 = f4add(newVelx4, f4mul(accelx4, subTimeDeltax4));
cmpx4 = f4greaterThan(newPosx4, maxPosx4);
if (b4any(cmpx4)) {
// Work around unimplemented 'neg' operation, using 0 - x.
newVelTruex4 = f4sub(zerox4, newVelx4);
newVelx4 = f4select(cmpx4, newVelTruex4, newVelx4);
}
}
f4store(u8, i & mk4, newPosx4);
f4store(u8, (i & mk4) + maxBirdsx4, newVelx4);
}
}
return update;
`
// Link the kernel, populate the flock, and time NUM_UPDATES update passes.
var ffi = {
    getActualBirds,
    accelDataSteps: ACCEL_DATA_STEPS
};
var fbirds = asmLink(asmCompile('global', 'imp', 'buffer', code), this, ffi, buffer);
init();
for (var i = 0; i < NUM_BIRDS; i++) {
    // NOTE(review): Math.exp ignores its second argument, so every bird gets
    // the same initial velocity e^2 — Math.pow was presumably intended.  Left
    // unchanged because the assertEq list below was generated from this
    // behavior; fixing it would require regenerating the expected values.
    addBird(i / 10, Math.exp(2, NUM_BIRDS - i));
}
var b = dateNow();
for (var j = 0; j < NUM_UPDATES; j++) {
    fbirds(16);
}
print(dateNow() - b);
// Expected bird positions after NUM_UPDATES passes, generated by
// generateAssertList() below — float32-rounded values, hence the long
// decimals.
assertEq(bufferF32[0], 0);
assertEq(bufferF32[1], 0.10000000149011612);
assertEq(bufferF32[2], 0.20000000298023224);
assertEq(bufferF32[3], 0.30000001192092896);
assertEq(bufferF32[4], 0.4000000059604645);
assertEq(bufferF32[5], 0.5);
assertEq(bufferF32[6], 0.6000000238418579);
assertEq(bufferF32[7], 0.699999988079071);
assertEq(bufferF32[8], 0.800000011920929);
assertEq(bufferF32[9], 0.8999999761581421);
assertEq(bufferF32[10], 1);
assertEq(bufferF32[11], 1.100000023841858);
assertEq(bufferF32[12], 1.2000000476837158);
assertEq(bufferF32[13], 1.2999999523162842);
assertEq(bufferF32[14], 1.399999976158142);
assertEq(bufferF32[15], 1.5);
assertEq(bufferF32[16], 1.600000023841858);
assertEq(bufferF32[17], 1.7000000476837158);
assertEq(bufferF32[18], 1.7999999523162842);
assertEq(bufferF32[19], 1.899999976158142);
assertEq(bufferF32[20], 2);
assertEq(bufferF32[21], 2.0999999046325684);
assertEq(bufferF32[22], 2.200000047683716);
assertEq(bufferF32[23], 2.299999952316284);
assertEq(bufferF32[24], 2.4000000953674316);
assertEq(bufferF32[25], 2.5);
assertEq(bufferF32[26], 2.5999999046325684);
assertEq(bufferF32[27], 2.700000047683716);
assertEq(bufferF32[28], 2.799999952316284);
assertEq(bufferF32[29], 2.9000000953674316);
// Code used to generate the assertEq list above.
// Prints one assertEq line per bird position; re-run after intentionally
// changing the simulation to refresh the expected values.
function generateAssertList() {
    var parts = [];
    for (var k = 0; k < NUM_BIRDS; k++)
        parts.push(`assertEq(bufferF32[${k}], ${bufferF32[k]});\n`);
    print(parts.join(''));
}
//generateAssertList();

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -13,6 +13,17 @@ asmLink(asmJS, this, null, asmJSBuf);
// A WebAssembly memory's buffer must not link as an asm.js heap.
var wasmMem = wasmEvalText('(module (memory 1 1) (export "mem" memory))').exports.mem;
assertAsmLinkFail(asmJS, this, null, wasmMem.buffer);
if (!getBuildConfiguration().x64 && isSimdAvailable() && this["SIMD"]) {
    // A SIMD.js-enabled module must refuse heaps already claimed by plain
    // asm.js or wasm, while multiple SIMD.js links against its own buffer
    // succeed (asmJS / asmJSBuf / BUF_MIN are defined earlier in this file).
    var simdJS = asmCompile('stdlib', 'ffis', 'buf', USE_ASM + 'var i32 = new stdlib.Int32Array(buf); var i32x4 = stdlib.SIMD.Int32x4; return {}');
    assertAsmLinkFail(simdJS, this, null, asmJSBuf);
    assertAsmLinkFail(simdJS, this, null, wasmMem.buffer);
    var simdJSBuf = new ArrayBuffer(BUF_MIN);
    asmLink(simdJS, this, null, simdJSBuf);
    asmLink(simdJS, this, null, simdJSBuf); // multiple SIMD.js instantiations succeed
    assertAsmLinkFail(asmJS, this, null, simdJSBuf); // but not asm.js
}
// Enable shared-memory atomics for the (off-screen) test that follows.
setJitCompilerOption('asmjs.atomics.enable', 1);
var sharedAsmJS = asmCompile('stdlib', 'ffis', 'buf',

Просмотреть файл

@ -0,0 +1,61 @@
// Skip quietly when the shell was built without SIMD support.
if (typeof SIMD === 'undefined' || !isSimdAvailable()) {
    print("won't run tests as simd extensions aren't activated yet");
    quit(0);
}
// Fuzz-bug regression 1: a float32 NaN (0/0) flows through splat into a
// checked SIMD parameter, with an effectful call s() in between; the module
// must validate and run without crashing.
(function(global) {
    "use asm";
    var frd = global.Math.fround;
    var fx4 = global.SIMD.Float32x4;
    var fc4 = fx4.check;
    var fsp = fx4.splat;
    function s(){}
    function d(x){x=fc4(x);}
    function e() {
        var x = frd(0);
        x = frd(x / x);
        s();
        d(fsp(x));
    }
    return e;
})(this)();
// Fuzz-bug regression 2: Int32x4.select with a Bool32x4 mask, result passed
// through check on return.  (Note: semicolons are deliberately sparse —
// this reproduces the original fuzzer input.)
(function(m) {
    "use asm"
    var k = m.SIMD.Bool32x4
    var g = m.SIMD.Int32x4
    var gc = g.check;
    var h = g.select
    function f() {
        var x = k(0, 0, 0, 0)
        var y = g(1, 2, 3, 4)
        return gc(h(x, y, y))
    }
    return f;
})(this)();
// Fuzz-bug regression 3: a Float32x4 built from float parameters.  Both
// exports are then called with no arguments (asm.js coerces the missing
// values), which must not crash.
t = (function(global) {
    "use asm"
    var toF = global.Math.fround
    var f4 = global.SIMD.Float32x4
    var f4c = f4.check
    function p(x, y, width, value, max_iterations) {
        x = x | 0
        y = y | 0
        width = width | 0
        value = value | 0
        max_iterations = max_iterations | 0
    }
    function m(xf, yf, yd, max_iterations) {
        xf = toF(xf)
        yf = toF(yf)
        yd = toF(yd)
        max_iterations = max_iterations | 0
        var _ = f4(0, 0, 0, 0), c_im4 = f4(0, 0, 0, 0)
        c_im4 = f4(yf, yd, yd, yf)
        return f4c(c_im4);
    }
    return {p:p,m:m};
})(this)
t.p();
t.m();

Просмотреть файл

@ -18,6 +18,26 @@ for (let threshold of [0, 50, 100, 5000, -1]) {
return h
`)()(), 45);
if (isSimdAvailable() && this.SIMD) {
    var buf = new ArrayBuffer(BUF_MIN);
    new Int32Array(buf)[0] = 10;
    new Float32Array(buf)[1] = 42;
    // h -> g -> f call chain through SIMD loads: f extracts int lane 0 (10);
    // g converts a float load, extracts lane 1 (42.0 -> 42) and adds f's
    // result — so h(0) must return 52.
    assertEq(asmCompile('stdlib', 'ffis', 'buf',
                        USE_ASM + `
var H = new stdlib.Uint8Array(buf);
var i4 = stdlib.SIMD.Int32x4;
var f4 = stdlib.SIMD.Float32x4;
var i4load = i4.load;
var f4load = f4.load;
var toi4 = i4.fromFloat32x4;
var i4ext = i4.extractLane;
function f(i) { i=i|0; return i4ext(i4load(H, i), 0)|0 }
function g(i) { i=i|0; return (i4ext(toi4(f4load(H, i)),1) + (f(i)|0))|0 }
function h(i) { i=i|0; return g(i)|0 }
return h
`)(this, null, buf)(0), 52);
}
// Sanity: Gecko profiling can be toggled around a plain asm.js call chain.
enableGeckoProfiling();
asmLink(asmCompile(USE_ASM + 'function f() {} function g() { f() } function h() { g() } return h'))();
disableGeckoProfiling();

Просмотреть файл

@ -210,6 +210,20 @@ if (jitOptions['baseline.enable']) {
assertStackContainsSeq(stacks, ">,f1,>,<,f1,>,>,<,f1,>,f2,>,<,f1,>,<,f2,>,<,f1,>,f2,>,<,f1,>,>,<,f1,>,<,f1,>,f1,>,>");
}
if (isSimdAvailable() && typeof SIMD !== 'undefined') {
    // SIMD out-of-bounds exit
    var buf = new ArrayBuffer(0x10000);
    // Fixed: the SIMD type is named 'Float32x4' (capitalized), as everywhere
    // else in these tests; the lowercase 'float32x4' property does not exist,
    // so the module could never validate as asm.js.
    var f = asmLink(asmCompile('g','ffi','buf', USE_ASM + 'var f4=g.SIMD.Float32x4; var f4l=f4.load; var u8=new g.Uint8Array(buf); function f(i) { i=i|0; return f4l(u8, 0xFFFF + i | 0); } return f'), this, {}, buf);
    enableSingleStepProfiling();
    // Loading 16 bytes at offset 0xFFFF + 4 runs past the 0x10000 heap.
    assertThrowsInstanceOf(() => f(4), RangeError);
    var stacks = disableSingleStepProfiling();
    // TODO check that expected is actually the correctly expected string, when
    // SIMD is implemented on ARM.
    assertStackContainsSeq(stacks, ">,f,>,inline stub,f,>");
}
// Thunks
// Zero the jump threshold so the following call chain is compiled with
// jump thunks.
setJitCompilerOption("jump-threshold", 0);
var h = asmLink(asmCompile(USE_ASM + 'function f() {} function g() { f() } function h() { g() } return h'));

Просмотреть файл

@ -0,0 +1,525 @@
load(libdir + "asm.js");
load(libdir + "simd.js");
load(libdir + "asserts.js");
// Set to true to see more JS debugging spew.
const DEBUG = false;
// Requires SIMD support in the shell; bail out quietly otherwise.
if (!isSimdAvailable()) {
    DEBUG && print("won't run tests as simd extensions aren't activated yet");
    quit(0);
}
// Tests for 16x8 SIMD types: Int16x8, Uint16x8, Bool16x8.
// asm.js import-statement snippets, concatenated into module sources below.
const I16x8 = 'var i16x8 = glob.SIMD.Int16x8;'
const I16x8CHK = 'var i16x8chk = i16x8.check;'
const I16x8EXT = 'var i16x8ext = i16x8.extractLane;'
const I16x8REP = 'var i16x8rep = i16x8.replaceLane;'
const I16x8U16x8 = 'var i16x8u16x8 = i16x8.fromUint16x8Bits;'
const U16x8 = 'var u16x8 = glob.SIMD.Uint16x8;'
const U16x8CHK = 'var u16x8chk = u16x8.check;'
const U16x8EXT = 'var u16x8ext = u16x8.extractLane;'
const U16x8REP = 'var u16x8rep = u16x8.replaceLane;'
const U16x8I16x8 = 'var u16x8i16x8 = u16x8.fromInt16x8Bits;'
const B16x8 = 'var b16x8 = glob.SIMD.Bool16x8;'
const B16x8CHK = 'var b16x8chk = b16x8.check;'
const B16x8EXT = 'var b16x8ext = b16x8.extractLane;'
const B16x8REP = 'var b16x8rep = b16x8.replaceLane;'
const INT16_MAX = 0x7fff
// NOTE(review): -0x10000 is -65536; the true int16 minimum is -0x8000
// (-32768).  Left unchanged because code beyond this excerpt may rely on the
// current value — confirm before fixing.
const INT16_MIN = -0x10000
const UINT16_MAX = 0xffff
// Linking
// Each module must link against a stdlib exposing the matching SIMD type.
assertEq(asmLink(asmCompile('glob', USE_ASM + I16x8 + "function f() {} return f"), {SIMD:{Int16x8: SIMD.Int16x8}})(), undefined);
assertEq(asmLink(asmCompile('glob', USE_ASM + U16x8 + "function f() {} return f"), {SIMD:{Uint16x8: SIMD.Uint16x8}})(), undefined);
assertEq(asmLink(asmCompile('glob', USE_ASM + B16x8 + "function f() {} return f"), {SIMD:{Bool16x8: SIMD.Bool16x8}})(), undefined);
// Local variable of Int16x8 type.
// Declarations must call the imported constructor with exactly 8 integer
// literals; anything else is an asm.js type error.
assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Int16x8(1,2,3,4,5,6,7,8);} return f");
assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8;} return f");
assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8();} return f");
assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1);} return f");
assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4);} return f");
assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4,5,6,7,8.0);} return f");
assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4,5,6,7,8,9);} return f");
assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4,5,6,7,8|0);} return f");
assertEq(asmLink(asmCompile('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4,5,6,7,8);} return f"), this)(), undefined);
assertEq(asmLink(asmCompile('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4,5,6,7," + (INT16_MAX + 1) + ");} return f"), this)(), undefined);
// Local variable of Uint16x8 type.
assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Uint16x8(1,2,3,4,5,6,7,8);} return f");
assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8;} return f");
assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8();} return f");
assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1);} return f");
assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4);} return f");
assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4,5,6,7,8.0);} return f");
assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4,5,6,7,8,9);} return f");
assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4,5,6,7,8|0);} return f");
assertEq(asmLink(asmCompile('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4,5,6,7,8);} return f"), this)(), undefined);
assertEq(asmLink(asmCompile('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4,5,6,7," + (UINT16_MAX + 1) + ");} return f"), this)(), undefined);
// Local variable of Bool16x8 type.
assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Bool16x8(1,0,0,0, 0,0,0,0);} return f");
assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8;} return f");
assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8();} return f");
assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1);} return f");
assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1,0,0,0);} return f");
assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1,0,0,0, 0,0,0,1.0);} return f");
assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1,0,0,0, 0,0,0,0|0);} return f");
assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1,0,0,0, 0,0,0,0, 1);} return f");
assertEq(asmLink(asmCompile('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1,0,0,0, 0,-1,-2,0);} return f"), this)(), undefined);
// Global variable of Int16x8 type.
// Globals come in through the checked ffi import and may be reassigned
// inside the module.
assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + I16x8 + I16x8CHK + "var g=i16x8chk(ffi.g); function f() { return i16x8chk(g); } return f"), this,
                       {g: SIMD.Int16x8(1,2,3,4,5,6,7,8)})(), [1,2,3,4,5,6,7,8]);
assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + I16x8 + I16x8CHK + "var g=i16x8chk(ffi.g); function f() { g=i16x8(5,6,7,8,9,10,11,12); return i16x8chk(g); } return f"), this,
                       {g: SIMD.Int16x8(1,2,3,4,5,6,7,8)})(), [5,6,7,8,9,10,11,12]);
// Global variable of Bool16x8 type.
assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + B16x8 + B16x8CHK + "var g=b16x8chk(ffi.g); function f() { return b16x8chk(g); } return f"), this,
                       {g: SIMD.Bool16x8(1,1,0,1,0,0,1,0)})(), [true,true,false,true,false,false,true,false]);
assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + B16x8 + B16x8CHK + "var g=b16x8chk(ffi.g); function f() { g=b16x8(1,1,0,1,0,1,1,1); return b16x8chk(g); } return f"), this,
                       {g: SIMD.Bool16x8(1,1,0,1,0,0,1,0)})(), [true,true,false,true,false,true,true,true]);
// Unsigned SIMD globals are not allowed.
assertAsmTypeFail('glob', 'ffi', USE_ASM + U16x8 + U16x8CHK + "var g=u16x8chk(ffi.g); function f() { } return f");
// Only signed Int16x8 allowed as return value.
assertEqVecArr(asmLink(asmCompile('glob', USE_ASM + I16x8 + "function f() {return i16x8(1,2,3,4,5,6,7,8);} return f"), this)(),
               [1, 2, 3, 4, 5, 6, 7, 8]);
assertEqVecArr(asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK + "function f() {return i16x8chk(i16x8(1,2,3,32771,5,6,7,8));} return f"), this)(),
               [1, 2, 3, -32765, 5, 6, 7, 8]);
assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {return u16x8(1,2,3,4,5,6,7,8);} return f");
assertAsmTypeFail('glob', USE_ASM + U16x8 + U16x8CHK + "function f() {return u16x8chk(u16x8(1,2,3,4,5,6,7,8));} return f");
// Test splat.
// Reference implementation: an 8-element array with every lane equal to x.
function splat(x) {
    return Array.from({length: 8}, () => x);
}
// Boolean splat, with a dynamic argument and with constant 0/1 arguments.
splatB = asmLink(asmCompile('glob', USE_ASM + B16x8 +
                            'var splat = b16x8.splat;' +
                            'function f(x) { x = x|0; return splat(x); } return f'), this);
assertEqVecArr(splatB(true), splat(true));
assertEqVecArr(splatB(false), splat(false));
splatB0 = asmLink(asmCompile('glob', USE_ASM + B16x8 +
                             'var splat = b16x8.splat;' +
                             'function f() { var x = 0; return splat(x); } return f'), this);
assertEqVecArr(splatB0(), splat(false));
splatB1 = asmLink(asmCompile('glob', USE_ASM + B16x8 +
                             'var splat = b16x8.splat;' +
                             'function f() { var x = 1; return splat(x); } return f'), this);
assertEqVecArr(splatB1(), splat(true));
// Signed splat: lanes are the argument truncated to int16 (x << 16 >> 16).
splatI = asmLink(asmCompile('glob', USE_ASM + I16x8 +
                            'var splat = i16x8.splat;' +
                            'function f(x) { x = x|0; return splat(x); } return f'), this);
for (let x of [0, 1, -1, 0x12345, 0x1234, -1000, -1000000]) {
    assertEqVecArr(splatI(x), splat(x << 16 >> 16));
}
splatIc = asmLink(asmCompile('glob', USE_ASM + I16x8 +
                             'var splat = i16x8.splat;' +
                             'function f() { var x = 100; return splat(x); } return f'), this);
assertEqVecArr(splatIc(), splat(100))
// Unsigned splat: the result is returned to JS as Int16x8 bits (unsigned
// vectors cannot cross the asm.js boundary) and reinterpreted for checking.
splatU = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8U16x8 +
                            'var splat = u16x8.splat;' +
                            'function f(x) { x = x|0; return i16x8u16x8(splat(x)); } return f'), this);
for (let x of [0, 1, -1, 0x12345, 0x1234, -1000, -1000000]) {
    // Fixed: exercise splatU (the function under test) — this loop previously
    // called splatI by mistake, leaving the dynamic unsigned splat untested.
    // Expected lane values are unchanged: the two vectors are bit-identical.
    assertEqVecArr(SIMD.Uint16x8.fromInt16x8Bits(splatU(x)), splat(x << 16 >>> 16));
}
splatUc = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8U16x8 +
                             'var splat = u16x8.splat;' +
                             'function f() { var x = 200; return i16x8u16x8(splat(x)); } return f'), this);
assertEqVecArr(SIMD.Uint16x8.fromInt16x8Bits(splatUc()), splat(200))
// Test extractLane.
//
// The lane index must be a literal int, and we generate different code for
// different lanes.
function extractI(a, i) {
    // Compile a fresh module extracting constant lane i from the vector
    // built from array a.
    return asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8EXT +
                              `function f() {var x=i16x8(${a.join(',')}); return i16x8ext(x, ${i})|0; } return f`), this)();
}
a = [-1,2,-3,4,-5,6,-7,-8];
for (var i = 0; i < 8; i++)
    assertEq(extractI(a, i), a[i]);
a = a.map(x => -x);
for (var i = 0; i < 8; i++)
    assertEq(extractI(a, i), a[i]);
function extractU(a, i) {
    // Unsigned lanes are returned through |0, so values stay in [0, 0xffff].
    return asmLink(asmCompile('glob', USE_ASM + U16x8 + U16x8EXT +
                              `function f() {var x=u16x8(${a.join(',')}); return u16x8ext(x, ${i})|0; } return f`), this)();
}
a = [1,255,12,13,14,150,200,3];
for (var i = 0; i < 8; i++)
    assertEq(extractU(a, i), a[i]);
a = a.map(x => UINT16_MAX-x);
for (var i = 0; i < 8; i++)
    assertEq(extractU(a, i), a[i]);
function extractB(a, i) {
    // Boolean lanes come back as 0/1 integers.
    return asmLink(asmCompile('glob', USE_ASM + B16x8 + B16x8EXT +
                              `function f() {var x=b16x8(${a.join(',')}); return b16x8ext(x, ${i})|0; } return f`), this)();
}
a = [1,1,0,1, 1,0,0,0];
for (var i = 0; i < 8; i++)
    assertEq(extractB(a, i), a[i]);
a = a.map(x => 1-x);
for (var i = 0; i < 8; i++)
    assertEq(extractB(a, i), a[i]);
// Test replaceLane.
function replaceI(a, i) {
    // Module replacing constant lane i of the vector built from a with the
    // runtime argument v.
    return asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8REP +
                              `function f(v) {v=v|0; var x=i16x8(${a.join(',')}); return i16x8rep(x,${i},v); } return f`), this);
}
a = [-1,2,-3,4,-5,6,-7,-9];
for (var i = 0; i < 8; i++) {
    var f = replaceI(a, i);
    var b = a.slice(0);
    b[i] = -20;
    assertEqVecArr(f(-20), b);
}
function replaceU(a, i) {
    // Unsigned result crosses the boundary as Int16x8 bits; callers
    // reinterpret it back to Uint16x8.
    return asmLink(asmCompile('glob', USE_ASM + U16x8 + U16x8REP + I16x8 + I16x8U16x8 +
                              `function f(v) {v=v|0; var x=u16x8(${a.join(',')}); return i16x8u16x8(u16x8rep(x,${i},v)); } return f`), this);
}
a = [65000-1,2,65000-3,4,65000-5,6,65000-7,65000-9];
for (var i = 0; i < 8; i++) {
    var rawf = replaceU(a, i);
    var f = x => SIMD.Uint16x8.fromInt16x8Bits(rawf(x))
    var b = a.slice(0);
    b[i] = 1000;
    assertEqVecArr(f(1000), b);
}
function replaceB(a, i) {
    return asmLink(asmCompile('glob', USE_ASM + B16x8 + B16x8REP +
                              `function f(v) {v=v|0; var x=b16x8(${a.join(',')}); return b16x8rep(x,${i},v); } return f`), this);
}
a = [1,1,0,1,1,0,0,0];
for (var i = 0; i < 8; i++) {
    var f = replaceB(a, i);
    var b = a.slice(0);
    let v = 1 - a[i];
    b[i] = v;
    assertEqVecArr(f(v), b.map(x => !!x));
}
// Test select.
// Lanes where the mask is true come from a, the others from b.
selectI = asmLink(asmCompile('glob', USE_ASM + I16x8 + B16x8 + B16x8CHK +
                             'var select = i16x8.select;' +
                             'var a = i16x8(-1,2,-3,4,-5, 6,-7, 8);' +
                             'var b = i16x8( 5,6, 7,8, 9,10,11,12);' +
                             'function f(x) { x = b16x8chk(x); return select(x, a, b); } return f'), this);
assertEqVecArr(selectI(SIMD.Bool16x8( 0,0, 1,0, 1,1, 1, 0)),
               [ 5,6,-3,8,-5,6,-7,12]);
selectU = asmLink(asmCompile('glob', USE_ASM + I16x8 + B16x8 + B16x8CHK + U16x8 + I16x8U16x8 + U16x8I16x8 +
                             'var select = u16x8.select;' +
                             'var a = i16x8(-1,2,-3,4,-5, 6,-7, 8);' +
                             'var b = i16x8( 5,6, 7,8, 9,10,11,12);' +
                             'function f(x) { x = b16x8chk(x); return i16x8u16x8(select(x, u16x8i16x8(a), u16x8i16x8(b))); } return f'), this);
assertEqVecArr(selectU(SIMD.Bool16x8( 0,0, 1,0, 1,1, 1, 0)),
               [ 5,6,-3,8,-5,6,-7,12]);
// Test swizzle.
// Reference implementation: lane i of the result is vec[lanes[i]].
function swizzle(vec, lanes) {
    return Array.from({length: 8}, (unused, i) => vec[lanes[i]]);
}
function swizzleI(lanes) {
    // Compile a module with the given constant lane list and compare it
    // against the JS reference on two int16 vectors.
    let asm = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
                                 'var swz = i16x8.swizzle;' +
                                 `function f(a) { a = i16x8chk(a); return swz(a, ${lanes.join()}); } return f`), this);
    let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
    let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >> 16);
    let v1 = SIMD.Int16x8(...a1);
    let v2 = SIMD.Int16x8(...a2);
    assertEqVecArr(asm(v1), swizzle(a1, lanes));
    assertEqVecArr(asm(v2), swizzle(a2, lanes));
}
swizzleI([3, 4, 7, 1, 4, 3, 1, 2]);
swizzleI([0, 0, 0, 0, 0, 0, 0, 0]);
swizzleI([7, 7, 7, 7, 7, 7, 7, 7]);
function swizzleU(lanes) {
    // Unsigned variant: same lane lists, routed through the bit-conversions.
    let asm = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK + U16x8 + U16x8I16x8 + I16x8U16x8 +
                                 'var swz = u16x8.swizzle;' +
                                 `function f(a) { a = i16x8chk(a); return i16x8u16x8(swz(u16x8i16x8(a), ${lanes.join()})); } return f`), this);
    let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
    let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >> 16);
    let v1 = SIMD.Int16x8(...a1);
    let v2 = SIMD.Int16x8(...a2);
    assertEqVecArr(asm(v1), swizzle(a1, lanes));
    assertEqVecArr(asm(v2), swizzle(a2, lanes));
}
swizzleU([3, 4, 7, 1, 4, 3, 1, 2]);
swizzleU([0, 0, 0, 0, 0, 0, 0, 0]);
swizzleU([7, 7, 7, 7, 7, 7, 7, 7]);
// Out-of-range lane indexes.
assertAsmTypeFail('glob', USE_ASM + I16x8 + 'var swz = i16x8.swizzle; ' +
                  'function f() { var x=i16x8(0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8); } return f');
assertAsmTypeFail('glob', USE_ASM + U16x8 + 'var swz = u16x8.swizzle; ' +
                  'function f() { var x=u16x8(0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8); } return f');
// Missing lane indexes.
assertAsmTypeFail('glob', USE_ASM + I16x8 + 'var swz = i16x8.swizzle; ' +
                  'function f() { var x=i16x8(0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7); } return f');
assertAsmTypeFail('glob', USE_ASM + U16x8 + 'var swz = u16x8.swizzle; ' +
                  'function f() { var x=u16x8(0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7); } return f');
// Test shuffle.
// Reference implementation: lanes index into the 16-element concatenation
// of the two input vectors.
function shuffle(vec1, vec2, lanes) {
    const combined = vec1.concat(vec2);
    return Array.from({length: 8}, (unused, i) => combined[lanes[i]]);
}
function shuffleI(lanes) {
    // Shuffle picks from the 16 lanes of the two concatenated input vectors.
    let asm = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
                                 'var shuf = i16x8.shuffle;' +
                                 `function f(a1, a2) { a1 = i16x8chk(a1); a2 = i16x8chk(a2); return shuf(a1, a2, ${lanes.join()}); } return f`), this);
    let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
    let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >> 16);
    let v1 = SIMD.Int16x8(...a1);
    let v2 = SIMD.Int16x8(...a2);
    assertEqVecArr(asm(v1, v2), shuffle(a1, a2, lanes));
}
function shuffleU(lanes) {
    // Unsigned variant, routed through the bit-conversions.
    let asm = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK + U16x8 + U16x8I16x8 + I16x8U16x8 +
                                 'var shuf = u16x8.shuffle;' +
                                 'function f(a1, a2) { a1 = i16x8chk(a1); a2 = i16x8chk(a2); ' +
                                 `return i16x8u16x8(shuf(u16x8i16x8(a1), u16x8i16x8(a2), ${lanes.join()})); } return f`), this);
    let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
    let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >> 16);
    let v1 = SIMD.Int16x8(...a1);
    let v2 = SIMD.Int16x8(...a2);
    assertEqVecArr(asm(v1, v2), shuffle(a1, a2, lanes));
}
shuffleI([0, 0, 0, 0, 0, 0, 0, 0])
shuffleI([15, 15, 15, 15, 15, 15, 15, 15])
shuffleI([6, 2, 0, 14, 6, 10, 11, 1])
shuffleU([7, 7, 7, 7, 7, 7, 7, 7])
shuffleU([8, 15, 15, 15, 15, 15, 15, 15])
shuffleU([6, 2, 0, 14, 6, 10, 11, 1])
// Test unary operators.
function unaryI(opname, lanefunc) {
    // Apply i16x8.<opname> in asm.js and compare lane-by-lane against the
    // JS reference lanefunc.
    let simdfunc = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
                                      `var fut = i16x8.${opname};` +
                                      'function f(v) { v = i16x8chk(v); return fut(v); } return f'), this);
    let a = [65000-1,2,65000-3,4,65000-5,6,65000-7,65000-9];
    let v = SIMD.Int16x8(...a);
    assertEqVecArr(simdfunc(v), a.map(lanefunc));
}
function unaryU(opname, lanefunc) {
    // Unsigned variant: input/output cross the boundary as Int16x8 bits.
    let simdfunc = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8CHK + U16x8I16x8 + I16x8U16x8 +
                                      `var fut = u16x8.${opname};` +
                                      'function f(v) { v = i16x8chk(v); return i16x8u16x8(fut(u16x8i16x8(v))); } return f'), this);
    let a = [65000-1,2,65000-3,4,65000-5,6,65000-7,65000-9];
    let v = SIMD.Int16x8(...a);
    assertEqVecArr(SIMD.Uint16x8.fromInt16x8Bits(simdfunc(v)), a.map(lanefunc));
}
function unaryB(opname, lanefunc) {
    let simdfunc = asmLink(asmCompile('glob', USE_ASM + B16x8 + B16x8CHK +
                                      `var fut = b16x8.${opname};` +
                                      'function f(v) { v = b16x8chk(v); return fut(v); } return f'), this);
    let a = [1,1,0,1,1,0,0,0];
    let v = SIMD.Bool16x8(...a);
    assertEqVecArr(simdfunc(v), a.map(lanefunc));
}
// The reference functions re-truncate results to 16 bits (signed or
// unsigned) to match the lane width.
unaryI('not', x => ~x << 16 >> 16);
unaryU('not', x => ~x << 16 >>> 16);
unaryB('not', x => !x);
unaryI('neg', x => -x << 16 >> 16);
unaryU('neg', x => -x << 16 >>> 16);
// Test binary operators.
// Pairwise application: result[i] = f(a1[i], a2[i]).  The arrays must have
// equal length.
function zipmap(a1, a2, f) {
    assertEq(a1.length, a2.length);
    return a1.map((x, i) => f(x, a2[i]));
}
function binaryI(opname, lanefunc) {
    // Apply i16x8.<opname> pairwise and compare with the zipmap reference.
    let simdfunc = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
                                      `var fut = i16x8.${opname};` +
                                      'function f(v1, v2) { v1 = i16x8chk(v1); v2 = i16x8chk(v2); return fut(v1, v2); } return f'), this);
    let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
    let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >> 16);
    let ref = zipmap(a1, a2, lanefunc);
    let v1 = SIMD.Int16x8(...a1);
    let v2 = SIMD.Int16x8(...a2);
    assertEqVecArr(simdfunc(v1, v2), ref);
}
function binaryU(opname, lanefunc) {
    // Unsigned variant: vectors cross the boundary as Int16x8 bits and the
    // result is reinterpreted back to Uint16x8 before comparing.
    let simdfunc = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8CHK + U16x8I16x8 + I16x8U16x8 +
                                      `var fut = u16x8.${opname};` +
                                      'function f(v1, v2) { v1 = i16x8chk(v1); v2 = i16x8chk(v2); return i16x8u16x8(fut(u16x8i16x8(v1), u16x8i16x8(v2))); } return f'), this);
    let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >>> 16);
    let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >>> 16);
    let ref = zipmap(a1, a2, lanefunc);
    let v1 = SIMD.Int16x8(...a1);
    let v2 = SIMD.Int16x8(...a2);
    let res = SIMD.Uint16x8.fromInt16x8Bits(simdfunc(v1, v2));
    assertEqVecArr(res, ref);
}
// Binary operators on Bool16x8, mirroring binaryI.  Fixed: the previous
// version was copy-pasted from unaryB — it invoked the two-argument asm.js
// function with a single vector (so b16x8chk(undefined) would throw) and
// mapped a one-argument lanefunc, so it could never have worked.
function binaryB(opname, lanefunc) {
    let simdfunc = asmLink(asmCompile('glob', USE_ASM + B16x8 + B16x8CHK +
                                      `var fut = b16x8.${opname};` +
                                      'function f(v1, v2) { v1 = b16x8chk(v1); v2 = b16x8chk(v2); return fut(v1, v2); } return f'), this);
    let a1 = [1,1,0,1,1,0,0,0];
    let a2 = [1,0,1,1,0,1,0,0];
    let ref = zipmap(a1, a2, lanefunc);
    let v1 = SIMD.Bool16x8(...a1);
    let v2 = SIMD.Bool16x8(...a2);
    assertEqVecArr(simdfunc(v1, v2), ref);
}
// Arithmetic and bitwise ops; the references re-truncate to 16 bits
// (signed >> / unsigned >>>) to match the lane width.
binaryI('add', (x, y) => (x + y) << 16 >> 16);
binaryI('sub', (x, y) => (x - y) << 16 >> 16);
binaryI('mul', (x, y) => (x * y) << 16 >> 16);
binaryU('add', (x, y) => (x + y) << 16 >>> 16);
binaryU('sub', (x, y) => (x - y) << 16 >>> 16);
binaryU('mul', (x, y) => (x * y) << 16 >>> 16);
binaryI('and', (x, y) => (x & y) << 16 >> 16);
binaryI('or', (x, y) => (x | y) << 16 >> 16);
binaryI('xor', (x, y) => (x ^ y) << 16 >> 16);
binaryU('and', (x, y) => (x & y) << 16 >>> 16);
binaryU('or', (x, y) => (x | y) << 16 >>> 16);
binaryU('xor', (x, y) => (x ^ y) << 16 >>> 16);
// Clamp x to the inclusive range [lo, hi].
function sat(x, lo, hi) {
    if (x < lo)
        return lo;
    return x > hi ? hi : x;
}
// Saturate to signed / unsigned 16-bit range.
function isat(x) { return sat(x, -32768, 32767); }
function usat(x) { return sat(x, 0, 0xffff); }
// Saturating arithmetic clamps to the lane range instead of wrapping.
binaryI('addSaturate', (x, y) => isat(x + y))
binaryI('subSaturate', (x, y) => isat(x - y))
binaryU('addSaturate', (x, y) => usat(x + y))
binaryU('subSaturate', (x, y) => usat(x - y))
// Test shift operators.
// Apply f(x, s) to every element of a, with the scalar s held fixed.
function zip1map(a, s, f) {
    let out = [];
    for (let x of a)
        out.push(f(x, s));
    return out;
}
function shiftI(opname, lanefunc) {
    // Exercises both a dynamic and a constant shift amount; the reference
    // functions mask the amount to 0..15 (the `& 15` below).
    let simdfunc = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
                                      `var fut = i16x8.${opname};` +
                                      'function f(v, s) { v = i16x8chk(v); s = s|0; return fut(v, s); } return f'), this);
    let a = [-1,2,-3,0x80,0x7f,6,0x8000,0x7fff];
    let v = SIMD.Int16x8(...a);
    for (let s of [0, 1, 2, 6, 7, 8, 9, 10, 16, 255, -1, -8, -7, -1000]) {
        let ref = zip1map(a, s, lanefunc);
        // 1. Test dynamic shift amount.
        assertEqVecArr(simdfunc(v, s), ref);
        // 2. Test constant shift amount.
        let cstf = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
                                      `var fut = i16x8.${opname};` +
                                      `function f(v) { v = i16x8chk(v); return fut(v, ${s}); } return f`), this);
        assertEqVecArr(cstf(v, s), ref);
    }
}
function shiftU(opname, lanefunc) {
    // Unsigned variant: vectors cross the boundary as Int16x8 bits.
    let simdfunc = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8CHK + U16x8I16x8 + I16x8U16x8 +
                                      `var fut = u16x8.${opname};` +
                                      'function f(v, s) { v = i16x8chk(v); s = s|0; return i16x8u16x8(fut(u16x8i16x8(v), s)); } return f'), this);
    let a = [-1,2,-3,0x80,0x7f,6,0x8000,0x7fff];
    let v = SIMD.Int16x8(...a);
    for (let s of [0, 1, 2, 6, 7, 8, 9, 10, 16, 255, -1, -8, -7, -1000]) {
        let ref = zip1map(a, s, lanefunc);
        // 1. Test dynamic shift amount.
        assertEqVecArr(SIMD.Uint16x8.fromInt16x8Bits(simdfunc(v, s)), ref);
        // 2. Test constant shift amount.
        let cstf = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8CHK + U16x8I16x8 + I16x8U16x8 +
                                      `var fut = u16x8.${opname};` +
                                      `function f(v) { v = i16x8chk(v); return i16x8u16x8(fut(u16x8i16x8(v), ${s})); } return f`), this);
        assertEqVecArr(SIMD.Uint16x8.fromInt16x8Bits(cstf(v, s)), ref);
    }
}
shiftI('shiftLeftByScalar', (x,s) => (x << (s & 15)) << 16 >> 16);
shiftU('shiftLeftByScalar', (x,s) => (x << (s & 15)) << 16 >>> 16);
shiftI('shiftRightByScalar', (x,s) => ((x << 16 >> 16) >> (s & 15)) << 16 >> 16);
shiftU('shiftRightByScalar', (x,s) => ((x << 16 >>> 16) >>> (s & 15)) << 16 >>> 16);
// Comparisons.
// Test an Int16x8 lane-wise comparison; the input arrays are sign-extended to
// 16-bit values so lanefunc compares the same values the SIMD op sees.
function compareI(opname, lanefunc) {
let simdfunc = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
`var fut = i16x8.${opname};` +
'function f(v1, v2) { v1 = i16x8chk(v1); v2 = i16x8chk(v2); return fut(v1, v2); } return f'), this);
let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >> 16);
let ref = zipmap(a1, a2, lanefunc);
let v1 = SIMD.Int16x8(...a1);
let v2 = SIMD.Int16x8(...a2);
assertEqVecArr(simdfunc(v1, v2), ref);
}
// Same for Uint16x8: inputs are bitcast from Int16x8 inside asm.js, and the
// reference arrays are zero-extended so comparisons are unsigned.
function compareU(opname, lanefunc) {
let simdfunc = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK + U16x8 + U16x8I16x8 +
`var fut = u16x8.${opname};` +
'function f(v1, v2) { v1 = i16x8chk(v1); v2 = i16x8chk(v2); return fut(u16x8i16x8(v1), u16x8i16x8(v2)); } return f'), this);
let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >>> 16);
let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >>> 16);
let ref = zipmap(a1, a2, lanefunc);
let v1 = SIMD.Int16x8(...a1);
let v2 = SIMD.Int16x8(...a2);
assertEqVecArr(simdfunc(v1, v2), ref);
}
compareI("equal", (x,y) => x == y);
compareU("equal", (x,y) => x == y);
compareI("notEqual", (x,y) => x != y);
compareU("notEqual", (x,y) => x != y);
compareI("lessThan", (x,y) => x < y);
compareU("lessThan", (x,y) => x < y);
compareI("lessThanOrEqual", (x,y) => x <= y);
compareU("lessThanOrEqual", (x,y) => x <= y);
compareI("greaterThan", (x,y) => x > y);
compareU("greaterThan", (x,y) => x > y);
compareI("greaterThanOrEqual", (x,y) => x >= y);
compareU("greaterThanOrEqual", (x,y) => x >= y);

Просмотреть файл

@ -0,0 +1,539 @@
load(libdir + "asm.js");
load(libdir + "simd.js");
load(libdir + "asserts.js");
// Set to true to see more JS debugging spew.
const DEBUG = false;
if (!isSimdAvailable()) {
DEBUG && print("won't run tests as simd extensions aren't activated yet");
quit(0);
}
// Tests for 8x16 SIMD types: Int8x16, Uint8x16, Bool8x16.
// asm.js source snippets importing each type and its operations; tests
// concatenate whichever snippets they need into a module body.
const I8x16 = 'var i8x16 = glob.SIMD.Int8x16;'
const I8x16CHK = 'var i8x16chk = i8x16.check;'
const I8x16EXT = 'var i8x16ext = i8x16.extractLane;'
const I8x16REP = 'var i8x16rep = i8x16.replaceLane;'
const I8x16U8x16 = 'var i8x16u8x16 = i8x16.fromUint8x16Bits;'
const U8x16 = 'var u8x16 = glob.SIMD.Uint8x16;'
const U8x16CHK = 'var u8x16chk = u8x16.check;'
const U8x16EXT = 'var u8x16ext = u8x16.extractLane;'
const U8x16REP = 'var u8x16rep = u8x16.replaceLane;'
const U8x16I8x16 = 'var u8x16i8x16 = u8x16.fromInt8x16Bits;'
const B8x16 = 'var b8x16 = glob.SIMD.Bool8x16;'
const B8x16CHK = 'var b8x16chk = b8x16.check;'
const B8x16EXT = 'var b8x16ext = b8x16.extractLane;'
const B8x16REP = 'var b8x16rep = b8x16.replaceLane;'
const INT8_MAX = 127
const INT8_MIN = -128
const UINT8_MAX = 255
// Linking
// Each 8x16 type must link when the matching SIMD constructor is supplied.
assertEq(asmLink(asmCompile('glob', USE_ASM + I8x16 + "function f() {} return f"), {SIMD:{Int8x16: SIMD.Int8x16}})(), undefined);
assertEq(asmLink(asmCompile('glob', USE_ASM + U8x16 + "function f() {} return f"), {SIMD:{Uint8x16: SIMD.Uint8x16}})(), undefined);
assertEq(asmLink(asmCompile('glob', USE_ASM + B8x16 + "function f() {} return f"), {SIMD:{Bool8x16: SIMD.Bool8x16}})(), undefined);
// Local variable of Int8x16 type.
// Literal initializers must name an imported constructor and supply exactly 16
// int literals; wrong arity, float literals, or expressions are type errors.
assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Int8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f");
assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16;} return f");
assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16();} return f");
assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1);} return f");
assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4);} return f");
assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16.0);} return f");
assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17);} return f");
assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16|0);} return f");
assertEq(asmLink(asmCompile('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f"), this)(), undefined);
// Out-of-range int literals are accepted and wrap to the lane width.
assertEq(asmLink(asmCompile('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15," + (INT8_MAX + 1) + ");} return f"), this)(), undefined);
// Local variable of Uint8x16 type.
assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Uint8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f");
assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16;} return f");
assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16();} return f");
assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1);} return f");
assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4);} return f");
assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16.0);} return f");
assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17);} return f");
assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16|0);} return f");
assertEq(asmLink(asmCompile('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f"), this)(), undefined);
assertEq(asmLink(asmCompile('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15," + (UINT8_MAX + 1) + ");} return f"), this)(), undefined);
// Local variable of Bool8x16 type.
assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Bool8x16(1,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1);} return f");
assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16;} return f");
assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16();} return f");
assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1);} return f");
assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1,0,0,0);} return f");
assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1.0);} return f");
assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1|0);} return f");
assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1);} return f");
// Any nonzero boolean literal (including negatives) is accepted as true.
assertEq(asmLink(asmCompile('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1,0,0,0,0,0,0,0,0,1,-1,2,-2,1,1,1);} return f"), this)(), undefined);
// Global variable of Int8x16 type.
// Globals are imported through the ffi object and checked with i8x16chk.
assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + I8x16 + I8x16CHK + "var g=i8x16chk(ffi.g); function f() { return i8x16chk(g); } return f"), this,
{g: SIMD.Int8x16(1,2,3,4,5,6,7,8,10,11,12,13,14,15,16,17)})(), [1,2,3,4,5,6,7,8,10,11,12,13,14,15,16,17]);
assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + I8x16 + I8x16CHK + "var g=i8x16chk(ffi.g); function f() { g=i8x16(5,6,7,8,9,10,11,12,1,2,3,4,5,6,7,8); return i8x16chk(g); } return f"), this,
{g: SIMD.Int8x16(1,2,3,4,5,6,7,8,10,11,12,13,14,15,16,17)})(), [5,6,7,8,9,10,11,12,1,2,3,4,5,6,7,8]);
// Global variable of Bool8x16 type.
assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + B8x16 + B8x16CHK + "var g=b8x16chk(ffi.g); function f() { return b8x16chk(g); } return f"), this,
{g: SIMD.Bool8x16(1,1,0,1,0,0,1,0,0,1,0,1,0,0,1,0)})(), [true,true,false,true,false,false,true,false,false,true,false,true,false,false,true,false]);
assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + B8x16 + B8x16CHK + "var g=b8x16chk(ffi.g); function f() { g=b8x16(1,1,0,1,0,1,1,1,0,1,0,1,1,1,0,0); return b8x16chk(g); } return f"), this,
{g: SIMD.Bool8x16(1,1,0,1,0,0,1,0)})(), [true,true,false,true,false,true,true,true,false,true,false,true,true,true,false,false]);
// Unsigned SIMD globals are not allowed.
assertAsmTypeFail('glob', 'ffi', USE_ASM + U8x16 + U8x16CHK + "var g=u8x16chk(ffi.g); function f() { } return f");
// Only signed Int8x16 allowed as return value.
assertEqVecArr(asmLink(asmCompile('glob', USE_ASM + I8x16 + "function f() {return i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f"), this)(),
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
// 132 wraps to -124 when truncated to a signed 8-bit lane.
assertEqVecArr(asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK + "function f() {return i8x16chk(i8x16(1,2,3,132,5,6,7,8,9,10,11,12,13,14,15,16));} return f"), this)(),
[1, 2, 3, -124, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {return u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f");
assertAsmTypeFail('glob', USE_ASM + U8x16 + U8x16CHK + "function f() {return u8x16chk(u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16));} return f");
// Test splat.
// Build the reference result for splat: a 16-element array of x.
function splat(x) {
    return new Array(16).fill(x);
}
// Bool8x16.splat with a dynamic argument (nonzero => all-true lanes).
splatB = asmLink(asmCompile('glob', USE_ASM + B8x16 +
'var splat = b8x16.splat;' +
'function f(x) { x = x|0; return splat(x); } return f'), this);
assertEqVecArr(splatB(true), splat(true));
assertEqVecArr(splatB(false), splat(false));
// Bool8x16.splat with a constant 0 argument.
splatB0 = asmLink(asmCompile('glob', USE_ASM + B8x16 +
'var splat = b8x16.splat;' +
'function f() { var x = 0; return splat(x); } return f'), this);
assertEqVecArr(splatB0(), splat(false));
// Bool8x16.splat with a constant 1 argument.
splatB1 = asmLink(asmCompile('glob', USE_ASM + B8x16 +
'var splat = b8x16.splat;' +
'function f() { var x = 1; return splat(x); } return f'), this);
assertEqVecArr(splatB1(), splat(true));
// Int8x16.splat with a dynamic argument; the value is truncated and
// sign-extended to 8 bits (x << 24 >> 24).
splatI = asmLink(asmCompile('glob', USE_ASM + I8x16 +
'var splat = i8x16.splat;' +
'function f(x) { x = x|0; return splat(x); } return f'), this);
for (let x of [0, 1, -1, 0x1234, 0x12, 1000, -1000000]) {
assertEqVecArr(splatI(x), splat(x << 24 >> 24));
}
// Int8x16.splat with a constant argument.
splatIc = asmLink(asmCompile('glob', USE_ASM + I8x16 +
'var splat = i8x16.splat;' +
'function f() { var x = 100; return splat(x); } return f'), this);
assertEqVecArr(splatIc(), splat(100))
// Uint8x16.splat: asm.js cannot return unsigned vectors, so the result is
// bitcast to Int8x16 for the return and converted back with
// SIMD.Uint8x16.fromInt8x16Bits before comparing.
splatU = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16U8x16 +
'var splat = u8x16.splat;' +
'function f(x) { x = x|0; return i8x16u8x16(splat(x)); } return f'), this);
for (let x of [0, 1, -1, 0x1234, 0x12, 1000, -1000000]) {
// Bug fix: call splatU here (the old code re-tested splatI, so the dynamic
// u8x16.splat path was never exercised). Lanes zero-extend to 8 bits.
assertEqVecArr(SIMD.Uint8x16.fromInt8x16Bits(splatU(x)), splat(x << 24 >>> 24));
}
// Uint8x16.splat with a constant argument.
splatUc = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16U8x16 +
'var splat = u8x16.splat;' +
'function f() { var x = 200; return i8x16u8x16(splat(x)); } return f'), this);
assertEqVecArr(SIMD.Uint8x16.fromInt8x16Bits(splatUc()), splat(200))
// Test extractLane.
//
// The lane index must be a literal int, and we generate different code for
// different lanes.
// Compile an Int8x16 extractLane for constant lane i and run it on the
// literal vector a.
function extractI(a, i) {
return asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16EXT +
`function f() {var x=i8x16(${a.join(',')}); return i8x16ext(x, ${i})|0; } return f`), this)();
}
a = [-1,2,-3,4,-5,6,-7,8,-9,10,-11,12,-13,-14,-15,-16];
for (var i = 0; i < 16; i++)
assertEq(extractI(a, i), a[i]);
// Repeat with all signs flipped to cover both signed ranges.
a = a.map(x => -x);
for (var i = 0; i < 16; i++)
assertEq(extractI(a, i), a[i]);
// Same for Uint8x16; lanes read back as unsigned values.
function extractU(a, i) {
return asmLink(asmCompile('glob', USE_ASM + U8x16 + U8x16EXT +
`function f() {var x=u8x16(${a.join(',')}); return u8x16ext(x, ${i})|0; } return f`), this)();
}
a = [1,255,12,13,14,150,200,3,4,5,6,7,8,9,10,16];
for (var i = 0; i < 16; i++)
assertEq(extractU(a, i), a[i]);
// Complement the values to cover the rest of the unsigned range.
a = a.map(x => 255-x);
for (var i = 0; i < 16; i++)
assertEq(extractU(a, i), a[i]);
// Same for Bool8x16; |0 coerces the boolean lane to 0/1.
function extractB(a, i) {
return asmLink(asmCompile('glob', USE_ASM + B8x16 + B8x16EXT +
`function f() {var x=b8x16(${a.join(',')}); return b8x16ext(x, ${i})|0; } return f`), this)();
}
a = [1,1,0,1,1,0,0,0,1,1,1,1,0,0,0,1];
for (var i = 0; i < 16; i++)
assertEq(extractB(a, i), a[i]);
a = a.map(x => 1-x);
for (var i = 0; i < 16; i++)
assertEq(extractB(a, i), a[i]);
// Test replaceLane.
// Compile an Int8x16 replaceLane for constant lane i over the literal vector
// a; the returned function replaces lane i with its argument.
function replaceI(a, i) {
return asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16REP +
`function f(v) {v=v|0; var x=i8x16(${a.join(',')}); return i8x16rep(x,${i},v); } return f`), this);
}
a = [-1,2,-3,4,-5,6,-7,8,-9,10,-11,12,-13,-14,-15,-16];
for (var i = 0; i < 16; i++) {
var f = replaceI(a, i);
var b = a.slice(0);
b[i] = -20;
assertEqVecArr(f(-20), b);
}
// Same for Uint8x16; the result is bitcast to Int8x16 for the return.
function replaceU(a, i) {
return asmLink(asmCompile('glob', USE_ASM + U8x16 + U8x16REP + I8x16 + I8x16U8x16 +
`function f(v) {v=v|0; var x=u8x16(${a.join(',')}); x=u8x16rep(x,${i},v); return i8x16u8x16(x); } return f`), this);
}
a = [256-1,2,256-3,4,256-5,6,256-7,8,256-9,10,256-11,12,256-13,256-14,256-15,256-16];
for (var i = 0; i < 16; i++) {
// Result returned as Int8x16, convert back.
var rawf = replaceU(a, i);
var f = x => SIMD.Uint8x16.fromInt8x16Bits(rawf(x));
var b = a.slice(0);
b[i] = 100;
assertEqVecArr(f(100), b);
}
// Bool8x16.replaceLane with a constant lane index over the literal vector a.
function replaceB(a, i) {
return asmLink(asmCompile('glob', USE_ASM + B8x16 + B8x16REP +
`function f(v) {v=v|0; var x=b8x16(${a.join(',')}); return b8x16rep(x,${i},v); } return f`), this);
}
a = [1,1,0,1,1,0,0,0,1,1,1,1,0,0,0,1];
for (var i = 0; i < 16; i++) {
var f = replaceB(a, i);
var b = a.slice(0);
// Flip the lane under test; declare v locally (it was an accidental global).
var v = 1 - a[i];
b[i] = v;
// Boolean lanes read back as true/false, so compare against !!x.
assertEqVecArr(f(v), b.map(x => !!x));
}
// Test select.
// select(mask, a, b) picks the lane from a where the mask lane is true and
// from b where it is false.
selectI = asmLink(asmCompile('glob', USE_ASM + I8x16 + B8x16 + B8x16CHK +
'var select = i8x16.select;' +
'var a = i8x16(-1,2,-3,4,-5, 6,-7, 8,-9,10,-11,12,-13,-14,-15,-16);' +
'var b = i8x16( 5,6, 7,8, 9,10,11,12,13,14, 15,16,-77, 45, 32, 0);' +
'function f(x) { x = b8x16chk(x); return select(x, a, b); } return f'), this);
assertEqVecArr(selectI(SIMD.Bool8x16( 0,0, 1,0, 1,1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1)),
[ 5,6,-3,8,-5,6,-7,12,-9,10,15,16,-13,-14,32,-16]);
// Same for Uint8x16 with bitcasts around the unsigned select; the selected
// bits are identical, so the expected lanes match the signed case.
selectU = asmLink(asmCompile('glob', USE_ASM + I8x16 + B8x16 + B8x16CHK + U8x16 + I8x16U8x16 + U8x16I8x16 +
'var select = u8x16.select;' +
'var a = i8x16(-1,2,-3,4,-5, 6,-7, 8,-9,10,-11,12,-13,-14,-15,-16);' +
'var b = i8x16( 5,6, 7,8, 9,10,11,12,13,14, 15,16,-77, 45, 32, 0);' +
'function f(x) { x = b8x16chk(x); return i8x16u8x16(select(x, u8x16i8x16(a), u8x16i8x16(b))); } return f'), this);
assertEqVecArr(selectU(SIMD.Bool8x16( 0,0, 1,0, 1,1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1)),
[ 5,6,-3,8,-5,6,-7,12,-9,10,15,16,-13,-14,32,-16]);
// Test swizzle.
// Reference swizzle: build the 16-lane result by indexing vec with each
// entry of the lane index list.
function swizzle(vec, lanes) {
    return Array.from({length: 16}, (_, lane) => vec[lanes[lane]]);
}
// Compile Int8x16.swizzle with a constant lane list and compare against the
// reference swizzle on two input vectors.
function swizzleI(lanes) {
let asm = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
'var swz = i8x16.swizzle;' +
`function f(a) { a = i8x16chk(a); return swz(a, ${lanes.join()}); } return f`), this);
let a1 = [ -1,2, -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
let a2 = [-128,2,-128,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16, -1];
let v1 = SIMD.Int8x16(...a1);
let v2 = SIMD.Int8x16(...a2);
assertEqVecArr(asm(v1), swizzle(a1, lanes));
assertEqVecArr(asm(v2), swizzle(a2, lanes));
}
// Mixed, all-first, and all-last lane selections.
swizzleI([10, 1, 7, 5, 1, 2, 6, 8, 5, 13, 0, 6, 2, 8, 0, 9]);
swizzleI([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
swizzleI([15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15]);
// Same for Uint8x16, with bitcasts to/from Int8x16 at the asm.js boundary.
function swizzleU(lanes) {
let asm = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK + U8x16 + U8x16I8x16 + I8x16U8x16 +
'var swz = u8x16.swizzle;' +
`function f(a) { a = i8x16chk(a); return i8x16u8x16(swz(u8x16i8x16(a), ${lanes.join()})); } return f`), this);
let a1 = [ -1,2, -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
let a2 = [-128,2,-128,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16, -1];
let v1 = SIMD.Int8x16(...a1);
let v2 = SIMD.Int8x16(...a2);
assertEqVecArr(asm(v1), swizzle(a1, lanes));
assertEqVecArr(asm(v2), swizzle(a2, lanes));
}
swizzleU([10, 1, 7, 5, 1, 2, 6, 8, 5, 13, 0, 6, 2, 8, 0, 9]);
swizzleU([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
swizzleU([15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15]);
// Out-of-range lane indexes.
assertAsmTypeFail('glob', USE_ASM + I8x16 + 'var swz = i8x16.swizzle; ' +
'function f() { var x=i8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16); } return f');
assertAsmTypeFail('glob', USE_ASM + U8x16 + 'var swz = u8x16.swizzle; ' +
'function f() { var x=u8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16); } return f');
// Missing lane indexes.
assertAsmTypeFail('glob', USE_ASM + I8x16 + 'var swz = i8x16.swizzle; ' +
'function f() { var x=i8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15); } return f');
assertAsmTypeFail('glob', USE_ASM + U8x16 + 'var swz = u8x16.swizzle; ' +
'function f() { var x=u8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15); } return f');
// Test shuffle.
// Reference shuffle: lane indexes 0-31 select from the concatenation of
// vec1 (lanes 0-15) and vec2 (lanes 16-31).
function shuffle(vec1, vec2, lanes) {
    const combined = vec1.concat(vec2);
    return Array.from({length: 16}, (_, lane) => combined[lanes[lane]]);
}
// Compile Int8x16.shuffle with a constant lane list (indexes 0-31 across the
// two operands) and compare against the reference shuffle.
function shuffleI(lanes) {
let asm = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
'var shuf = i8x16.shuffle;' +
`function f(a1, a2) { a1 = i8x16chk(a1); a2 = i8x16chk(a2); return shuf(a1, a2, ${lanes.join()}); } return f`), this);
let a1 = [ -1,2, -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
let a2 = [-128,2,-128,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16, -1];
let v1 = SIMD.Int8x16(...a1);
let v2 = SIMD.Int8x16(...a2);
assertEqVecArr(asm(v1, v2), shuffle(a1, a2, lanes));
}
// Mixed, all-first, and all-last lane selections.
shuffleI([31, 9, 5, 4, 29, 12, 19, 10, 16, 22, 10, 9, 6, 18, 9, 8]);
shuffleI([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
shuffleI([31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31]);
// Same for Uint8x16, with bitcasts to/from Int8x16 at the asm.js boundary.
function shuffleU(lanes) {
let asm = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK + U8x16 + U8x16I8x16 + I8x16U8x16 +
'var shuf = u8x16.shuffle;' +
'function f(a1, a2) { a1 = i8x16chk(a1); a2 = i8x16chk(a2); ' +
`return i8x16u8x16(shuf(u8x16i8x16(a1), u8x16i8x16(a2), ${lanes.join()})); } return f`), this);
let a1 = [ -1,2, -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
let a2 = [-128,2,-128,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16, -1];
let v1 = SIMD.Int8x16(...a1);
let v2 = SIMD.Int8x16(...a2);
assertEqVecArr(asm(v1, v2), shuffle(a1, a2, lanes));
}
shuffleU([31, 9, 5, 4, 29, 12, 19, 10, 16, 22, 10, 9, 6, 18, 9, 8]);
shuffleU([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
shuffleU([31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31]);
// Out-of-range lane indexes.
assertAsmTypeFail('glob', USE_ASM + I8x16 + 'var shuf = i8x16.shuffle; ' +
'function f() { var x=i8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); shuf(x,x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,32); } return f');
assertAsmTypeFail('glob', USE_ASM + U8x16 + 'var shuf = u8x16.shuffle; ' +
'function f() { var x=u8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); shuf(x,x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,32); } return f');
// Missing lane indexes.
assertAsmTypeFail('glob', USE_ASM + I8x16 + 'var shuf = i8x16.shuffle; ' +
'function f() { var x=i8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); shuf(x,x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15); } return f');
assertAsmTypeFail('glob', USE_ASM + U8x16 + 'var shuf = u8x16.shuffle; ' +
'function f() { var x=u8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); shuf(x,x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15); } return f');
// Test unary operators.
// Int8x16 unary op: compare the asm.js result against lanefunc applied to
// every lane.
function unaryI(opname, lanefunc) {
let simdfunc = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
`var fut = i8x16.${opname};` +
'function f(v) { v = i8x16chk(v); return fut(v); } return f'), this);
let a = [-1,2,-3,4,-5,6,-7,8,-9,10,-11,12,-13,-14,-15,-16];
let v = SIMD.Int8x16(...a);
assertEqVecArr(simdfunc(v), a.map(lanefunc));
}
// Uint8x16 unary op, bitcast through Int8x16 at the asm.js boundary.
function unaryU(opname, lanefunc) {
let simdfunc = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16CHK + U8x16I8x16 + I8x16U8x16 +
`var fut = u8x16.${opname};` +
'function f(v) { v = i8x16chk(v); return i8x16u8x16(fut(u8x16i8x16(v))); } return f'), this);
let a = [256-1,2,256-3,4,256-5,6,256-7,8,256-9,10,256-11,12,256-13,256-14,256-15,256-16];
let v = SIMD.Int8x16(...a);
assertEqVecArr(SIMD.Uint8x16.fromInt8x16Bits(simdfunc(v)), a.map(lanefunc));
}
// Bool8x16 unary op.
function unaryB(opname, lanefunc) {
let simdfunc = asmLink(asmCompile('glob', USE_ASM + B8x16 + B8x16CHK +
`var fut = b8x16.${opname};` +
'function f(v) { v = b8x16chk(v); return fut(v); } return f'), this);
let a = [1,1,0,1,1,0,0,0,1,1,1,1,0,0,0,1];
let v = SIMD.Bool8x16(...a);
assertEqVecArr(simdfunc(v), a.map(lanefunc));
}
// Lane models truncate to 8 bits: sign-extended for signed, zero-extended
// for unsigned.
unaryI('not', x => ~x << 24 >> 24);
unaryU('not', x => ~x << 24 >>> 24);
unaryB('not', x => !x);
unaryI('neg', x => -x << 24 >> 24);
unaryU('neg', x => -x << 24 >>> 24);
// Test binary operators.
// Element-wise combine two equal-length arrays with f; the length check uses
// the harness's assertEq.
function zipmap(a1, a2, f) {
    assertEq(a1.length, a2.length);
    return a1.map((x, idx) => f(x, a2[idx]));
}
// Int8x16 binary op: compare the asm.js result against lanefunc applied
// pairwise to the two input arrays.
function binaryI(opname, lanefunc) {
let simdfunc = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
`var fut = i8x16.${opname};` +
'function f(v1, v2) { v1 = i8x16chk(v1); v2 = i8x16chk(v2); return fut(v1, v2); } return f'), this);
let a1 = [ -1,2, -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
let a2 = [-128,2,-128,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16, -1];
let ref = zipmap(a1, a2, lanefunc);
let v1 = SIMD.Int8x16(...a1);
let v2 = SIMD.Int8x16(...a2);
assertEqVecArr(simdfunc(v1, v2), ref);
}
// Uint8x16 binary op, bitcast through Int8x16 at the asm.js boundary; the
// reference arrays are masked to unsigned 8-bit values first.
function binaryU(opname, lanefunc) {
let simdfunc = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16CHK + U8x16I8x16 + I8x16U8x16 +
`var fut = u8x16.${opname};` +
'function f(v1, v2) { v1 = i8x16chk(v1); v2 = i8x16chk(v2); return i8x16u8x16(fut(u8x16i8x16(v1), u8x16i8x16(v2))); } return f'), this);
let a1 = [ -1,2, -3,0x80,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16].map(x => x & 0xff);
let a2 = [0x80,2,0x80,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,0xff].map(x => x & 0xff);
let ref = zipmap(a1, a2, lanefunc);
let v1 = SIMD.Int8x16(...a1);
let v2 = SIMD.Int8x16(...a2);
let res = SIMD.Uint8x16.fromInt8x16Bits(simdfunc(v1, v2));
assertEqVecArr(res, ref);
}
// Bool8x16 binary op: compare the asm.js result against lanefunc applied
// pairwise to two boolean lane arrays.
function binaryB(opname, lanefunc) {
let simdfunc = asmLink(asmCompile('glob', USE_ASM + B8x16 + B8x16CHK +
`var fut = b8x16.${opname};` +
'function f(v1, v2) { v1 = b8x16chk(v1); v2 = b8x16chk(v2); return fut(v1, v2); } return f'), this);
let a1 = [1,1,0,1,1,0,0,0,1,1,1,1,0,0,0,1];
let a2 = [1,0,1,0,1,1,0,0,1,0,1,0,1,1,0,0];
let v1 = SIMD.Bool8x16(...a1);
let v2 = SIMD.Bool8x16(...a2);
// Bug fix: the compiled function takes two vectors, but the old code called
// simdfunc(v) with one argument (b8x16chk(undefined) would throw) and used
// a.map(lanefunc), which fed the binary lanefunc Array.map's (elem, index)
// pair. Pass both operands and build the reference with zipmap.
assertEqVecArr(simdfunc(v1, v2), zipmap(a1, a2, lanefunc));
}
// Lane-wise binary arithmetic/bitwise ops; results truncate to 8 bits
// (sign-extended for signed, zero-extended for unsigned).
binaryI('add', (x, y) => (x + y) << 24 >> 24);
binaryI('sub', (x, y) => (x - y) << 24 >> 24);
binaryI('mul', (x, y) => (x * y) << 24 >> 24);
binaryU('add', (x, y) => (x + y) << 24 >>> 24);
binaryU('sub', (x, y) => (x - y) << 24 >>> 24);
binaryU('mul', (x, y) => (x * y) << 24 >>> 24);
binaryI('and', (x, y) => (x & y) << 24 >> 24);
binaryI('or', (x, y) => (x | y) << 24 >> 24);
binaryI('xor', (x, y) => (x ^ y) << 24 >> 24);
binaryU('and', (x, y) => (x & y) << 24 >>> 24);
binaryU('or', (x, y) => (x | y) << 24 >>> 24);
binaryU('xor', (x, y) => (x ^ y) << 24 >>> 24);
// Clamp x into the inclusive range [lo, hi].
function sat(x, lo, hi) {
    return Math.min(Math.max(x, lo), hi);
}
// Saturate to the signed 8-bit lane range.
function isat(x) {
    return sat(x, -128, 127);
}
// Saturate to the unsigned 8-bit lane range.
function usat(x) {
    return sat(x, 0, 255);
}
// Saturating add/sub: results clamp to the 8-bit lane range instead of wrapping.
binaryI('addSaturate', (x, y) => isat(x + y))
binaryI('subSaturate', (x, y) => isat(x - y))
binaryU('addSaturate', (x, y) => usat(x + y))
binaryU('subSaturate', (x, y) => usat(x - y))
// Test shift operators.
// Apply f(element, s) to every element of a, returning the mapped array.
function zip1map(a, s, f) {
    const out = [];
    for (const elem of a)
        out.push(f(elem, s));
    return out;
}
// Test an Int8x16 shift operator for both dynamic and compile-time-constant
// shift amounts (including out-of-range and negative amounts, which wrap
// mod 8).
function shiftI(opname, lanefunc) {
let simdfunc = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
`var fut = i8x16.${opname};` +
'function f(v, s) { v = i8x16chk(v); s = s|0; return fut(v, s); } return f'), this);
let a = [0x80,2,0x80,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,0xff];
let v = SIMD.Int8x16(...a);
for (let s of [0, 1, 2, 6, 7, 8, 9, 10, 16, 255, -1, -8, -7, -1000]) {
let ref = zip1map(a, s, lanefunc);
// 1. Test dynamic shift amount.
assertEqVecArr(simdfunc(v, s), ref);
// 2. Test constant shift amount.
let cstf = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
`var fut = i8x16.${opname};` +
`function f(v) { v = i8x16chk(v); return fut(v, ${s}); } return f`), this);
assertEqVecArr(cstf(v, s), ref);
}
}
// Same for Uint8x16; inputs/outputs are bitcast through Int8x16 at the
// asm.js boundary.
function shiftU(opname, lanefunc) {
let simdfunc = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16CHK + U8x16I8x16 + I8x16U8x16 +
`var fut = u8x16.${opname};` +
'function f(v, s) { v = i8x16chk(v); s = s|0; return i8x16u8x16(fut(u8x16i8x16(v), s)); } return f'), this);
let a = [0x80,2,0x80,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,0xff];
let v = SIMD.Int8x16(...a);
for (let s of [0, 1, 2, 6, 7, 8, 9, 10, 16, 255, -1, -8, -7, -1000]) {
let ref = zip1map(a, s, lanefunc);
// 1. Test dynamic shift amount.
assertEqVecArr(SIMD.Uint8x16.fromInt8x16Bits(simdfunc(v, s)), ref);
// 2. Test constant shift amount.
let cstf = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16CHK + U8x16I8x16 + I8x16U8x16 +
`var fut = u8x16.${opname};` +
`function f(v) { v = i8x16chk(v); return i8x16u8x16(fut(u8x16i8x16(v), ${s})); } return f`), this);
assertEqVecArr(SIMD.Uint8x16.fromInt8x16Bits(cstf(v, s)), ref);
}
}
// Shift amounts are taken mod 8 (s & 7); right shift is arithmetic for the
// signed type and logical for the unsigned type.
shiftI('shiftLeftByScalar', (x,s) => (x << (s & 7)) << 24 >> 24);
shiftU('shiftLeftByScalar', (x,s) => (x << (s & 7)) << 24 >>> 24);
shiftI('shiftRightByScalar', (x,s) => ((x << 24 >> 24) >> (s & 7)) << 24 >> 24);
shiftU('shiftRightByScalar', (x,s) => ((x << 24 >>> 24) >>> (s & 7)) << 24 >>> 24);
// Comparisons.
// Int8x16 lane-wise comparison against lanefunc on signed 8-bit values.
function compareI(opname, lanefunc) {
let simdfunc = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
`var fut = i8x16.${opname};` +
'function f(v1, v2) { v1 = i8x16chk(v1); v2 = i8x16chk(v2); return fut(v1, v2); } return f'), this);
let a1 = [ -1,2, -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
let a2 = [-128,2,-128,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16, -1];
let ref = zipmap(a1, a2, lanefunc);
let v1 = SIMD.Int8x16(...a1);
let v2 = SIMD.Int8x16(...a2);
assertEqVecArr(simdfunc(v1, v2), ref);
}
// Same for Uint8x16: operands are bitcast inside asm.js and the reference
// arrays are zero-extended so comparisons are unsigned.
function compareU(opname, lanefunc) {
let simdfunc = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK + U8x16 + U8x16I8x16 +
`var fut = u8x16.${opname};` +
'function f(v1, v2) { v1 = i8x16chk(v1); v2 = i8x16chk(v2); return fut(u8x16i8x16(v1), u8x16i8x16(v2)); } return f'), this);
let a1 = [ -1,2, -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16].map(x => x << 24 >>> 24);
let a2 = [-128,2,-128,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16, -1].map(x => x << 24 >>> 24);
let ref = zipmap(a1, a2, lanefunc);
let v1 = SIMD.Int8x16(...a1);
let v2 = SIMD.Int8x16(...a2);
assertEqVecArr(simdfunc(v1, v2), ref);
}
compareI("equal", (x,y) => x == y);
compareU("equal", (x,y) => x == y);
compareI("notEqual", (x,y) => x != y);
compareU("notEqual", (x,y) => x != y);
compareI("lessThan", (x,y) => x < y);
compareU("lessThan", (x,y) => x < y);
compareI("lessThanOrEqual", (x,y) => x <= y);
compareU("lessThanOrEqual", (x,y) => x <= y);
compareI("greaterThan", (x,y) => x > y);
compareU("greaterThan", (x,y) => x > y);
compareI("greaterThanOrEqual", (x,y) => x >= y);
compareU("greaterThanOrEqual", (x,y) => x >= y);

Просмотреть файл

@ -0,0 +1,84 @@
load(libdir + "asm.js");
load(libdir + "simd.js");
load(libdir + "asserts.js");
// Set to true to see more JS debugging spew.
const DEBUG = false;
if (!isSimdAvailable()) {
DEBUG && print("won't run tests as simd extensions aren't activated yet");
quit(0);
}
// Test all bit-casts and normal loads and stores.
// The heap is shared by every compiled module; asU8 views it byte-wise so
// results can be compared independently of the SIMD types involved.
var heap = new ArrayBuffer(BUF_MIN);
var asU8 = new Uint8Array(heap);
// Every pair of distinct types below gets a load/bitcast/store round trip.
var allTypes = [
"Int8x16",
"Int16x8",
"Int32x4",
"Uint8x16",
"Uint16x8",
"Uint32x4",
"Float32x4"
];
// Generate a load bit-cast store test function that performs:
//
// function f(a, b) {
// vec = src.load(H, a);
// cast = dst.from«src»Bits(vec);
// store(H, b, cast);
// }
//
// Here, `H` is the heap provided by `heap`.
// src/dst are SIMD type names from allTypes; the returned asm.js function
// copies 16 bytes from heap offset a to offset b through a dst.from<src>Bits
// bitcast, which must preserve the raw bytes exactly.
function test_func(src, dst) {
text = `
"use asm";
var src = glob.SIMD.${src};
var dst = glob.SIMD.${dst};
var ld = src.load;
var st = dst.store;
var bc = dst.from${src}Bits;
var H = new glob.Uint8Array(heap);
function f(a, b) {
a = a|0;
b = b|0;
st(H, b, bc(ld(H, a)));
}
return f;
`;
return asmLink(asmCompile('glob', 'ffi', 'heap', text), this, null, heap);
}
// Check that the 16-byte heap regions starting at offsets a and b hold
// identical bytes.
function assertBuf16(a, b) {
    for (let off = 0; off < 16; off++)
        assertEq(asU8[a + off], asU8[b + off]);
}
// Round-trip every ordered pair of distinct SIMD types: load as src, bitcast
// to dst, store, and check the 16 raw bytes survived unchanged.
for (let src of allTypes) {
for (let dst of allTypes) {
// Skip identity conversions.
if (src == dst) continue;
print(src, dst);
let f = test_func(src, dst);
// Initialize with pseudo-random data.
for (let i = 0; i < 64; i++) {
asU8[i] = (i + 17) * 97;
}
// Aligned load/store.
f(0, 16);
assertBuf16(0, 16);
// Unaligned access.
f(1, 27);
assertBuf16(1, 27);
}
}

Просмотреть файл

@ -0,0 +1,457 @@
// |jit-test|
load(libdir + "asm.js");
load(libdir + "simd.js");
load(libdir + "asserts.js");
// Avoid pathological --ion-eager compile times due to bails in loops
setJitCompilerOption('ion.warmup.trigger', 1000000);
// Set to true to see more JS debugging spew
const DEBUG = false;
if (!isSimdAvailable() || typeof SIMD === 'undefined' || !isAsmJSCompilationAvailable()) {
DEBUG && print("won't run tests as simd extensions aren't activated yet");
quit(0);
}
const RuntimeError = WebAssembly.RuntimeError;
const INT32_MAX = Math.pow(2, 31) - 1;
// INT32_MAX + 1 overflows to exactly -2^31 under |0 truncation.
const INT32_MIN = INT32_MAX + 1 | 0;
try {
// Load / Store
var IMPORTS = USE_ASM + 'var H=new glob.Uint8Array(heap); var i4=glob.SIMD.Int32x4; var ci4=i4.check; var load=i4.load; var store=i4.store;';
// Bad number of args
assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load();} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(3);} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(3, 4, 5);} return f");
// Bad type of args
assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(3, 5);} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, 5.0);} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0.;load(H, i);} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "var H2=new glob.Int32Array(heap); function f(){var i=0;load(H2, i)} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "var H2=42; function f(){var i=0;load(H2, i)} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0;load(H2, i)} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "var f4=glob.SIMD.Float32x4; function f(){var i=0;var vec=f4(1,2,3,4); store(H, i, vec)} return f");
// Bad coercions of returned values
assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0;return load(H, i)|0;} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0;return +load(H, i);} return f");
// Literal index constants
var buf = new ArrayBuffer(BUF_MIN);
var SIZE_TA = BUF_MIN >> 2
var asI32 = new Int32Array(buf);
asI32[SIZE_TA - 4] = 4;
asI32[SIZE_TA - 3] = 3;
asI32[SIZE_TA - 2] = 2;
asI32[SIZE_TA - 1] = 1;
assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, -1);} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, " + (INT32_MAX + 1) + ");} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, " + (INT32_MAX + 1 - 15) + ");} return f");
asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, " + (INT32_MAX + 1 - 16) + ");} return f");
assertAsmLinkFail(asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f() {return ci4(load(H, " + (BUF_MIN - 15) + "));} return f"), this, {}, buf);
assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f() {return ci4(load(H, " + (BUF_MIN - 16) + "));} return f"), this, {}, buf)(), [4, 3, 2, 1]);
assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f() {return ci4(load(H, " + BUF_MIN + " - 16 | 0));} return f"), this, {}, buf)(), [4, 3, 2, 1]);
var CONSTANT_INDEX = 42;
var CONSTANT_BYTE_INDEX = CONSTANT_INDEX << 2;
var loadStoreCode = `
"use asm";
var H = new glob.Uint8Array(heap);
var i4 = glob.SIMD.Int32x4;
var i4load = i4.load;
var i4store = i4.store;
var ci4 = i4.check;
var f4 = glob.SIMD.Float32x4;
var f4load = f4.load;
var f4store = f4.store;
var cf4 = f4.check;
function f32l(i) { i=i|0; return cf4(f4load(H, i|0)); }
function f32lcst() { return cf4(f4load(H, ${CONSTANT_BYTE_INDEX})); }
function f32s(i, vec) { i=i|0; vec=cf4(vec); f4store(H, i|0, vec); }
function f32scst(vec) { vec=cf4(vec); f4store(H, ${CONSTANT_BYTE_INDEX}, vec); }
function i32l(i) { i=i|0; return ci4(i4load(H, i|0)); }
function i32lcst() { return ci4(i4load(H, ${CONSTANT_BYTE_INDEX})); }
function i32s(i, vec) { i=i|0; vec=ci4(vec); i4store(H, i|0, vec); }
function i32scst(vec) { vec=ci4(vec); i4store(H, ${CONSTANT_BYTE_INDEX}, vec); }
function f32lbndcheck(i) {
i=i|0;
if ((i|0) > ${CONSTANT_BYTE_INDEX}) i=${CONSTANT_BYTE_INDEX};
if ((i|0) < 0) i = 0;
return cf4(f4load(H, i|0));
}
function f32sbndcheck(i, vec) {
i=i|0;
vec=cf4(vec);
if ((i|0) > ${CONSTANT_BYTE_INDEX}) i=${CONSTANT_BYTE_INDEX};
if ((i|0) < 0) i = 0;
return cf4(f4store(H, i|0, vec));
}
return {
f32l: f32l,
f32lcst: f32lcst,
f32s: f32s,
f32scst: f32scst,
f32lbndcheck: f32lbndcheck,
f32sbndcheck: f32sbndcheck,
i32l: i32l,
i32lcst: i32lcst,
i32s: i32s,
i32scst: i32scst
}
`;
const SIZE = 0x8000;
var F32 = new Float32Array(SIZE);
// Reinitialize the Float32 heap view so that element k holds the value k + 1.
var reset = function() {
    var k = 0;
    while (k < SIZE) {
        F32[k] = k + 1;
        k++;
    }
};
reset();
var buf = F32.buffer;
var m = asmLink(asmCompile('glob', 'ffi', 'heap', loadStoreCode), this, null, buf);
function slice(TA, i, n) { return Array.prototype.slice.call(TA, i, i + n); }
// Float32x4.load
function f32l(n) { return m.f32l((n|0) << 2 | 0); };
// Correct accesses
assertEqX4(f32l(0), slice(F32, 0, 4));
assertEqX4(f32l(1), slice(F32, 1, 4));
assertEqX4(f32l(SIZE - 4), slice(F32, SIZE - 4, 4));
assertEqX4(m.f32lcst(), slice(F32, CONSTANT_INDEX, 4));
assertEqX4(m.f32lbndcheck(CONSTANT_BYTE_INDEX), slice(F32, CONSTANT_INDEX, 4));
// OOB
assertThrowsInstanceOf(() => f32l(-1), RuntimeError);
assertThrowsInstanceOf(() => f32l(SIZE), RuntimeError);
assertThrowsInstanceOf(() => f32l(SIZE - 1), RuntimeError);
assertThrowsInstanceOf(() => f32l(SIZE - 2), RuntimeError);
assertThrowsInstanceOf(() => f32l(SIZE - 3), RuntimeError);
var code = `
"use asm";
var f4 = glob.SIMD.Float32x4;
var f4l = f4.load;
var u8 = new glob.Uint8Array(heap);
function g(x) {
x = x|0;
// set a constraint on the size of the heap
var ptr = 0;
ptr = u8[0xFFFF] | 0;
// give a precise range to x
x = (x>>0) > 5 ? 5 : x;
x = (x>>0) < 0 ? 0 : x;
// ptr value gets a precise range but the bounds check shouldn't get
// eliminated.
return f4l(u8, 0xFFFA + x | 0);
}
return g;
`;
assertThrowsInstanceOf(() => asmLink(asmCompile('glob', 'ffi', 'heap', code), this, {}, new ArrayBuffer(0x10000))(0), RuntimeError);
// Float32x4.store
function f32s(n, v) { return m.f32s((n|0) << 2 | 0, v); };
var vec = SIMD.Float32x4(5,6,7,8);
var vec2 = SIMD.Float32x4(0,1,2,3);
var vecWithNaN = SIMD.Float32x4(NaN, 2, NaN, 4);
reset();
f32s(0, vec);
assertEqX4(vec, slice(F32, 0, 4));
reset();
f32s(0, vec2);
assertEqX4(vec2, slice(F32, 0, 4));
reset();
f32s(4, vec);
assertEqX4(vec, slice(F32, 4, 4));
reset();
f32s(4, vecWithNaN);
assertEqX4(vecWithNaN, slice(F32, 4, 4));
reset();
m.f32scst(vec2);
assertEqX4(vec2, slice(F32, CONSTANT_INDEX, 4));
reset();
m.f32sbndcheck(CONSTANT_BYTE_INDEX, vec);
assertEqX4(vec, slice(F32, CONSTANT_INDEX, 4));
// OOB
reset();
assertThrowsInstanceOf(() => f32s(SIZE - 3, vec), RuntimeError);
assertThrowsInstanceOf(() => f32s(SIZE - 2, vec), RuntimeError);
assertThrowsInstanceOf(() => f32s(SIZE - 1, vec), RuntimeError);
assertThrowsInstanceOf(() => f32s(SIZE, vec), RuntimeError);
for (var i = 0; i < SIZE; i++)
assertEq(F32[i], i + 1);
// Int32x4.load
var I32 = new Int32Array(buf);
// Rebind reset to refill the Int32 view: element k holds the value k + 1.
reset = function () {
    var k = 0;
    while (k < SIZE) {
        I32[k] = k + 1;
        k++;
    }
};
reset();
function i32(n) { return m.i32l((n|0) << 2 | 0); };
// Correct accesses
assertEqX4(i32(0), slice(I32, 0, 4));
assertEqX4(i32(1), slice(I32, 1, 4));
assertEqX4(i32(SIZE - 4), slice(I32, SIZE - 4, 4));
assertEqX4(m.i32lcst(), slice(I32, CONSTANT_INDEX, 4));
// OOB
assertThrowsInstanceOf(() => i32(-1), RuntimeError);
assertThrowsInstanceOf(() => i32(SIZE), RuntimeError);
assertThrowsInstanceOf(() => i32(SIZE - 1), RuntimeError);
assertThrowsInstanceOf(() => i32(SIZE - 2), RuntimeError);
assertThrowsInstanceOf(() => i32(SIZE - 3), RuntimeError);
// Int32x4.store
function i32s(n, v) { return m.i32s((n|0) << 2 | 0, v); };
var vec = SIMD.Int32x4(5,6,7,8);
var vec2 = SIMD.Int32x4(0,1,2,3);
reset();
i32s(0, vec);
assertEqX4(vec, slice(I32, 0, 4));
reset();
i32s(0, vec2);
assertEqX4(vec2, slice(I32, 0, 4));
reset();
i32s(4, vec);
assertEqX4(vec, slice(I32, 4, 4));
reset();
m.i32scst(vec2);
assertEqX4(vec2, slice(I32, CONSTANT_INDEX, 4));
// OOB
reset();
assertThrowsInstanceOf(() => i32s(SIZE - 3, vec), RuntimeError);
assertThrowsInstanceOf(() => i32s(SIZE - 2, vec), RuntimeError);
assertThrowsInstanceOf(() => i32s(SIZE - 1, vec), RuntimeError);
assertThrowsInstanceOf(() => i32s(SIZE - 0, vec), RuntimeError);
for (var i = 0; i < SIZE; i++)
assertEq(I32[i], i + 1);
// Partial loads and stores
(function() {
    // Partial (1- and 2-lane) SIMD load/store tests: variable and constant
    // indexes, limit accesses at the end of the heap, and OOB behaviour.

    // Variable indexes

    // Build the source of an asm.js module for the given SIMD type name,
    // exposing 1-lane and 2-lane partial loads/stores with both variable
    // and constant (element 41) byte indexes into a Uint8Array heap view.
    function MakeCodeFor(typeName) {
        return `
            "use asm";
            var type = glob.SIMD.${typeName};
            var c = type.check;

            var l1 = type.load1;
            var l2 = type.load2;

            var s1 = type.store1;
            var s2 = type.store2;

            var u8 = new glob.Uint8Array(heap);

            function load1(i) { i=i|0; return l1(u8, i); }
            function load2(i) { i=i|0; return l2(u8, i); }

            function loadCst1() { return l1(u8, 41 << 2); }
            function loadCst2() { return l2(u8, 41 << 2); }

            function store1(i, x) { i=i|0; x=c(x); return s1(u8, i, x); }
            function store2(i, x) { i=i|0; x=c(x); return s2(u8, i, x); }

            function storeCst1(x) { x=c(x); return s1(u8, 41 << 2, x); }
            function storeCst2(x) { x=c(x); return s2(u8, 41 << 2, x); }

            return {
                load1: load1,
                load2: load2,
                loadCst1: loadCst1,
                loadCst2: loadCst2,
                store1: store1,
                store2: store2,
                storeCst1: storeCst1,
                storeCst2: storeCst2,
            }
        `;
    }

    // Heap size in 4-byte elements (the views below use 4-byte lanes).
    var SIZE = 0x10000;

    // Fill typedArray with values produced by the generators x/y/z/w, then
    // check that load1/load2 read exactly the expected lanes (unread lanes
    // must be zero), including constant-index and limit/OOB accesses.
    function TestPartialLoads(m, typedArray, x, y, z, w) {
        // Fill array with predictable values
        for (var i = 0; i < SIZE; i += 4) {
            typedArray[i] = x(i);
            typedArray[i + 1] = y(i);
            typedArray[i + 2] = z(i);
            typedArray[i + 3] = w(i);
        }

        // Test correct loads
        var i = 0, j = 0; // i in elems, j in bytes
        assertEqX4(m.load1(j), [x(i), 0, 0, 0]);
        assertEqX4(m.load2(j), [x(i), y(i), 0, 0]);

        j += 4;
        assertEqX4(m.load1(j), [y(i), 0, 0, 0]);
        assertEqX4(m.load2(j), [y(i), z(i), 0, 0]);

        j += 4;
        assertEqX4(m.load1(j), [z(i), 0, 0, 0]);
        assertEqX4(m.load2(j), [z(i), w(i), 0, 0]);

        j += 4;
        assertEqX4(m.load1(j), [w(i), 0, 0, 0]);
        // A 2-lane load at the last lane of a group reads across into the
        // next group of four elements.
        assertEqX4(m.load2(j), [w(i), x(i+4), 0, 0]);

        j += 4;
        i += 4;
        assertEqX4(m.load1(j), [x(i), 0, 0, 0]);
        assertEqX4(m.load2(j), [x(i), y(i), 0, 0]);

        // Test loads with constant indexes (41)
        assertEqX4(m.loadCst1(), [y(40), 0, 0, 0]);
        assertEqX4(m.loadCst2(), [y(40), z(40), 0, 0]);

        // Test limit and OOB accesses
        assertEqX4(m.load1((SIZE - 1) << 2), [w(SIZE - 4), 0, 0, 0]);
        assertThrowsInstanceOf(() => m.load1(((SIZE - 1) << 2) + 1), RuntimeError);

        assertEqX4(m.load2((SIZE - 2) << 2), [z(SIZE - 4), w(SIZE - 4), 0, 0]);
        assertThrowsInstanceOf(() => m.load2(((SIZE - 2) << 2) + 1), RuntimeError);
    }

    // Partial stores
    // Store the vector (x, y, z, w) with store1/store2 at several variable
    // and constant indexes, verifying that only the targeted lanes are
    // written and that OOB stores throw without touching the heap.
    function TestPartialStores(m, typedArray, typeName, x, y, z, w) {
        var val = SIMD[typeName](x, y, z, w);

        function Reset() {
            for (var i = 0; i < SIZE; i++)
                typedArray[i] = i + 1;
        }
        // Assert that elements in [low, high) still hold their Reset() values.
        function CheckNotModified(low, high) {
            for (var i = low; i < high; i++)
                assertEq(typedArray[i], i + 1);
        }

        function TestStore1(i) {
            m.store1(i, val);
            CheckNotModified(0, i >> 2);
            assertEq(typedArray[i >> 2], x);
            CheckNotModified((i >> 2) + 1, SIZE);
            typedArray[i >> 2] = (i >> 2) + 1;
        }

        function TestStore2(i) {
            m.store2(i, val);
            CheckNotModified(0, i >> 2);
            assertEq(typedArray[i >> 2], x);
            assertEq(typedArray[(i >> 2) + 1], y);
            CheckNotModified((i >> 2) + 2, SIZE);
            typedArray[i >> 2] = (i >> 2) + 1;
            typedArray[(i >> 2) + 1] = (i >> 2) + 2;
        }

        // An OOB store must throw and leave the whole heap untouched.
        function TestOOBStore(f) {
            assertThrowsInstanceOf(f, RuntimeError);
            CheckNotModified(0, SIZE);
        }

        Reset();

        TestStore1(0);
        TestStore1(1 << 2);
        TestStore1(2 << 2);
        TestStore1(3 << 2);
        TestStore1(1337 << 2);

        var i = (SIZE - 1) << 2;
        TestStore1(i);
        TestOOBStore(() => m.store1(i + 1, val));
        TestOOBStore(() => m.store1(-1, val));

        TestStore2(0);
        TestStore2(1 << 2);
        TestStore2(2 << 2);
        TestStore2(3 << 2);
        TestStore2(1337 << 2);

        var i = (SIZE - 2) << 2;
        TestStore2(i);
        TestOOBStore(() => m.store2(i + 1, val));
        TestOOBStore(() => m.store2(-1, val));

        // Constant indexes (41)
        m.storeCst1(val);
        CheckNotModified(0, 41);
        assertEq(typedArray[41], x);
        CheckNotModified(42, SIZE);
        typedArray[41] = 42;

        m.storeCst2(val);
        CheckNotModified(0, 41);
        assertEq(typedArray[41], x);
        assertEq(typedArray[42], y);
        CheckNotModified(43, SIZE);
        typedArray[41] = 42;
        typedArray[42] = 43;
    }

    // Run the Float32x4 variants against a Float32Array-backed heap.
    var f32 = new Float32Array(SIZE);
    var mFloat32x4 = asmLink(asmCompile('glob', 'ffi', 'heap', MakeCodeFor('Float32x4')), this, null, f32.buffer);
    TestPartialLoads(mFloat32x4, f32,
                (i) => i + 1,
                (i) => Math.fround(13.37),
                (i) => Math.fround(1/i),
                (i) => Math.fround(Math.sqrt(0x2000 - i)));

    TestPartialStores(mFloat32x4, f32, 'Float32x4', 42, -0, NaN, 0.1337);

    // Run the Int32x4 variants against an Int32Array view of the same buffer.
    var i32 = new Int32Array(f32.buffer);
    var mInt32x4 = asmLink(asmCompile('glob', 'ffi', 'heap', MakeCodeFor('Int32x4')), this, null, i32.buffer);
    TestPartialLoads(mInt32x4, i32,
                (i) => i + 1 | 0,
                (i) => -i | 0,
                (i) => i * 2 | 0,
                (i) => 42);

    TestPartialStores(mInt32x4, i32, 'Int32x4', 42, -3, 13, 37);
})();
} catch (e) { print('stack: ', e.stack); throw e }

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -100,6 +100,106 @@ function assertEqX4(observed, expected) {
assertEq(observed.w, expected.w);
}
// Drive one (element ctor, shift, scale, disp) addressing configuration
// through an asm.js module performing SIMD load/load2/load1 and
// store/store2/store1 on the shared buffer `ab`, and cross-check every
// index in `indices` against the reference simdCtor implementation.
// Both paths must agree on computed values and on which accesses are out
// of bounds: the asm.js path throws WebAssembly.RuntimeError, the
// reference path throws RangeError.
function testSimdX4(ctor, shift, scale, disp, simdName, simdCtor) {
    var arr = new ctor(ab);

    var c = asmCompile('glob', 'imp', 'b',
                       USE_ASM +
                       'var arr=new glob.' + ctor.name + '(b); ' +
                       'var SIMD_' + simdName + ' = glob.SIMD.' + simdName + '; ' +
                       'var SIMD_' + simdName + '_check = SIMD_' + simdName + '.check; ' +
                       'var SIMD_' + simdName + '_load = SIMD_' + simdName + '.load; ' +
                       'var SIMD_' + simdName + '_load2 = SIMD_' + simdName + '.load2; ' +
                       'var SIMD_' + simdName + '_load1 = SIMD_' + simdName + '.load1; ' +
                       'var SIMD_' + simdName + '_store = SIMD_' + simdName + '.store; ' +
                       'var SIMD_' + simdName + '_store2 = SIMD_' + simdName + '.store2; ' +
                       'var SIMD_' + simdName + '_store1 = SIMD_' + simdName + '.store1; ' +
                       'function load(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
                       'function load2(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load2(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
                       'function load1(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load1(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
                       'function store(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
                       'function store2(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store2(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
                       'function store1(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store1(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
                       'return { load: load, load2: load2, load1: load1, store: store, store2 : store2, store1 : store1 }');
    var f = asmLink(c, this, null, ab);

    const RuntimeError = WebAssembly.RuntimeError;

    for (var i of indices) {
        // The effective element index the asm.js module will compute.
        var index = ((i<<scale)+disp)>>shift;

        // Reference loads: record each loaded value, or flag (t/t2/t1) that
        // the access was out of bounds (RangeError).
        var v, v2, v1;
        var t = false, t2 = false, t1 = false;
        try { v = simdCtor.load(arr, index); }
        catch (e) {
            assertEq(e instanceof RangeError, true);
            t = true;
        }
        try { v2 = simdCtor.load2(arr, index); }
        catch (e) {
            assertEq(e instanceof RangeError, true);
            t2 = true;
        }
        try { v1 = simdCtor.load1(arr, index); }
        catch (e) {
            assertEq(e instanceof RangeError, true);
            t1 = true;
        }

        // Loads
        var l, l2, l1;
        var r = false, r2 = false, r1 = false;
        try { l = f.load(i); }
        catch (e) {
            assertEq(e instanceof RuntimeError, true);
            r = true;
        }
        try { l2 = f.load2(i); }
        catch (e) {
            assertEq(e instanceof RuntimeError, true);
            r2 = true;
        }
        try { l1 = f.load1(i); }
        catch (e) {
            assertEq(e instanceof RuntimeError, true);
            r1 = true;
        }

        // Both implementations must agree on OOB-ness and, when in bounds,
        // on the loaded values.
        assertEq(t, r);
        assertEq(t2, r2);
        assertEq(t1, r1);

        if (!t) assertEqX4(v, l);
        if (!t2) assertEqX4(v2, l2);
        if (!t1) assertEqX4(v1, l1);

        // Stores
        if (!t) {
            // Clobber the slot via the reference store, then check the
            // asm.js store writes the original value back.
            simdCtor.store(arr, index, simdCtor.neg(v));
            f.store(i, v);
            assertEqX4(simdCtor.load(arr, index), v);
        } else
            assertThrowsInstanceOf(() => f.store(i, simdCtor()), RuntimeError);
        if (!t2) {
            simdCtor.store2(arr, index, simdCtor.neg(v2));
            f.store2(i, v2);
            assertEqX4(simdCtor.load2(arr, index), v2);
        } else
            assertThrowsInstanceOf(() => f.store2(i, simdCtor()), RuntimeError);
        if (!t1) {
            simdCtor.store1(arr, index, simdCtor.neg(v1));
            f.store1(i, v1);
            assertEqX4(simdCtor.load1(arr, index), v1);
        } else
            assertThrowsInstanceOf(() => f.store1(i, simdCtor()), RuntimeError);
    }
}
// Run the generic SIMD load/store test specialized for Float32x4.
function testFloat32x4(ctor, shift, scale, disp) {
    var simdTypeName = 'Float32x4';
    testSimdX4(ctor, shift, scale, disp, simdTypeName, SIMD[simdTypeName]);
}
// Run the generic SIMD load/store test specialized for Int32x4.
function testInt32x4(ctor, shift, scale, disp) {
    var simdTypeName = 'Int32x4';
    testSimdX4(ctor, shift, scale, disp, simdTypeName, SIMD[simdTypeName]);
}
function test(tester, ctor, shift) {
var arr = new ctor(ab);
for (var i = 0; i < arr.length; i++)
@ -123,3 +223,15 @@ test(testInt, Int32Array, 2);
test(testInt, Uint32Array, 2);
test(testFloat32, Float32Array, 2);
test(testFloat64, Float64Array, 3);
if (typeof SIMD !== 'undefined' && isSimdAvailable()) {
// Avoid pathological --ion-eager compile times due to bails in loops
setJitCompilerOption('ion.warmup.trigger', 1000000);
// Use a fresh ArrayBuffer so prepareForAsmJS can allocated a guard page
// which SIMD.js needs. Since the original ArrayBuffer was prepared for
// asm.js that didn't use SIMD.js, it has no guard page (on 32-bit).
ab = new ArrayBuffer(BUF_MIN);
test(testInt32x4, Uint8Array, 0);
test(testFloat32x4, Uint8Array, 0);
}

Просмотреть файл

@ -2012,6 +2012,7 @@ jit::FinishBailoutToBaseline(BaselineBailoutInfo* bailoutInfo)
case Bailout_NonObjectInput:
case Bailout_NonStringInput:
case Bailout_NonSymbolInput:
case Bailout_UnexpectedSimdInput:
case Bailout_NonSharedTypedArrayInput:
case Bailout_Debugger:
case Bailout_UninitializedThis:

Просмотреть файл

@ -14,6 +14,7 @@
#include "jstypes.h"
#include "builtin/Eval.h"
#include "builtin/SIMDConstants.h"
#include "gc/Policy.h"
#include "jit/BaselineCacheIRCompiler.h"
#include "jit/BaselineDebugModeOSR.h"
@ -2012,6 +2013,71 @@ TryAttachFunCallStub(JSContext* cx, ICCall_Fallback* stub, HandleScript script,
return true;
}
// Check if target is a native SIMD operation which returns a SIMD type.
// If so, set res to a template object matching the SIMD type produced and return true.
//
// Returns false (leaving res untouched) when the callee is not an
// inlinable native SIMD operation, or when the operation returns a
// scalar and therefore needs no template object.
static bool
GetTemplateObjectForSimd(JSContext* cx, JSFunction* target, MutableHandleObject res)
{
    // Without JIT info we cannot identify which native is being called.
    if (!target->hasJitInfo())
        return false;

    const JSJitInfo* jitInfo = target->jitInfo();
    if (jitInfo->type() != JSJitInfo::InlinableNative)
        return false;

    // Check if this is a native inlinable SIMD operation.
    SimdType ctrlType;
    switch (jitInfo->inlinableNative) {
      case InlinableNative::SimdInt8x16:   ctrlType = SimdType::Int8x16;   break;
      case InlinableNative::SimdUint8x16:  ctrlType = SimdType::Uint8x16;  break;
      case InlinableNative::SimdInt16x8:   ctrlType = SimdType::Int16x8;   break;
      case InlinableNative::SimdUint16x8:  ctrlType = SimdType::Uint16x8;  break;
      case InlinableNative::SimdInt32x4:   ctrlType = SimdType::Int32x4;   break;
      case InlinableNative::SimdUint32x4:  ctrlType = SimdType::Uint32x4;  break;
      case InlinableNative::SimdFloat32x4: ctrlType = SimdType::Float32x4; break;
      case InlinableNative::SimdBool8x16:  ctrlType = SimdType::Bool8x16;  break;
      case InlinableNative::SimdBool16x8:  ctrlType = SimdType::Bool16x8;  break;
      case InlinableNative::SimdBool32x4:  ctrlType = SimdType::Bool32x4;  break;
      // This is not an inlinable SIMD operation.
      default: return false;
    }

    // The controlling type is not necessarily the return type.
    // Check the actual operation.
    SimdOperation simdOp = SimdOperation(jitInfo->nativeOp);
    SimdType retType;

    switch(simdOp) {
      case SimdOperation::Fn_allTrue:
      case SimdOperation::Fn_anyTrue:
      case SimdOperation::Fn_extractLane:
        // These operations return a scalar. No template object needed.
        return false;

      case SimdOperation::Fn_lessThan:
      case SimdOperation::Fn_lessThanOrEqual:
      case SimdOperation::Fn_equal:
      case SimdOperation::Fn_notEqual:
      case SimdOperation::Fn_greaterThan:
      case SimdOperation::Fn_greaterThanOrEqual:
        // These operations return a boolean vector with the same shape as the
        // controlling type.
        retType = GetBooleanSimdType(ctrlType);
        break;

      default:
        // All other operations return the controlling type.
        retType = ctrlType;
        break;
    }

    // Create a template object based on retType.
    RootedGlobalObject global(cx, cx->global());
    Rooted<SimdTypeDescr*> descr(cx, GlobalObject::getOrCreateSimdTypeDescr(cx, global, retType));
    res.set(cx->realm()->jitRealm()->getSimdTemplateObjectFor(cx, descr));
    return true;
}
static bool
GetTemplateObjectForNative(JSContext* cx, HandleFunction target, const CallArgs& args,
MutableHandleObject res, bool* skipAttach)
@ -2095,6 +2161,9 @@ GetTemplateObjectForNative(JSContext* cx, HandleFunction target, const CallArgs&
return !!res;
}
if (JitSupportsSimd() && GetTemplateObjectForSimd(cx, target, res))
return !!res;
return true;
}
@ -2111,6 +2180,12 @@ GetTemplateObjectForClassHook(JSContext* cx, JSNative hook, CallArgs& args,
return !!templateObject;
}
if (hook == SimdTypeDescr::call && JitSupportsSimd()) {
Rooted<SimdTypeDescr*> descr(cx, &args.callee().as<SimdTypeDescr>());
templateObject.set(cx->realm()->jitRealm()->getSimdTemplateObjectFor(cx, descr));
return !!templateObject;
}
return true;
}

Просмотреть файл

@ -663,6 +663,25 @@ BaselineInspector::getTemplateObjectForClassHook(jsbytecode* pc, const Class* cl
return nullptr;
}
// Search the Baseline IC chain at pc for a Call_ClassHook stub whose class
// is SimdTypeDescr and whose template object matches simdType; return that
// template object, or nullptr if none is found.
JSObject*
BaselineInspector::getTemplateObjectForSimdCtor(jsbytecode* pc, SimdType simdType)
{
    // Template objects are harvested from Baseline IC stubs; without a
    // baseline script there is nothing to inspect.
    if (!hasBaselineScript())
        return nullptr;

    const ICEntry& entry = icEntryFromPC(pc);
    for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
        if (stub->isCall_ClassHook() && stub->toCall_ClassHook()->clasp() == &SimdTypeDescr::class_) {
            JSObject* templateObj = stub->toCall_ClassHook()->templateObject();
            InlineTypedObject& typedObj = templateObj->as<InlineTypedObject>();
            // Only accept a template object constructed for the same SIMD type.
            if (typedObj.typeDescr().as<SimdTypeDescr>().type() == simdType)
                return templateObj;
        }
    }

    return nullptr;
}
LexicalEnvironmentObject*
BaselineInspector::templateNamedLambdaObject()
{

Просмотреть файл

@ -131,6 +131,7 @@ class BaselineInspector
JSObject* getTemplateObject(jsbytecode* pc);
JSObject* getTemplateObjectForNative(jsbytecode* pc, Native native);
JSObject* getTemplateObjectForClassHook(jsbytecode* pc, const Class* clasp);
JSObject* getTemplateObjectForSimdCtor(jsbytecode* pc, SimdType simdType);
// Sometimes the group a template object will have is known, even if the
// object itself isn't.

Просмотреть файл

@ -422,6 +422,7 @@ CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph, MacroAssembler*
: CodeGeneratorSpecific(gen, graph, masm)
, ionScriptLabels_(gen->alloc())
, scriptCounts_(nullptr)
, simdTemplatesToReadBarrier_(0)
, realmStubsToReadBarrier_(0)
{
}
@ -6426,6 +6427,82 @@ CodeGenerator::visitNewTypedObject(LNewTypedObject* lir)
masm.bind(ool->rejoin());
}
// Box a raw SIMD value (held in a float register) into a freshly allocated
// InlineTypedObject built from the MIR's template object.
void
CodeGenerator::visitSimdBox(LSimdBox* lir)
{
    FloatRegister in = ToFloatRegister(lir->input());
    Register object = ToRegister(lir->output());
    Register temp = ToRegister(lir->temp());
    InlineTypedObject* templateObject = lir->mir()->templateObject();
    gc::InitialHeap initialHeap = lir->mir()->initialHeap();
    MIRType type = lir->mir()->input()->type();

    // Record the SIMD type so its template object is read-barriered when the
    // compilation is linked (see simdTemplatesToReadBarrier_).
    addSimdTemplateToReadBarrier(lir->mir()->simdType());

    MOZ_ASSERT(lir->safepoint()->liveRegs().has(in), "Save the input register across oolCallVM");
    OutOfLineCode* ool = oolCallVM(NewTypedObjectInfo, lir,
                                   ArgList(ImmGCPtr(templateObject), Imm32(initialHeap)),
                                   StoreRegisterTo(object));

    // Fast path: inline allocation; falls through to the VM call above on
    // allocation failure.
    TemplateObject templateObj(templateObject);
    masm.createGCObject(object, temp, templateObj, initialHeap, ool->entry());
    masm.bind(ool->rejoin());

    // Copy the 128-bit value into the object's inline data, choosing the
    // int/float store variant by MIR type.
    Address objectData(object, InlineTypedObject::offsetOfDataStart());
    switch (type) {
      case MIRType::Int8x16:
      case MIRType::Int16x8:
      case MIRType::Int32x4:
      case MIRType::Bool8x16:
      case MIRType::Bool16x8:
      case MIRType::Bool32x4:
        masm.storeUnalignedSimd128Int(in, objectData);
        break;
      case MIRType::Float32x4:
        masm.storeUnalignedSimd128Float(in, objectData);
        break;
      default:
        MOZ_CRASH("Unknown SIMD kind when generating code for SimdBox.");
    }
}
// Record, as a bit in the mask consumed at link time, that the template
// object for simdType must be read-barriered.
void
CodeGenerator::addSimdTemplateToReadBarrier(SimdType simdType)
{
    const uint32_t bit = uint32_t(1) << uint32_t(simdType);
    simdTemplatesToReadBarrier_ |= bit;
}
// Unbox a SIMD typed object into a raw 128-bit value in a float register,
// bailing out if the input object is not a SIMD object of the expected type.
void
CodeGenerator::visitSimdUnbox(LSimdUnbox* lir)
{
    Register object = ToRegister(lir->input());
    FloatRegister simd = ToFloatRegister(lir->output());
    Register temp = ToRegister(lir->temp());
    Label bail;

    // Type guard: anything other than a SIMD object of this type bails out.
    masm.branchIfNotSimdObject(object, temp, lir->mir()->simdType(), &bail);

    // Load the value from the data of the InlineTypedObject.
    Address objectData(object, InlineTypedObject::offsetOfDataStart());
    switch (lir->mir()->type()) {
      case MIRType::Int8x16:
      case MIRType::Int16x8:
      case MIRType::Int32x4:
      case MIRType::Bool8x16:
      case MIRType::Bool16x8:
      case MIRType::Bool32x4:
        masm.loadUnalignedSimd128Int(objectData, simd);
        break;
      case MIRType::Float32x4:
        masm.loadUnalignedSimd128Float(objectData, simd);
        break;
      default:
        MOZ_CRASH("The impossible happened!");
    }

    bailoutFrom(&bail, lir->snapshot());
}
typedef js::NamedLambdaObject* (*NewNamedLambdaObjectFn)(JSContext*, HandleFunction, gc::InitialHeap);
static const VMFunction NewNamedLambdaObjectInfo =
FunctionInfo<NewNamedLambdaObjectFn>(NamedLambdaObject::createTemplateObject,
@ -7156,7 +7233,7 @@ CodeGenerator::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
MWasmLoadGlobalVar* mir = ins->mir();
MIRType type = mir->type();
MOZ_ASSERT(IsNumberType(type));
MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
Register tls = ToRegister(ins->tlsPtr());
Address addr(tls, offsetof(wasm::TlsData, globalArea) + mir->globalDataOffset());
@ -7183,7 +7260,11 @@ CodeGenerator::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
case MIRType::Bool8x16:
case MIRType::Bool16x8:
case MIRType::Bool32x4:
masm.loadInt32x4(addr, ToFloatRegister(ins->output()));
break;
case MIRType::Float32x4:
masm.loadFloat32x4(addr, ToFloatRegister(ins->output()));
break;
default:
MOZ_CRASH("unexpected type in visitWasmLoadGlobalVar");
}
@ -7195,7 +7276,7 @@ CodeGenerator::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
MWasmStoreGlobalVar* mir = ins->mir();
MIRType type = mir->value()->type();
MOZ_ASSERT(IsNumberType(type));
MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
Register tls = ToRegister(ins->tlsPtr());
Address addr(tls, offsetof(wasm::TlsData, globalArea) + mir->globalDataOffset());
@ -7222,7 +7303,11 @@ CodeGenerator::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
case MIRType::Bool8x16:
case MIRType::Bool16x8:
case MIRType::Bool32x4:
masm.storeInt32x4(ToFloatRegister(ins->value()), addr);
break;
case MIRType::Float32x4:
masm.storeFloat32x4(ToFloatRegister(ins->value()), addr);
break;
default:
MOZ_CRASH("unexpected type in visitWasmStoreGlobalVar");
}
@ -10277,6 +10362,7 @@ CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints)
// script, which may have happened off-thread.
const JitRealm* jr = gen->realm->jitRealm();
jr->performStubReadBarriers(realmStubsToReadBarrier_);
jr->performSIMDTemplateReadBarriers(simdTemplatesToReadBarrier_);
// We finished the new IonScript. Invalidate the current active IonScript,
// so we can replace it with this new (probably higher optimized) version.
@ -11500,17 +11586,19 @@ CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir)
const MLoadUnboxedScalar* mir = lir->mir();
Scalar::Type readType = mir->readType();
unsigned numElems = mir->numElems();
int width = Scalar::byteSize(mir->storageType());
bool canonicalizeDouble = mir->canonicalizeDoubles();
Label fail;
if (lir->index()->isConstant()) {
Address source(elements, ToInt32(lir->index()) * width + mir->offsetAdjustment());
masm.loadFromTypedArray(readType, source, out, temp, &fail, canonicalizeDouble);
masm.loadFromTypedArray(readType, source, out, temp, &fail, canonicalizeDouble, numElems);
} else {
BaseIndex source(elements, ToRegister(lir->index()), ScaleFromElemWidth(width),
mir->offsetAdjustment());
masm.loadFromTypedArray(readType, source, out, temp, &fail, canonicalizeDouble);
masm.loadFromTypedArray(readType, source, out, temp, &fail, canonicalizeDouble, numElems);
}
if (fail.used())
@ -11789,10 +11877,13 @@ CodeGenerator::visitLoadElementFromStateV(LLoadElementFromStateV* lir)
template <typename T>
static inline void
StoreToTypedArray(MacroAssembler& masm, Scalar::Type writeType, const LAllocation* value,
const T& dest)
const T& dest, unsigned numElems = 0)
{
if (writeType == Scalar::Float32 || writeType == Scalar::Float64) {
masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest);
if (Scalar::isSimdType(writeType) ||
writeType == Scalar::Float32 ||
writeType == Scalar::Float64)
{
masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest, numElems);
} else {
if (value->isConstant())
masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
@ -11810,16 +11901,17 @@ CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir)
const MStoreUnboxedScalar* mir = lir->mir();
Scalar::Type writeType = mir->writeType();
unsigned numElems = mir->numElems();
int width = Scalar::byteSize(mir->storageType());
if (lir->index()->isConstant()) {
Address dest(elements, ToInt32(lir->index()) * width + mir->offsetAdjustment());
StoreToTypedArray(masm, writeType, value, dest);
StoreToTypedArray(masm, writeType, value, dest, numElems);
} else {
BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width),
mir->offsetAdjustment());
StoreToTypedArray(masm, writeType, value, dest);
StoreToTypedArray(masm, writeType, value, dest, numElems);
}
}

Просмотреть файл

@ -317,9 +317,24 @@ class CodeGenerator final : public CodeGeneratorSpecific
PerfSpewer perfSpewer_;
#endif
// This integer is a bit mask of all SimdTypeDescr::Type indexes. When a
// MSimdBox instruction is encoded, it might have either been created by
// IonBuilder, or by the Eager Simd Unbox phase.
//
// As the template objects are weak references, the JitRealm is using
// Read Barriers, but such barrier cannot be used during the compilation. To
// work around this issue, the barriers are captured during
// CodeGenerator::link.
//
// Instead of saving the pointers, we just save the index of the Read
// Barriered objects in a bit mask.
uint32_t simdTemplatesToReadBarrier_;
// Bit mask of JitRealm stubs that are to be read-barriered.
uint32_t realmStubsToReadBarrier_;
void addSimdTemplateToReadBarrier(SimdType simdType);
#define LIR_OP(op) void visit##op(L##op* ins);
LIR_OPCODE_LIST(LIR_OP)
#undef LIR_OP

Просмотреть файл

@ -0,0 +1,128 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/EagerSimdUnbox.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
namespace js {
namespace jit {
// Do not optimize any Phi instruction which has conflicting Unbox operations,
// as this might imply some intended polymorphism.
//
// Returns true when every use of the phi either tolerates an unboxed value
// or can be recovered on bailout, so the phi can safely be retyped to the
// raw SIMD MIR type.
static bool
CanUnboxSimdPhi(const JitRealm* jitRealm, MPhi* phi, SimdType unboxType)
{
    MOZ_ASSERT(phi->type() == MIRType::Object);

    // If we are unboxing, we are more than likely to have boxed this SIMD type
    // once in baseline, otherwise, we cannot create a MSimdBox as we have no
    // template object to use.
    if (!jitRealm->maybeGetSimdTemplateObjectFor(unboxType))
        return false;

    MResumePoint* entry = phi->block()->entryResumePoint();
    MIRType mirType = SimdTypeToMIRType(unboxType);
    for (MUseIterator i(phi->usesBegin()), e(phi->usesEnd()); i != e; i++) {
        // If we cannot recover the Simd object at the entry of the basic block,
        // then we would have to box the content anyways.
        if ((*i)->consumer() == entry && !entry->isRecoverableOperand(*i))
            return false;

        if (!(*i)->consumer()->isDefinition())
            continue;

        MDefinition* def = (*i)->consumer()->toDefinition();
        // A consumer unboxing to a different SIMD type indicates
        // polymorphism; leave the phi alone in that case.
        if (def->isSimdUnbox() && def->toSimdUnbox()->type() != mirType)
            return false;
    }

    return true;
}
// Rewrite `phi` (currently MIRType::Object) into a phi of the unboxed SIMD
// type: unbox every operand, retype the phi, and re-box the result for the
// consumers that still need an object (or can recover one on bailout).
static void
UnboxSimdPhi(const JitRealm* jitRealm, MIRGraph& graph, MPhi* phi, SimdType unboxType)
{
    TempAllocator& alloc = graph.alloc();

    // Unbox and replace all operands.
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
        MDefinition* op = phi->getOperand(i);
        MSimdUnbox* unbox = MSimdUnbox::New(alloc, op, unboxType);
        op->block()->insertAtEnd(unbox);
        phi->replaceOperand(i, unbox);
    }

    // Change the MIRType of the Phi.
    MIRType mirType = SimdTypeToMIRType(unboxType);
    phi->setResultType(mirType);

    MBasicBlock* phiBlock = phi->block();
    MInstruction* atRecover = phiBlock->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
    MInstruction* at = phiBlock->safeInsertTop(atRecover);

    // Note, we capture the uses-list now, as new instructions are not visited.
    MUseIterator i(phi->usesBegin()), e(phi->usesEnd());

    // Add a MSimdBox, and replace all the Phi uses with it.
    JSObject* templateObject = jitRealm->maybeGetSimdTemplateObjectFor(unboxType);
    InlineTypedObject* inlineTypedObject = &templateObject->as<InlineTypedObject>();
    MSimdBox* recoverBox = MSimdBox::New(alloc, nullptr, phi, inlineTypedObject, unboxType, gc::DefaultHeap);
    recoverBox->setRecoveredOnBailout();
    phiBlock->insertBefore(atRecover, recoverBox);

    MSimdBox* box = nullptr;
    while (i != e) {
        MUse* use = *i++;
        MNode* ins = use->consumer();

        // Uses that only matter for bailouts can share the
        // recovered-on-bailout box.
        if ((ins->isDefinition() && ins->toDefinition()->isRecoveredOnBailout()) ||
            (ins->isResumePoint() && ins->toResumePoint()->isRecoverableOperand(use)))
        {
            use->replaceProducer(recoverBox);
            continue;
        }

        // Materialize a real box lazily, only if some non-recoverable use
        // actually needs it.
        if (!box) {
            box = MSimdBox::New(alloc, nullptr, phi, inlineTypedObject, unboxType, gc::DefaultHeap);
            phiBlock->insertBefore(at, box);
        }
        use->replaceProducer(box);
    }
}
// MIR optimization pass: for every MSimdUnbox whose input is a phi that can
// be safely retyped (see CanUnboxSimdPhi), push the unboxing through the
// phi. Returns false only if compilation was cancelled.
bool
EagerSimdUnbox(MIRGenerator* mir, MIRGraph& graph)
{
    const JitRealm* jitRealm = mir->realm->jitRealm();
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eager Simd Unbox"))
            return false;

        // Iterate instructions in reverse so newly inserted unboxes are not
        // revisited by this loop.
        for (MInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
            if (!ins->isSimdUnbox())
                continue;

            MSimdUnbox* unbox = ins->toSimdUnbox();
            if (!unbox->input()->isPhi())
                continue;

            MPhi* phi = unbox->input()->toPhi();
            if (!CanUnboxSimdPhi(jitRealm, phi, unbox->simdType()))
                continue;

            UnboxSimdPhi(jitRealm, graph, phi, unbox->simdType());
        }
    }

    return true;
}
} /* namespace jit */
} /* namespace js */

Просмотреть файл

@ -0,0 +1,25 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// This file declares eager SIMD unboxing.
#ifndef jit_EagerSimdUnbox_h
#define jit_EagerSimdUnbox_h
#include "mozilla/Attributes.h"
namespace js {
namespace jit {
class MIRGenerator;
class MIRGraph;
MOZ_MUST_USE bool
EagerSimdUnbox(MIRGenerator* mir, MIRGraph& graph);
} // namespace jit
} // namespace js
#endif /* jit_EagerSimdUnbox_h */

Просмотреть файл

@ -97,6 +97,17 @@
_(ObjectIs) \
_(ObjectToString) \
\
_(SimdInt32x4) \
_(SimdUint32x4) \
_(SimdInt16x8) \
_(SimdUint16x8) \
_(SimdInt8x16) \
_(SimdUint8x16) \
_(SimdFloat32x4) \
_(SimdBool32x4) \
_(SimdBool16x8) \
_(SimdBool8x16) \
\
_(TestBailout) \
_(TestAssertFloat32) \
_(TestAssertRecoveredOnBailout) \

Просмотреть файл

@ -21,6 +21,7 @@
#include "jit/BaselineJIT.h"
#include "jit/CacheIRSpewer.h"
#include "jit/CodeGenerator.h"
#include "jit/EagerSimdUnbox.h"
#include "jit/EdgeCaseAnalysis.h"
#include "jit/EffectiveAddressAnalysis.h"
#include "jit/FoldLinearArithConstants.h"
@ -443,6 +444,17 @@ JitRealm::performStubReadBarriers(uint32_t stubsToBarrier) const
}
}
void
JitRealm::performSIMDTemplateReadBarriers(uint32_t simdTemplatesToBarrier) const
{
    // For each SimdType whose bit is set in the mask, trigger the read
    // barrier of the corresponding SIMD template object. This is the
    // main-thread companion of maybeGetSimdTemplateObjectFor, which reads
    // the templates without a barrier while Ion compiles off-thread.
    while (simdTemplatesToBarrier) {
        auto type = PopNextBitmaskValue<SimdType>(&simdTemplatesToBarrier);
        const ReadBarrieredObject& tpl = simdTemplateObjects_[type];
        MOZ_ASSERT(tpl);
        // The get() call is only made for its read-barrier side effect;
        // the returned pointer is deliberately discarded.
        tpl.get();
    }
}
bool
JitZone::init(JSContext* cx)
{
@ -637,6 +649,11 @@ JitRealm::sweep(JS::Realm* realm)
if (stub && IsAboutToBeFinalized(&stub))
stub.set(nullptr);
}
for (ReadBarrieredObject& obj : simdTemplateObjects_) {
if (obj && IsAboutToBeFinalized(&obj))
obj.set(nullptr);
}
}
void
@ -1467,6 +1484,17 @@ OptimizeMIR(MIRGenerator* mir)
return false;
}
if (!JitOptions.disableRecoverIns && mir->optimizationInfo().eagerSimdUnboxEnabled()) {
AutoTraceLog log(logger, TraceLogger_EagerSimdUnbox);
if (!EagerSimdUnbox(mir, graph))
return false;
gs.spewPass("Eager Simd Unbox");
AssertGraphCoherency(graph);
if (mir->shouldCancel("Eager Simd Unbox"))
return false;
}
if (mir->optimizationInfo().amaEnabled()) {
AutoTraceLog log(logger, TraceLogger_AlignmentMaskAnalysis);
AlignmentMaskAnalysis ama(graph);

Просмотреть файл

@ -7902,6 +7902,11 @@ IonBuilder::getElemTryTypedObject(bool* emitted, MDefinition* obj, MDefinition*
return Ok();
switch (elemPrediction.kind()) {
case type::Simd:
// FIXME (bug 894105): load into a MIRType::float32x4 etc
trackOptimizationOutcome(TrackedOutcome::GenericFailure);
return Ok();
case type::Struct:
case type::Array:
return getElemTryComplexElemOfTypedObject(emitted,
@ -8959,6 +8964,11 @@ IonBuilder::setElemTryTypedObject(bool* emitted, MDefinition* obj,
return Ok();
switch (elemPrediction.kind()) {
case type::Simd:
// FIXME (bug 894105): store a MIRType::float32x4 etc
trackOptimizationOutcome(TrackedOutcome::GenericFailure);
return Ok();
case type::Reference:
return setElemTryReferenceElemOfTypedObject(emitted, obj, index,
objPrediction, value, elemPrediction);
@ -10586,6 +10596,10 @@ IonBuilder::getPropTryTypedObject(bool* emitted,
return Ok();
switch (fieldPrediction.kind()) {
case type::Simd:
// FIXME (bug 894104): load into a MIRType::float32x4 etc
return Ok();
case type::Struct:
case type::Array:
return getPropTryComplexPropOfTypedObject(emitted,
@ -11728,6 +11742,10 @@ IonBuilder::setPropTryTypedObject(bool* emitted, MDefinition* obj,
return Ok();
switch (fieldPrediction.kind()) {
case type::Simd:
// FIXME (bug 894104): store into a MIRType::float32x4 etc
return Ok();
case type::Reference:
return setPropTryReferencePropOfTypedObject(emitted, obj, fieldOffset,
value, fieldPrediction, name);

Просмотреть файл

@ -730,6 +730,51 @@ class IonBuilder
InliningResult inlineSetTypedObjectOffset(CallInfo& callInfo);
InliningResult inlineConstructTypedObject(CallInfo& callInfo, TypeDescr* target);
// SIMD intrinsics and natives.
InliningResult inlineConstructSimdObject(CallInfo& callInfo, SimdTypeDescr* target);
// SIMD helpers.
bool canInlineSimd(CallInfo& callInfo, JSNative native, unsigned numArgs,
InlineTypedObject** templateObj);
MDefinition* unboxSimd(MDefinition* ins, SimdType type);
InliningResult boxSimd(CallInfo& callInfo, MDefinition* ins, InlineTypedObject* templateObj);
MDefinition* convertToBooleanSimdLane(MDefinition* scalar);
InliningResult inlineSimd(CallInfo& callInfo, JSFunction* target, SimdType type);
InliningResult inlineSimdBinaryArith(CallInfo& callInfo, JSNative native,
MSimdBinaryArith::Operation op, SimdType type);
InliningResult inlineSimdBinaryBitwise(CallInfo& callInfo, JSNative native,
MSimdBinaryBitwise::Operation op, SimdType type);
InliningResult inlineSimdBinarySaturating(CallInfo& callInfo, JSNative native,
MSimdBinarySaturating::Operation op, SimdType type);
InliningResult inlineSimdShift(CallInfo& callInfo, JSNative native, MSimdShift::Operation op,
SimdType type);
InliningResult inlineSimdComp(CallInfo& callInfo, JSNative native,
MSimdBinaryComp::Operation op, SimdType type);
InliningResult inlineSimdUnary(CallInfo& callInfo, JSNative native,
MSimdUnaryArith::Operation op, SimdType type);
InliningResult inlineSimdExtractLane(CallInfo& callInfo, JSNative native, SimdType type);
InliningResult inlineSimdReplaceLane(CallInfo& callInfo, JSNative native, SimdType type);
InliningResult inlineSimdSplat(CallInfo& callInfo, JSNative native, SimdType type);
InliningResult inlineSimdShuffle(CallInfo& callInfo, JSNative native, SimdType type,
unsigned numVectors);
InliningResult inlineSimdCheck(CallInfo& callInfo, JSNative native, SimdType type);
InliningResult inlineSimdConvert(CallInfo& callInfo, JSNative native, bool isCast,
SimdType from, SimdType to);
InliningResult inlineSimdSelect(CallInfo& callInfo, JSNative native, SimdType type);
bool prepareForSimdLoadStore(CallInfo& callInfo, Scalar::Type simdType,
MInstruction** elements, MDefinition** index,
Scalar::Type* arrayType);
InliningResult inlineSimdLoad(CallInfo& callInfo, JSNative native, SimdType type,
unsigned numElems);
InliningResult inlineSimdStore(CallInfo& callInfo, JSNative native, SimdType type,
unsigned numElems);
InliningResult inlineSimdAnyAllTrue(CallInfo& callInfo, bool IsAllTrue, JSNative native,
SimdType type);
// Utility intrinsics.
InliningResult inlineIsCallable(CallInfo& callInfo);
InliningResult inlineIsConstructor(CallInfo& callInfo);

Просмотреть файл

@ -27,6 +27,7 @@ OptimizationInfo::initNormalOptimizationInfo()
autoTruncate_ = true;
eaa_ = true;
eagerSimdUnbox_ = true;
edgeCaseAnalysis_ = true;
eliminateRedundantChecks_ = true;
inlineInterpreted_ = true;
@ -68,6 +69,7 @@ OptimizationInfo::initWasmOptimizationInfo()
ama_ = true;
autoTruncate_ = false;
eagerSimdUnbox_ = false; // wasm has no boxing / unboxing.
edgeCaseAnalysis_ = false;
eliminateRedundantChecks_ = false;
scalarReplacement_ = false; // wasm has no objects.

Просмотреть файл

@ -65,6 +65,9 @@ class OptimizationInfo
// Toggles whether native scripts get inlined.
bool inlineNative_;
// Toggles whether eager unboxing of SIMD is used.
bool eagerSimdUnbox_;
// Toggles whether global value numbering is used.
bool gvn_;
@ -154,6 +157,7 @@ class OptimizationInfo
eliminateRedundantChecks_(false),
inlineInterpreted_(false),
inlineNative_(false),
eagerSimdUnbox_(false),
gvn_(false),
licm_(false),
rangeAnalysis_(false),
@ -194,6 +198,10 @@ class OptimizationInfo
uint32_t compilerWarmUpThreshold(JSScript* script, jsbytecode* pc = nullptr) const;
bool eagerSimdUnboxEnabled() const {
return eagerSimdUnbox_ && !JitOptions.disableEagerSimdUnbox;
}
bool gvnEnabled() const {
return gvn_ && !JitOptions.disableGvn;
}

Просмотреть файл

@ -104,6 +104,9 @@ enum BailoutKind
Bailout_NonStringInput,
Bailout_NonSymbolInput,
// SIMD Unbox expects a given type, bails out if it doesn't match.
Bailout_UnexpectedSimdInput,
// Atomic operations require shared memory, bail out if the typed array
// maps unshared memory.
Bailout_NonSharedTypedArrayInput,
@ -205,6 +208,8 @@ BailoutKindString(BailoutKind kind)
return "Bailout_NonStringInput";
case Bailout_NonSymbolInput:
return "Bailout_NonSymbolInput";
case Bailout_UnexpectedSimdInput:
return "Bailout_UnexpectedSimdInput";
case Bailout_NonSharedTypedArrayInput:
return "Bailout_NonSharedTypedArrayInput";
case Bailout_Debugger:
@ -247,19 +252,6 @@ static const uint32_t VECTOR_SCALE_BITS = 3;
static const uint32_t VECTOR_SCALE_SHIFT = ELEMENT_TYPE_BITS + ELEMENT_TYPE_SHIFT;
static const uint32_t VECTOR_SCALE_MASK = (1 << VECTOR_SCALE_BITS) - 1;
// The integer SIMD types have a lot of operations that do the exact same thing
// for signed and unsigned integer types. Sometimes it is simpler to treat
// signed and unsigned integer SIMD types as the same type, using a SimdSign to
// distinguish the few cases where there is a difference.
enum class SimdSign {
// Signedness is not applicable to this type. (i.e., Float or Bool).
NotApplicable,
// Treat as an unsigned integer with a range 0 .. 2^N-1.
Unsigned,
// Treat as a signed integer in two's complement encoding.
Signed,
};
class SimdConstant {
public:
enum Type {
@ -465,6 +457,39 @@ IsSimdType(MIRType type)
return ((unsigned(type) >> VECTOR_SCALE_SHIFT) & VECTOR_SCALE_MASK) != 0;
}
// Returns the number of vector elements (hereby called "length") for a given
// SIMD kind. It is the Y part of the name "Foo x Y".
static inline unsigned
SimdTypeToLength(MIRType type)
{
MOZ_ASSERT(IsSimdType(type));
return 1 << ((unsigned(type) >> VECTOR_SCALE_SHIFT) & VECTOR_SCALE_MASK);
}
// Get the type of the individual lanes in a SIMD type.
// For example, Int32x4 -> Int32, Float32x4 -> Float32 etc.
static inline MIRType
SimdTypeToLaneType(MIRType type)
{
MOZ_ASSERT(IsSimdType(type));
static_assert(unsigned(MIRType::Last) <= ELEMENT_TYPE_MASK,
"ELEMENT_TYPE_MASK should be larger than the last MIRType");
return MIRType((unsigned(type) >> ELEMENT_TYPE_SHIFT) & ELEMENT_TYPE_MASK);
}
// Get the type expected when inserting a lane into a SIMD type.
// This is the argument type expected by the MSimdValue constructors as well as
// MSimdSplat and MSimdInsertElement.
static inline MIRType
SimdTypeToLaneArgumentType(MIRType type)
{
MIRType laneType = SimdTypeToLaneType(type);
// Boolean lanes should be pre-converted to an Int32 with the values 0 or -1.
// All other lane types are inserted directly.
return laneType == MIRType::Boolean ? MIRType::Int32 : laneType;
}
static inline MIRType
MIRTypeFromValueType(JSValueType type)
{
@ -664,6 +689,24 @@ IsNullOrUndefined(MIRType type)
return type == MIRType::Null || type == MIRType::Undefined;
}
static inline bool
IsFloatingPointSimdType(MIRType type)
{
return type == MIRType::Float32x4;
}
static inline bool
IsIntegerSimdType(MIRType type)
{
return IsSimdType(type) && SimdTypeToLaneType(type) == MIRType::Int32;
}
static inline bool
IsBooleanSimdType(MIRType type)
{
return IsSimdType(type) && SimdTypeToLaneType(type) == MIRType::Boolean;
}
static inline bool
IsMagicType(MIRType type)
{
@ -692,10 +735,18 @@ ScalarTypeToMIRType(Scalar::Type type)
return MIRType::Float32;
case Scalar::Float64:
return MIRType::Double;
case Scalar::Float32x4:
return MIRType::Float32x4;
case Scalar::Int8x16:
return MIRType::Int8x16;
case Scalar::Int16x8:
return MIRType::Int16x8;
case Scalar::Int32x4:
return MIRType::Int32x4;
case Scalar::MaxTypedArrayViewType:
break;
}
MOZ_CRASH("unexpected kind");
MOZ_CRASH("unexpected SIMD kind");
}
static inline unsigned
@ -713,10 +764,17 @@ ScalarTypeToLength(Scalar::Type type)
case Scalar::Float64:
case Scalar::Uint8Clamped:
return 1;
case Scalar::Float32x4:
case Scalar::Int32x4:
return 4;
case Scalar::Int16x8:
return 8;
case Scalar::Int8x16:
return 16;
case Scalar::MaxTypedArrayViewType:
break;
}
MOZ_CRASH("unexpected kind");
MOZ_CRASH("unexpected SIMD kind");
}
static inline const char*

Просмотреть файл

@ -407,7 +407,9 @@ struct MaybeReadFallback
}
};
class RResumePoint;
class RSimdBox;
// Reads frame information in snapshot-encoding order (that is, outermost frame
// to innermost frame).
@ -469,6 +471,7 @@ class SnapshotIterator
void warnUnreadableAllocation();
private:
friend class RSimdBox;
const FloatRegisters::RegisterContent* floatAllocationPointer(const RValueAllocation& a) const;
public:

Просмотреть файл

@ -86,6 +86,9 @@ DefaultJitOptions::DefaultJitOptions()
// Toggles whether Effective Address Analysis is globally disabled.
SET_DEFAULT(disableEaa, false);
// Toggle whether eager simd unboxing is globally disabled.
SET_DEFAULT(disableEagerSimdUnbox, false);
// Toggles whether Edge Case Analysis is gobally disabled.
SET_DEFAULT(disableEdgeCaseAnalysis, false);

Просмотреть файл

@ -50,6 +50,7 @@ struct DefaultJitOptions
bool disableInlineBacktracking;
bool disableAma;
bool disableEaa;
bool disableEagerSimdUnbox;
bool disableEdgeCaseAnalysis;
bool disableGvn;
bool disableInlining;

Просмотреть файл

@ -507,6 +507,10 @@ class JitRealm
mozilla::EnumeratedArray<StubIndex, StubIndex::Count, ReadBarrieredJitCode> stubs_;
// The same approach is taken for SIMD template objects.
mozilla::EnumeratedArray<SimdType, SimdType::Count, ReadBarrieredObject> simdTemplateObjects_;
JitCode* generateStringConcatStub(JSContext* cx);
JitCode* generateRegExpMatcherStub(JSContext* cx);
JitCode* generateRegExpSearcherStub(JSContext* cx);
@ -519,6 +523,23 @@ class JitRealm
}
public:
    // Return the realm's template object for the SIMD type described by
    // |descr|, lazily creating (and tenuring) it on first use.
    // NOTE(review): if TypedObject::createZeroed fails, the entry stays null
    // and this presumably returns nullptr — callers must check; confirm.
    JSObject* getSimdTemplateObjectFor(JSContext* cx, Handle<SimdTypeDescr*> descr) {
        ReadBarrieredObject& tpl = simdTemplateObjects_[descr->type()];
        if (!tpl)
            tpl.set(TypedObject::createZeroed(cx, descr, gc::TenuredHeap));
        return tpl.get();
    }
    // Return the template object for |type| if one has been created, or
    // nullptr otherwise. Never allocates.
    JSObject* maybeGetSimdTemplateObjectFor(SimdType type) const {
        // This function is used by Eager Simd Unbox phase which can run
        // off-thread, so we cannot use the usual read barrier. For more
        // information, see the comment above
        // CodeGenerator::simdRefreshTemplatesDuringLink_.
        MOZ_ASSERT(CurrentThreadIsIonCompiling());
        return simdTemplateObjects_[type].unbarrieredGet();
    }
JitCode* getStubCode(uint32_t key) {
ICStubCodeMap::Ptr p = stubCodes_->lookup(key);
if (p)
@ -599,13 +620,15 @@ class JitRealm
return stubs_[RegExpTester];
}
// Perform the necessary read barriers on stubs described by the bitmasks
// passed in. This function can only be called from the main thread.
// Perform the necessary read barriers on stubs and SIMD template object
// described by the bitmasks passed in. This function can only be called
// from the main thread.
//
// The stub pointers must still be valid by the time these methods are
// called. This is arranged by cancelling off-thread Ion compilation at the
// start of GC and at the start of sweeping.
// The stub and template object pointers must still be valid by the time
// these methods are called. This is arranged by cancelling off-thread Ion
// compilation at the start of GC and at the start of sweeping.
void performStubReadBarriers(uint32_t stubsToBarrier) const;
void performSIMDTemplateReadBarriers(uint32_t simdTemplatesToBarrier) const;
size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;

Просмотреть файл

@ -3689,7 +3689,8 @@ LIRGenerator::visitLoadUnboxedScalar(MLoadUnboxedScalar* ins)
const LUse elements = useRegister(ins->elements());
const LAllocation index = useRegisterOrConstant(ins->index());
MOZ_ASSERT(IsNumberType(ins->type()) || ins->type() == MIRType::Boolean);
MOZ_ASSERT(IsNumberType(ins->type()) || IsSimdType(ins->type()) ||
ins->type() == MIRType::Boolean);
// We need a temp register for Uint32Array with known double result.
LDefinition tempDef = LDefinition::BogusTemp();
@ -3770,7 +3771,12 @@ LIRGenerator::visitStoreUnboxedScalar(MStoreUnboxedScalar* ins)
MOZ_ASSERT(IsValidElementsType(ins->elements(), ins->offsetAdjustment()));
MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
if (ins->isFloatWrite()) {
if (ins->isSimdWrite()) {
MOZ_ASSERT_IF(ins->writeType() == Scalar::Float32x4, ins->value()->type() == MIRType::Float32x4);
MOZ_ASSERT_IF(ins->writeType() == Scalar::Int8x16, ins->value()->type() == MIRType::Int8x16);
MOZ_ASSERT_IF(ins->writeType() == Scalar::Int16x8, ins->value()->type() == MIRType::Int16x8);
MOZ_ASSERT_IF(ins->writeType() == Scalar::Int32x4, ins->value()->type() == MIRType::Int32x4);
} else if (ins->isFloatWrite()) {
MOZ_ASSERT_IF(ins->writeType() == Scalar::Float32, ins->value()->type() == MIRType::Float32);
MOZ_ASSERT_IF(ins->writeType() == Scalar::Float64, ins->value()->type() == MIRType::Double);
} else {
@ -4704,7 +4710,7 @@ LIRGenerator::visitWasmParameter(MWasmParameter* ins)
#endif
);
} else {
MOZ_ASSERT(IsNumberType(ins->type()));
MOZ_ASSERT(IsNumberType(ins->type()) || IsSimdType(ins->type()));
defineFixed(new(alloc()) LWasmParameter, ins, LArgument(abi.offsetFromArgBase()));
}
}
@ -4724,6 +4730,8 @@ LIRGenerator::visitWasmReturn(MWasmReturn* ins)
lir->setOperand(0, useFixed(rval, ReturnFloat32Reg));
else if (rval->type() == MIRType::Double)
lir->setOperand(0, useFixed(rval, ReturnDoubleReg));
else if (IsSimdType(rval->type()))
lir->setOperand(0, useFixed(rval, ReturnSimd128Reg));
else if (rval->type() == MIRType::Int32)
lir->setOperand(0, useFixed(rval, ReturnReg));
else
@ -4743,7 +4751,7 @@ LIRGenerator::visitWasmStackArg(MWasmStackArg* ins)
{
if (ins->arg()->type() == MIRType::Int64) {
add(new(alloc()) LWasmStackArgI64(useInt64RegisterOrConstantAtStart(ins->arg())), ins);
} else if (IsFloatingPointType(ins->arg()->type())) {
} else if (IsFloatingPointType(ins->arg()->type()) || IsSimdType(ins->arg()->type())) {
MOZ_ASSERT(!ins->arg()->isEmittedAtUses());
add(new(alloc()) LWasmStackArg(useRegisterAtStart(ins->arg())), ins);
} else {
@ -4878,6 +4886,217 @@ LIRGenerator::visitRecompileCheck(MRecompileCheck* ins)
assignSafepoint(lir, ins);
}
void
LIRGenerator::visitSimdBox(MSimdBox* ins)
{
    // Boxing a SIMD value may allocate an object, hence the safepoint.
    MOZ_ASSERT(IsSimdType(ins->input()->type()));
    LUse inputUse = useRegister(ins->input());
    LSimdBox* boxLir = new(alloc()) LSimdBox(inputUse, temp());
    define(boxLir, ins);
    assignSafepoint(boxLir, ins);
}
void
LIRGenerator::visitSimdUnbox(MSimdUnbox* ins)
{
    MOZ_ASSERT(ins->input()->type() == MIRType::Object);
    MOZ_ASSERT(IsSimdType(ins->type()));
    LUse objectUse = useRegister(ins->input());
    LSimdUnbox* unboxLir = new(alloc()) LSimdUnbox(objectUse, temp());
    // Unboxing bails out when the object does not hold a SIMD value of the
    // expected type.
    assignSnapshot(unboxLir, Bailout_UnexpectedSimdInput);
    define(unboxLir, ins);
}
void
LIRGenerator::visitSimdConstant(MSimdConstant* ins)
{
    // Pick the LIR node matching the 128-bit representation of the constant:
    // a float vector for Float32x4, an integer vector for everything else.
    MOZ_ASSERT(IsSimdType(ins->type()));
    switch (ins->type()) {
      case MIRType::Float32x4:
        define(new(alloc()) LSimd128Float(), ins);
        break;
      case MIRType::Int8x16:
      case MIRType::Int16x8:
      case MIRType::Int32x4:
      case MIRType::Bool8x16:
      case MIRType::Bool16x8:
      case MIRType::Bool32x4:
        define(new(alloc()) LSimd128Int(), ins);
        break;
      default:
        MOZ_CRASH("Unknown SIMD kind when generating constant");
    }
}
void
LIRGenerator::visitSimdConvert(MSimdConvert* ins)
{
    // Lower a lane-wise conversion between SIMD types. Only two directions
    // are supported here: Float32x4 -> Int32x4 (signed or unsigned, with a
    // bailout on out-of-range lanes outside of wasm) and signed
    // Int32x4 -> Float32x4.
    MOZ_ASSERT(IsSimdType(ins->type()));
    MDefinition* input = ins->input();
    LUse use = useRegister(input);
    if (ins->type() == MIRType::Int32x4) {
        MOZ_ASSERT(input->type() == MIRType::Float32x4);
        switch (ins->signedness()) {
          case SimdSign::Signed: {
            LFloat32x4ToInt32x4* lir = new(alloc()) LFloat32x4ToInt32x4(use, temp());
            // In JS code, a lane that cannot be represented bails out; wasm
            // instead traps at runtime, so no snapshot is needed.
            if (!gen->compilingWasm())
                assignSnapshot(lir, Bailout_BoundsCheck);
            define(lir, ins);
            break;
          }
          case SimdSign::Unsigned: {
            // The unsigned conversion needs an extra SIMD temp register.
            LFloat32x4ToUint32x4* lir =
                new (alloc()) LFloat32x4ToUint32x4(use, temp(), temp(LDefinition::SIMD128INT));
            if (!gen->compilingWasm())
                assignSnapshot(lir, Bailout_BoundsCheck);
            define(lir, ins);
            break;
          }
          default:
            MOZ_CRASH("Unexpected SimdConvert sign");
        }
    } else if (ins->type() == MIRType::Float32x4) {
        MOZ_ASSERT(input->type() == MIRType::Int32x4);
        MOZ_ASSERT(ins->signedness() == SimdSign::Signed, "Unexpected SimdConvert sign");
        define(new(alloc()) LInt32x4ToFloat32x4(use), ins);
    } else {
        // Fix: the previous message ("... when generating constant") was
        // copy-pasted from visitSimdConstant and misidentified the crash site.
        MOZ_CRASH("Unknown SIMD kind when generating conversion");
    }
}
void
LIRGenerator::visitSimdReinterpretCast(MSimdReinterpretCast* ins)
{
    // A reinterpret cast keeps the 128 bits unchanged; only the type changes.
    MOZ_ASSERT(IsSimdType(ins->type()) && IsSimdType(ins->input()->type()));
    LUse inputUse = useRegisterAtStart(ins->input());
    // :TODO: (Bug 1132894) We have to allocate a different register as redefine
    // and/or defineReuseInput are not yet capable of reusing the same register
    // with a different register type.
    define(new(alloc()) LSimdReinterpretCast(inputUse), ins);
}
void
LIRGenerator::visitSimdAllTrue(MSimdAllTrue* ins)
{
    // Reduce a boolean vector to a single boolean: true iff every lane is true.
    MOZ_ASSERT(IsBooleanSimdType(ins->input()->type()));
    define(new(alloc()) LSimdAllTrue(useRegisterAtStart(ins->input())), ins);
}
void
LIRGenerator::visitSimdAnyTrue(MSimdAnyTrue* ins)
{
    // Reduce a boolean vector to a single boolean: true iff any lane is true.
    MOZ_ASSERT(IsBooleanSimdType(ins->input()->type()));
    define(new(alloc()) LSimdAnyTrue(useRegisterAtStart(ins->input())), ins);
}
void
LIRGenerator::visitSimdUnaryArith(MSimdUnaryArith* ins)
{
    MOZ_ASSERT(IsSimdType(ins->input()->type()));
    MOZ_ASSERT(IsSimdType(ins->type()));
    // The input cannot be used at start: the output register doubles as a
    // temporary while the result is being computed.
    LUse inputUse = use(ins->input());
    switch (ins->type()) {
      case MIRType::Int8x16:
      case MIRType::Bool8x16:
        define(new(alloc()) LSimdUnaryArithIx16(inputUse), ins);
        break;
      case MIRType::Int16x8:
      case MIRType::Bool16x8:
        define(new(alloc()) LSimdUnaryArithIx8(inputUse), ins);
        break;
      case MIRType::Int32x4:
      case MIRType::Bool32x4:
        define(new(alloc()) LSimdUnaryArithIx4(inputUse), ins);
        break;
      case MIRType::Float32x4:
        define(new(alloc()) LSimdUnaryArithFx4(inputUse), ins);
        break;
      default:
        MOZ_CRASH("Unknown SIMD kind for unary operation");
    }
}
void
LIRGenerator::visitSimdBinaryComp(MSimdBinaryComp* ins)
{
    // Lower a lane-wise SIMD comparison. The operands are SIMD vectors and
    // the result is the corresponding boolean vector.
    MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
    MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
    MOZ_ASSERT(IsBooleanSimdType(ins->type()));
    // Swapping the operands (and flipping the comparison accordingly, done by
    // reverse()) can enable better register reuse for commutative patterns.
    if (ShouldReorderCommutative(ins->lhs(), ins->rhs(), ins))
        ins->reverse();
    // Integer comparisons are lowered for signed lanes only; unsigned
    // comparisons are expected to have been rewritten before lowering.
    switch (ins->specialization()) {
      case MIRType::Int8x16: {
        MOZ_ASSERT(ins->signedness() == SimdSign::Signed);
        LSimdBinaryCompIx16* add = new (alloc()) LSimdBinaryCompIx16();
        lowerForFPU(add, ins, ins->lhs(), ins->rhs());
        return;
      }
      case MIRType::Int16x8: {
        MOZ_ASSERT(ins->signedness() == SimdSign::Signed);
        LSimdBinaryCompIx8* add = new (alloc()) LSimdBinaryCompIx8();
        lowerForFPU(add, ins, ins->lhs(), ins->rhs());
        return;
      }
      case MIRType::Int32x4: {
        MOZ_ASSERT(ins->signedness() == SimdSign::Signed);
        LSimdBinaryCompIx4* add = new (alloc()) LSimdBinaryCompIx4();
        // Int32x4 and Float32x4 use dedicated lowering helpers; the narrower
        // integer types go through the generic FPU path above.
        lowerForCompIx4(add, ins, ins->lhs(), ins->rhs());
        return;
      }
      case MIRType::Float32x4: {
        MOZ_ASSERT(ins->signedness() == SimdSign::NotApplicable);
        LSimdBinaryCompFx4* add = new (alloc()) LSimdBinaryCompFx4();
        lowerForCompFx4(add, ins, ins->lhs(), ins->rhs());
        return;
      }
      default:
        MOZ_CRASH("Unknown compare type when comparing values");
    }
}
void
LIRGenerator::visitSimdBinaryBitwise(MSimdBinaryBitwise* ins)
{
    // Bitwise ops (and/or/xor) are commutative, so the operands may be
    // reordered to improve register allocation before lowering.
    MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
    MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
    MOZ_ASSERT(IsSimdType(ins->type()));
    MDefinition* left = ins->lhs();
    MDefinition* right = ins->rhs();
    ReorderCommutative(&left, &right, ins);
    LSimdBinaryBitwise* bitwiseLir = new(alloc()) LSimdBinaryBitwise;
    lowerForFPU(bitwiseLir, ins, left, right);
}
void
LIRGenerator::visitSimdShift(MSimdShift* ins)
{
    MOZ_ASSERT(IsIntegerSimdType(ins->type()));
    MOZ_ASSERT(ins->lhs()->type() == ins->type());
    MOZ_ASSERT(ins->rhs()->type() == MIRType::Int32);
    LUse vectorUse = useRegisterAtStart(ins->lhs());
    LAllocation shiftAmount = useRegisterOrConstant(ins->rhs());
    // A temp register is needed to mask a dynamic shift amount; constant
    // amounts need no masking at runtime.
    LDefinition maskTemp = shiftAmount.isConstant() ? LDefinition::BogusTemp() : temp();
    LSimdShift* shiftLir = new(alloc()) LSimdShift(vectorUse, shiftAmount, maskTemp);
    defineReuseInput(shiftLir, ins, 0);
}
void
LIRGenerator::visitLexicalCheck(MLexicalCheck* ins)
{

Просмотреть файл

@ -15,6 +15,7 @@
#include "builtin/intl/PluralRules.h"
#include "builtin/intl/RelativeTimeFormat.h"
#include "builtin/MapObject.h"
#include "builtin/SIMDConstants.h"
#include "builtin/String.h"
#include "builtin/TestingFunctions.h"
#include "builtin/TypedObject.h"
@ -262,6 +263,28 @@ IonBuilder::inlineNativeCall(CallInfo& callInfo, JSFunction* target)
case InlinableNative::ObjectToString:
return inlineObjectToString(callInfo);
// SIMD natives.
case InlinableNative::SimdInt32x4:
return inlineSimd(callInfo, target, SimdType::Int32x4);
case InlinableNative::SimdUint32x4:
return inlineSimd(callInfo, target, SimdType::Uint32x4);
case InlinableNative::SimdInt16x8:
return inlineSimd(callInfo, target, SimdType::Int16x8);
case InlinableNative::SimdUint16x8:
return inlineSimd(callInfo, target, SimdType::Uint16x8);
case InlinableNative::SimdInt8x16:
return inlineSimd(callInfo, target, SimdType::Int8x16);
case InlinableNative::SimdUint8x16:
return inlineSimd(callInfo, target, SimdType::Uint8x16);
case InlinableNative::SimdFloat32x4:
return inlineSimd(callInfo, target, SimdType::Float32x4);
case InlinableNative::SimdBool32x4:
return inlineSimd(callInfo, target, SimdType::Bool32x4);
case InlinableNative::SimdBool16x8:
return inlineSimd(callInfo, target, SimdType::Bool16x8);
case InlinableNative::SimdBool8x16:
return inlineSimd(callInfo, target, SimdType::Bool8x16);
// Testing functions.
case InlinableNative::TestBailout:
return inlineBailout(callInfo);
@ -456,6 +479,9 @@ IonBuilder::inlineNonFunctionCall(CallInfo& callInfo, JSObject* target)
if (callInfo.constructing() && target->constructHook() == TypedObject::construct)
return inlineConstructTypedObject(callInfo, &target->as<TypeDescr>());
if (!callInfo.constructing() && target->callHook() == SimdTypeDescr::call)
return inlineConstructSimdObject(callInfo, &target->as<SimdTypeDescr>());
return InliningStatus_NotInlined;
}
@ -3771,6 +3797,766 @@ IonBuilder::inlineConstructTypedObject(CallInfo& callInfo, TypeDescr* descr)
return InliningStatus_Inlined;
}
// Main entry point for SIMD inlining.
// When the controlling simdType is an integer type, sign indicates whether the lanes should
// be treated as signed or unsigned integers.
//
// Dispatches on the SimdOperation encoded in the target's JSJitInfo and
// forwards to the matching inlineSimd* helper. Returns NotInlined when SIMD
// is not supported by the JIT or the operation has no inline lowering.
IonBuilder::InliningResult
IonBuilder::inlineSimd(CallInfo& callInfo, JSFunction* target, SimdType type)
{
    if (!JitSupportsSimd()) {
        trackOptimizationOutcome(TrackedOutcome::NoSimdJitSupport);
        return InliningStatus_NotInlined;
    }
    JSNative native = target->native();
    SimdOperation simdOp = SimdOperation(target->jitInfo()->nativeOp);
    switch(simdOp) {
      case SimdOperation::Constructor:
        // SIMD constructor calls are handled via inlineNonFunctionCall(), so
        // they won't show up here where target is required to be a JSFunction.
        // See also inlineConstructSimdObject().
        MOZ_CRASH("SIMD constructor call not expected.");
      case SimdOperation::Fn_check:
        return inlineSimdCheck(callInfo, native, type);
      case SimdOperation::Fn_splat:
        return inlineSimdSplat(callInfo, native, type);
      case SimdOperation::Fn_extractLane:
        return inlineSimdExtractLane(callInfo, native, type);
      case SimdOperation::Fn_replaceLane:
        return inlineSimdReplaceLane(callInfo, native, type);
      case SimdOperation::Fn_select:
        return inlineSimdSelect(callInfo, native, type);
      // swizzle reads lanes from one vector, shuffle from two.
      case SimdOperation::Fn_swizzle:
        return inlineSimdShuffle(callInfo, native, type, 1);
      case SimdOperation::Fn_shuffle:
        return inlineSimdShuffle(callInfo, native, type, 2);
      // Unary arithmetic.
      case SimdOperation::Fn_abs:
        return inlineSimdUnary(callInfo, native, MSimdUnaryArith::abs, type);
      case SimdOperation::Fn_neg:
        return inlineSimdUnary(callInfo, native, MSimdUnaryArith::neg, type);
      case SimdOperation::Fn_not:
        return inlineSimdUnary(callInfo, native, MSimdUnaryArith::not_, type);
      case SimdOperation::Fn_reciprocalApproximation:
        return inlineSimdUnary(callInfo, native, MSimdUnaryArith::reciprocalApproximation,
                               type);
      case SimdOperation::Fn_reciprocalSqrtApproximation:
        return inlineSimdUnary(callInfo, native, MSimdUnaryArith::reciprocalSqrtApproximation,
                               type);
      case SimdOperation::Fn_sqrt:
        return inlineSimdUnary(callInfo, native, MSimdUnaryArith::sqrt, type);
      // Binary arithmetic.
      case SimdOperation::Fn_add:
        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_add, type);
      case SimdOperation::Fn_sub:
        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_sub, type);
      case SimdOperation::Fn_mul:
        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_mul, type);
      case SimdOperation::Fn_div:
        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_div, type);
      case SimdOperation::Fn_max:
        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_max, type);
      case SimdOperation::Fn_min:
        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_min, type);
      case SimdOperation::Fn_maxNum:
        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_maxNum, type);
      case SimdOperation::Fn_minNum:
        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_minNum, type);
      // Binary saturating.
      case SimdOperation::Fn_addSaturate:
        return inlineSimdBinarySaturating(callInfo, native, MSimdBinarySaturating::add, type);
      case SimdOperation::Fn_subSaturate:
        return inlineSimdBinarySaturating(callInfo, native, MSimdBinarySaturating::sub, type);
      // Binary bitwise.
      case SimdOperation::Fn_and:
        return inlineSimdBinaryBitwise(callInfo, native, MSimdBinaryBitwise::and_, type);
      case SimdOperation::Fn_or:
        return inlineSimdBinaryBitwise(callInfo, native, MSimdBinaryBitwise::or_, type);
      case SimdOperation::Fn_xor:
        return inlineSimdBinaryBitwise(callInfo, native, MSimdBinaryBitwise::xor_, type);
      // Shifts. Right shifts pick arithmetic vs logical from the type's sign.
      case SimdOperation::Fn_shiftLeftByScalar:
        return inlineSimdShift(callInfo, native, MSimdShift::lsh, type);
      case SimdOperation::Fn_shiftRightByScalar:
        return inlineSimdShift(callInfo, native, MSimdShift::rshForSign(GetSimdSign(type)), type);
      // Boolean unary.
      case SimdOperation::Fn_allTrue:
        return inlineSimdAnyAllTrue(callInfo, /* IsAllTrue= */true, native, type);
      case SimdOperation::Fn_anyTrue:
        return inlineSimdAnyAllTrue(callInfo, /* IsAllTrue= */false, native, type);
      // Comparisons.
      case SimdOperation::Fn_lessThan:
        return inlineSimdComp(callInfo, native, MSimdBinaryComp::lessThan, type);
      case SimdOperation::Fn_lessThanOrEqual:
        return inlineSimdComp(callInfo, native, MSimdBinaryComp::lessThanOrEqual, type);
      case SimdOperation::Fn_equal:
        return inlineSimdComp(callInfo, native, MSimdBinaryComp::equal, type);
      case SimdOperation::Fn_notEqual:
        return inlineSimdComp(callInfo, native, MSimdBinaryComp::notEqual, type);
      case SimdOperation::Fn_greaterThan:
        return inlineSimdComp(callInfo, native, MSimdBinaryComp::greaterThan, type);
      case SimdOperation::Fn_greaterThanOrEqual:
        return inlineSimdComp(callInfo, native, MSimdBinaryComp::greaterThanOrEqual, type);
      // Int <-> Float conversions.
      case SimdOperation::Fn_fromInt32x4:
        return inlineSimdConvert(callInfo, native, false, SimdType::Int32x4, type);
      case SimdOperation::Fn_fromUint32x4:
        return inlineSimdConvert(callInfo, native, false, SimdType::Uint32x4, type);
      case SimdOperation::Fn_fromFloat32x4:
        return inlineSimdConvert(callInfo, native, false, SimdType::Float32x4, type);
      // Load/store.
      case SimdOperation::Fn_load:
        return inlineSimdLoad(callInfo, native, type, GetSimdLanes(type));
      case SimdOperation::Fn_load1:
        return inlineSimdLoad(callInfo, native, type, 1);
      case SimdOperation::Fn_load2:
        return inlineSimdLoad(callInfo, native, type, 2);
      case SimdOperation::Fn_load3:
        return inlineSimdLoad(callInfo, native, type, 3);
      case SimdOperation::Fn_store:
        return inlineSimdStore(callInfo, native, type, GetSimdLanes(type));
      case SimdOperation::Fn_store1:
        return inlineSimdStore(callInfo, native, type, 1);
      case SimdOperation::Fn_store2:
        return inlineSimdStore(callInfo, native, type, 2);
      case SimdOperation::Fn_store3:
        return inlineSimdStore(callInfo, native, type, 3);
      // Bitcasts. One for each type with a memory representation.
      case SimdOperation::Fn_fromInt32x4Bits:
        return inlineSimdConvert(callInfo, native, true, SimdType::Int32x4, type);
      case SimdOperation::Fn_fromUint32x4Bits:
        return inlineSimdConvert(callInfo, native, true, SimdType::Uint32x4, type);
      case SimdOperation::Fn_fromInt16x8Bits:
        return inlineSimdConvert(callInfo, native, true, SimdType::Int16x8, type);
      case SimdOperation::Fn_fromUint16x8Bits:
        return inlineSimdConvert(callInfo, native, true, SimdType::Uint16x8, type);
      case SimdOperation::Fn_fromInt8x16Bits:
        return inlineSimdConvert(callInfo, native, true, SimdType::Int8x16, type);
      case SimdOperation::Fn_fromUint8x16Bits:
        return inlineSimdConvert(callInfo, native, true, SimdType::Uint8x16, type);
      case SimdOperation::Fn_fromFloat32x4Bits:
        return inlineSimdConvert(callInfo, native, true, SimdType::Float32x4, type);
      case SimdOperation::Fn_fromFloat64x2Bits:
        // Float64x2 has no JIT lowering here; fall back to the native call.
        return InliningStatus_NotInlined;
    }
    MOZ_CRASH("Unexpected SIMD opcode");
}
// The representation of boolean SIMD vectors is the same as the corresponding
// integer SIMD vectors with -1 lanes meaning true and 0 lanes meaning false.
//
// Functions that set the value of a boolean vector lane work by applying
// ToBoolean on the input argument, so they accept any argument type, just like
// the MNot and MTest instructions.
//
// Convert any scalar value into an appropriate SIMD lane value: An Int32 value
// that is either 0 for false or -1 for true.
MDefinition*
IonBuilder::convertToBooleanSimdLane(MDefinition* scalar)
{
    MSub* lane;
    if (scalar->type() != MIRType::Boolean) {
        // Anything that isn't already a boolean goes through MNot, which
        // applies ToBoolean. Then compute lane = !scalar - 1, which yields
        // 0 - 1 = -1 for truthy inputs and 1 - 1 = 0 for falsy ones.
        MNot* negation = MNot::New(alloc(), scalar);
        current->add(negation);
        lane = MSub::New(alloc(), negation, constant(Int32Value(1)));
    } else {
        // A boolean is already represented as the int32 values 0 / 1, so the
        // lane value is simply 0 - scalar.
        lane = MSub::New(alloc(), constant(Int32Value(0)), scalar);
    }
    lane->setInt32Specialization();
    current->add(lane);
    return lane;
}
// Inline a call to a SIMD type constructor, e.g. SIMD.Int32x4(a, b, c, d).
// Missing trailing arguments get a type-appropriate default (the coercion of
// 'undefined'); the constructed value is boxed into an InlineTypedObject whose
// template comes from Baseline ICs.
IonBuilder::InliningResult
IonBuilder::inlineConstructSimdObject(CallInfo& callInfo, SimdTypeDescr* descr)
{
    if (!JitSupportsSimd()) {
        trackOptimizationOutcome(TrackedOutcome::NoSimdJitSupport);
        return InliningStatus_NotInlined;
    }
    // Generic constructor of SIMD valuesX4.
    MIRType simdType;
    if (!MaybeSimdTypeToMIRType(descr->type(), &simdType)) {
        trackOptimizationOutcome(TrackedOutcome::SimdTypeNotOptimized);
        return InliningStatus_NotInlined;
    }
    // Take the templateObject out of Baseline ICs, such that we can box
    // SIMD value type in the same kind of objects.
    MOZ_ASSERT(InlineTypedObject::canAccommodateType(descr));
    MOZ_ASSERT(descr->getClass() == &SimdTypeDescr::class_,
               "getTemplateObjectForSimdCtor needs an update");
    JSObject* templateObject = inspector->getTemplateObjectForSimdCtor(pc, descr->type());
    if (!templateObject)
        return InliningStatus_NotInlined;
    // The previous assertion ensures this will never fail if we were able to
    // allocate a templateObject in Baseline.
    InlineTypedObject* inlineTypedObject = &templateObject->as<InlineTypedObject>();
    MOZ_ASSERT(&inlineTypedObject->typeDescr() == descr);
    // When there are missing arguments, provide a default value
    // containing the coercion of 'undefined' to the right type.
    MConstant* defVal = nullptr;
    MIRType laneType = SimdTypeToLaneType(simdType);
    unsigned lanes = SimdTypeToLength(simdType);
    if (lanes != 4 || callInfo.argc() < lanes) {
        if (laneType == MIRType::Int32 || laneType == MIRType::Boolean) {
            // The default lane for a boolean vector is |false|, but
            // |MSimdSplat|, |MSimdValueX4|, and |MSimdInsertElement| all
            // require an Int32 argument with the value 0 or 1 to initialize a
            // boolean lane. See also convertToBooleanSimdLane() which is
            // idempotent with a 0 argument after constant folding.
            defVal = constant(Int32Value(0));
        } else if (laneType == MIRType::Double) {
            defVal = constant(DoubleNaNValue());
        } else {
            MOZ_ASSERT(laneType == MIRType::Float32);
            // constant() only handles Values; build the Float32 NaN constant
            // by hand and insert it.
            defVal = MConstant::NewFloat32(alloc(), JS::GenericNaN());
            current->add(defVal);
        }
    }
    MInstruction *values = nullptr;
    // Use the MSimdValueX4 constructor for X4 vectors.
    if (lanes == 4) {
        MDefinition* lane[4];
        for (unsigned i = 0; i < 4; i++)
            lane[i] = callInfo.getArgWithDefault(i, defVal);
        // Convert boolean lanes into Int32 0 / -1.
        if (laneType == MIRType::Boolean) {
            for (unsigned i = 0; i < 4; i++)
                lane[i] = convertToBooleanSimdLane(lane[i]);
        }
        values = MSimdValueX4::New(alloc(), simdType, lane[0], lane[1], lane[2], lane[3]);
        current->add(values);
    } else {
        // For general constructor calls, start from splat(defVal), insert one
        // lane at a time.
        values = MSimdSplat::New(alloc(), defVal, simdType);
        current->add(values);
        // Stop early if constructor doesn't have enough arguments. These lanes
        // then get the default value.
        if (callInfo.argc() < lanes)
            lanes = callInfo.argc();
        for (unsigned i = 0; i < lanes; i++) {
            MDefinition* lane = callInfo.getArg(i);
            if (laneType == MIRType::Boolean)
                lane = convertToBooleanSimdLane(lane);
            values = MSimdInsertElement::New(alloc(), values, lane, i);
            current->add(values);
        }
    }
    MSimdBox* obj = MSimdBox::New(alloc(), constraints(), values, inlineTypedObject, descr->type(),
                                  inlineTypedObject->group()->initialHeap(constraints()));
    current->add(obj);
    current->push(obj);
    callInfo.setImplicitlyUsedUnchecked();
    return InliningStatus_Inlined;
}
// Shared precondition check for SIMD inlining: the call must pass exactly
// |numArgs| arguments and Baseline must have recorded a template object for
// |native|. On success, *templateObj receives that object.
bool
IonBuilder::canInlineSimd(CallInfo& callInfo, JSNative native, unsigned numArgs,
                          InlineTypedObject** templateObj)
{
    if (callInfo.argc() != numArgs)
        return false;
    JSObject* obj = inspector->getTemplateObjectForNative(pc, native);
    if (!obj)
        return false;
    *templateObj = &obj->as<InlineTypedObject>();
    return true;
}
IonBuilder::InliningResult
IonBuilder::inlineSimdCheck(CallInfo& callInfo, JSNative native, SimdType type)
{
    InlineTypedObject* templateObj = nullptr;
    if (!canInlineSimd(callInfo, native, 1, &templateObj))
        return InliningStatus_NotInlined;
    // The unbox instruction performs the actual check: it throws a TypeError
    // at runtime if its operand is not a SIMD object of |type|.
    MDefinition* unboxed = unboxSimd(callInfo.getArg(0), type);
    // Box the value straight back. The box is expected to be optimized away
    // whenever somebody consumes the return value of this check() call, which
    // is what you want for code like this:
    //
    //   function f(x) {
    //     x = Int32x4.check(x)
    //     for(...) {
    //       y = Int32x4.add(x, ...)
    //     }
    //
    // The unboxing of x happens as early as possible, and only once.
    return boxSimd(callInfo, unboxed, templateObj);
}
// Given a value or object, insert a dynamic check that this is a SIMD object of
// the required SimdType, and unbox it into the corresponding SIMD MIRType.
//
// This represents the standard type checking that all the SIMD operations
// perform on their arguments.
MDefinition*
IonBuilder::unboxSimd(MDefinition* ins, SimdType type)
{
    // Peephole: unboxing the result of a box of the same SIMD type can never
    // fail, so reuse the boxed payload directly. MSimdUnbox::foldsTo()
    // performs the same rewrite, but doing it here avoids allocating the
    // unbox node in the first place.
    if (ins->isSimdBox()) {
        MSimdBox* box = ins->toSimdBox();
        if (box->simdType() == type) {
            MDefinition* payload = box->input();
            MOZ_ASSERT(payload->type() == SimdTypeToMIRType(type));
            return payload;
        }
    }
    MSimdUnbox* unbox = MSimdUnbox::New(alloc(), ins, type);
    current->add(unbox);
    return unbox;
}
// Box the SIMD value |ins| into an object shaped like |templateObj|, push it
// as the call's result, and report the call as inlined.
IonBuilder::InliningResult
IonBuilder::boxSimd(CallInfo& callInfo, MDefinition* ins, InlineTypedObject* templateObj)
{
    // Callers may hand us a freshly created instruction that was never added
    // to a block; insert it ahead of the box in that case.
    if (ins->isInstruction() && !ins->block())
        current->add(ins->toInstruction());
    SimdType simdType = templateObj->typeDescr().as<SimdTypeDescr>().type();
    MSimdBox* box = MSimdBox::New(alloc(), constraints(), ins, templateObj, simdType,
                                  templateObj->group()->initialHeap(constraints()));
    current->add(box);
    current->push(box);
    callInfo.setImplicitlyUsedUnchecked();
    return InliningStatus_Inlined;
}
IonBuilder::InliningResult
IonBuilder::inlineSimdBinaryArith(CallInfo& callInfo, JSNative native,
                                  MSimdBinaryArith::Operation op, SimdType type)
{
    InlineTypedObject* templateObj = nullptr;
    if (!canInlineSimd(callInfo, native, 2, &templateObj))
        return InliningStatus_NotInlined;
    // Unbox both operands, emit the (possibly legalized/expanded) arithmetic
    // operation, and box the result.
    MDefinition* left = unboxSimd(callInfo.getArg(0), type);
    MDefinition* right = unboxSimd(callInfo.getArg(1), type);
    MInstruction* arith = MSimdBinaryArith::AddLegalized(alloc(), current, left, right, op);
    return boxSimd(callInfo, arith, templateObj);
}
IonBuilder::InliningResult
IonBuilder::inlineSimdBinaryBitwise(CallInfo& callInfo, JSNative native,
                                    MSimdBinaryBitwise::Operation op, SimdType type)
{
    InlineTypedObject* templateObj = nullptr;
    if (!canInlineSimd(callInfo, native, 2, &templateObj))
        return InliningStatus_NotInlined;
    // Unbox both operands, emit the bitwise operation, and box the result.
    MDefinition* left = unboxSimd(callInfo.getArg(0), type);
    MDefinition* right = unboxSimd(callInfo.getArg(1), type);
    MSimdBinaryBitwise* bitwise = MSimdBinaryBitwise::New(alloc(), left, right, op);
    return boxSimd(callInfo, bitwise, templateObj);
}
// Inline a binary SIMD operation where both arguments are SIMD types.
IonBuilder::InliningResult
IonBuilder::inlineSimdBinarySaturating(CallInfo& callInfo, JSNative native,
                                       MSimdBinarySaturating::Operation op, SimdType type)
{
    InlineTypedObject* templateObj = nullptr;
    if (!canInlineSimd(callInfo, native, 2, &templateObj))
        return InliningStatus_NotInlined;
    // Saturating operations need the lane sign to select the right machine
    // instruction, hence the GetSimdSign(type) argument.
    MDefinition* left = unboxSimd(callInfo.getArg(0), type);
    MDefinition* right = unboxSimd(callInfo.getArg(1), type);
    auto* sat = MSimdBinarySaturating::New(alloc(), left, right, op, GetSimdSign(type));
    return boxSimd(callInfo, sat, templateObj);
}
// Inline a SIMD shiftByScalar operation.
IonBuilder::InliningResult
IonBuilder::inlineSimdShift(CallInfo& callInfo, JSNative native, MSimdShift::Operation op,
                            SimdType type)
{
    InlineTypedObject* templateObj = nullptr;
    if (!canInlineSimd(callInfo, native, 2, &templateObj))
        return InliningStatus_NotInlined;
    // Only the vector operand is unboxed; the shift count stays a scalar.
    MDefinition* vector = unboxSimd(callInfo.getArg(0), type);
    MInstruction* shift =
        MSimdShift::AddLegalized(alloc(), current, vector, callInfo.getArg(1), op);
    return boxSimd(callInfo, shift, templateObj);
}
IonBuilder::InliningResult
IonBuilder::inlineSimdComp(CallInfo& callInfo, JSNative native, MSimdBinaryComp::Operation op,
                           SimdType type)
{
    InlineTypedObject* templateObj = nullptr;
    if (!canInlineSimd(callInfo, native, 2, &templateObj))
        return InliningStatus_NotInlined;
    // Comparisons are sign-sensitive; AddLegalized handles targets that lack
    // native unsigned compares.
    MDefinition* left = unboxSimd(callInfo.getArg(0), type);
    MDefinition* right = unboxSimd(callInfo.getArg(1), type);
    MInstruction* comp =
        MSimdBinaryComp::AddLegalized(alloc(), current, left, right, op, GetSimdSign(type));
    return boxSimd(callInfo, comp, templateObj);
}
IonBuilder::InliningResult
IonBuilder::inlineSimdUnary(CallInfo& callInfo, JSNative native, MSimdUnaryArith::Operation op,
                            SimdType type)
{
    InlineTypedObject* templateObj = nullptr;
    if (!canInlineSimd(callInfo, native, 1, &templateObj))
        return InliningStatus_NotInlined;
    // Unbox the single operand, apply the unary op, and box the result.
    MDefinition* operand = unboxSimd(callInfo.getArg(0), type);
    auto* unary = MSimdUnaryArith::New(alloc(), operand, op);
    return boxSimd(callInfo, unary, templateObj);
}
IonBuilder::InliningResult
IonBuilder::inlineSimdSplat(CallInfo& callInfo, JSNative native, SimdType type)
{
    InlineTypedObject* templateObj = nullptr;
    if (!canInlineSimd(callInfo, native, 1, &templateObj))
        return InliningStatus_NotInlined;
    MIRType mirType = SimdTypeToMIRType(type);
    MDefinition* scalar = callInfo.getArg(0);
    // Boolean lanes are represented as Int32 0 / -1, so convert the scalar
    // before splatting it.
    if (SimdTypeToLaneType(mirType) == MIRType::Boolean)
        scalar = convertToBooleanSimdLane(scalar);
    auto* splat = MSimdSplat::New(alloc(), scalar, mirType);
    return boxSimd(callInfo, splat, templateObj);
}
// Inline SIMD.<type>.extractLane(vector, laneIndex). Only inlined when the
// lane index is a compile-time Int32 constant in range; the result is pushed
// as a scalar, not a boxed SIMD object.
IonBuilder::InliningResult
IonBuilder::inlineSimdExtractLane(CallInfo& callInfo, JSNative native, SimdType type)
{
    // extractLane() returns a scalar, so don't use canInlineSimd() which looks
    // for a template object.
    if (callInfo.argc() != 2 || callInfo.constructing()) {
        trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
        return InliningStatus_NotInlined;
    }
    // Lane index.
    MDefinition* arg = callInfo.getArg(1);
    if (!arg->isConstant() || arg->type() != MIRType::Int32)
        return InliningStatus_NotInlined;
    // A negative Int32 wraps to a huge unsigned value here, so the bounds
    // test below also rejects negative indices.
    unsigned lane = arg->toConstant()->toInt32();
    if (lane >= GetSimdLanes(type))
        return InliningStatus_NotInlined;
    // Original vector.
    MDefinition* orig = unboxSimd(callInfo.getArg(0), type);
    MIRType vecType = orig->type();
    MIRType laneType = SimdTypeToLaneType(vecType);
    SimdSign sign = GetSimdSign(type);
    // An Uint32 lane can't be represented in MIRType::Int32. Get it as a double.
    if (type == SimdType::Uint32x4)
        laneType = MIRType::Double;
    MSimdExtractElement* ins =
        MSimdExtractElement::New(alloc(), orig, laneType, lane, sign);
    current->add(ins);
    current->push(ins);
    callInfo.setImplicitlyUsedUnchecked();
    return InliningStatus_Inlined;
}
// Inline SIMD.<type>.replaceLane(vector, laneIndex, value). Only inlined when
// the lane index is a compile-time Int32 constant in range.
IonBuilder::InliningResult
IonBuilder::inlineSimdReplaceLane(CallInfo& callInfo, JSNative native, SimdType type)
{
    InlineTypedObject* templateObj = nullptr;
    if (!canInlineSimd(callInfo, native, 3, &templateObj))
        return InliningStatus_NotInlined;
    // Lane index. A negative Int32 wraps to a huge unsigned value, so the
    // bounds test below also rejects negative indices.
    MDefinition* arg = callInfo.getArg(1);
    if (!arg->isConstant() || arg->type() != MIRType::Int32)
        return InliningStatus_NotInlined;
    unsigned lane = arg->toConstant()->toInt32();
    if (lane >= GetSimdLanes(type))
        return InliningStatus_NotInlined;
    // Original vector.
    MDefinition* orig = unboxSimd(callInfo.getArg(0), type);
    MIRType vecType = orig->type();
    // Convert to 0 / -1 before inserting a boolean lane.
    MDefinition* value = callInfo.getArg(2);
    if (SimdTypeToLaneType(vecType) == MIRType::Boolean)
        value = convertToBooleanSimdLane(value);
    MSimdInsertElement* ins = MSimdInsertElement::New(alloc(), orig, value, lane);
    return boxSimd(callInfo, ins, templateObj);
}
// Inline a SIMD conversion or bitcast. When isCast==false, one of the types
// must be floating point and the other integer. In this case, sign indicates if
// the integer lanes should be treated as signed or unsigned integers.
IonBuilder::InliningResult
IonBuilder::inlineSimdConvert(CallInfo& callInfo, JSNative native, bool isCast, SimdType fromType,
                              SimdType toType)
{
    InlineTypedObject* templateObj = nullptr;
    if (!canInlineSimd(callInfo, native, 1, &templateObj))
        return InliningStatus_NotInlined;
    MDefinition* arg = unboxSimd(callInfo.getArg(0), fromType);
    MIRType mirType = SimdTypeToMIRType(toType);
    MInstruction* ins;
    if (isCast) {
        // Signed/Unsigned doesn't matter for bitcasts.
        // Note: the cast is not added to |current| here; boxSimd() inserts
        // any not-yet-inserted instruction before the box.
        ins = MSimdReinterpretCast::New(alloc(), arg, mirType);
    } else {
        // Exactly one of fromType, toType must be an integer type.
        SimdSign sign = GetSimdSign(fromType);
        if (sign == SimdSign::NotApplicable)
            sign = GetSimdSign(toType);
        // Possibly expand into multiple instructions.
        ins = MSimdConvert::AddLegalized(alloc(), current, arg, mirType, sign);
    }
    return boxSimd(callInfo, ins, templateObj);
}
IonBuilder::InliningResult
IonBuilder::inlineSimdSelect(CallInfo& callInfo, JSNative native, SimdType type)
{
    InlineTypedObject* templateObj = nullptr;
    if (!canInlineSimd(callInfo, native, 3, &templateObj))
        return InliningStatus_NotInlined;
    // select(mask, onTrue, onFalse): the mask is the boolean vector type with
    // the same geometry as |type|.
    MDefinition* condition = unboxSimd(callInfo.getArg(0), GetBooleanSimdType(type));
    MDefinition* onTrue = unboxSimd(callInfo.getArg(1), type);
    MDefinition* onFalse = unboxSimd(callInfo.getArg(2), type);
    MSimdSelect* select = MSimdSelect::New(alloc(), condition, onTrue, onFalse);
    return boxSimd(callInfo, select, templateObj);
}
// Inline SIMD swizzle (numVectors == 1) and shuffle (numVectors == 2) calls.
// The trailing |numLanes| arguments are lane selectors; they are not required
// to be constants here — MSimdGeneralShuffle::foldsTo() specializes to
// MSimdSwizzle/MSimdShuffle when they are.
IonBuilder::InliningResult
IonBuilder::inlineSimdShuffle(CallInfo& callInfo, JSNative native, SimdType type,
                              unsigned numVectors)
{
    unsigned numLanes = GetSimdLanes(type);
    InlineTypedObject* templateObj = nullptr;
    if (!canInlineSimd(callInfo, native, numVectors + numLanes, &templateObj))
        return InliningStatus_NotInlined;
    MIRType mirType = SimdTypeToMIRType(type);
    MSimdGeneralShuffle* ins = MSimdGeneralShuffle::New(alloc(), numVectors, numLanes, mirType);
    if (!ins->init(alloc()))
        return abort(AbortReason::Alloc);
    for (unsigned i = 0; i < numVectors; i++)
        ins->setVector(i, unboxSimd(callInfo.getArg(i), type));
    for (size_t i = 0; i < numLanes; i++)
        ins->setLane(i, callInfo.getArg(numVectors + i));
    return boxSimd(callInfo, ins, templateObj);
}
IonBuilder::InliningResult
IonBuilder::inlineSimdAnyAllTrue(CallInfo& callInfo, bool IsAllTrue, JSNative native,
                                 SimdType type)
{
    // These reductions produce a scalar boolean, not a SIMD box, so the
    // template-object lookup done by canInlineSimd() is not wanted here.
    if (callInfo.argc() != 1 || callInfo.constructing()) {
        trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
        return InliningStatus_NotInlined;
    }
    MDefinition* vec = unboxSimd(callInfo.getArg(0), type);
    MUnaryInstruction* test;
    if (!IsAllTrue)
        test = MSimdAnyTrue::New(alloc(), vec, MIRType::Boolean);
    else
        test = MSimdAllTrue::New(alloc(), vec, MIRType::Boolean);
    current->add(test);
    current->push(test);
    callInfo.setImplicitlyUsedUnchecked();
    return InliningStatus_Inlined;
}
// Get the typed array element type corresponding to the lanes in a SIMD vector
// type. This only applies to SIMD types that can be loaded and stored to a
// typed array; signed and unsigned integer vectors share a representation.
static Scalar::Type
SimdTypeToArrayElementType(SimdType type)
{
    switch (type) {
      case SimdType::Int8x16:
      case SimdType::Uint8x16:  return Scalar::Int8x16;
      case SimdType::Int16x8:
      case SimdType::Uint16x8:  return Scalar::Int16x8;
      case SimdType::Int32x4:
      case SimdType::Uint32x4:  return Scalar::Int32x4;
      case SimdType::Float32x4: return Scalar::Float32x4;
      default:                  MOZ_CRASH("unexpected simd type");
    }
}
// Shared setup for SIMD load/store inlining. Checks that (getArg(0),
// getArg(1)) is a typed-array element access, converts the index to Int32,
// and emits bounds checks covering the whole |simdType|-sized access, which
// may span several |*arrayType| elements. On success, *elements, *index and
// *arrayType are ready for a subsequent MLoadUnboxedScalar /
// MStoreUnboxedScalar.
bool
IonBuilder::prepareForSimdLoadStore(CallInfo& callInfo, Scalar::Type simdType,
                                    MInstruction** elements, MDefinition** index,
                                    Scalar::Type* arrayType)
{
    MDefinition* array = callInfo.getArg(0);
    *index = callInfo.getArg(1);
    if (!ElementAccessIsTypedArray(constraints(), array, *index, arrayType))
        return false;
    MInstruction* indexAsInt32 = MToNumberInt32::New(alloc(), *index);
    current->add(indexAsInt32);
    *index = indexAsInt32;
    MDefinition* indexLoadEnd = *index;
    MOZ_ASSERT(Scalar::byteSize(simdType) % Scalar::byteSize(*arrayType) == 0);
    // Number of array elements covered by one SIMD access.
    int32_t byteLoadSize = Scalar::byteSize(simdType) / Scalar::byteSize(*arrayType);
    if (byteLoadSize > 1) {
        // Add the number of supplementary needed slots. Overflows are fine
        // because the bounds check code uses an unsigned comparison.
        MAdd* addedIndex = MAdd::New(alloc(), *index, constant(Int32Value(byteLoadSize - 1)));
        addedIndex->setInt32Specialization();
        current->add(addedIndex);
        indexLoadEnd = addedIndex;
    }
    MInstruction* length;
    addTypedArrayLengthAndData(array, SkipBoundsCheck, index, &length, elements);
    // If the index+size addition overflows, then indexLoadEnd might be
    // in bounds while the actual index isn't, so we need two bounds checks
    // here.
    if (byteLoadSize > 1) {
        indexLoadEnd = addBoundsCheck(indexLoadEnd, length);
        auto* sub = MSub::New(alloc(), indexLoadEnd, constant(Int32Value(byteLoadSize - 1)));
        sub->setInt32Specialization();
        current->add(sub);
        *index = sub;
    }
    *index = addBoundsCheck(*index, length);
    return true;
}
IonBuilder::InliningResult
IonBuilder::inlineSimdLoad(CallInfo& callInfo, JSNative native, SimdType type, unsigned numElems)
{
    InlineTypedObject* templateObj = nullptr;
    if (!canInlineSimd(callInfo, native, 2, &templateObj))
        return InliningStatus_NotInlined;
    // Work out the typed-array element layout and emit index conversion plus
    // bounds checks for the whole access.
    Scalar::Type elemType = SimdTypeToArrayElementType(type);
    MInstruction* elements = nullptr;
    MDefinition* index = nullptr;
    Scalar::Type arrayType;
    if (!prepareForSimdLoadStore(callInfo, elemType, &elements, &index, &arrayType))
        return InliningStatus_NotInlined;
    // Emit a scalar load marked as a SIMD read of |numElems| lanes, then box
    // the resulting vector.
    MLoadUnboxedScalar* vecLoad = MLoadUnboxedScalar::New(alloc(), elements, index, arrayType);
    vecLoad->setResultType(SimdTypeToMIRType(type));
    vecLoad->setSimdRead(elemType, numElems);
    return boxSimd(callInfo, vecLoad, templateObj);
}
// Inline SIMD.<type>.store{,1,2,3}(tarray, index, vector): store |numElems|
// lanes of |vector| into the typed array and return the boxed input value.
IonBuilder::InliningResult
IonBuilder::inlineSimdStore(CallInfo& callInfo, JSNative native, SimdType type, unsigned numElems)
{
    InlineTypedObject* templateObj = nullptr;
    if (!canInlineSimd(callInfo, native, 3, &templateObj))
        return InliningStatus_NotInlined;
    Scalar::Type elemType = SimdTypeToArrayElementType(type);
    MDefinition* index = nullptr;
    MInstruction* elements = nullptr;
    Scalar::Type arrayType;
    if (!prepareForSimdLoadStore(callInfo, elemType, &elements, &index, &arrayType))
        return InliningStatus_NotInlined;
    MDefinition* valueToWrite = unboxSimd(callInfo.getArg(2), type);
    MStoreUnboxedScalar* store = MStoreUnboxedScalar::New(alloc(), elements, index,
                                                          valueToWrite, arrayType,
                                                          MStoreUnboxedScalar::TruncateInput);
    store->setSimdWrite(elemType, numElems);
    current->add(store);
    // Produce the original boxed value as our return value.
    // This is unlikely to be used, so don't bother reboxing valueToWrite.
    current->push(callInfo.getArg(2));
    callInfo.setImplicitlyUsedUnchecked();
    // The store is effectful: attach a resume point so a bailout after it
    // does not re-execute the store.
    MOZ_TRY(resumeAfter(store));
    return InliningStatus_Inlined;
}
// Note that SIMD.cpp provides its own JSJitInfo objects for SIMD.foo.* functions.
// The Simd* objects defined here represent SIMD.foo() constructor calls.
// They are encoded with .nativeOp = 0. That is the sub-opcode within the SIMD type.
static_assert(uint16_t(SimdOperation::Constructor) == 0, "Constructor opcode must be 0");
// Expand one JSJitInfo definition (JitInfo_<name>) per entry of
// INLINABLE_NATIVE_LIST; nativeOp 0 marks the constructor call.
#define ADD_NATIVE(native) const JSJitInfo JitInfo_##native { \
    { nullptr }, { uint16_t(InlinableNative::native) }, { 0 }, JSJitInfo::InlinableNative };
INLINABLE_NATIVE_LIST(ADD_NATIVE)

Просмотреть файл

@ -17,6 +17,7 @@
#include "jslibmath.h"
#include "builtin/RegExp.h"
#include "builtin/SIMD.h"
#include "builtin/String.h"
#include "jit/AtomicOperations.h"
#include "jit/BaselineInspector.h"
@ -1311,7 +1312,531 @@ MWasmFloatConstant::congruentTo(const MDefinition* ins) const
u.bits_ == ins->toWasmFloatConstant()->u.bits_;
}
// Constant-fold an MSimdValueX4 node:
//  - all four operands constant  -> a single MSimdConstant;
//  - all four operands identical -> an MSimdSplat of that operand.
// Otherwise the node is returned unchanged.
MDefinition*
MSimdValueX4::foldsTo(TempAllocator& alloc)
{
#ifdef DEBUG
    MIRType laneType = SimdTypeToLaneArgumentType(type());
#endif
    bool allConstants = true;
    bool allSame = true;
    for (size_t i = 0; i < 4; ++i) {
        MDefinition* op = getOperand(i);
        MOZ_ASSERT(op->type() == laneType);
        if (!op->isConstant())
            allConstants = false;
        if (i > 0 && op != getOperand(i - 1))
            allSame = false;
    }
    if (!allConstants && !allSame)
        return this;
    if (allConstants) {
        SimdConstant cst;
        switch (type()) {
          case MIRType::Bool32x4: {
            int32_t a[4];
            for (size_t i = 0; i < 4; ++i)
                a[i] = getOperand(i)->toConstant()->valueToBooleanInfallible() ? -1 : 0;
            cst = SimdConstant::CreateX4(a);
            break;
          }
          case MIRType::Int32x4: {
            int32_t a[4];
            for (size_t i = 0; i < 4; ++i)
                a[i] = getOperand(i)->toConstant()->toInt32();
            cst = SimdConstant::CreateX4(a);
            break;
          }
          case MIRType::Float32x4: {
            float a[4];
            // Note: the double constant is narrowed to float here.
            for (size_t i = 0; i < 4; ++i)
                a[i] = getOperand(i)->toConstant()->numberToDouble();
            cst = SimdConstant::CreateX4(a);
            break;
          }
          default: MOZ_CRASH("unexpected type in MSimdValueX4::foldsTo");
        }
        return MSimdConstant::New(alloc, cst, type());
    }
    MOZ_ASSERT(allSame);
    return MSimdSplat::New(alloc, getOperand(0), type());
}
// Constant-fold splat(constant) into a single MSimdConstant covering every
// lane. Non-constant operands leave the node unchanged.
MDefinition*
MSimdSplat::foldsTo(TempAllocator& alloc)
{
#ifdef DEBUG
    MIRType laneType = SimdTypeToLaneArgumentType(type());
#endif
    MDefinition* op = getOperand(0);
    if (!op->isConstant())
        return this;
    MOZ_ASSERT(op->type() == laneType);
    SimdConstant cst;
    switch (type()) {
      case MIRType::Bool8x16: {
        // Boolean lanes are all-ones (-1) for true, 0 for false.
        int8_t v = op->toConstant()->valueToBooleanInfallible() ? -1 : 0;
        cst = SimdConstant::SplatX16(v);
        break;
      }
      case MIRType::Bool16x8: {
        int16_t v = op->toConstant()->valueToBooleanInfallible() ? -1 : 0;
        cst = SimdConstant::SplatX8(v);
        break;
      }
      case MIRType::Bool32x4: {
        int32_t v = op->toConstant()->valueToBooleanInfallible() ? -1 : 0;
        cst = SimdConstant::SplatX4(v);
        break;
      }
      case MIRType::Int8x16: {
        int32_t v = op->toConstant()->toInt32();
        cst = SimdConstant::SplatX16(v);
        break;
      }
      case MIRType::Int16x8: {
        int32_t v = op->toConstant()->toInt32();
        cst = SimdConstant::SplatX8(v);
        break;
      }
      case MIRType::Int32x4: {
        int32_t v = op->toConstant()->toInt32();
        cst = SimdConstant::SplatX4(v);
        break;
      }
      case MIRType::Float32x4: {
        // Note: the double constant is narrowed to float here.
        float v = op->toConstant()->numberToDouble();
        cst = SimdConstant::SplatX4(v);
        break;
      }
      default: MOZ_CRASH("unexpected type in MSimdSplat::foldsTo");
    }
    return MSimdConstant::New(alloc, cst, type());
}
// unbox(box(x)) == x whenever the box carries the SIMD type this unbox
// expects; otherwise leave the node alone so the runtime check still fires.
MDefinition*
MSimdUnbox::foldsTo(TempAllocator& alloc)
{
    MDefinition* in = input();
    if (!in->isSimdBox())
        return this;
    MSimdBox* box = in->toSimdBox();
    if (box->simdType() != simdType())
        return this;
    MDefinition* payload = box->input();
    MOZ_ASSERT(payload->type() == type());
    return payload;
}
// A swizzle that keeps every lane in place is the identity.
MDefinition*
MSimdSwizzle::foldsTo(TempAllocator& alloc)
{
    return lanesMatch(0, 1, 2, 3) ? input() : this;
}
// When every lane selector is a constant Int32 within range, fold the general
// shuffle into the specialized MSimdSwizzle (one input vector) or MSimdShuffle
// (two input vectors). Any non-constant or out-of-range selector keeps the
// general form.
MDefinition*
MSimdGeneralShuffle::foldsTo(TempAllocator& alloc)
{
    FixedList<uint8_t> lanes;
    if (!lanes.init(alloc, numLanes()))
        return this;
    for (size_t i = 0; i < numLanes(); i++) {
        if (!lane(i)->isConstant() || lane(i)->type() != MIRType::Int32)
            return this;
        int32_t temp = lane(i)->toConstant()->toInt32();
        // Selectors index the concatenation of all input vectors.
        if (temp < 0 || unsigned(temp) >= numLanes() * numVectors())
            return this;
        lanes[i] = uint8_t(temp);
    }
    if (numVectors() == 1)
        return MSimdSwizzle::New(alloc, vector(0), lanes.data());
    MOZ_ASSERT(numVectors() == 2);
    return MSimdShuffle::New(alloc, vector(0), vector(1), lanes.data());
}
// Add a legal MSimdConvert from obj->type() to |toType| to |addTo| and return
// the instruction producing the result. Unsigned Uint32x4 <-> Float32x4
// conversions have no direct SSE encoding; on targets without native support
// the Uint32x4 -> Float32x4 direction is expanded inline here.
MInstruction*
MSimdConvert::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* obj,
                           MIRType toType, SimdSign sign, wasm::BytecodeOffset bytecodeOffset)
{
    MIRType fromType = obj->type();
    if (SupportsUint32x4FloatConversions || sign != SimdSign::Unsigned) {
        MInstruction* ins = New(alloc, obj, toType, sign, bytecodeOffset);
        addTo->add(ins);
        return ins;
    }
    // This architecture can't do Uint32x4 <-> Float32x4 conversions (Hi SSE!)
    MOZ_ASSERT(sign == SimdSign::Unsigned);
    if (fromType == MIRType::Int32x4 && toType == MIRType::Float32x4) {
        // Converting Uint32x4 -> Float32x4. This algorithm is from LLVM.
        //
        // Split the input number into high and low parts:
        //
        // uint32_t hi = x >> 16;
        // uint32_t lo = x & 0xffff;
        //
        // Insert these parts as the low mantissa bits in a float32 number with
        // the corresponding exponent:
        //
        // float fhi = (bits-as-float)(hi | 0x53000000); // 0x1.0p39f + hi*2^16
        // float flo = (bits-as-float)(lo | 0x4b000000); // 0x1.0p23f + lo
        //
        // Subtract the bias from the hi part:
        //
        // fhi -= (0x1.0p39 + 0x1.0p23) // hi*2^16 - 0x1.0p23
        //
        // And finally combine:
        //
        // result = flo + fhi // lo + hi*2^16.
        // Compute hi = obj >> 16 (lane-wise unsigned shift).
        MInstruction* c16 = MConstant::New(alloc, Int32Value(16));
        addTo->add(c16);
        MInstruction* hi = MSimdShift::AddLegalized(alloc, addTo, obj, c16, MSimdShift::ursh);
        // Compute lo = obj & 0xffff (lane-wise).
        MInstruction* m16 =
            MSimdConstant::New(alloc, SimdConstant::SplatX4(0xffff), MIRType::Int32x4);
        addTo->add(m16);
        MInstruction* lo = MSimdBinaryBitwise::New(alloc, obj, m16, MSimdBinaryBitwise::and_);
        addTo->add(lo);
        // Mix in the exponents.
        MInstruction* exphi =
            MSimdConstant::New(alloc, SimdConstant::SplatX4(0x53000000), MIRType::Int32x4);
        addTo->add(exphi);
        MInstruction* mhi = MSimdBinaryBitwise::New(alloc, hi, exphi, MSimdBinaryBitwise::or_);
        addTo->add(mhi);
        MInstruction* explo =
            MSimdConstant::New(alloc, SimdConstant::SplatX4(0x4b000000), MIRType::Int32x4);
        addTo->add(explo);
        MInstruction* mlo = MSimdBinaryBitwise::New(alloc, lo, explo, MSimdBinaryBitwise::or_);
        addTo->add(mlo);
        // Bit-cast both to Float32x4.
        MInstruction* fhi = MSimdReinterpretCast::New(alloc, mhi, MIRType::Float32x4);
        addTo->add(fhi);
        MInstruction* flo = MSimdReinterpretCast::New(alloc, mlo, MIRType::Float32x4);
        addTo->add(flo);
        // Subtract out the bias: 0x1.0p39f + 0x1.0p23f.
        // MSVC doesn't support the hexadecimal float syntax.
        const float BiasValue = 549755813888.f + 8388608.f;
        MInstruction* bias =
            MSimdConstant::New(alloc, SimdConstant::SplatX4(BiasValue), MIRType::Float32x4);
        addTo->add(bias);
        MInstruction* fhi_debiased =
            MSimdBinaryArith::AddLegalized(alloc, addTo, fhi, bias, MSimdBinaryArith::Op_sub);
        // Compute the final result.
        return MSimdBinaryArith::AddLegalized(alloc, addTo, fhi_debiased, flo,
                                              MSimdBinaryArith::Op_add);
    }
    if (fromType == MIRType::Float32x4 && toType == MIRType::Int32x4) {
        // The Float32x4 -> Uint32x4 conversion can throw if the input is out of
        // range. This is handled by the LFloat32x4ToUint32x4 expansion.
        MInstruction* ins = New(alloc, obj, toType, sign, bytecodeOffset);
        addTo->add(ins);
        return ins;
    }
    MOZ_CRASH("Unhandled SIMD type conversion");
}
// Add a legal MSimdBinaryComp to |addTo| and return the result-producing
// instruction. Unsigned *order* comparisons that the target cannot do
// natively are legalized by biasing both operands by INT_MIN (per lane) and
// emitting the equivalent signed comparison. Unsigned *equality* tests are
// simply relabeled as signed, since equality ignores sign.
MInstruction*
MSimdBinaryComp::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
                              MDefinition* right, Operation op, SimdSign sign)
{
    MOZ_ASSERT(left->type() == right->type());
    MIRType opType = left->type();
    MOZ_ASSERT(IsSimdType(opType));
    bool IsEquality = op == equal || op == notEqual;
    // Check if this is an unsupported unsigned compare that needs to be biased.
    // If so, put the bias vector in `bias`. At most one arm can match since
    // opType is a single MIRType, so this is a proper else-if chain.
    if (sign == SimdSign::Unsigned && !IsEquality) {
        MInstruction* bias = nullptr;
        // This is an order comparison of unsigned vectors which are not supported
        // on this target. Simply offset |left| and |right| by INT_MIN, then do a
        // signed comparison.
        if (!SupportsUint32x4Compares && opType == MIRType::Int32x4)
            bias = MSimdConstant::New(alloc, SimdConstant::SplatX4(int32_t(0x80000000)), opType);
        else if (!SupportsUint16x8Compares && opType == MIRType::Int16x8)
            bias = MSimdConstant::New(alloc, SimdConstant::SplatX8(int16_t(0x8000)), opType);
        else if (!SupportsUint8x16Compares && opType == MIRType::Int8x16)
            bias = MSimdConstant::New(alloc, SimdConstant::SplatX16(int8_t(0x80)), opType);
        if (bias) {
            addTo->add(bias);
            // Add the bias.
            MInstruction* bleft =
                MSimdBinaryArith::AddLegalized(alloc, addTo, left, bias, MSimdBinaryArith::Op_add);
            MInstruction* bright =
                MSimdBinaryArith::AddLegalized(alloc, addTo, right, bias, MSimdBinaryArith::Op_add);
            // Do the equivalent signed comparison.
            MInstruction* result =
                MSimdBinaryComp::New(alloc, bleft, bright, op, SimdSign::Signed);
            addTo->add(result);
            return result;
        }
    }
    if (sign == SimdSign::Unsigned &&
        ((!SupportsUint32x4Compares && opType == MIRType::Int32x4) ||
         (!SupportsUint16x8Compares && opType == MIRType::Int16x8) ||
         (!SupportsUint8x16Compares && opType == MIRType::Int8x16))) {
        // The sign doesn't matter for equality tests. Flip it to make the
        // backend assertions happy.
        MOZ_ASSERT(IsEquality);
        sign = SimdSign::Signed;
    }
    // This is a legal operation already. Just create the instruction requested.
    MInstruction* result = MSimdBinaryComp::New(alloc, left, right, op, sign);
    addTo->add(result);
    return result;
}
// Add a legal MSimdBinaryArith to |addTo| and return the result-producing
// instruction. The only operation needing expansion is Int8x16 multiply,
// which SSE lacks; it is synthesized from two Int16x8 multiplies on the even
// and odd lanes.
MInstruction*
MSimdBinaryArith::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
                               MDefinition* right, Operation op)
{
    MOZ_ASSERT(left->type() == right->type());
    MIRType opType = left->type();
    MOZ_ASSERT(IsSimdType(opType));
    // SSE does not have 8x16 multiply instructions.
    if (opType == MIRType::Int8x16 && op == Op_mul) {
        // Express the multiply in terms of Int16x8 multiplies by handling the
        // even and odd lanes separately.
        MInstruction* wideL = MSimdReinterpretCast::New(alloc, left, MIRType::Int16x8);
        addTo->add(wideL);
        MInstruction* wideR = MSimdReinterpretCast::New(alloc, right, MIRType::Int16x8);
        addTo->add(wideR);
        // wideL = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx
        // wideR = bbaa bbaa bbaa bbaa bbaa bbaa bbaa bbaa
        // Shift the odd lanes down to the low bits of the 16x8 vectors.
        MInstruction* eight = MConstant::New(alloc, Int32Value(8));
        addTo->add(eight);
        MInstruction* evenL = wideL;
        MInstruction* evenR = wideR;
        MInstruction* oddL =
            MSimdShift::AddLegalized(alloc, addTo, wideL, eight, MSimdShift::ursh);
        MInstruction* oddR =
            MSimdShift::AddLegalized(alloc, addTo, wideR, eight, MSimdShift::ursh);
        // evenL = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx
        // evenR = bbaa bbaa bbaa bbaa bbaa bbaa bbaa bbaa
        // oddL  = 00yy 00yy 00yy 00yy 00yy 00yy 00yy 00yy
        // oddR  = 00bb 00bb 00bb 00bb 00bb 00bb 00bb 00bb
        // Now do two 16x8 multiplications. We can use the low bits of each.
        MInstruction* even = MSimdBinaryArith::AddLegalized(alloc, addTo, evenL, evenR, Op_mul);
        MInstruction* odd = MSimdBinaryArith::AddLegalized(alloc, addTo, oddL, oddR, Op_mul);
        // even = ~~PP ~~PP ~~PP ~~PP ~~PP ~~PP ~~PP ~~PP
        // odd  = ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ
        MInstruction* mask =
            MSimdConstant::New(alloc, SimdConstant::SplatX8(int16_t(0x00ff)), MIRType::Int16x8);
        addTo->add(mask);
        even = MSimdBinaryBitwise::New(alloc, even, mask, MSimdBinaryBitwise::and_);
        addTo->add(even);
        odd = MSimdShift::AddLegalized(alloc, addTo, odd, eight, MSimdShift::lsh);
        // even = 00PP 00PP 00PP 00PP 00PP 00PP 00PP 00PP
        // odd  = QQ00 QQ00 QQ00 QQ00 QQ00 QQ00 QQ00 QQ00
        // Combine:
        MInstruction* result = MSimdBinaryBitwise::New(alloc, even, odd, MSimdBinaryBitwise::or_);
        addTo->add(result);
        result = MSimdReinterpretCast::New(alloc, result, opType);
        addTo->add(result);
        return result;
    }
    // This is a legal operation already. Just create the instruction requested.
    MInstruction* result = MSimdBinaryArith::New(alloc, left, right, op);
    addTo->add(result);
    return result;
}
// Create a SIMD shift of |left| by |right| and append it to |addTo|,
// legalizing operations the target cannot express directly. Int8x16 shifts
// (which SSE lacks) are synthesized from Int16x8 shifts over the even and
// odd lanes; all other integer SIMD types emit a plain MSimdShift.
// Returns the instruction that produces the final (correctly typed) value.
MInstruction*
MSimdShift::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
                         MDefinition* right, Operation op)
{
    MIRType opType = left->type();
    MOZ_ASSERT(IsIntegerSimdType(opType));

    // SSE does not provide 8x16 shift instructions.
    if (opType == MIRType::Int8x16) {
        // Express the shift in terms of Int16x8 shifts by splitting into even
        // and odd lanes, place 8-bit lanes into the high bits of Int16x8
        // vectors `even` and `odd`. Shift, mask, combine.
        //
        //   wide = Int16x8.fromInt8x16Bits(left);
        //   shiftBy = right & 7
        //   mask = Int16x8.splat(0xff00);
        //
        MInstruction* wide = MSimdReinterpretCast::New(alloc, left, MIRType::Int16x8);
        addTo->add(wide);

        // wide = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx

        // Clamp the shift amount to [0, 7] so the 16x8 shifts below cannot
        // shift bits across the 8-bit lane boundary.
        MInstruction* shiftMask = MConstant::New(alloc, Int32Value(7));
        addTo->add(shiftMask);
        MBinaryBitwiseInstruction* shiftBy = MBitAnd::New(alloc, right, shiftMask);
        shiftBy->setInt32Specialization();
        addTo->add(shiftBy);

        // Move the even 8x16 lanes into the high bits of the 16x8 lanes.
        MInstruction* eight = MConstant::New(alloc, Int32Value(8));
        addTo->add(eight);
        MInstruction* even = MSimdShift::AddLegalized(alloc, addTo, wide, eight, lsh);

        // Leave the odd lanes in place.
        MInstruction* odd = wide;

        // even = xx00 xx00 xx00 xx00 xx00 xx00 xx00 xx00
        // odd  = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx

        MInstruction* mask =
            MSimdConstant::New(alloc, SimdConstant::SplatX8(int16_t(0xff00)), MIRType::Int16x8);
        addTo->add(mask);

        // Left-shift: Clear the low bits in `odd` before shifting.
        if (op == lsh) {
            odd = MSimdBinaryBitwise::New(alloc, odd, mask, MSimdBinaryBitwise::and_);
            addTo->add(odd);
            // odd = yy00 yy00 yy00 yy00 yy00 yy00 yy00 yy00
        }

        // Do the real shift twice: once for the even lanes, once for the odd
        // lanes. This is a recursive call, but with a different type.
        even = MSimdShift::AddLegalized(alloc, addTo, even, shiftBy, op);
        odd = MSimdShift::AddLegalized(alloc, addTo, odd, shiftBy, op);

        // even = XX~~ XX~~ XX~~ XX~~ XX~~ XX~~ XX~~ XX~~
        // odd  = YY~~ YY~~ YY~~ YY~~ YY~~ YY~~ YY~~ YY~~

        // Right-shift: Clear the low bits in `odd` after shifting.
        if (op != lsh) {
            odd = MSimdBinaryBitwise::New(alloc, odd, mask, MSimdBinaryBitwise::and_);
            addTo->add(odd);
            // odd = YY00 YY00 YY00 YY00 YY00 YY00 YY00 YY00
        }

        // Move the even lanes back to their original place.
        even = MSimdShift::AddLegalized(alloc, addTo, even, eight, ursh);

        // Now, `odd` contains the odd lanes properly shifted, and `even`
        // contains the even lanes properly shifted:
        //
        // even = 00XX 00XX 00XX 00XX 00XX 00XX 00XX 00XX
        // odd  = YY00 YY00 YY00 YY00 YY00 YY00 YY00 YY00
        //
        // Combine:
        MInstruction* result = MSimdBinaryBitwise::New(alloc, even, odd, MSimdBinaryBitwise::or_);
        addTo->add(result);
        // Cast back to the requested 8x16 type.
        result = MSimdReinterpretCast::New(alloc, result, opType);
        addTo->add(result);
        return result;
    }

    // This is a legal operation already. Just create the instruction requested.
    MInstruction* result = MSimdShift::New(alloc, left, right, op);
    addTo->add(result);
    return result;
}
// Shared JitSpew helper: print the node's generic opcode text, followed by
// the human-readable name of its operation enum, e.g. "simdbinaryarith (add)".
template <typename T>
static void
PrintOpcodeOperation(T* mir, GenericPrinter& out)
{
    mir->MDefinition::printOpcode(out);
    const char* opName = T::OperationName(mir->operation());
    out.printf(" (%s)", opName);
}
#ifdef JS_JITSPEW
// JitSpew pretty-printers. Nodes that carry an operation enum delegate to
// PrintOpcodeOperation; the remaining ones append their node-specific data.
void
MSimdBinaryArith::printOpcode(GenericPrinter& out) const
{
    PrintOpcodeOperation(this, out);
}

void
MSimdBinarySaturating::printOpcode(GenericPrinter& out) const
{
    PrintOpcodeOperation(this, out);
}

void
MSimdBinaryBitwise::printOpcode(GenericPrinter& out) const
{
    PrintOpcodeOperation(this, out);
}

void
MSimdUnaryArith::printOpcode(GenericPrinter& out) const
{
    PrintOpcodeOperation(this, out);
}

void
MSimdBinaryComp::printOpcode(GenericPrinter& out) const
{
    PrintOpcodeOperation(this, out);
}

void
MSimdShift::printOpcode(GenericPrinter& out) const
{
    PrintOpcodeOperation(this, out);
}

void
MSimdInsertElement::printOpcode(GenericPrinter& out) const
{
    // Append the target lane index baked into this instruction.
    MDefinition::printOpcode(out);
    out.printf(" (lane %u)", lane());
}

void
MSimdBox::printOpcode(GenericPrinter& out) const
{
    // Append the boxed SIMD type, and whether the box allocates tenured.
    MDefinition::printOpcode(out);
    out.printf(" (%s%s)", SimdTypeToString(simdType()),
               initialHeap() == gc::TenuredHeap ? ", tenured" : "");
}

void
MSimdUnbox::printOpcode(GenericPrinter& out) const
{
    // Append the SIMD type this unbox expects.
    MDefinition::printOpcode(out);
    out.printf(" (%s)", SimdTypeToString(simdType()));
}
void
MControlInstruction::printOpcode(GenericPrinter& out) const
{

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -158,6 +158,10 @@ class MIRGenerator
return needsOverrecursedCheck_;
}
// Traverses the graph to find if there's any SIMD instruction. Costful but
// the value is cached, so don't worry about calling it several times.
bool usesSimd();
bool modifiesFrameArguments() const {
return modifiesFrameArguments_;
}
@ -187,6 +191,8 @@ class MIRGenerator
uint32_t wasmMaxStackArgBytes_;
bool needsOverrecursedCheck_;
bool needsStaticStackAlignment_;
bool usesSimd_;
bool cachedUsesSimd_;
// Keep track of whether frame arguments are modified during execution.
// RegAlloc needs to know this as spilling values back to their register

Просмотреть файл

@ -32,6 +32,8 @@ MIRGenerator::MIRGenerator(CompileRealm* realm, const JitCompileOptions& options
wasmMaxStackArgBytes_(0),
needsOverrecursedCheck_(false),
needsStaticStackAlignment_(false),
usesSimd_(false),
cachedUsesSimd_(false),
modifiesFrameArguments_(false),
instrumentedProfiling_(false),
instrumentedProfilingIsCached_(false),
@ -42,6 +44,37 @@ MIRGenerator::MIRGenerator(CompileRealm* realm, const JitCompileOptions& options
gs_(alloc)
{ }
// Return whether the MIR graph contains any instruction of SIMD type.
// The first call walks the whole graph (costly); the answer is then cached
// in usesSimd_ / cachedUsesSimd_, so repeated calls are cheap.
bool
MIRGenerator::usesSimd()
{
    if (cachedUsesSimd_)
        return usesSimd_;

    cachedUsesSimd_ = true;
    for (ReversePostorderIterator block = graph_->rpoBegin(),
                                  end = graph_->rpoEnd();
         block != end;
         block++)
    {
        // It's fine to use MInstructionIterator here because we don't have to
        // worry about Phis, since any reachable phi (or phi cycle) will have at
        // least one instruction as an input.
        for (MInstructionIterator inst = block->begin(); inst != block->end(); inst++) {
            // Instructions that have SIMD inputs but not a SIMD type are fine
            // to ignore, as their inputs are also reached at some point. By
            // induction, at least one instruction with a SIMD type is reached
            // at some point.
            if (IsSimdType(inst->type())) {
                MOZ_ASSERT(SupportsSimd);
                usesSimd_ = true;
                return true;
            }
        }
    }
    usesSimd_ = false;
    return false;
}
mozilla::GenericErrorResult<AbortReason>
MIRGenerator::abort(AbortReason r)
{

Просмотреть файл

@ -340,7 +340,8 @@ template void MacroAssembler::guardTypeSet(const TypedOrValueRegister& value, co
template<typename S, typename T>
static void
StoreToTypedFloatArray(MacroAssembler& masm, int arrayType, const S& value, const T& dest)
StoreToTypedFloatArray(MacroAssembler& masm, int arrayType, const S& value, const T& dest,
unsigned numElems)
{
switch (arrayType) {
case Scalar::Float32:
@ -349,6 +350,48 @@ StoreToTypedFloatArray(MacroAssembler& masm, int arrayType, const S& value, cons
case Scalar::Float64:
masm.storeDouble(value, dest);
break;
case Scalar::Float32x4:
switch (numElems) {
case 1:
masm.storeFloat32(value, dest);
break;
case 2:
masm.storeDouble(value, dest);
break;
case 3:
masm.storeFloat32x3(value, dest);
break;
case 4:
masm.storeUnalignedSimd128Float(value, dest);
break;
default: MOZ_CRASH("unexpected number of elements in simd write");
}
break;
case Scalar::Int32x4:
switch (numElems) {
case 1:
masm.storeInt32x1(value, dest);
break;
case 2:
masm.storeInt32x2(value, dest);
break;
case 3:
masm.storeInt32x3(value, dest);
break;
case 4:
masm.storeUnalignedSimd128Int(value, dest);
break;
default: MOZ_CRASH("unexpected number of elements in simd write");
}
break;
case Scalar::Int8x16:
MOZ_ASSERT(numElems == 16, "unexpected partial store");
masm.storeUnalignedSimd128Int(value, dest);
break;
case Scalar::Int16x8:
MOZ_ASSERT(numElems == 8, "unexpected partial store");
masm.storeUnalignedSimd128Int(value, dest);
break;
default:
MOZ_CRASH("Invalid typed array type");
}
@ -356,21 +399,21 @@ StoreToTypedFloatArray(MacroAssembler& masm, int arrayType, const S& value, cons
// Public wrappers around the file-static StoreToTypedFloatArray helper.
// |numElems| is only consulted for SIMD element types, where it selects how
// many lanes of |value| to store (partial stores); scalar types ignore it.
void
MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
                                       const BaseIndex& dest, unsigned numElems)
{
    StoreToTypedFloatArray(*this, arrayType, value, dest, numElems);
}

void
MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
                                       const Address& dest, unsigned numElems)
{
    StoreToTypedFloatArray(*this, arrayType, value, dest, numElems);
}
template<typename T>
void
MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegister dest, Register temp,
Label* fail, bool canonicalizeDoubles)
Label* fail, bool canonicalizeDoubles, unsigned numElems)
{
switch (arrayType) {
case Scalar::Int8:
@ -411,17 +454,59 @@ MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegi
if (canonicalizeDoubles)
canonicalizeDouble(dest.fpu());
break;
case Scalar::Int32x4:
switch (numElems) {
case 1:
loadInt32x1(src, dest.fpu());
break;
case 2:
loadInt32x2(src, dest.fpu());
break;
case 3:
loadInt32x3(src, dest.fpu());
break;
case 4:
loadUnalignedSimd128Int(src, dest.fpu());
break;
default: MOZ_CRASH("unexpected number of elements in SIMD load");
}
break;
case Scalar::Float32x4:
switch (numElems) {
case 1:
loadFloat32(src, dest.fpu());
break;
case 2:
loadDouble(src, dest.fpu());
break;
case 3:
loadFloat32x3(src, dest.fpu());
break;
case 4:
loadUnalignedSimd128Float(src, dest.fpu());
break;
default: MOZ_CRASH("unexpected number of elements in SIMD load");
}
break;
case Scalar::Int8x16:
MOZ_ASSERT(numElems == 16, "unexpected partial load");
loadUnalignedSimd128Int(src, dest.fpu());
break;
case Scalar::Int16x8:
MOZ_ASSERT(numElems == 8, "unexpected partial load");
loadUnalignedSimd128Int(src, dest.fpu());
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const Address& src,
AnyRegister dest, Register temp, Label* fail,
bool canonicalizeDoubles);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex& src,
AnyRegister dest, Register temp, Label* fail,
bool canonicalizeDoubles);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const Address& src, AnyRegister dest,
Register temp, Label* fail, bool canonicalizeDoubles,
unsigned numElems);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex& src, AnyRegister dest,
Register temp, Label* fail, bool canonicalizeDoubles,
unsigned numElems);
template<typename T>
void
@ -3310,6 +3395,41 @@ MacroAssembler::branchIfInlineTypedObject(Register obj, Register scratch, Label*
branchPtr(Assembler::Equal, scratch, ImmPtr(&InlineTransparentTypedObject::class_), label);
}
// Branch to |label| unless |obj| is a SIMD typed object of exactly
// |simdType|. Performs three guards in sequence: class pointer, TypeDescr
// kind, and TypeDescr type. Clobbers |scratch|.
void
MacroAssembler::branchIfNotSimdObject(Register obj, Register scratch, SimdType simdType,
                                      Label* label)
{
    loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);

    // Guard that the object has the same representation as the one produced for
    // SIMD value-type.
    Address clasp(scratch, ObjectGroup::offsetOfClasp());
    static_assert(!SimdTypeDescr::Opaque, "SIMD objects are transparent");
    branchPtr(Assembler::NotEqual, clasp, ImmPtr(&InlineTransparentTypedObject::class_),
              label);

    // obj->type()->typeDescr()
    // The previous class pointer comparison implies that the addendumKind is
    // Addendum_TypeDescr.
    loadPtr(Address(scratch, ObjectGroup::offsetOfAddendum()), scratch);

    // Check for the /Kind/ reserved slot of the TypeDescr. This is an Int32
    // Value which is equivalent to the object class check.
    static_assert(JS_DESCR_SLOT_KIND < NativeObject::MAX_FIXED_SLOTS, "Load from fixed slots");
    Address typeDescrKind(scratch, NativeObject::getFixedSlotOffset(JS_DESCR_SLOT_KIND));
    assertTestInt32(Assembler::Equal, typeDescrKind,
                    "MOZ_ASSERT(obj->type()->typeDescr()->getReservedSlot(JS_DESCR_SLOT_KIND).isInt32())");
    branch32(Assembler::NotEqual, ToPayload(typeDescrKind), Imm32(js::type::Simd), label);

    // Check if the SimdTypeDescr /Type/ matches the specialization of this
    // MSimdUnbox instruction.
    static_assert(JS_DESCR_SLOT_TYPE < NativeObject::MAX_FIXED_SLOTS, "Load from fixed slots");
    Address typeDescrType(scratch, NativeObject::getFixedSlotOffset(JS_DESCR_SLOT_TYPE));
    assertTestInt32(Assembler::Equal, typeDescrType,
                    "MOZ_ASSERT(obj->type()->typeDescr()->getReservedSlot(JS_DESCR_SLOT_TYPE).isInt32())");
    branch32(Assembler::NotEqual, ToPayload(typeDescrType), Imm32(int32_t(simdType)), label);
}
void
MacroAssembler::copyObjGroupNoPreBarrier(Register sourceObj, Register destObj, Register scratch)
{

Просмотреть файл

@ -1158,6 +1158,8 @@ class MacroAssembler : public MacroAssemblerSpecific
void branchIfInlineTypedObject(Register obj, Register scratch, Label* label);
void branchIfNotSimdObject(Register obj, Register scratch, SimdType simdType, Label* label);
inline void branchTestClassIsProxy(bool proxy, Register clasp, Label* label);
inline void branchTestObjectIsProxy(bool proxy, Register object, Register scratch, Label* label);
@ -1374,6 +1376,9 @@ class MacroAssembler : public MacroAssemblerSpecific
inline void canonicalizeFloat(FloatRegister reg);
inline void canonicalizeFloatIfDeterministic(FloatRegister reg);
inline void canonicalizeFloat32x4(FloatRegister reg, FloatRegister scratch)
DEFINED_ON(x86_shared);
public:
// ========================================================================
// Memory access primitives.
@ -2148,7 +2153,7 @@ class MacroAssembler : public MacroAssemblerSpecific
template<typename T>
void loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegister dest, Register temp, Label* fail,
bool canonicalizeDoubles = true);
bool canonicalizeDoubles = true, unsigned numElems = 0);
template<typename T>
void loadFromTypedArray(Scalar::Type arrayType, const T& src, const ValueOperand& dest, bool allowDouble,
@ -2175,8 +2180,10 @@ class MacroAssembler : public MacroAssemblerSpecific
}
}
void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex& dest);
void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address& dest);
void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex& dest,
unsigned numElems = 0);
void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address& dest,
unsigned numElems = 0);
void memoryBarrierBefore(const Synchronization& sync);
void memoryBarrierAfter(const Synchronization& sync);

Просмотреть файл

@ -1785,6 +1785,10 @@ GetTypedArrayRange(TempAllocator& alloc, Scalar::Type type)
case Scalar::Int64:
case Scalar::Float32:
case Scalar::Float64:
case Scalar::Float32x4:
case Scalar::Int8x16:
case Scalar::Int16x8:
case Scalar::Int32x4:
case Scalar::MaxTypedArrayViewType:
break;
}

Просмотреть файл

@ -10,6 +10,7 @@
#include "jsmath.h"
#include "builtin/RegExp.h"
#include "builtin/SIMD.h"
#include "builtin/String.h"
#include "builtin/TypedObject.h"
#include "gc/Heap.h"
@ -1661,6 +1662,79 @@ RNewCallObject::recover(JSContext* cx, SnapshotIterator& iter) const
return true;
}
// Serialize the recover data for MSimdBox: the recover opcode plus the
// SimdType, encoded in one byte (checked by the static_assert below).
bool
MSimdBox::writeRecoverData(CompactBufferWriter& writer) const
{
    MOZ_ASSERT(canRecoverOnBailout());
    writer.writeUnsigned(uint32_t(RInstruction::Recover_SimdBox));
    static_assert(unsigned(SimdType::Count) < 0x100, "assuming SimdType fits in 8 bits");
    writer.writeByte(uint8_t(simdType()));
    return true;
}

// Deserialize: read back the SimdType byte written above.
RSimdBox::RSimdBox(CompactBufferReader& reader)
{
    type_ = reader.readByte();
}

// On bailout, rebuild the SIMD wrapper object from the raw 128-bit register
// (or spill slot) contents recorded in the snapshot, dispatching on the
// recorded SimdType. Returns false on allocation failure.
bool
RSimdBox::recover(JSContext* cx, SnapshotIterator& iter) const
{
    JSObject* resultObject = nullptr;
    RValueAllocation a = iter.readAllocation();
    MOZ_ASSERT(iter.allocationReadable(a));
    MOZ_ASSERT_IF(a.mode() == RValueAllocation::ANY_FLOAT_REG, a.fpuReg().isSimd128());
    const FloatRegisters::RegisterContent* raw = iter.floatAllocationPointer(a);
    switch (SimdType(type_)) {
      case SimdType::Bool8x16:
        resultObject = js::CreateSimd<Bool8x16>(cx, (const Bool8x16::Elem*) raw);
        break;
      case SimdType::Int8x16:
        resultObject = js::CreateSimd<Int8x16>(cx, (const Int8x16::Elem*) raw);
        break;
      case SimdType::Uint8x16:
        resultObject = js::CreateSimd<Uint8x16>(cx, (const Uint8x16::Elem*) raw);
        break;
      case SimdType::Bool16x8:
        resultObject = js::CreateSimd<Bool16x8>(cx, (const Bool16x8::Elem*) raw);
        break;
      case SimdType::Int16x8:
        resultObject = js::CreateSimd<Int16x8>(cx, (const Int16x8::Elem*) raw);
        break;
      case SimdType::Uint16x8:
        resultObject = js::CreateSimd<Uint16x8>(cx, (const Uint16x8::Elem*) raw);
        break;
      case SimdType::Bool32x4:
        resultObject = js::CreateSimd<Bool32x4>(cx, (const Bool32x4::Elem*) raw);
        break;
      case SimdType::Int32x4:
        resultObject = js::CreateSimd<Int32x4>(cx, (const Int32x4::Elem*) raw);
        break;
      case SimdType::Uint32x4:
        resultObject = js::CreateSimd<Uint32x4>(cx, (const Uint32x4::Elem*) raw);
        break;
      case SimdType::Float32x4:
        resultObject = js::CreateSimd<Float32x4>(cx, (const Float32x4::Elem*) raw);
        break;
      case SimdType::Float64x2:
        MOZ_CRASH("NYI, RSimdBox of Float64x2");
        break;
      case SimdType::Bool64x2:
        MOZ_CRASH("NYI, RSimdBox of Bool64x2");
        break;
      case SimdType::Count:
        MOZ_CRASH("RSimdBox of Count is unreachable");
    }

    if (!resultObject)
        return false;

    RootedValue result(cx);
    result.setObject(*resultObject);
    iter.storeInstructionResult(result);
    return true;
}
bool
MObjectState::writeRecoverData(CompactBufferWriter& writer) const
{

Просмотреть файл

@ -112,6 +112,7 @@ namespace jit {
_(CreateThisWithTemplate) \
_(Lambda) \
_(LambdaArrow) \
_(SimdBox) \
_(ObjectState) \
_(ArrayState) \
_(SetArrayLength) \
@ -689,6 +690,17 @@ class RNewCallObject final : public RInstruction
MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const override;
};
// Recover instruction matching MSimdBox::writeRecoverData: stores the boxed
// SimdType as one byte and re-creates the SIMD object during bailout.
class RSimdBox final : public RInstruction
{
  private:
    // SimdType of the boxed value, as written by MSimdBox::writeRecoverData.
    uint8_t type_;

  public:
    RINSTRUCTION_HEADER_NUM_OP_(SimdBox, 1)

    MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const override;
};
class RObjectState final : public RInstruction
{
private:

Просмотреть файл

@ -637,6 +637,45 @@ template bool NoFloatPolicyAfter<0>::adjustInputs(TempAllocator& alloc, MInstruc
template bool NoFloatPolicyAfter<1>::adjustInputs(TempAllocator& alloc, MInstruction* def) const;
template bool NoFloatPolicyAfter<2>::adjustInputs(TempAllocator& alloc, MInstruction* def) const;
// Coerce operand |Op| of a SIMD-producing instruction to the lane type of the
// instruction's result: Int32 lanes get a truncation, Float32 lanes get a
// float conversion. Boolean-lane vectors must already receive Int32 0/-1.
template <unsigned Op>
bool
SimdScalarPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
{
    MOZ_ASSERT(IsSimdType(ins->type()));
    MIRType laneType = SimdTypeToLaneType(ins->type());

    MDefinition* in = ins->getOperand(Op);

    // A vector with boolean lanes requires Int32 inputs that have already been
    // converted to 0/-1.
    // We can't insert a MIRType::Boolean lane directly - it requires conversion.
    if (laneType == MIRType::Boolean) {
        MOZ_ASSERT(in->type() == MIRType::Int32, "Boolean SIMD vector requires Int32 lanes.");
        return true;
    }

    // Already the right lane type: nothing to do.
    if (in->type() == laneType)
        return true;

    MInstruction* replace;
    if (laneType == MIRType::Int32) {
        replace = MTruncateToInt32::New(alloc, in);
    } else {
        MOZ_ASSERT(laneType == MIRType::Float32);
        replace = MToFloat32::New(alloc, in);
    }

    // Splice the conversion in front of |ins| and let the conversion's own
    // policy adjust its input in turn.
    ins->block()->insertBefore(ins, replace);
    ins->replaceOperand(Op, replace);

    return replace->typePolicy()->adjustInputs(alloc, replace);
}

template bool SimdScalarPolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
template bool SimdScalarPolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
template bool SimdScalarPolicy<2>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
template bool SimdScalarPolicy<3>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
template <unsigned Op>
bool
BoxPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
@ -818,6 +857,75 @@ template bool ObjectPolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruc
template bool ObjectPolicy<2>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
template bool ObjectPolicy<3>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
template <unsigned Op>
bool
SimdSameAsReturnedTypePolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
{
MOZ_ASSERT(ins->type() == ins->getOperand(Op)->type());
return true;
}
template bool
SimdSameAsReturnedTypePolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
template bool
SimdSameAsReturnedTypePolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
bool
SimdAllPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const
{
for (unsigned i = 0, e = ins->numOperands(); i < e; i++)
MOZ_ASSERT(ins->getOperand(i)->type() == ins->typePolicySpecialization());
return true;
}
template <unsigned Op>
bool
SimdPolicy<Op>::adjustInputs(TempAllocator& alloc, MInstruction* ins) const
{
MOZ_ASSERT(ins->typePolicySpecialization() == ins->getOperand(Op)->type());
return true;
}
template bool
SimdPolicy<0>::adjustInputs(TempAllocator& alloc, MInstruction* ins) const;
bool
SimdShufflePolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const
{
MSimdGeneralShuffle* s = ins->toSimdGeneralShuffle();
for (unsigned i = 0; i < s->numVectors(); i++)
MOZ_ASSERT(ins->getOperand(i)->type() == ins->typePolicySpecialization());
// Next inputs are the lanes, which need to be int32
for (unsigned i = 0; i < s->numLanes(); i++) {
MDefinition* in = ins->getOperand(s->numVectors() + i);
if (in->type() == MIRType::Int32)
continue;
auto* replace = MToNumberInt32::New(alloc, in, IntConversionInputKind::NumbersOnly);
ins->block()->insertBefore(ins, replace);
ins->replaceOperand(s->numVectors() + i, replace);
if (!replace->typePolicy()->adjustInputs(alloc, replace))
return false;
}
return true;
}
bool
SimdSelectPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const
{
// First input is the mask, which has to be a boolean.
MOZ_ASSERT(IsBooleanSimdType(ins->getOperand(0)->type()));
// Next inputs are the two vectors of a particular type.
for (unsigned i = 1; i < 3; i++)
MOZ_ASSERT(ins->getOperand(i)->type() == ins->typePolicySpecialization());
return true;
}
bool
CallPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const
{
@ -875,6 +983,13 @@ StoreUnboxedScalarPolicy::adjustValueInput(TempAllocator& alloc, MInstruction* i
Scalar::Type writeType, MDefinition* value,
int valueOperand)
{
// Storing a SIMD value requires a valueOperand that has already been
// SimdUnboxed. See IonBuilder::inlineSimdStore(()
if (Scalar::isSimdType(writeType)) {
MOZ_ASSERT(IsSimdType(value->type()));
return true;
}
MDefinition* curValue = value;
// First, ensure the value is int32, boolean, double or Value.
// The conversion is based on TypedArrayObjectTemplate::setElementTail.
@ -1155,6 +1270,9 @@ FilterTypeSetPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const
_(PowPolicy) \
_(SameValuePolicy) \
_(SignPolicy) \
_(SimdAllPolicy) \
_(SimdSelectPolicy) \
_(SimdShufflePolicy) \
_(StoreTypedArrayHolePolicy) \
_(StoreUnboxedScalarPolicy) \
_(StoreUnboxedObjectOrNullPolicy) \
@ -1192,6 +1310,7 @@ FilterTypeSetPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const
_(MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<1>, UnboxedInt32Policy<2>, UnboxedInt32Policy<3>>) \
_(MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<1>, TruncateToInt32Policy<2>, TruncateToInt32Policy<3> >) \
_(MixPolicy<ObjectPolicy<0>, CacheIdPolicy<1>, NoFloatPolicy<2>>) \
_(MixPolicy<SimdScalarPolicy<0>, SimdScalarPolicy<1>, SimdScalarPolicy<2>, SimdScalarPolicy<3> >) \
_(MixPolicy<ObjectPolicy<0>, BoxExceptPolicy<1, MIRType::Object>, CacheIdPolicy<2>>) \
_(MixPolicy<BoxPolicy<0>, ObjectPolicy<1> >) \
_(MixPolicy<ConvertToStringPolicy<0>, ConvertToStringPolicy<1> >) \
@ -1211,6 +1330,8 @@ FilterTypeSetPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const
_(MixPolicy<ObjectPolicy<0>, StringPolicy<1> >) \
_(MixPolicy<ObjectPolicy<0>, ConvertToStringPolicy<2> >) \
_(MixPolicy<ObjectPolicy<1>, ConvertToStringPolicy<0> >) \
_(MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1> >) \
_(MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdScalarPolicy<1> >) \
_(MixPolicy<StringPolicy<0>, UnboxedInt32Policy<1> >) \
_(MixPolicy<StringPolicy<0>, StringPolicy<1> >) \
_(MixPolicy<BoxPolicy<0>, BoxPolicy<1> >) \
@ -1221,6 +1342,9 @@ FilterTypeSetPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const
_(ObjectPolicy<0>) \
_(ObjectPolicy<1>) \
_(ObjectPolicy<3>) \
_(SimdPolicy<0>) \
_(SimdSameAsReturnedTypePolicy<0>) \
_(SimdScalarPolicy<0>) \
_(StringPolicy<0>)

Просмотреть файл

@ -365,6 +365,67 @@ class ObjectPolicy final : public TypePolicy
// a primitive, we use ValueToNonNullObject.
typedef ObjectPolicy<0> SingleObjectPolicy;
// Convert an operand to have a type identical to the scalar type of the
// returned type of the instruction.
template <unsigned Op>
class SimdScalarPolicy final : public TypePolicy
{
  public:
    constexpr SimdScalarPolicy() { }
    EMPTY_DATA_;
    static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
    MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) const override {
        return staticAdjustInputs(alloc, def);
    }
};

// Require every operand to match the instruction's policy specialization.
class SimdAllPolicy final : public TypePolicy
{
  public:
    constexpr SimdAllPolicy () { }
    SPECIALIZATION_DATA_;
    MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) const override;
};

// Require operand |Op| to match the instruction's policy specialization.
template <unsigned Op>
class SimdPolicy final : public TypePolicy
{
  public:
    constexpr SimdPolicy() { }
    SPECIALIZATION_DATA_;
    MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) const override;
};

// Policy for SIMD select: boolean mask plus two matching vector operands.
class SimdSelectPolicy final : public TypePolicy
{
  public:
    constexpr SimdSelectPolicy() { }
    SPECIALIZATION_DATA_;
    MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) const override;
};

// Policy for SIMD general shuffle: vector operands plus Int32 lane indices.
class SimdShufflePolicy final : public TypePolicy
{
  public:
    constexpr SimdShufflePolicy() { }
    SPECIALIZATION_DATA_;
    MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) const override;
};

// SIMD value-type policy, use the returned type of the instruction to determine
// how to unbox its operand.
template <unsigned Op>
class SimdSameAsReturnedTypePolicy final : public TypePolicy
{
  public:
    constexpr SimdSameAsReturnedTypePolicy() { }
    EMPTY_DATA_;
    static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
    MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) const override {
        return staticAdjustInputs(alloc, ins);
    }
};
template <unsigned Op>
class BoxPolicy final : public TypePolicy
{

Просмотреть файл

@ -119,6 +119,7 @@ TypedObjectPrediction::ofArrayKind() const
switch (kind()) {
case type::Scalar:
case type::Reference:
case type::Simd:
case type::Struct:
return false;
@ -206,6 +207,12 @@ TypedObjectPrediction::referenceType() const
return extractType<ReferenceTypeDescr>();
}
// Return the SimdType of the predicted descriptor. Only valid when the
// prediction's descriptor is a SimdTypeDescr (see |kind()|); the as<> cast
// asserts otherwise.
SimdType
TypedObjectPrediction::simdType() const
{
    return descr().as<SimdTypeDescr>().type();
}
bool
TypedObjectPrediction::hasKnownArrayLength(int32_t* length) const
{

Просмотреть файл

@ -165,10 +165,11 @@ class TypedObjectPrediction {
//////////////////////////////////////////////////////////////////////
// Simple operations
//
// Only valid when |kind()| is Scalar or Reference.
// Only valid when |kind()| is Scalar, Reference, or Simd (as appropriate).
Scalar::Type scalarType() const;
ReferenceType referenceType() const;
SimdType simdType() const;
///////////////////////////////////////////////////////////////////////////
// Queries valid only for arrays.

Просмотреть файл

@ -3156,14 +3156,248 @@ CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir)
masm.atomicExchange64(Synchronization::Full(), addr, value, out);
}
// SIMD code generation is not implemented on this target. All of these
// visitors crash with "NYI"; they exist only to satisfy the CodeGenerator
// interface and should never be reached in practice.
void
CodeGenerator::visitSimdSplatX4(LSimdSplatX4* lir)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimd128Int(LSimd128Int* ins)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimd128Float(LSimd128Float* ins)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdExtractElementI(LSimdExtractElementI* ins)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdExtractElementF(LSimdExtractElementF* ins)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdBinaryCompIx4(LSimdBinaryCompIx4* lir)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdBinaryCompFx4(LSimdBinaryCompFx4* lir)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdBinaryArithIx4(LSimdBinaryArithIx4* lir)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdBinaryArithFx4(LSimdBinaryArithFx4* lir)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdBinaryBitwise(LSimdBinaryBitwise* lir)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitNearbyInt(LNearbyInt*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdShift(LSimdShift*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitNearbyIntF(LNearbyIntF*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdSelect(LSimdSelect*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdAllTrue(LSimdAllTrue*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdAnyTrue(LSimdAnyTrue*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdShuffle(LSimdShuffle*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdSplatX8(LSimdSplatX8*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdSplatX16(LSimdSplatX16*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdSwizzleF(LSimdSwizzleF*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdSwizzleI(LSimdSwizzleI*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdShuffleX4(LSimdShuffleX4*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdBinaryCompIx8(LSimdBinaryCompIx8*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdUnaryArithFx4(LSimdUnaryArithFx4*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdUnaryArithIx4(LSimdUnaryArithIx4*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdUnaryArithIx8(LSimdUnaryArithIx8*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitFloat32x4ToInt32x4(LFloat32x4ToInt32x4*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitInt32x4ToFloat32x4(LInt32x4ToFloat32x4*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdBinaryArithIx8(LSimdBinaryArithIx8*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdBinaryCompIx16(LSimdBinaryCompIx16*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdInsertElementF(LSimdInsertElementF*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdInsertElementI(LSimdInsertElementI*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdUnaryArithIx16(LSimdUnaryArithIx16*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitFloat32x4ToUint32x4(LFloat32x4ToUint32x4*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdBinaryArithIx16(LSimdBinaryArithIx16*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdExtractElementB(LSimdExtractElementB*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdGeneralShuffleF(LSimdGeneralShuffleF*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdGeneralShuffleI(LSimdGeneralShuffleI*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdReinterpretCast(LSimdReinterpretCast*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdBinarySaturating(LSimdBinarySaturating*)
{
    MOZ_CRASH("NYI");
}

void
CodeGenerator::visitSimdExtractElementU2D(LSimdExtractElementU2D*)
{
    MOZ_CRASH("NYI");
}

Просмотреть файл

@ -1058,3 +1058,63 @@ LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins)
{
defineInt64(new(alloc()) LSignExtendInt64(useInt64RegisterAtStart(ins->input())), ins);
}
// SIMD lowering is not implemented on this target. These visitors crash with
// "NYI" and should never be reached in practice.
void
LIRGenerator::visitSimdInsertElement(MSimdInsertElement*)
{
    MOZ_CRASH("NYI");
}

void
LIRGenerator::visitSimdExtractElement(MSimdExtractElement*)
{
    MOZ_CRASH("NYI");
}

void
LIRGenerator::visitSimdBinaryArith(MSimdBinaryArith*)
{
    MOZ_CRASH("NYI");
}

void
LIRGenerator::visitSimdSelect(MSimdSelect*)
{
    MOZ_CRASH("NYI");
}

void
LIRGenerator::visitSimdSplat(MSimdSplat*)
{
    MOZ_CRASH("NYI");
}

void
LIRGenerator::visitSimdValueX4(MSimdValueX4*)
{
    MOZ_CRASH("NYI");
}

void
LIRGenerator::visitSimdBinarySaturating(MSimdBinarySaturating*)
{
    MOZ_CRASH("NYI");
}

void
LIRGenerator::visitSimdSwizzle(MSimdSwizzle*)
{
    MOZ_CRASH("NYI");
}

void
LIRGenerator::visitSimdShuffle(MSimdShuffle*)
{
    MOZ_CRASH("NYI");
}

void
LIRGenerator::visitSimdGeneralShuffle(MSimdGeneralShuffle*)
{
    MOZ_CRASH("NYI");
}

Просмотреть файл

@ -63,6 +63,17 @@ class LIRGeneratorARM : public LIRGeneratorShared
void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
MDefinition* lhs, MDefinition* rhs);
// SIMD comparisons have no dedicated lowering on this target; both the
// integer and float 4-lane forms reuse the generic FPU binary lowering.
void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
                     MDefinition* lhs, MDefinition* rhs)
{
    return lowerForFPU(ins, mir, lhs, rhs);
}
void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
                     MDefinition* lhs, MDefinition* rhs)
{
    return lowerForFPU(ins, mir, lhs, rhs);
}
void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
MDefinition* lhs, MDefinition* rhs);
void lowerTruncateDToInt32(MTruncateToInt32* ins);

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше