Bug 857845 part 1 - rm JaegerMonkey. r=bhackett, sr=luke

This commit is contained in:
Jan de Mooij 2013-05-13 16:47:57 -07:00
Родитель 25093b2815
Коммит 57ed4e32e5
103 изменённых файла: 76 добавлений и 43173 удаления

Просмотреть файл

@ -184,30 +184,6 @@ CPPSRCS += TraceLogging.cpp
endif
ifdef ENABLE_METHODJIT
###############################################
# BEGIN include sources for the method JIT
#
VPATH += $(srcdir)/methodjit
CPPSRCS += MethodJIT.cpp \
StubCalls.cpp \
Compiler.cpp \
FrameState.cpp \
FastArithmetic.cpp \
FastBuiltins.cpp \
FastOps.cpp \
LoopState.cpp \
StubCompiler.cpp \
MonoIC.cpp \
PolyIC.cpp \
ImmutableSync.cpp \
InvokeHelpers.cpp \
Retcon.cpp \
TrampolineCompiler.cpp \
$(NULL)
# Ion
ifdef ENABLE_ION
VPATH += $(srcdir)/ion
@ -336,17 +312,6 @@ CPPSRCS += Lowering-arm.cpp \
endif #ENABLE_ION
endif
endif #ENABLE_ION
ifeq (sparc, $(findstring sparc,$(TARGET_CPU)))
ASFILES += TrampolineSparc.s
endif
ifeq (mips, $(findstring mips,$(TARGET_CPU)))
CPPSRCS += TrampolineMIPS.cpp
endif
#
# END enclude sources for the method JIT
#############################################
endif
###############################################
# BEGIN include sources for the Nitro assembler
@ -366,10 +331,6 @@ CPPSRCS += ExecutableAllocator.cpp \
YarrCanonicalizeUCS2.cpp \
$(NONE)
ifdef ENABLE_METHODJIT_SPEW
CPPSRCS += Logging.cpp
endif
ifneq (,$(filter-out OS2 WINNT,$(OS_ARCH)))
CPPSRCS += ExecutableAllocatorPosix.cpp \
OSAllocatorPosix.cpp \
@ -386,9 +347,8 @@ CPPSRCS += ExecutableAllocatorOS2.cpp \
$(NONE)
endif
ifneq (,$(ENABLE_METHODJIT)$(ENABLE_ION)$(ENABLE_YARR_JIT))
ifneq (,$(ENABLE_ION)$(ENABLE_YARR_JIT))
VPATH += $(srcdir)/assembler/assembler \
$(srcdir)/methodjit \
$(NONE)
CPPSRCS += ARMAssembler.cpp \
@ -1000,34 +960,12 @@ selfhosted.out.h: $(selfhosted_out_h_deps)
# the code in js/src/assembler.
CXXFLAGS += -DUSE_SYSTEM_MALLOC=1 -DENABLE_ASSEMBLER=1
ifneq (,$(ENABLE_YARR_JIT)$(ENABLE_METHODJIT))
ifneq (,$(ENABLE_YARR_JIT))
CXXFLAGS += -DENABLE_JIT=1
endif
INCLUDES += -I$(srcdir)/assembler -I$(srcdir)/yarr
ifdef ENABLE_METHODJIT
# Build a standalone test program that exercises the assembler
# sources a bit.
TESTMAIN_OBJS = \
Assertions.$(OBJ_SUFFIX) \
ExecutableAllocator.$(OBJ_SUFFIX) \
ARMAssembler.$(OBJ_SUFFIX) \
MacroAssemblerARM.$(OBJ_SUFFIX) \
TestMain.$(OBJ_SUFFIX) \
jsutil.$(OBJ_SUFFIX) \
jslog2.$(OBJ_SUFFIX)
ifeq ($(OS_ARCH),WINNT)
TESTMAIN_OBJS += ExecutableAllocatorWin.$(OBJ_SUFFIX)
else
TESTMAIN_OBJS += ExecutableAllocatorPosix.$(OBJ_SUFFIX)
endif
TestMain$(HOST_BIN_SUFFIX): $(TESTMAIN_OBJS)
$(CXX) -o TestMain$(HOST_BIN_SUFFIX) $(TESTMAIN_OBJS)
endif
#
# END kludges for the Nitro assembler
###############################################

Просмотреть файл

@ -45,7 +45,6 @@
#include "ion/IonSpewer.h"
#include "js/RootingAPI.h"
#include "methodjit/Logging.h"
#define PRETTY_PRINT_OFFSET(os) (((os)<0)?"-":""), (((os)<0)?-(os):(os))
@ -286,8 +285,7 @@ namespace JSC {
__attribute__ ((format (printf, 2, 3)))
#endif
{
if (printer ||
js::IsJaegerSpewChannelActive(js::JSpew_Insns)
if (printer
#ifdef JS_ION
|| js::ion::IonSpewEnabled(js::ion::IonSpew_Codegen)
#endif
@ -306,14 +304,8 @@ namespace JSC {
if (printer)
printer->printf("%s\n", buf);
// The assembler doesn't know which compiler it is for, so if
// both JM and Ion spew are on, just print via one channel
// (Use JM to pick up isOOLPath).
if (js::IsJaegerSpewChannelActive(js::JSpew_Insns))
js::JaegerSpew(js::JSpew_Insns, "%s %s\n", isOOLPath ? ">" : " ", buf);
#ifdef JS_ION
else
js::ion::IonSpew(js::ion::IonSpew_Codegen, "%s", buf);
js::ion::IonSpew(js::ion::IonSpew_Codegen, "%s", buf);
#endif
}
}
@ -324,12 +316,8 @@ namespace JSC {
__attribute__ ((format (printf, 1, 2)))
#endif
{
if (js::IsJaegerSpewChannelActive(js::JSpew_Insns)
#ifdef JS_ION
|| js::ion::IonSpewEnabled(js::ion::IonSpew_Codegen)
#endif
)
{
if (js::ion::IonSpewEnabled(js::ion::IonSpew_Codegen)) {
char buf[200];
va_list va;
@ -337,15 +325,10 @@ namespace JSC {
int i = vsnprintf(buf, sizeof(buf), fmt, va);
va_end(va);
if (i > -1) {
if (js::IsJaegerSpewChannelActive(js::JSpew_Insns))
js::JaegerSpew(js::JSpew_Insns, " %s\n", buf);
#ifdef JS_ION
else
js::ion::IonSpew(js::ion::IonSpew_Codegen, "%s", buf);
#endif
}
if (i > -1)
js::ion::IonSpew(js::ion::IonSpew_Codegen, "%s", buf);
}
#endif
}
};

Просмотреть файл

@ -39,7 +39,6 @@
#include "assembler/wtf/SegmentedVector.h"
#include "assembler/wtf/Assertions.h"
#include "methodjit/Logging.h"
#include "jsnum.h"
#define ASSEMBLER_HAS_CONSTANT_POOL 1

Просмотреть файл

@ -16,7 +16,6 @@
#include "jswrapper.h"
#include "builtin/TestingFunctions.h"
#include "methodjit/MethodJIT.h"
#include "vm/ForkJoin.h"
#include "vm/Stack-inl.h"
@ -170,14 +169,6 @@ GetBuildConfiguration(JSContext *cx, unsigned argc, jsval *vp)
if (!JS_SetProperty(cx, info, "oom-backtraces", &value))
return false;
#ifdef JS_METHODJIT
value = BooleanValue(true);
#else
value = BooleanValue(false);
#endif
if (!JS_SetProperty(cx, info, "methodjit", &value))
return false;
#ifdef ENABLE_PARALLEL_JS
value = BooleanValue(true);
#else
@ -816,45 +807,6 @@ DumpHeapComplete(JSContext *cx, unsigned argc, jsval *vp)
return true;
}
JSBool
MJitChunkLimit(JSContext *cx, unsigned argc, jsval *vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
if (argc != 1) {
RootedObject callee(cx, &args.callee());
ReportUsageError(cx, callee, "Wrong number of arguments");
return JS_FALSE;
}
if (cx->runtime->alwaysPreserveCode) {
JS_ReportError(cx, "Can't change chunk limit after gcPreserveCode()");
return JS_FALSE;
}
for (CompartmentsIter c(cx->runtime); !c.done(); c.next()) {
if (c->lastAnimationTime != 0) {
JS_ReportError(cx, "Can't change chunk limit if code may be preserved");
return JS_FALSE;
}
}
double t;
if (!JS_ValueToNumber(cx, args[0], &t))
return JS_FALSE;
#ifdef JS_METHODJIT
mjit::SetChunkLimit((uint32_t) t);
#endif
// Clear out analysis information which might refer to code compiled with
// the previous chunk limit.
JS_GC(cx->runtime);
vp->setUndefined();
return true;
}
static JSBool
Terminate(JSContext *cx, unsigned arg, jsval *vp)
{
@ -1056,10 +1008,6 @@ static JSFunctionSpecWithHelp TestingFunctions[] = {
"dumpHeapComplete([filename])",
" Dump reachable and unreachable objects to a file."),
JS_FN_HELP("mjitChunkLimit", MJitChunkLimit, 1, 0,
"mjitChunkLimit(N)",
" Specify limit on compiled chunk size during mjit compilation."),
JS_FN_HELP("terminate", Terminate, 0, 0,
"terminate()",
" Terminate JavaScript execution, as if we had run out of\n"

Просмотреть файл

@ -2153,23 +2153,6 @@ MOZ_ARG_DISABLE_BOOL(ion,
[ --disable-ion Disable use of the IonMonkey JIT],
ENABLE_ION= )
MOZ_ARG_DISABLE_BOOL(methodjit,
[ --disable-methodjit Disable method JIT support],
ENABLE_METHODJIT= )
MOZ_ARG_DISABLE_BOOL(monoic,
[ --disable-monoic Disable use of MICs by JIT compiler],
ENABLE_MONOIC= )
MOZ_ARG_DISABLE_BOOL(polyic,
[ --disable-polyic Disable use of PICs by JIT compiler],
ENABLE_POLYIC= )
MOZ_ARG_ENABLE_BOOL(methodjit-spew,
[ --enable-methodjit-spew Enable method JIT spew support],
ENABLE_METHODJIT_SPEW=1,
ENABLE_METHODJIT_SPEW= )
MOZ_ARG_DISABLE_BOOL(yarr-jit,
[ --disable-yarr-jit Disable YARR JIT support],
ENABLE_YARR_JIT= )

Просмотреть файл

@ -11,7 +11,6 @@
#include "gc/Marking.h"
#include "gc/Nursery-inl.h"
#include "methodjit/MethodJIT.h"
#include "vm/Shape.h"
#include "jsobjinlines.h"

Просмотреть файл

@ -769,12 +769,6 @@ js::gc::MarkRuntime(JSTracer *trc, bool useSavedRoots)
c->debugScopes->mark(trc);
}
#ifdef JS_METHODJIT
/* We need to expand inline frames before stack scanning. */
for (ZonesIter zone(rt); !zone.done(); zone.next())
mjit::ExpandInlineFrames(zone);
#endif
rt->stackSpace.mark(trc);
#ifdef JS_ION

Просмотреть файл

@ -66,13 +66,6 @@ Zone::init(JSContext *cx)
void
Zone::setNeedsBarrier(bool needs, ShouldUpdateIon updateIon)
{
#ifdef JS_METHODJIT
/* ClearAllFrames calls compileBarriers() and needs the old value. */
bool old = compileBarriers();
if (compileBarriers(needs) != old)
mjit::ClearAllFrames(this);
#endif
#ifdef JS_ION
if (updateIon == UpdateIon && needs != ionUsingBarriers_) {
ion::ToggleBarriers(this, needs);
@ -156,41 +149,27 @@ Zone::sweep(FreeOp *fop, bool releaseTypes)
void
Zone::discardJitCode(FreeOp *fop, bool discardConstraints)
{
#ifdef JS_METHODJIT
/*
* Kick all frames on the stack into the interpreter, and release all JIT
* code in the compartment unless code is being preserved, in which case
* purge all caches in the JIT scripts. Even if we are not releasing all
* JIT code, we still need to release code for scripts which are in the
* middle of a native or getter stub call, as these stubs will have been
* redirected to the interpoline.
*/
mjit::ClearAllFrames(this);
#ifdef JS_ION
if (isPreservingCode()) {
PurgeJITCaches(this);
} else {
# ifdef JS_ION
# ifdef DEBUG
# ifdef DEBUG
/* Assert no baseline scripts are marked as active. */
for (CellIterUnderGC i(this, FINALIZE_SCRIPT); !i.done(); i.next()) {
JSScript *script = i.get<JSScript>();
JS_ASSERT_IF(script->hasBaselineScript(), !script->baselineScript()->active());
}
# endif
# endif
/* Mark baseline scripts on the stack as active. */
ion::MarkActiveBaselineScripts(this);
/* Only mark OSI points if code is being discarded. */
ion::InvalidateAll(fop, this);
# endif
for (CellIterUnderGC i(this, FINALIZE_SCRIPT); !i.done(); i.next()) {
JSScript *script = i.get<JSScript>();
mjit::ReleaseScriptCode(fop, script);
# ifdef JS_ION
ion::FinishInvalidation(fop, script);
/*
@ -198,7 +177,6 @@ Zone::discardJitCode(FreeOp *fop, bool discardConstraints)
* this also resets the active flag.
*/
ion::FinishDiscardBaselineScript(fop, script);
# endif
/*
* Use counts for scripts are reset on GC. After discarding code we
@ -209,14 +187,12 @@ Zone::discardJitCode(FreeOp *fop, bool discardConstraints)
}
for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
#ifdef JS_ION
/* Free optimized baseline stubs. */
if (comp->ionCompartment())
comp->ionCompartment()->optimizedStubSpace()->free();
#endif
comp->types.sweepCompilerOutputs(fop, discardConstraints);
}
}
#endif /* JS_METHODJIT */
#endif
}

Просмотреть файл

@ -572,11 +572,6 @@ ion::CachedShapeGuardFailure()
script->failedShapeGuard = true;
// Purge JM caches in the script and all inlined script, to avoid baking in
// the same shape guard next time.
for (size_t i = 0; i < script->ionScript()->scriptEntries(); i++)
mjit::PurgeCaches(script->ionScript()->getScript(i));
IonSpew(IonSpew_Invalidate, "Invalidating due to shape guard failure");
return Invalidate(cx, script);

Просмотреть файл

@ -190,23 +190,6 @@ BaselineCompiler::compile()
return Method_Compiled;
}
#ifdef DEBUG
#define SPEW_OPCODE() \
JS_BEGIN_MACRO \
if (IsJaegerSpewChannelActive(JSpew_JSOps)) { \
Sprinter sprinter(cx); \
sprinter.init(); \
RootedScript script_(cx, script); \
js_Disassemble1(cx, script_, pc, pc - script_->code, \
JS_TRUE, &sprinter); \
JaegerSpew(JSpew_JSOps, " %2u %s", \
(unsigned)frame.stackDepth(), sprinter.string()); \
} \
JS_END_MACRO;
#else
#define SPEW_OPCODE()
#endif /* DEBUG */
bool
BaselineCompiler::emitPrologue()
{
@ -539,7 +522,6 @@ BaselineCompiler::emitBody()
uint32_t emittedOps = 0;
while (true) {
SPEW_OPCODE();
JSOp op = JSOp(*pc);
IonSpew(IonSpew_BaselineOp, "Compiling op @ %d: %s",
int(pc - script->code), js_CodeName[op]);

Просмотреть файл

@ -45,7 +45,6 @@
#include "vm/Stack-inl.h"
#include "ion/IonFrames-inl.h"
#include "ion/CompilerRoot.h"
#include "methodjit/Retcon.h"
#include "ExecutionModeInlines.h"
#if JS_TRACE_LOGGING
@ -1294,10 +1293,7 @@ AttachFinishedCompilations(JSContext *cx)
success = codegen->link();
}
if (success) {
if (script->hasIonScript())
mjit::DisableScriptCodeForIon(script, script->ionScript()->osrPc());
} else {
if (!success) {
// Silently ignore OOM during code generation, we're at an
// operation callback and can't propagate failures.
cx->clearPendingException();
@ -1590,8 +1586,8 @@ Compile(JSContext *cx, HandleScript script, AbstractFramePtr fp, jsbytecode *osr
}
if (executionMode == SequentialExecution) {
if (cx->methodJitEnabled || IsBaselineEnabled(cx)) {
// If JM is enabled we use getUseCount instead of incUseCount to avoid
if (IsBaselineEnabled(cx)) {
// If Baseline is enabled we use getUseCount instead of incUseCount to avoid
// bumping the use count twice.
if (script->getUseCount() < js_IonOptions.usesBeforeCompile)
return Method_Skipped;
@ -2297,8 +2293,6 @@ ion::Invalidate(types::TypeCompartment &types, FreeOp *fop,
for (size_t i = 0; i < invalid.length(); i++) {
const types::CompilerOutput &co = *invalid[i].compilerOutput(types);
switch (co.kind()) {
case types::CompilerOutput::MethodJIT:
break;
case types::CompilerOutput::Ion:
case types::CompilerOutput::ParallelIon:
JS_ASSERT(co.isValid());
@ -2328,8 +2322,6 @@ ion::Invalidate(types::TypeCompartment &types, FreeOp *fop,
types::CompilerOutput &co = *invalid[i].compilerOutput(types);
ExecutionMode executionMode = SequentialExecution;
switch (co.kind()) {
case types::CompilerOutput::MethodJIT:
continue;
case types::CompilerOutput::Ion:
break;
case types::CompilerOutput::ParallelIon:

Просмотреть файл

@ -7480,13 +7480,8 @@ IonBuilder::getPropTryInlineAccess(bool *emitted, HandlePropertyName name, Handl
return true;
Vector<Shape *> shapes(cx);
if (Shape *objShape = mjit::GetPICSingleShape(cx, script(), pc, info().constructing())) {
if (!shapes.append(objShape))
return false;
} else {
if (!inspector->maybeShapesForPropertyOp(pc, shapes))
return false;
}
if (!inspector->maybeShapesForPropertyOp(pc, shapes))
return false;
if (shapes.empty() || !CanInlinePropertyOpShapes(shapes))
return true;
@ -7682,13 +7677,8 @@ IonBuilder::jsop_setprop(HandlePropertyName name)
}
Vector<Shape *> shapes(cx);
if (Shape *objShape = mjit::GetPICSingleShape(cx, script(), pc, info().constructing())) {
if (!shapes.append(objShape))
return false;
} else {
if (!inspector->maybeShapesForPropertyOp(pc, shapes))
return false;
}
if (!inspector->maybeShapesForPropertyOp(pc, shapes))
return false;
if (!shapes.empty() && CanInlinePropertyOpShapes(shapes)) {
if (shapes.length() == 1) {

Просмотреть файл

@ -142,21 +142,13 @@ def main(argv):
if options.tbpl:
# Running all bits would take forever. Instead, we test a few interesting combinations.
flags = [
['--no-baseline', '--no-jm'],
['--ion-eager'], # implies --baseline-eager
['--no-baseline'],
['--no-baseline', '--ion-eager'],
['--baseline-eager'],
['--baseline-eager', '--no-ti', '--no-fpu'],
# Below, equivalents the old shell flags: ,m,am,amd,n,mn,amn,amdn,mdn
['--no-baseline', '--no-ion', '--no-jm', '--no-ti'],
['--no-baseline', '--no-ion', '--no-ti'],
['--no-baseline', '--no-ion', '--no-ti', '--always-mjit', '--debugjit'],
['--no-baseline', '--no-ion', '--no-jm'],
['--no-baseline'],
['--no-baseline', '--ion-eager'],
['--no-baseline', '--no-ion'],
['--no-baseline', '--no-ion', '--always-mjit'],
['--no-baseline', '--no-ion', '--always-mjit', '--debugjit'],
['--no-baseline', '--no-ion', '--debugjit']
['--no-baseline', '--no-ion', '--no-ti'],
]
for test in test_list:
for variant in flags:

Просмотреть файл

@ -5,7 +5,6 @@ function tryItOut(code) {
f = eval("(function(){" + code + "})")
for (e in f()) {}
}
mjitChunkLimit(25)
tryItOut("\
for each(x in[0,0,0,0,0,0,0]) {\
function f(b) {\

Просмотреть файл

@ -10,5 +10,5 @@ function g(code) {
evalcx("(function(){return" + code + "})()")
} catch (e) {}
}
g("mjitChunkLimit(8)")
g("")
g(" function(x,[]){NaN.x::c}()")

Просмотреть файл

@ -3,7 +3,6 @@
// Binary: cache/js-dbg-32-92fe907ddac8-linux
// Flags: -m -n
//
mjitChunkLimit(31)
o = {}
o.valueOf = function() {
for (var p in undefined) {

Просмотреть файл

@ -5,7 +5,7 @@
var lfcode = new Array();
lfcode.push("3");
lfcode.push("\
evaluate(\"mjitChunkLimit(5)\");\
evaluate(\"\");\
function slice(a, b) {\
return slice(index, ++(ArrayBuffer));\
}\

Просмотреть файл

@ -8,6 +8,5 @@ Object.defineProperty(this, "t2", {
}
})
h2 = {}
mjitChunkLimit(8)
h2.a = function() {}
Object(t2)

Просмотреть файл

@ -7,8 +7,6 @@ function test(m) {
arr[1] = m;
}
mjitChunkLimit(10);
arr = new Float64Array(2);
// run function a lot to trigger methodjit compile

Просмотреть файл

@ -1,4 +1,3 @@
mjitChunkLimit(42);
Function("\
switch (/x/) {\
case 8:\

Просмотреть файл

@ -1,4 +1,3 @@
mjitChunkLimit(10);
function e() {
try {
var t = undefined;

Просмотреть файл

@ -1,5 +1,3 @@
evaluate("mjitChunkLimit(5)");
expected = 100;
function slice(a, b) {
return expected--;

Просмотреть файл

@ -596,25 +596,6 @@ ScriptAnalysis::analyzeBytecode(JSContext *cx)
*/
if (!script_->analyzedArgsUsage())
analyzeSSA(cx);
/*
* If the script has JIT information (we are reanalyzing the script after
* a purge), add safepoints for the targets of any cross chunk edges in
* the script. These safepoints are normally added when the JITScript is
* constructed, but will have been lost during the purge.
*/
#ifdef JS_METHODJIT
mjit::JITScript *jit = NULL;
for (int constructing = 0; constructing <= 1 && !jit; constructing++) {
for (int barriers = 0; barriers <= 1 && !jit; barriers++)
jit = script_->getJIT((bool) constructing, (bool) barriers);
}
if (jit) {
mjit::CrossChunkEdge *edges = jit->edges();
for (size_t i = 0; i < jit->nedges; i++)
getCode(edges[i].target).safePoint = true;
}
#endif
}
/////////////////////////////////////////////////////////////////////
@ -890,7 +871,7 @@ ScriptAnalysis::analyzeLifetimes(JSContext *cx)
ranLifetimes_ = true;
}
#ifdef JS_METHODJIT_SPEW
#ifdef DEBUG
void
LifetimeVariable::print() const
{
@ -1106,21 +1087,6 @@ ScriptAnalysis::ensureVariable(LifetimeVariable &var, unsigned until)
var.ensured = true;
}
void
ScriptAnalysis::clearAllocations()
{
/*
* Clear out storage used for register allocations in a compilation once
* that compilation has finished. Register allocations are only used for
* a single compilation.
*/
for (unsigned i = 0; i < script_->length; i++) {
Bytecode *code = maybeCode(i);
if (code)
code->allocation = NULL;
}
}
/////////////////////////////////////////////////////////////////////
// SSA Analysis
/////////////////////////////////////////////////////////////////////
@ -1841,13 +1807,9 @@ ScriptAnalysis::needsArgsObj(JSContext *cx, SeenVector &seen, SSAUseChain *use)
if (op == JSOP_POP || op == JSOP_POPN)
return false;
/* SplatApplyArgs can read fp->canonicalActualArg(i) directly. */
if (op == JSOP_FUNAPPLY && GET_ARGC(pc) == 2 && use->u.which == 0) {
#ifdef JS_METHODJIT
JS_ASSERT(mjit::IsLowerableFunCallOrApply(pc));
#endif
/* We can read the frame's arguments directly for f.apply(x, arguments). */
if (op == JSOP_FUNAPPLY && GET_ARGC(pc) == 2 && use->u.which == 0)
return false;
}
/* arguments[i] can read fp->canonicalActualArg(i) directly. */
if (op == JSOP_GETELEM && use->u.which == 1)

Просмотреть файл

@ -25,9 +25,6 @@
class JSScript;
/* Forward declaration of downstream register allocations computed for join points. */
namespace js { namespace mjit { struct RegisterAllocation; } }
namespace js {
namespace analyze {
@ -125,11 +122,6 @@ class Bytecode
/* If this is a JSOP_LOOPHEAD or JSOP_LOOPENTRY, information about the loop. */
LoopAnalysis *loop;
/* --------- Lifetime analysis --------- */
/* Any allocation computed downstream for this bytecode. */
mjit::RegisterAllocation *allocation;
/* --------- SSA analysis --------- */
/* Generated location of each value popped by this bytecode. */
@ -513,7 +505,7 @@ struct LifetimeVariable
return offset;
}
#ifdef JS_METHODJIT_SPEW
#ifdef DEBUG
void print() const;
#endif
};
@ -995,14 +987,6 @@ class ScriptAnalysis
return v.phiNode()->uses;
}
mjit::RegisterAllocation *&getAllocation(uint32_t offset) {
JS_ASSERT(offset < script_->length);
return getCode(offset).allocation;
}
mjit::RegisterAllocation *&getAllocation(const jsbytecode *pc) {
return getAllocation(pc - script_->code);
}
LoopAnalysis *getLoop(uint32_t offset) {
JS_ASSERT(offset < script_->length);
return getCode(offset).loop;
@ -1051,8 +1035,6 @@ class ScriptAnalysis
void printSSA(JSContext *cx);
void printTypes(JSContext *cx);
void clearAllocations();
private:
void setOOM(JSContext *cx) {
if (!outOfMemory)

Просмотреть файл

@ -131,10 +131,7 @@ ThrowHook(JSContext *cx, JSScript *, jsbytecode *, jsval *rval, void *closure)
BEGIN_TEST(testDebugger_throwHook)
{
uint32_t newopts =
JS_GetOptions(cx) | JSOPTION_METHODJIT | JSOPTION_METHODJIT_ALWAYS;
uint32_t oldopts = JS_SetOptions(cx, newopts);
CHECK(JS_SetDebugMode(cx, true));
CHECK(JS_SetThrowHook(rt, ThrowHook, NULL));
EXEC("function foo() { throw 3 };\n"
"for (var i = 0; i < 10; ++i) { \n"
@ -145,7 +142,6 @@ BEGIN_TEST(testDebugger_throwHook)
"}\n");
CHECK(called);
CHECK(JS_SetThrowHook(rt, NULL, NULL));
JS_SetOptions(cx, oldopts);
return true;
}
END_TEST(testDebugger_throwHook)

Просмотреть файл

@ -87,10 +87,9 @@
#if ENABLE_YARR_JIT
#include "assembler/jit/ExecutableAllocator.h"
#include "methodjit/Logging.h"
#endif
#ifdef JS_METHODJIT
#ifdef JS_ION
#include "ion/Ion.h"
#endif
@ -692,7 +691,7 @@ static const JSSecurityCallbacks NullSecurityCallbacks = { };
static bool
JitSupportsFloatingPoint()
{
#if defined(JS_METHODJIT) || defined(JS_ION)
#if defined(JS_ION)
if (!JSC::MacroAssembler().supportsFloatingPoint())
return false;
@ -739,9 +738,6 @@ JSRuntime::JSRuntime(JSUseHelperThreads useHelperThreads)
freeLifoAlloc(TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
execAlloc_(NULL),
bumpAlloc_(NULL),
#ifdef JS_METHODJIT
jaegerRuntime_(NULL),
#endif
ionRuntime_(NULL),
selfHostingGlobal_(NULL),
nativeStackBase(0),
@ -927,10 +923,6 @@ JSRuntime::init(uint32_t maxbytes)
js::TlsPerThreadData.set(&mainThread);
#ifdef JS_METHODJIT_SPEW
JMCheckLogging();
#endif
if (!js_InitGC(this, maxbytes))
return false;
@ -1050,13 +1042,10 @@ JSRuntime::~JSRuntime()
js_delete(bumpAlloc_);
js_delete(mathCache_);
#ifdef JS_METHODJIT
js_delete(jaegerRuntime_);
#endif
#ifdef JS_ION
js_delete(ionRuntime_);
#endif
js_delete(execAlloc_); /* Delete after jaegerRuntime_. */
js_delete(execAlloc_); /* Delete after ionRuntime_. */
if (ionPcScriptCache)
js_delete(ionPcScriptCache);
@ -1166,7 +1155,7 @@ JS_NewRuntime(uint32_t maxbytes, JSUseHelperThreads useHelperThreads)
if (!rt)
return NULL;
#if defined(JS_METHODJIT) && defined(JS_ION)
#if defined(JS_ION)
if (!ion::InitializeIon())
return NULL;
#endif

Просмотреть файл

@ -24,8 +24,6 @@
#include "jstypes.h"
#include "jsutil.h"
#include "ds/Sort.h"
#include "methodjit/MethodJIT.h"
#include "methodjit/StubCalls-inl.h"
#include "vm/ArgumentsObject.h"
#include "vm/ForkJoin.h"
#include "vm/NumericConversions.h"
@ -2006,15 +2004,6 @@ js::ArrayShiftMoveElements(JSObject *obj)
obj->moveDenseElementsUnbarriered(0, 1, initlen);
}
#ifdef JS_METHODJIT
void JS_FASTCALL
mjit::stubs::ArrayShift(VMFrame &f)
{
JSObject *obj = &f.regs.sp[-1].toObject();
ArrayShiftMoveElements(obj);
}
#endif /* JS_METHODJIT */
/* ES5 15.4.4.9 */
JSBool
js::array_shift(JSContext *cx, unsigned argc, Value *vp)
@ -2437,7 +2426,7 @@ array_splice(JSContext *cx, unsigned argc, Value *vp)
return true;
}
#ifdef JS_METHODJIT
#ifdef JS_ION
bool
js::array_concat_dense(JSContext *cx, HandleObject obj1, HandleObject obj2, HandleObject result)
{
@ -2460,22 +2449,10 @@ js::array_concat_dense(JSContext *cx, HandleObject obj1, HandleObject obj2, Hand
result->initDenseElements(0, obj1->getDenseElements(), initlen1);
result->initDenseElements(initlen1, obj2->getDenseElements(), initlen2);
result->setArrayLengthInt32(len);
return true;
}
void JS_FASTCALL
mjit::stubs::ArrayConcatTwoArrays(VMFrame &f)
{
RootedObject result(f.cx, &f.regs.sp[-3].toObject());
RootedObject obj1(f.cx, &f.regs.sp[-2].toObject());
RootedObject obj2(f.cx, &f.regs.sp[-1].toObject());
if (!array_concat_dense(f.cx, obj1, obj2, result))
THROW();
}
#endif /* JS_METHODJIT */
#endif /* JS_ION */
/*
* Python-esque sequence operations.
@ -2992,18 +2969,6 @@ js::NewDenseUnallocatedArray(JSContext *cx, uint32_t length, JSObject *proto /*
return NewArray<false>(cx, length, proto, newKind);
}
#ifdef JS_METHODJIT
JSObject * JS_FASTCALL
mjit::stubs::NewDenseUnallocatedArray(VMFrame &f, uint32_t length)
{
JSObject *obj = NewArray<false>(f.cx, length, (JSObject *)f.scratch);
if (!obj)
THROWV(NULL);
return obj;
}
#endif
JSObject *
js::NewDenseCopiedArray(JSContext *cx, uint32_t length, HandleObject src, uint32_t elementOffset,
JSObject *proto /* = NULL */)

Просмотреть файл

@ -44,9 +44,6 @@
#include "ion/Ion.h"
#endif
#ifdef JS_METHODJIT
# include "methodjit/MethodJIT.h"
#endif
#include "gc/Marking.h"
#include "js/CharacterEncoding.h"
#include "js/MemoryMetrics.h"
@ -208,25 +205,6 @@ JSRuntime::createMathCache(JSContext *cx)
return mathCache_;
}
#ifdef JS_METHODJIT
mjit::JaegerRuntime *
JSRuntime::createJaegerRuntime(JSContext *cx)
{
JS_ASSERT(!jaegerRuntime_);
JS_ASSERT(cx->runtime == this);
mjit::JaegerRuntime *jr = js_new<mjit::JaegerRuntime>();
if (!jr || !jr->init(cx)) {
js_ReportOutOfMemory(cx);
js_delete(jr);
return NULL;
}
jaegerRuntime_ = jr;
return jaegerRuntime_;
}
#endif
void
JSCompartment::sweepCallsiteClones()
{
@ -1165,10 +1143,7 @@ JSContext::JSContext(JSRuntime *rt)
#endif
resolveFlags(0),
iterValue(MagicValue(JS_NO_ITER_VALUE)),
#ifdef JS_METHODJIT
methodJitEnabled(false),
jitIsBroken(false),
#endif
#ifdef MOZ_TRACE_JSCALLS
functionCallback(NULL),
#endif
@ -1392,7 +1367,6 @@ JSContext::purge()
}
}
#if defined(JS_METHODJIT)
static bool
ComputeIsJITBroken()
{
@ -1463,15 +1437,11 @@ IsJITBrokenHere()
}
return isBroken;
}
#endif
void
JSContext::updateJITEnabled()
{
#ifdef JS_METHODJIT
jitIsBroken = IsJITBrokenHere();
methodJitEnabled = (options_ & JSOPTION_METHODJIT) && !jitIsBroken;
#endif
}
size_t

Просмотреть файл

@ -124,10 +124,6 @@ class AutoCycleDetector
extern void
TraceCycleDetectionSet(JSTracer *trc, ObjectSet &set);
namespace mjit {
class JaegerRuntime;
}
class MathCache;
namespace ion {
@ -747,16 +743,12 @@ struct JSRuntime : public JS::shadow::Runtime,
*/
JSC::ExecutableAllocator *execAlloc_;
WTF::BumpPointerAllocator *bumpAlloc_;
#ifdef JS_METHODJIT
js::mjit::JaegerRuntime *jaegerRuntime_;
#endif
js::ion::IonRuntime *ionRuntime_;
JSObject *selfHostingGlobal_;
JSC::ExecutableAllocator *createExecutableAllocator(JSContext *cx);
WTF::BumpPointerAllocator *createBumpPointerAllocator(JSContext *cx);
js::mjit::JaegerRuntime *createJaegerRuntime(JSContext *cx);
js::ion::IonRuntime *createIonRuntime(JSContext *cx);
public:
@ -773,18 +765,6 @@ struct JSRuntime : public JS::shadow::Runtime,
WTF::BumpPointerAllocator *getBumpPointerAllocator(JSContext *cx) {
return bumpAlloc_ ? bumpAlloc_ : createBumpPointerAllocator(cx);
}
#ifdef JS_METHODJIT
js::mjit::JaegerRuntime *getJaegerRuntime(JSContext *cx) {
return jaegerRuntime_ ? jaegerRuntime_ : createJaegerRuntime(cx);
}
bool hasJaegerRuntime() const {
return jaegerRuntime_;
}
js::mjit::JaegerRuntime &jaegerRuntime() {
JS_ASSERT(hasJaegerRuntime());
return *jaegerRuntime_;
}
#endif
js::ion::IonRuntime *getIonRuntime(JSContext *cx) {
return ionRuntime_ ? ionRuntime_ : createIonRuntime(cx);
}
@ -1744,15 +1724,9 @@ struct JSContext : js::ContextFriendFields,
/* Location to stash the iteration value between JSOP_MOREITER and JSOP_ITERNEXT. */
js::Value iterValue;
#ifdef JS_METHODJIT
bool methodJitEnabled;
bool jitIsBroken;
js::mjit::JaegerRuntime &jaegerRuntime() { return runtime->jaegerRuntime(); }
#endif
inline bool typeInferenceEnabled() const;
inline bool jaegerCompilationAllowed() const;
void updateJITEnabled();
@ -2157,16 +2131,6 @@ JS_CHECK_OPERATION_LIMIT(JSContext *cx)
namespace js {
#ifdef JS_METHODJIT
namespace mjit {
void ExpandInlineFrames(JS::Zone *zone);
}
#endif
} /* namespace js */
namespace js {
/************************************************************************/
static JS_ALWAYS_INLINE void

Просмотреть файл

@ -660,16 +660,8 @@ JSCompartment::updateForDebugMode(FreeOp *fop, AutoDebugModeGC &dmgc)
acx->updateJITEnabled();
}
#ifdef JS_METHODJIT
bool enabled = debugMode();
JS_ASSERT_IF(enabled, !hasScriptsOnStack());
for (gc::CellIter i(zone(), gc::FINALIZE_SCRIPT); !i.done(); i.next()) {
JSScript *script = i.get<JSScript>();
if (script->compartment() == this)
script->debugMode = enabled;
}
#ifdef JS_ION
JS_ASSERT_IF(debugMode(), !hasScriptsOnStack());
// When we change a compartment's debug mode, whether we're turning it
// on or off, we must always throw away all analyses: debug mode

Просмотреть файл

@ -411,12 +411,6 @@ JSContext::typeInferenceEnabled() const
return compartment->zone()->types.inferenceEnabled;
}
inline bool
JSContext::jaegerCompilationAllowed() const
{
return compartment->zone()->types.jaegerCompilationAllowed;
}
inline js::Handle<js::GlobalObject*>
JSContext::global() const
{

Просмотреть файл

@ -33,10 +33,6 @@
#include "vm/StringBuffer.h"
#include "vm/Xdr.h"
#ifdef JS_METHODJIT
#include "methodjit/MethodJIT.h"
#endif
#include "jsfuninlines.h"
#include "jsinferinlines.h"
#include "jsinterpinlines.h"
@ -133,28 +129,6 @@ fun_getProperty(JSContext *cx, HandleObject obj_, HandleId id, MutableHandleValu
return true;
}
#ifdef JS_METHODJIT
StackFrame *fp = NULL;
if (!iter.isIon())
fp = iter.interpFrame();
if (JSID_IS_ATOM(id, cx->names().caller) && fp && fp->prev()) {
/*
* If the frame was called from within an inlined frame, mark the
* innermost function as uninlineable to expand its frame and allow us
* to recover its callee object.
*/
InlinedSite *inlined;
jsbytecode *prevpc = fp->prevpc(&inlined);
if (inlined) {
mjit::JITChunk *chunk = fp->prev()->jit()->chunk(prevpc);
JSFunction *fun = chunk->inlineFrames()[inlined->inlineIndex].fun;
fun->nonLazyScript()->uninlineable = true;
MarkTypeObjectFlags(cx, fun, OBJECT_FLAG_UNINLINEABLE);
}
}
#endif
if (JSID_IS_ATOM(id, cx->names().caller)) {
++iter;
if (iter.done() || !iter.isFunctionFrame()) {

Просмотреть файл

@ -58,7 +58,6 @@
#include "gc/GCInternals.h"
#include "gc/Marking.h"
#include "gc/Memory.h"
#include "methodjit/MethodJIT.h"
#include "vm/Debugger.h"
#include "vm/Shape.h"
#include "vm/String.h"
@ -859,12 +858,6 @@ js::SetGCZeal(JSRuntime *rt, uint8_t zeal, uint32_t frequency)
VerifyBarriers(rt, PostBarrierVerifier);
}
#ifdef JS_METHODJIT
/* In case Zone::compileBarriers() changed... */
for (ZonesIter zone(rt); !zone.done(); zone.next())
mjit::ClearAllFrames(zone);
#endif
bool schedule = zeal >= js::gc::ZealAllocValue;
rt->gcZeal_ = zeal;
rt->gcZealFrequency = frequency;
@ -4834,29 +4827,24 @@ void PreventGCDuringInteractiveDebug()
void
js::ReleaseAllJITCode(FreeOp *fop)
{
#ifdef JS_METHODJIT
#ifdef JS_ION
for (ZonesIter zone(fop->runtime()); !zone.done(); zone.next()) {
mjit::ClearAllFrames(zone);
# ifdef JS_ION
# ifdef DEBUG
# ifdef DEBUG
/* Assert no baseline scripts are marked as active. */
for (CellIter i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
JSScript *script = i.get<JSScript>();
JS_ASSERT_IF(script->hasBaselineScript(), !script->baselineScript()->active());
}
# endif
# endif
/* Mark baseline scripts on the stack as active. */
ion::MarkActiveBaselineScripts(zone);
ion::InvalidateAll(fop, zone);
# endif
for (CellIter i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
JSScript *script = i.get<JSScript>();
mjit::ReleaseScriptCode(fop, script);
# ifdef JS_ION
ion::FinishInvalidation(fop, script);
/*
@ -4864,7 +4852,6 @@ js::ReleaseAllJITCode(FreeOp *fop)
* this also resets the active flag.
*/
ion::FinishDiscardBaselineScript(fop, script);
# endif
}
}
#endif
@ -4972,19 +4959,12 @@ js::PurgePCCounts(JSContext *cx)
void
js::PurgeJITCaches(Zone *zone)
{
#ifdef JS_METHODJIT
mjit::ClearAllFrames(zone);
#ifdef JS_ION
for (CellIterUnderGC i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
JSScript *script = i.get<JSScript>();
/* Discard JM caches. */
mjit::PurgeCaches(script);
#ifdef JS_ION
/* Discard Ion caches. */
ion::PurgeCaches(script, zone);
#endif
}
#endif
}

Просмотреть файл

@ -26,8 +26,6 @@
#endif
#include "gc/Marking.h"
#include "js/MemoryMetrics.h"
#include "methodjit/MethodJIT.h"
#include "methodjit/Retcon.h"
#include "vm/Shape.h"
#include "jsatominlines.h"
@ -2343,32 +2341,6 @@ JITCodeHasCheck(JSScript *script, jsbytecode *pc, RecompileKind kind)
if (kind == RECOMPILE_NONE)
return false;
#ifdef JS_METHODJIT
for (int constructing = 0; constructing <= 1; constructing++) {
for (int barriers = 0; barriers <= 1; barriers++) {
mjit::JITScript *jit = script->getJIT((bool) constructing, (bool) barriers);
if (!jit)
continue;
mjit::JITChunk *chunk = jit->chunk(pc);
if (!chunk)
continue;
bool found = false;
uint32_t count = (kind == RECOMPILE_CHECK_MONITORED)
? chunk->nMonitoredBytecodes
: chunk->nTypeBarrierBytecodes;
uint32_t *bytecodes = (kind == RECOMPILE_CHECK_MONITORED)
? chunk->monitoredBytecodes()
: chunk->typeBarrierBytecodes();
for (size_t i = 0; i < count; i++) {
if (bytecodes[i] == uint32_t(pc - script->code))
found = true;
}
if (!found)
return false;
}
}
#endif
if (script->hasAnyIonScript() || script->isIonCompilingOffThread())
return false;
@ -2404,8 +2376,6 @@ AddPendingRecompile(JSContext *cx, JSScript *script, jsbytecode *pc,
return;
}
switch (co->kind()) {
case CompilerOutput::MethodJIT:
break;
case CompilerOutput::Ion:
case CompilerOutput::ParallelIon:
if (co->script == script)
@ -2818,19 +2788,10 @@ TypeCompartment::processPendingRecompiles(FreeOp *fop)
JS_ASSERT(!pending->empty());
#ifdef JS_METHODJIT
mjit::ExpandInlineFrames(compartment()->zone());
#ifdef JS_ION
for (unsigned i = 0; i < pending->length(); i++) {
CompilerOutput &co = *(*pending)[i].compilerOutput(*this);
switch (co.kind()) {
case CompilerOutput::MethodJIT:
JS_ASSERT(co.isValid());
mjit::Recompiler::clearStackReferences(fop, co.script);
co.mjit()->destroyChunk(fop, co.chunkIndex);
JS_ASSERT(co.script == NULL);
break;
case CompilerOutput::Ion:
case CompilerOutput::ParallelIon:
# ifdef JS_THREADSAFE
@ -2848,10 +2809,8 @@ TypeCompartment::processPendingRecompiles(FreeOp *fop)
}
}
# ifdef JS_ION
ion::Invalidate(*this, fop, *pending);
# endif
#endif /* JS_METHODJIT */
#endif /* JS_ION */
fop->delete_(pending);
}
@ -2898,23 +2857,16 @@ TypeZone::nukeTypes(FreeOp *fop)
inferenceEnabled = false;
#ifdef JS_METHODJIT
mjit::ExpandInlineFrames(zone());
mjit::ClearAllFrames(zone());
# ifdef JS_ION
#ifdef JS_ION
ion::InvalidateAll(fop, zone());
# endif
/* Throw away all JIT code in the compartment, but leave everything else alone. */
for (gc::CellIter i(zone(), gc::FINALIZE_SCRIPT); !i.done(); i.next()) {
JSScript *script = i.get<JSScript>();
mjit::ReleaseScriptCode(fop, script);
# ifdef JS_ION
ion::FinishInvalidation(fop, script);
# endif
}
#endif /* JS_METHODJIT */
#endif /* JS_ION */
pendingNukeTypes = false;
}
@ -2937,15 +2889,8 @@ TypeCompartment::addPendingRecompile(JSContext *cx, const RecompileInfo &info)
return;
}
#ifdef JS_METHODJIT
mjit::JITScript *jit = co->script->getJIT(co->constructing, co->barriers);
bool hasJITCode = jit && jit->chunkDescriptor(co->chunkIndex).chunk;
# if defined(JS_ION)
hasJITCode |= !!co->script->hasAnyIonScript();
# endif
if (!hasJITCode) {
#if defined(JS_ION)
if (!co->script->hasAnyIonScript()) {
/* Scripts which haven't been compiled yet don't need to be recompiled. */
return;
}
@ -2981,29 +2926,7 @@ TypeCompartment::addPendingRecompile(JSContext *cx, JSScript *script, jsbytecode
if (!constrainedOutputs)
return;
#ifdef JS_METHODJIT
for (int constructing = 0; constructing <= 1; constructing++) {
for (int barriers = 0; barriers <= 1; barriers++) {
mjit::JITScript *jit = script->getJIT((bool) constructing, (bool) barriers);
if (!jit)
continue;
if (pc) {
unsigned int chunkIndex = jit->chunkIndex(pc);
mjit::JITChunk *chunk = jit->chunkDescriptor(chunkIndex).chunk;
if (chunk)
addPendingRecompile(cx, chunk->recompileInfo);
} else {
for (size_t chunkIndex = 0; chunkIndex < jit->nchunks; chunkIndex++) {
mjit::JITChunk *chunk = jit->chunkDescriptor(chunkIndex).chunk;
if (chunk)
addPendingRecompile(cx, chunk->recompileInfo);
}
}
}
}
# ifdef JS_ION
#ifdef JS_ION
CancelOffThreadIonCompile(cx->compartment, script);
// Let the script warm up again before attempting another compile.
@ -3015,7 +2938,6 @@ TypeCompartment::addPendingRecompile(JSContext *cx, JSScript *script, jsbytecode
if (script->hasParallelIonScript())
addPendingRecompile(cx, script->parallelIonScript()->recompileInfo());
# endif
#endif
}

Просмотреть файл

@ -101,10 +101,6 @@ class RootedBase<TaggedProto> : public TaggedProtoOperations<Rooted<TaggedProto>
class CallObject;
namespace mjit {
struct JITScript;
}
namespace ion {
struct IonScript;
}
@ -1280,7 +1276,6 @@ typedef HashMap<AllocationSiteKey,ReadBarriered<TypeObject>,AllocationSiteKey,Sy
struct CompilerOutput
{
enum Kind {
MethodJIT,
Ion,
ParallelIon
};
@ -1301,7 +1296,6 @@ struct CompilerOutput
Kind kind() const { return static_cast<Kind>(kindInt); }
void setKind(Kind k) { kindInt = k; }
mjit::JITScript *mjit() const;
ion::IonScript *ion() const;
bool isValid() const;

Просмотреть файл

@ -91,31 +91,19 @@ namespace types {
inline
CompilerOutput::CompilerOutput()
: script(NULL),
kindInt(MethodJIT),
kindInt(Ion),
constructing(false),
barriers(false),
chunkIndex(false)
{
}
inline mjit::JITScript *
CompilerOutput::mjit() const
{
#ifdef JS_METHODJIT
JS_ASSERT(kind() == MethodJIT && isValid());
return script->getJIT(constructing, barriers);
#else
return NULL;
#endif
}
inline ion::IonScript *
CompilerOutput::ion() const
{
#ifdef JS_ION
JS_ASSERT(kind() != MethodJIT && isValid());
JS_ASSERT(isValid());
switch (kind()) {
case MethodJIT: break;
case Ion: return script->ionScript();
case ParallelIon: return script->parallelIonScript();
}
@ -130,24 +118,11 @@ CompilerOutput::isValid() const
if (!script)
return false;
#if defined(DEBUG) && (defined(JS_METHODJIT) || defined(JS_ION))
#if defined(DEBUG) && defined(JS_ION)
TypeCompartment &types = script->compartment()->types;
#endif
switch (kind()) {
case MethodJIT: {
#ifdef JS_METHODJIT
mjit::JITScript *jit = script->getJIT(constructing, barriers);
if (!jit)
return false;
mjit::JITChunk *chunk = jit->chunkDescriptor(chunkIndex).chunk;
if (!chunk)
return false;
JS_ASSERT(this == chunk->recompileInfo.compilerOutput(types));
return true;
#endif
}
case Ion:
#ifdef JS_ION
if (script->hasIonScript()) {

Просмотреть файл

@ -38,10 +38,6 @@
#include "vm/Debugger.h"
#include "vm/Shape.h"
#ifdef JS_METHODJIT
#include "methodjit/MethodJIT.h"
#include "methodjit/Logging.h"
#endif
#include "ion/Ion.h"
#include "ion/BaselineJIT.h"
@ -298,9 +294,6 @@ js::RunScript(JSContext *cx, StackFrame *fp)
JS_ASSERT_IF(!fp->isGeneratorFrame(), cx->regs().pc == script->code);
JS_ASSERT_IF(fp->isEvalFrame(), script->isActiveEval);
#ifdef JS_METHODJIT_SPEW
JMCheckLogging();
#endif
JS_CHECK_RECURSION(cx, return false);
@ -369,17 +362,6 @@ js::RunScript(JSContext *cx, StackFrame *fp)
}
#endif
#ifdef JS_METHODJIT
mjit::CompileStatus status;
status = mjit::CanMethodJIT(cx, script, script->code, fp->isConstructing(),
mjit::CompileRequest_Interpreter, fp);
if (status == mjit::Compile_Error)
return false;
if (status == mjit::Compile_Okay)
return mjit::JaegerStatusToSuccess(mjit::JaegerShot(cx, false));
#endif
return Interpret(cx, fp) != Interpret_Error;
}
@ -1082,25 +1064,6 @@ js::Interpret(JSContext *cx, StackFrame *entryFrame, InterpMode interpMode, bool
#define LOAD_DOUBLE(PCOFF, dbl) \
(dbl = script->getConst(GET_UINT32_INDEX(regs.pc + (PCOFF))).toDouble())
#ifdef JS_METHODJIT
#define CHECK_PARTIAL_METHODJIT(status) \
JS_BEGIN_MACRO \
switch (status) { \
case mjit::Jaeger_UnfinishedAtTrap: \
interpMode = JSINTERP_SKIP_TRAP; \
/* FALLTHROUGH */ \
case mjit::Jaeger_Unfinished: \
op = (JSOp) *regs.pc; \
SET_SCRIPT(regs.fp()->script()); \
if (cx->isExceptionPending()) \
goto error; \
DO_OP(); \
default:; \
} \
JS_END_MACRO
#endif
/*
* Prepare to call a user-supplied branch handler, and abort the script
* if it returns false.
@ -1384,35 +1347,6 @@ END_CASE(JSOP_LABEL)
check_backedge:
{
CHECK_BRANCH();
if (op != JSOP_LOOPHEAD)
DO_OP();
#ifdef JS_METHODJIT
// Attempt on-stack replacement with JaegerMonkey code, which is keyed to
// the interpreter state at the JSOP_LOOPHEAD at the start of the loop.
// Unlike IonMonkey, this requires two different code fragments to perform
// hoisting.
mjit::CompileStatus status =
mjit::CanMethodJIT(cx, script, regs.pc, regs.fp()->isConstructing(),
mjit::CompileRequest_Interpreter, regs.fp());
if (status == mjit::Compile_Error)
goto error;
if (status == mjit::Compile_Okay) {
void *ncode =
script->nativeCodeForPC(regs.fp()->isConstructing(), regs.pc);
JS_ASSERT(ncode);
mjit::JaegerStatus status = mjit::JaegerShotAtSafePoint(cx, ncode, true);
if (status == mjit::Jaeger_ThrowBeforeEnter)
goto error;
CHECK_PARTIAL_METHODJIT(status);
interpReturnOK = (status == mjit::Jaeger_Returned);
if (entryFrame != regs.fp())
goto jit_return;
regs.fp()->setFinishedInInterpreter();
goto leave_on_safe_point;
}
#endif /* JS_METHODJIT */
DO_OP();
}
@ -1556,13 +1490,11 @@ BEGIN_CASE(JSOP_STOP)
Probes::exitScript(cx, script, script->function(), regs.fp());
/* The JIT inlines the epilogue. */
#if defined(JS_METHODJIT) || defined(JS_ION)
#if defined(JS_ION)
jit_return:
#endif
/* The results of lowered call/apply frames need to be shifted. */
bool shiftResult = regs.fp()->loweredCallOrApply();
cx->stack.popInlineFrame(regs);
SET_SCRIPT(regs.fp()->script());
@ -1572,11 +1504,6 @@ BEGIN_CASE(JSOP_STOP)
if (JS_LIKELY(interpReturnOK)) {
TypeScript::Monitor(cx, script, regs.pc, regs.sp[-1]);
if (shiftResult) {
regs.sp[-2] = regs.sp[-1];
regs.sp--;
}
len = JSOP_CALL_LENGTH;
DO_NEXT_OP(len);
}
@ -2453,24 +2380,6 @@ BEGIN_CASE(JSOP_FUNCALL)
}
#endif
#ifdef JS_METHODJIT
if (!newType && cx->methodJitEnabled) {
/* Try to ensure methods are method JIT'd. */
mjit::CompileStatus status = mjit::CanMethodJIT(cx, script, script->code,
construct,
mjit::CompileRequest_Interpreter,
regs.fp());
if (status == mjit::Compile_Error)
goto error;
if (status == mjit::Compile_Okay) {
mjit::JaegerStatus status = mjit::JaegerShot(cx, true);
CHECK_PARTIAL_METHODJIT(status);
interpReturnOK = mjit::JaegerStatusToSuccess(status);
goto jit_return;
}
}
#endif
if (!regs.fp()->prologue(cx))
goto error;
if (cx->compartment->debugMode()) {
@ -3396,7 +3305,7 @@ END_CASE(JSOP_ARRAYPUSH)
gc::MaybeVerifyBarriers(cx, true);
#ifdef JS_METHODJIT
#ifdef JS_ION
/*
* This path is used when it's guaranteed the method can be finished
* inside the JIT.

Просмотреть файл

@ -17,7 +17,6 @@
#include "jsprobes.h"
#include "jsstr.h"
#include "methodjit/MethodJIT.h"
#include "vm/ForkJoin.h"
#include "jsatominlines.h"

Просмотреть файл

@ -253,16 +253,13 @@ StatsCellCallback(JSRuntime *rt, void *data, void *thing, JSGCTraceKind traceKin
CompartmentStats *cStats = GetCompartmentStats(script->compartment());
cStats->gcHeapScripts += thingSize;
cStats->scriptData += script->sizeOfData(rtStats->mallocSizeOf_);
#ifdef JS_METHODJIT
cStats->jaegerData += script->sizeOfJitScripts(rtStats->mallocSizeOf_);
# ifdef JS_ION
#ifdef JS_ION
size_t baselineData = 0, baselineStubsFallback = 0;
ion::SizeOfBaselineData(script, rtStats->mallocSizeOf_, &baselineData,
&baselineStubsFallback);
cStats->baselineData += baselineData;
cStats->baselineStubsFallback += baselineStubsFallback;
cStats->ionData += ion::SizeOfIonData(script, rtStats->mallocSizeOf_);
# endif
#endif
ScriptSource *ss = script->scriptSource();
@ -275,11 +272,9 @@ StatsCellCallback(JSRuntime *rt, void *data, void *thing, JSGCTraceKind traceKin
}
case JSTRACE_IONCODE: {
#ifdef JS_METHODJIT
# ifdef JS_ION
#ifdef JS_ION
zStats->gcHeapIonCodes += thingSize;
// The code for a script is counted in ExecutableAllocator::sizeOfCode().
# endif
#endif
break;
}

Просмотреть файл

@ -1492,9 +1492,6 @@ FindStartPC(JSContext *cx, ScriptFrameIter &iter, int spindex, int skipStackHits
if (iter.isIonOptimizedJS())
return true;
if (!iter.isIonBaselineJS() && iter.interpFrame()->jitRevisedStack())
return true;
*valuepc = NULL;
PCStack pcstack;

Просмотреть файл

@ -32,43 +32,6 @@ Probes::JITGranularityRequested(JSContext *cx)
return JITREPORT_GRANULARITY_NONE;
}
#ifdef JS_METHODJIT
bool
Probes::registerMJITCode(JSContext *cx, js::mjit::JITChunk *chunk,
js::mjit::JSActiveFrame *outerFrame,
js::mjit::JSActiveFrame **inlineFrames)
{
if (cx->runtime->spsProfiler.enabled() &&
!cx->runtime->spsProfiler.registerMJITCode(chunk, outerFrame, inlineFrames))
{
return false;
}
return true;
}
void
Probes::discardMJITCode(FreeOp *fop, mjit::JITScript *jscr, mjit::JITChunk *chunk, void* address)
{
if (fop->runtime()->spsProfiler.enabled())
fop->runtime()->spsProfiler.discardMJITCode(jscr, chunk, address);
}
bool
Probes::registerICCode(JSContext *cx,
mjit::JITChunk *chunk, JSScript *script, jsbytecode* pc,
void *start, size_t size)
{
if (cx->runtime->spsProfiler.enabled() &&
!cx->runtime->spsProfiler.registerICCode(chunk, script, pc, start, size))
{
return false;
}
return true;
}
#endif
/* ICs are unregistered in a batch */
void
Probes::discardExecutableRegion(void *start, size_t size)

Просмотреть файл

@ -18,12 +18,6 @@
namespace js {
namespace mjit {
struct NativeAddressInfo;
struct JSActiveFrame;
struct JITChunk;
}
namespace Probes {
/*
@ -113,30 +107,6 @@ enum JITReportGranularity {
JITReportGranularity
JITGranularityRequested(JSContext *cx);
#ifdef JS_METHODJIT
/*
* New method JIT code has been created
*/
bool
registerMJITCode(JSContext *cx, js::mjit::JITChunk *chunk,
mjit::JSActiveFrame *outerFrame,
mjit::JSActiveFrame **inlineFrames);
/*
* Method JIT code is about to be discarded
*/
void
discardMJITCode(FreeOp *fop, mjit::JITScript *jscr, mjit::JITChunk *chunk, void* address);
/*
* IC code has been allocated within the given JITChunk
*/
bool
registerICCode(JSContext *cx,
mjit::JITChunk *chunk, JSScript *script, jsbytecode* pc,
void *start, size_t size);
#endif /* JS_METHODJIT */
/*
* A whole region of code has been deallocated, containing any number of ICs.
* (ICs are unregistered in a batch, so individual ICs are not registered.)

Просмотреть файл

@ -27,10 +27,8 @@
#include "gc/Marking.h"
#include "frontend/BytecodeEmitter.h"
#include "methodjit/MethodJIT.h"
#include "ion/IonCode.h"
#include "ion/BaselineJIT.h"
#include "methodjit/Retcon.h"
#include "vm/Debugger.h"
#include "vm/Shape.h"
#include "vm/Xdr.h"
@ -1842,10 +1840,6 @@ JSScript::fullyInitFromEmitter(JSContext *cx, Handle<JSScript*> script, Bytecode
script->bindingsAccessedDynamically = bce->sc->bindingsAccessedDynamically();
script->funHasExtensibleScope = funbox ? funbox->hasExtensibleScope() : false;
script->hasSingletons = bce->hasSingletons;
#ifdef JS_METHODJIT
if (cx->compartment->debugMode())
script->debugMode = true;
#endif
if (funbox) {
if (funbox->argumentsHasLocalBinding()) {
@ -1986,11 +1980,8 @@ JSScript::finalize(FreeOp *fop)
if (types)
types->destroy();
#ifdef JS_METHODJIT
mjit::ReleaseScriptCode(fop, this);
# ifdef JS_ION
#ifdef JS_ION
ion::DestroyIonScripts(fop, this);
# endif
#endif
destroyScriptCounts(fop);
@ -2510,13 +2501,6 @@ JSScript::ensureHasDebugScript(JSContext *cx)
void
JSScript::recompileForStepMode(FreeOp *fop)
{
#ifdef JS_METHODJIT
if (hasMJITInfo()) {
mjit::Recompiler::clearStackReferences(fop, this);
mjit::ReleaseScriptCode(fop, this);
}
#endif
#ifdef JS_ION
if (hasBaselineScript())
baseline->toggleDebugTraps(this, NULL);
@ -2698,16 +2682,6 @@ JSScript::markChildren(JSTracer *trc)
bindings.trace(trc);
#ifdef JS_METHODJIT
for (int constructing = 0; constructing <= 1; constructing++) {
for (int barriers = 0; barriers <= 1; barriers++) {
mjit::JITScript *jit = getJIT((bool) constructing, (bool) barriers);
if (jit)
jit->trace(trc);
}
}
#endif
if (hasAnyBreakpointsOrStepMode()) {
for (unsigned i = 0; i < length; i++) {
BreakpointSite *site = debugScript()->breakpoints[i];
@ -2841,14 +2815,6 @@ JSScript::argumentsOptimizationFailed(JSContext *cx, HandleScript script)
}
}
#ifdef JS_METHODJIT
if (script->hasMJITInfo()) {
mjit::ExpandInlineFrames(cx->zone());
mjit::Recompiler::clearStackReferences(cx->runtime->defaultFreeOp(), script);
mjit::ReleaseScriptCode(cx->runtime->defaultFreeOp(), script);
}
#endif
if (script->hasAnalysis() && script->analysis()->ranInference()) {
types::AutoEnterAnalysis enter(cx);
types::TypeScript::MonitorUnknown(cx, script, script->argumentsBytecode());

Просмотреть файл

@ -288,70 +288,6 @@ class JSScript : public js::gc::Cell
static const uint32_t stepCountMask = 0x7fffffffU;
public:
#ifdef JS_METHODJIT
// This type wraps JITScript. It has three possible states.
// - "Empty": no compilation has been attempted and there is no JITScript.
// - "Unjittable": compilation failed and there is no JITScript.
// - "Valid": compilation succeeded and there is a JITScript.
class JITScriptHandle
{
// CallCompiler must be a friend because it generates code that uses
// UNJITTABLE.
friend class js::mjit::CallCompiler;
// The exact representation:
// - NULL means "empty".
// - UNJITTABLE means "unjittable".
// - Any other value means "valid".
// UNJITTABLE = 1 so that we can check that a JITScript is valid
// with a single |> 1| test. It's defined outside the class because
// non-integral static const fields can't be defined in the class.
static const js::mjit::JITScript *UNJITTABLE; // = (JITScript *)1;
js::mjit::JITScript *value;
public:
JITScriptHandle() { value = NULL; }
bool isEmpty() { return value == NULL; }
bool isUnjittable() { return value == UNJITTABLE; }
bool isValid() { return value > UNJITTABLE; }
js::mjit::JITScript *getValid() {
JS_ASSERT(isValid());
return value;
}
void setEmpty() { value = NULL; }
void setUnjittable() { value = const_cast<js::mjit::JITScript *>(UNJITTABLE); }
void setValid(js::mjit::JITScript *jit) {
value = jit;
JS_ASSERT(isValid());
}
static void staticAsserts();
};
// All the possible JITScripts that can simultaneously exist for a script.
struct JITScriptSet
{
JITScriptHandle jitHandleNormal; // JIT info for normal scripts
JITScriptHandle jitHandleNormalBarriered; // barriered JIT info for normal scripts
JITScriptHandle jitHandleCtor; // JIT info for constructors
JITScriptHandle jitHandleCtorBarriered; // barriered JIT info for constructors
static size_t jitHandleOffset(bool constructing, bool barriers) {
return constructing
? (barriers
? offsetof(JITScriptSet, jitHandleCtorBarriered)
: offsetof(JITScriptSet, jitHandleCtor))
: (barriers
? offsetof(JITScriptSet, jitHandleNormalBarriered)
: offsetof(JITScriptSet, jitHandleNormal));
}
};
#endif // JS_METHODJIT
//
// We order fields according to their size in order to avoid wasting space
// for alignment.
@ -380,11 +316,6 @@ class JSScript : public js::gc::Cell
private:
js::ScriptSource *scriptSource_; /* source code */
#ifdef JS_METHODJIT
JITScriptSet *mJITInfo;
#else
void *mJITInfoPad;
#endif
js::HeapPtrFunction function_;
// For callsite clones, which cannot have enclosing scopes, the original
@ -492,17 +423,12 @@ class JSScript : public js::gc::Cell
bool shouldCloneAtCallsite:1;
bool isCallsiteClone:1; /* is a callsite clone; has a link to the original function */
#ifdef JS_METHODJIT
bool debugMode:1; /* script was compiled in debug mode */
bool failedBoundsCheck:1; /* script has had hoisted bounds checks fail */
#else
bool debugModePad:1;
bool failedBoundsCheckPad:1;
#endif
#ifdef JS_ION
bool failedBoundsCheck:1; /* script has had hoisted bounds checks fail */
bool failedShapeGuard:1; /* script has had hoisted shape guard fail */
bool hadFrequentBailouts:1;
#else
bool failedBoundsCheckPad:1;
bool failedShapeGuardPad:1;
bool hadFrequentBailoutsPad:1;
#endif
@ -594,6 +520,10 @@ class JSScript : public js::gc::Cell
/* Information attached by Ion for parallel mode execution */
js::ion::IonScript *parallelIon;
#if JS_BITS_PER_WORD == 32
uint32_t padding0;
#endif
/*
* Pointer to either baseline->method()->raw() or ion->method()->raw(), or NULL
* if there's no Baseline or Ion script.
@ -767,56 +697,13 @@ class JSScript : public js::gc::Cell
bool makeBytecodeTypeMap(JSContext *cx);
bool makeAnalysis(JSContext *cx);
#ifdef JS_METHODJIT
private:
// CallCompiler must be a friend because it generates code that directly
// accesses jitHandleNormal/jitHandleCtor, via jitHandleOffset().
friend class js::mjit::CallCompiler;
public:
bool hasMJITInfo() {
return mJITInfo != NULL;
}
static size_t offsetOfMJITInfo() { return offsetof(JSScript, mJITInfo); }
inline bool ensureHasMJITInfo(JSContext *cx);
inline void destroyMJITInfo(js::FreeOp *fop);
JITScriptHandle *jitHandle(bool constructing, bool barriers) {
JS_ASSERT(mJITInfo);
return constructing
? (barriers ? &mJITInfo->jitHandleCtorBarriered : &mJITInfo->jitHandleCtor)
: (barriers ? &mJITInfo->jitHandleNormalBarriered : &mJITInfo->jitHandleNormal);
}
js::mjit::JITScript *getJIT(bool constructing, bool barriers) {
if (!mJITInfo)
return NULL;
JITScriptHandle *jith = jitHandle(constructing, barriers);
return jith->isValid() ? jith->getValid() : NULL;
}
static void ReleaseCode(js::FreeOp *fop, JITScriptHandle *jith);
// These methods are implemented in MethodJIT.h.
inline void **nativeMap(bool constructing);
inline void *nativeCodeForPC(bool constructing, jsbytecode *pc);
uint32_t getUseCount() const { return useCount; }
uint32_t incUseCount(uint32_t amount = 1) { return useCount += amount; }
uint32_t *addressOfUseCount() { return &useCount; }
static size_t offsetOfUseCount() { return offsetof(JSScript, useCount); }
void resetUseCount() { useCount = 0; }
/*
* Size of the JITScript and all sections. If |mallocSizeOf| is NULL, the
* size is computed analytically. (This method is implemented in
* MethodJIT.cpp.)
*/
size_t sizeOfJitScripts(JSMallocSizeOfFun mallocSizeOf);
#endif
public:
bool initScriptCounts(JSContext *cx);
js::PCCounts getPCCounts(jsbytecode *pc);

Просмотреть файл

@ -157,24 +157,6 @@ JSScript::global() const
return *compartment()->maybeGlobal();
}
#ifdef JS_METHODJIT
inline bool
JSScript::ensureHasMJITInfo(JSContext *cx)
{
if (mJITInfo)
return true;
mJITInfo = cx->new_<JITScriptSet>();
return mJITInfo != NULL;
}
inline void
JSScript::destroyMJITInfo(js::FreeOp *fop)
{
fop->delete_(mJITInfo);
mJITInfo = NULL;
}
#endif /* JS_METHODJIT */
inline void
JSScript::writeBarrierPre(JSScript *script)
{

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,265 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_compilerbase_h__ && defined JS_METHODJIT
#define jsjaeger_compilerbase_h__
#include "jscntxt.h"
#include "assembler/assembler/MacroAssembler.h"
#include "assembler/assembler/LinkBuffer.h"
#include "assembler/assembler/RepatchBuffer.h"
#include "assembler/jit/ExecutableAllocator.h"
#include <limits.h>
#if defined JS_CPU_ARM
# define POST_INST_OFFSET(__expr) ((__expr) - sizeof(ARMWord))
#else
# define POST_INST_OFFSET(__expr) (__expr)
#endif
namespace js {
namespace mjit {
struct MacroAssemblerTypedefs {
typedef JSC::MacroAssembler::Label Label;
typedef JSC::MacroAssembler::Imm32 Imm32;
typedef JSC::MacroAssembler::ImmPtr ImmPtr;
typedef JSC::MacroAssembler::RegisterID RegisterID;
typedef JSC::MacroAssembler::FPRegisterID FPRegisterID;
typedef JSC::MacroAssembler::Address Address;
typedef JSC::MacroAssembler::BaseIndex BaseIndex;
typedef JSC::MacroAssembler::AbsoluteAddress AbsoluteAddress;
typedef JSC::MacroAssembler MacroAssembler;
typedef JSC::MacroAssembler::Jump Jump;
typedef JSC::MacroAssembler::JumpList JumpList;
typedef JSC::MacroAssembler::Call Call;
typedef JSC::MacroAssembler::DataLabelPtr DataLabelPtr;
typedef JSC::MacroAssembler::DataLabel32 DataLabel32;
typedef JSC::FunctionPtr FunctionPtr;
typedef JSC::RepatchBuffer RepatchBuffer;
typedef JSC::CodeLocationLabel CodeLocationLabel;
typedef JSC::CodeLocationDataLabel32 CodeLocationDataLabel32;
typedef JSC::CodeLocationDataLabelPtr CodeLocationDataLabelPtr;
typedef JSC::CodeLocationJump CodeLocationJump;
typedef JSC::CodeLocationCall CodeLocationCall;
typedef JSC::CodeLocationInstruction CodeLocationInstruction;
typedef JSC::ReturnAddressPtr ReturnAddressPtr;
typedef JSC::MacroAssemblerCodePtr MacroAssemblerCodePtr;
typedef JSC::JITCode JITCode;
#if defined JS_CPU_ARM
typedef JSC::ARMWord ARMWord;
#endif
};
class BaseCompiler : public MacroAssemblerTypedefs
{
protected:
JSContext *cx;
public:
BaseCompiler() : cx(NULL)
{ }
BaseCompiler(JSContext *cx) : cx(cx)
{ }
};
#ifdef JS_CPU_X64
inline bool
VerifyRange(void *start1, size_t size1, void *start2, size_t size2)
{
uintptr_t end1 = uintptr_t(start1) + size1;
uintptr_t end2 = uintptr_t(start2) + size2;
uintptr_t lowest = Min(uintptr_t(start1), uintptr_t(start2));
uintptr_t highest = Max(end1, end2);
return (highest - lowest < INT_MAX);
}
#endif
// This class wraps JSC::LinkBuffer for Mozilla-specific memory handling.
// Every return |false| guarantees an OOM that has been correctly propagated,
// and should continue to propagate.
class LinkerHelper : public JSC::LinkBuffer
{
protected:
Assembler &masm;
#ifdef DEBUG
bool verifiedRange;
#endif
public:
LinkerHelper(Assembler &masm, JSC::CodeKind kind) : JSC::LinkBuffer(kind)
, masm(masm)
#ifdef DEBUG
, verifiedRange(false)
#endif
{ }
~LinkerHelper() {
JS_ASSERT(verifiedRange);
}
bool verifyRange(const JSC::JITCode &other) {
markVerified();
#ifdef JS_CPU_X64
return VerifyRange(m_code, m_size, other.start(), other.size());
#else
return true;
#endif
}
bool verifyRange(JITChunk *chunk) {
return verifyRange(JSC::JITCode(chunk->code.m_code.executableAddress(),
chunk->code.m_size));
}
JSC::ExecutablePool *init(JSContext *cx) {
// The pool is incref'd after this call, so it's necessary to release()
// on any failure.
JSC::ExecutableAllocator *allocator = &cx->runtime->execAlloc();
allocator->setDestroyCallback(Probes::discardExecutableRegion);
JSC::ExecutablePool *pool;
m_code = executableAllocAndCopy(masm, allocator, &pool);
if (!m_code) {
markVerified();
js_ReportOutOfMemory(cx);
return NULL;
}
m_size = masm.size(); // must come after call to executableAllocAndCopy()!
return pool;
}
JSC::CodeLocationLabel finalize(VMFrame &f) {
masm.finalize(*this);
JSC::CodeLocationLabel label = finalizeCodeAddendum();
Probes::registerICCode(f.cx, f.chunk(), f.script(), f.pc(),
label.executableAddress(), masm.size());
return label;
}
void maybeLink(MaybeJump jump, JSC::CodeLocationLabel label) {
if (!jump.isSet())
return;
link(jump.get(), label);
}
size_t size() const {
return m_size;
}
protected:
void markVerified() {
#ifdef DEBUG
verifiedRange = true;
#endif
}
};
class NativeStubLinker : public LinkerHelper
{
public:
#ifdef JS_CPU_X64
typedef JSC::MacroAssembler::DataLabelPtr FinalJump;
#else
typedef JSC::MacroAssembler::Jump FinalJump;
#endif
NativeStubLinker(Assembler &masm, JITChunk *chunk, jsbytecode *pc, FinalJump done)
: LinkerHelper(masm, JSC::JAEGER_CODE), chunk(chunk), pc(pc), done(done)
{}
bool init(JSContext *cx);
void patchJump(JSC::CodeLocationLabel target) {
#ifdef JS_CPU_X64
patch(done, target);
#else
link(done, target);
#endif
}
private:
JITChunk *chunk;
jsbytecode *pc;
FinalJump done;
};
bool
NativeStubEpilogue(VMFrame &f, Assembler &masm, NativeStubLinker::FinalJump *result,
int32_t initialFrameDepth, int32_t vpOffset,
MaybeRegisterID typeReg, MaybeRegisterID dataReg);
/*
* On ARM, we periodically flush a constant pool into the instruction stream
* where constants are found using PC-relative addressing. This is necessary
* because the fixed-width instruction set doesn't support wide immediates.
*
* ICs perform repatching on the inline (fast) path by knowing small and
* generally fixed code location offset values where the patchable instructions
* live. Dumping a huge constant pool into the middle of an IC's inline path
* makes the distance between emitted instructions potentially variable and/or
* large, which makes the IC offsets invalid. We must reserve contiguous space
* up front to prevent this from happening.
*/
#ifdef JS_CPU_ARM
template <size_t reservedSpace>
class AutoReserveICSpace {
typedef Assembler::Label Label;
Assembler &masm;
bool didCheck;
bool *overflowSpace;
int flushCount;
public:
AutoReserveICSpace(Assembler &masm, bool *overflowSpace)
: masm(masm), didCheck(false), overflowSpace(overflowSpace)
{
masm.ensureSpace(reservedSpace);
flushCount = masm.flushCount();
}
/* Allow manual IC space checks so that non-patchable code at the end of an IC section can be
* free to use constant pools. */
void check() {
JS_ASSERT(!didCheck);
didCheck = true;
if (masm.flushCount() != flushCount)
*overflowSpace = true;
}
~AutoReserveICSpace() {
/* Automatically check the IC space if we didn't already do it manually. */
if (!didCheck) {
check();
}
}
};
# define RESERVE_IC_SPACE(__masm) AutoReserveICSpace<256> arics(__masm, &this->overflowICSpace)
# define CHECK_IC_SPACE() arics.check()
/* The OOL path can need a lot of space because we save and restore a lot of registers. The actual
* sequene varies. However, dumping the literal pool before an OOL block is probably a good idea
* anyway, as we branch directly to the start of the block from the fast path. */
# define RESERVE_OOL_SPACE(__masm) AutoReserveICSpace<2048> arics_ool(__masm, &this->overflowICSpace)
/* Allow the OOL patch to be checked before object destruction. Often, non-patchable epilogues or
* rejoining sequences are emitted, and it isn't necessary to protect these from literal pools. */
# define CHECK_OOL_SPACE() arics_ool.check()
#else
# define RESERVE_IC_SPACE(__masm) /* Do nothing. */
# define CHECK_IC_SPACE() /* Do nothing. */
# define RESERVE_OOL_SPACE(__masm) /* Do nothing. */
# define CHECK_OOL_SPACE() /* Do nothing. */
#endif
} /* namespace js */
} /* namespace mjit */
#endif

Просмотреть файл

@ -1,32 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_codegenincs_h__ && defined JS_METHODJIT
#define jsjaeger_codegenincs_h__
/* Get a label for assertion purposes. Prevent #ifdef clutter. */
#ifdef DEBUG
# define DBGLABEL(name) Label name = masm.label();
# define DBGLABEL_NOMASM(name) Label name = label();
# define DBGLABEL_ASSIGN(name) name = masm.label();
#else
# define DBGLABEL(name)
# define DBGLABEL_NOMASM(name)
# define DBGLABEL_ASSIGN(name)
#endif
#if defined JS_NUNBOX32
# include "NunboxAssembler.h"
#elif defined JS_PUNBOX64
# include "PunboxAssembler.h"
#else
# error "Neither JS_NUNBOX32 nor JS_PUNBOX64 is defined."
#endif
#include "BaseAssembler.h"
#endif /* jsjaeger_codegenincs_h__ */

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,825 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_compiler_h__ && defined JS_METHODJIT
#define jsjaeger_compiler_h__
#include "jsanalyze.h"
#include "jscntxt.h"
#include "MethodJIT.h"
#include "CodeGenIncludes.h"
#include "BaseCompiler.h"
#include "StubCompiler.h"
#include "MonoIC.h"
#include "PolyIC.h"
namespace js {
namespace mjit {
/*
* Patch for storing call site and rejoin site return addresses at, for
* redirecting the return address in InvariantFailure.
*/
struct InvariantCodePatch {
bool hasPatch;
JSC::MacroAssembler::DataLabelPtr codePatch;
InvariantCodePatch() : hasPatch(false) {}
};
struct JSActiveFrame {
JSActiveFrame *parent;
jsbytecode *parentPC;
JSScript *script;
/*
* Index into inlineFrames or OUTER_FRAME, matches this frame's index in
* the cross script SSA.
*/
uint32_t inlineIndex;
/* JIT code generation tracking state */
size_t mainCodeStart;
size_t stubCodeStart;
size_t mainCodeEnd;
size_t stubCodeEnd;
size_t inlinePCOffset;
JSActiveFrame();
};
/*
 * Method JIT compiler. Compiles one chunk of the outer script's bytecode
 * (plus any scripts inlined into it, tracked via the cross-script SSA),
 * emitting native code through |masm|/|stubcc| and recording the call sites,
 * inline caches and patch points needed to link and later patch that code.
 */
class Compiler : public BaseCompiler
{
    friend class StubCompiler;

    /* A jump to a bytecode target that could not be linked when emitted. */
    struct BranchPatch {
        BranchPatch(const Jump &j, jsbytecode *pc, uint32_t inlineIndex)
          : jump(j), pc(pc), inlineIndex(inlineIndex)
        { }

        Jump jump;
        jsbytecode *pc;
        uint32_t inlineIndex;
    };

#if defined JS_MONOIC
    /* Codegen state common to global-name monomorphic ICs. */
    struct GlobalNameICInfo {
        Label fastPathStart;
        Call slowPathCall;
        DataLabelPtr shape;
        DataLabelPtr addrLabel;

        void copyTo(ic::GlobalNameIC &to, JSC::LinkBuffer &full, JSC::LinkBuffer &stub) {
            to.fastPathStart = full.locationOf(fastPathStart);

            int offset = full.locationOf(shape) - to.fastPathStart;
            to.shapeOffset = offset;

            /* Check the offset survived the narrowing assignment above. */
            JS_ASSERT(to.shapeOffset == offset);

            to.slowPathCall = stub.locationOf(slowPathCall);
        }
    };

    struct GetGlobalNameICInfo : public GlobalNameICInfo {
        Label load;
    };

    struct SetGlobalNameICInfo : public GlobalNameICInfo {
        Label slowPathStart;
        Label fastPathRejoin;
        DataLabel32 store;
        Jump shapeGuardJump;
        ValueRemat vr;
        RegisterID objReg;
        RegisterID shapeReg;
        bool objConst;
    };

    /* Codegen state for an equality-comparison IC. */
    struct EqualityGenInfo {
        DataLabelPtr addrLabel;
        Label stubEntry;
        Call stubCall;
        BoolStub stub;
        MaybeJump jumpToStub;
        Label fallThrough;
        jsbytecode *jumpTarget;
        bool trampoline;
        Label trampolineStart;
        ValueRemat lvr, rvr;
        Assembler::Condition cond;
        JSC::MacroAssembler::RegisterID tempReg;
    };

    /* InlineFrameAssembler wants to see this. */
  public:
    struct CallGenInfo {
        /*
         * These members map to members in CallICInfo. See that structure for
         * more comments.
         */
        uint32_t callIndex;
        Label funGuardLabel;
        DataLabelPtr funGuard;
        Jump funJump;
        Jump hotJump;
        Call oolCall;
        Label joinPoint;
        Label slowJoinPoint;
        Label slowPathStart;
        Label hotPathLabel;
        Label ionJoinPoint;
        DataLabelPtr addrLabel1;
        DataLabelPtr addrLabel2;
        Jump oolJump;
        Label icCall;
        RegisterID funObjReg;
        FrameSize frameSize;
        bool typeMonitored;
    };

  private:
#endif

    /*
     * Writes of call return addresses which needs to be delayed until the final
     * absolute address of the join point is known.
     */
    struct CallPatchInfo {
        CallPatchInfo() : hasFastNcode(false), hasSlowNcode(false), joinSlow(false) {}

        Label joinPoint;
        DataLabelPtr fastNcodePatch;
        DataLabelPtr slowNcodePatch;
        bool hasFastNcode;
        bool hasSlowNcode;
        bool joinSlow;
    };

    /* Labels and flags shared by every inline cache this compiler emits. */
    struct BaseICInfo {
        BaseICInfo() : canCallHook(false), forcedTypeBarrier(false)
        { }

        Label fastPathStart;
        Label fastPathRejoin;
        Label slowPathStart;
        Call slowPathCall;
        DataLabelPtr paramAddr;

        bool canCallHook;
        bool forcedTypeBarrier;

        void copyTo(ic::BaseIC &to, JSC::LinkBuffer &full, JSC::LinkBuffer &stub) {
            to.fastPathStart = full.locationOf(fastPathStart);
            to.fastPathRejoin = full.locationOf(fastPathRejoin);
            to.slowPathStart = stub.locationOf(slowPathStart);
            to.slowPathCall = stub.locationOf(slowPathCall);
            to.canCallHook = canCallHook;
            to.forcedTypeBarrier = forcedTypeBarrier;
        }
    };

    struct GetElementICInfo : public BaseICInfo {
        RegisterID typeReg;
        RegisterID objReg;
        ValueRemat id;
        MaybeJump typeGuard;
        Jump shapeGuard;
    };

    struct SetElementICInfo : public BaseICInfo {
        RegisterID objReg;
        StateRemat objRemat;
        ValueRemat vr;
        Jump capacityGuard;
        Jump shapeGuard;
        Jump holeGuard;
        Int32Key key;
        uint32_t volatileMask;
    };

    /* Codegen state for a polymorphic inline cache. */
    struct PICGenInfo : public BaseICInfo {
        PICGenInfo(ic::PICInfo::Kind kind, jsbytecode *pc)
          : kind(kind), pc(pc), typeMonitored(false)
        { }

        ic::PICInfo::Kind kind;
        Label typeCheck;
        RegisterID shapeReg;
        RegisterID objReg;
        RegisterID typeReg;
        Label shapeGuard;
        jsbytecode *pc;
        PropertyName *name;
        bool hasTypeCheck;
        bool typeMonitored;
        bool cached;
        ValueRemat vr;

        /* Only the member matching |kind| is meaningful, per the accessors below. */
        union {
            ic::GetPropLabels getPropLabels_;
            ic::SetPropLabels setPropLabels_;
            ic::BindNameLabels bindNameLabels_;
            ic::ScopeNameLabels scopeNameLabels_;
        };

        ic::GetPropLabels &getPropLabels() {
            JS_ASSERT(kind == ic::PICInfo::GET);
            return getPropLabels_;
        }
        ic::SetPropLabels &setPropLabels() {
            JS_ASSERT(kind == ic::PICInfo::SET);
            return setPropLabels_;
        }
        ic::BindNameLabels &bindNameLabels() {
            JS_ASSERT(kind == ic::PICInfo::BIND);
            return bindNameLabels_;
        }
        ic::ScopeNameLabels &scopeNameLabels() {
            JS_ASSERT(kind == ic::PICInfo::NAME ||
                      kind == ic::PICInfo::XNAME);
            return scopeNameLabels_;
        }

        void copySimpleMembersTo(ic::PICInfo &ic) {
            ic.kind = kind;
            ic.shapeReg = shapeReg;
            ic.objReg = objReg;
            ic.name = name;
            if (ic.isSet()) {
                ic.u.vr = vr;
            } else if (ic.isGet()) {
                ic.u.get.typeReg = typeReg;
                ic.u.get.hasTypeCheck = hasTypeCheck;
            }
            ic.typeMonitored = typeMonitored;
            ic.cached = cached;
            if (ic.isGet())
                ic.setLabels(getPropLabels());
            else if (ic.isSet())
                ic.setLabels(setPropLabels());
            else if (ic.isBind())
                ic.setLabels(bindNameLabels());
            else if (ic.isScopeName())
                ic.setLabels(scopeNameLabels());
        }
    };

    struct Defs {
        Defs(uint32_t ndefs)
          : ndefs(ndefs)
        { }
        uint32_t ndefs;
    };

    /* Metadata recorded for each stub call emitted into the generated code. */
    struct InternalCallSite {
        uint32_t returnOffset;
        DataLabelPtr inlinePatch;
        uint32_t inlineIndex;
        jsbytecode *inlinepc;
        RejoinState rejoin;
        bool ool;           /* Call lives in the out-of-line (stub) path. */
        Label loopJumpLabel;
        InvariantCodePatch loopPatch;

        InternalCallSite(uint32_t returnOffset,
                         uint32_t inlineIndex, jsbytecode *inlinepc,
                         RejoinState rejoin, bool ool)
          : returnOffset(returnOffset),
            inlineIndex(inlineIndex), inlinepc(inlinepc),
            rejoin(rejoin), ool(ool)
        { }
    };

    struct InternalCompileTrigger {
        jsbytecode *pc;
        Jump inlineJump;
        Label stubLabel;
    };

    /* A double constant whose storage location is patched in after linking. */
    struct DoublePatch {
        double d;
        DataLabelPtr label;
        bool ool;
    };

    struct JumpTable {
        DataLabelPtr label;
        size_t offsetIndex;
    };

    struct JumpTableEdge {
        uint32_t source;
        uint32_t target;
    };

    struct ChunkJumpTableEdge {
        JumpTableEdge edge;
        void **jumpTableEntry;
    };

    struct LoopEntry {
        uint32_t pcOffset;
        Label label;
    };

    /*
     * Information about the current type of an argument or local in the
     * script. The known type tag of these types is cached when possible to
     * avoid generating duplicate dependency constraints.
     */
    class VarType {
        JSValueType type;
        types::StackTypeSet *types;

      public:
        void setTypes(types::StackTypeSet *types) {
            this->types = types;
            this->type = JSVAL_TYPE_MISSING;
        }
        types::TypeSet *getTypes() { return types; }

        /* Lazily compute and cache the known type tag for this variable. */
        JSValueType getTypeTag() {
            if (type == JSVAL_TYPE_MISSING)
                type = types ? types->getKnownTypeTag() : JSVAL_TYPE_UNKNOWN;
            return type;
        }
    };

    /* A control-flow edge leaving the chunk being compiled. */
    struct OutgoingChunkEdge {
        uint32_t source;
        uint32_t target;

#ifdef JS_CPU_X64
        Label sourceTrampoline;
#endif

        Jump fastJump;
        MaybeJump slowJump;
    };

    struct SlotType
    {
        uint32_t slot;
        VarType vt;
        SlotType(uint32_t slot, VarType vt) : slot(slot), vt(vt) {}
    };

    RootedScript outerScript;
    unsigned chunkIndex;
    bool isConstructing;
    ChunkDescriptor outerChunk;

    /* SSA information for the outer script and all frames we will be inlining. */
    analyze::CrossScriptSSA ssa;

    Rooted<GlobalObject*> globalObj;
    const HeapSlot *globalSlots;    /* Original slots pointer. */

    MJITInstrumentation sps;
    Assembler masm;                 /* Inline (fast path) assembler. */
    FrameState frame;

    /*
     * State for the current stack frame, and links to its parents going up to
     * the outermost script.
     */
  public:
    struct ActiveFrame : public JSActiveFrame {
        Label *jumpMap;

        /* Current types for non-escaping vars in the script. */
        VarType *varTypes;

        /* State for managing return from inlined frames. */
        bool needReturnValue;          /* Return value will be used. */
        bool syncReturnValue;          /* Return value should be fully synced. */
        bool returnValueDouble;        /* Return value should be a double. */
        bool returnSet;                /* Whether returnRegister is valid. */
        AnyRegisterID returnRegister;  /* Register holding return value. */
        const FrameEntry *returnEntry; /* Entry copied by return value. */
        Vector<Jump, 4, CompilerAllocPolicy> *returnJumps;

        /*
         * Snapshot of the heap state to use after the call, in case
         * there are multiple return paths the inlined frame could take.
         */
        RegisterAllocation *exitState;

        ActiveFrame(JSContext *cx);
        ~ActiveFrame();
    };

  private:
    ActiveFrame *a;         /* Frame currently being compiled. */
    ActiveFrame *outer;     /* Outermost frame. */

    RootedScript script_;
    analyze::ScriptAnalysis *analysis;
    jsbytecode *PC;

    LoopState *loop;

    /* State spanning all stack frames. */
    js::Vector<ActiveFrame*, 4, CompilerAllocPolicy> inlineFrames;
    js::Vector<BranchPatch, 64, CompilerAllocPolicy> branchPatches;
#if defined JS_MONOIC
    js::Vector<GetGlobalNameICInfo, 16, CompilerAllocPolicy> getGlobalNames;
    js::Vector<SetGlobalNameICInfo, 16, CompilerAllocPolicy> setGlobalNames;
    js::Vector<CallGenInfo, 64, CompilerAllocPolicy> callICs;
    js::Vector<EqualityGenInfo, 64, CompilerAllocPolicy> equalityICs;
#endif
#if defined JS_POLYIC
    js::Vector<PICGenInfo, 16, CompilerAllocPolicy> pics;
    js::Vector<GetElementICInfo, 16, CompilerAllocPolicy> getElemICs;
    js::Vector<SetElementICInfo, 16, CompilerAllocPolicy> setElemICs;
#endif
    js::Vector<CallPatchInfo, 64, CompilerAllocPolicy> callPatches;
    js::Vector<InternalCallSite, 64, CompilerAllocPolicy> callSites;
    js::Vector<InternalCompileTrigger, 4, CompilerAllocPolicy> compileTriggers;
    js::Vector<DoublePatch, 16, CompilerAllocPolicy> doubleList;
    js::Vector<JSObject*, 0, CompilerAllocPolicy> rootedTemplates;
    js::Vector<RegExpShared*, 0, CompilerAllocPolicy> rootedRegExps;
    js::Vector<uint32_t> monitoredBytecodes;
    js::Vector<uint32_t> typeBarrierBytecodes;
    js::Vector<uint32_t> fixedIntToDoubleEntries;
    js::Vector<uint32_t> fixedDoubleToAnyEntries;
    js::Vector<JumpTable, 16> jumpTables;
    js::Vector<JumpTableEdge, 16> jumpTableEdges;
    js::Vector<LoopEntry, 16> loopEntries;
    js::Vector<OutgoingChunkEdge, 16> chunkEdges;

    StubCompiler stubcc;    /* Out-of-line (slow path) code generator. */
    Label fastEntryLabel;
    Label arityLabel;
    Label argsCheckLabel;
#ifdef JS_MONOIC
    Label argsCheckStub;
    Label argsCheckFallthrough;
    Jump argsCheckJump;
#endif
    bool debugMode_;
    bool inlining_;
    bool hasGlobalReallocation;
    bool oomInVector;       // True if we have OOM'd appending to a vector.
    bool overflowICSpace;   // True if we added a constant pool in a reserved space.
    uint64_t gcNumber;
    PCLengthEntry *pcLengths;

    Compiler *thisFromCtor() { return this; }

    friend class CompilerAllocPolicy;

  public:
    Compiler(JSContext *cx, JSScript *outerScript, unsigned chunkIndex, bool isConstructing);
    ~Compiler();

    /* Main entry point: compile the chunk. */
    CompileStatus compile();

    Label getLabel() { return masm.label(); }
    bool knownJump(jsbytecode *pc);
    Label labelOf(jsbytecode *target, uint32_t inlineIndex);
    void addCallSite(const InternalCallSite &callSite);
    void addReturnSite();
    void inlineStubCall(void *stub, RejoinState rejoin, Uses uses);

    bool debugMode() { return debugMode_; }
    bool inlining() { return inlining_; }
    bool constructing() { return isConstructing; }

    /* PC of the current op, expressed in terms of the outermost script. */
    jsbytecode *outerPC() {
        if (a == outer)
            return PC;
        ActiveFrame *scan = a;
        while (scan && scan->parent != outer)
            scan = static_cast<ActiveFrame *>(scan->parent);
        return scan->parentPC;
    }

    JITScript *outerJIT() {
        return outerScript->getJIT(isConstructing, cx->zone()->compileBarriers());
    }

    ChunkDescriptor &outerChunkRef() {
        return outerJIT()->chunkDescriptor(chunkIndex);
    }

    /* Whether pc falls inside the [begin, end) range of the current chunk. */
    bool bytecodeInChunk(jsbytecode *pc) {
        return (unsigned(pc - outerScript->code) >= outerChunk.begin)
            && (unsigned(pc - outerScript->code) < outerChunk.end);
    }

    jsbytecode *inlinePC() { return PC; }
    uint32_t inlineIndex() { return a->inlineIndex; }

    Assembler &getAssembler(bool ool) { return ool ? stubcc.masm : masm; }

    InvariantCodePatch *getInvariantPatch(unsigned index) {
        return &callSites[index].loopPatch;
    }
    jsbytecode *getInvariantPC(unsigned index) {
        return callSites[index].inlinepc;
    }

    /* True if any frame up the inline chain has a saved exit state. */
    bool activeFrameHasMultipleExits() {
        ActiveFrame *na = a;
        while (na->parent) {
            if (na->exitState)
                return true;
            na = static_cast<ActiveFrame *>(na->parent);
        }
        return false;
    }

  private:
    CompileStatus performCompilation();
    CompileStatus generatePrologue();
    CompileStatus generateMethod();
    CompileStatus generateEpilogue();
    CompileStatus finishThisUp();
    CompileStatus pushActiveFrame(JSScript *script, uint32_t argc);
    void popActiveFrame();
    void updatePCCounts(jsbytecode *pc, bool *updated);
    void updatePCTypes(jsbytecode *pc, FrameEntry *fe);
    void updateArithCounts(jsbytecode *pc, FrameEntry *fe,
                           JSValueType firstUseType, JSValueType secondUseType);
    void updateElemCounts(jsbytecode *pc, FrameEntry *obj, FrameEntry *id);
    void bumpPropCount(jsbytecode *pc, int count);

    /* Analysis helpers. */
    CompileStatus prepareInferenceTypes(JSScript *script, ActiveFrame *a);
    void ensureDoubleArguments();
    void markUndefinedLocal(uint32_t offset, uint32_t i);
    void markUndefinedLocals();
    void fixDoubleTypes(jsbytecode *target);
    bool watchGlobalReallocation();
    void updateVarType();
    void updateJoinVarTypes();
    void restoreVarType();
    JSValueType knownPushedType(uint32_t pushed);
    bool mayPushUndefined(uint32_t pushed);
    types::StackTypeSet *pushedTypeSet(uint32_t which);
    bool monitored(jsbytecode *pc);
    bool hasTypeBarriers(jsbytecode *pc);
    bool testSingletonProperty(HandleObject obj, HandleId id);
    bool testSingletonPropertyTypes(FrameEntry *top, HandleId id, bool *testObject);
    CompileStatus addInlineFrame(HandleScript script, uint32_t depth, uint32_t parent, jsbytecode *parentpc);
    CompileStatus scanInlineCalls(uint32_t index, uint32_t depth);
    CompileStatus checkAnalysis(HandleScript script);

    struct BarrierState {
        MaybeJump jump;
        RegisterID typeReg;
        RegisterID dataReg;
    };

    MaybeJump trySingleTypeTest(types::StackTypeSet *types, RegisterID typeReg);
    Jump addTypeTest(types::StackTypeSet *types, RegisterID typeReg, RegisterID dataReg);
    BarrierState pushAddressMaybeBarrier(Address address, JSValueType type, bool reuseBase,
                                         bool testUndefined = false);
    BarrierState testBarrier(RegisterID typeReg, RegisterID dataReg,
                             bool testUndefined = false, bool testReturn = false,
                             bool force = false);
    void finishBarrier(const BarrierState &barrier, RejoinState rejoin, uint32_t which);
    void testPushedType(RejoinState rejoin, int which, bool ool = true);

    /* Non-emitting helpers. */
    void pushSyncedEntry(uint32_t pushed);
    bool jumpInScript(Jump j, jsbytecode *pc);
    bool compareTwoValues(JSContext *cx, JSOp op, const Value &lhs, const Value &rhs);

    /* Emitting helpers. */
    bool constantFoldBranch(jsbytecode *target, bool taken);
    bool emitStubCmpOp(BoolStub stub, jsbytecode *target, JSOp fused);
    bool iter(unsigned flags);
    void iterNext();
    bool iterMore(jsbytecode *target);
    void iterEnd();
    MaybeJump loadDouble(FrameEntry *fe, FPRegisterID *fpReg, bool *allocated);
#ifdef JS_POLYIC
    void passICAddress(BaseICInfo *ic);
#endif
#ifdef JS_MONOIC
    void passMICAddress(GlobalNameICInfo &mic);
#endif
    bool constructThis();
    void ensureDouble(FrameEntry *fe);

    /*
     * Ensure fe is an integer, truncating from double if necessary, or jump to
     * the slow path per uses.
     */
    void ensureInteger(FrameEntry *fe, Uses uses);

    /* Convert fe from a double to integer (per ValueToECMAInt32) in place. */
    void truncateDoubleToInt32(FrameEntry *fe, Uses uses);

    /*
     * Try to convert a double fe to an integer, with no truncation performed,
     * or jump to the slow path per uses.
     */
    void tryConvertInteger(FrameEntry *fe, Uses uses);

    /* Opcode handlers. */
    bool jumpAndRun(Jump j, jsbytecode *target,
                    Jump *slow = NULL, bool *trampoline = NULL,
                    bool fallthrough = false);
    bool startLoop(jsbytecode *head, Jump entry, jsbytecode *entryTarget);
    bool finishLoop(jsbytecode *head);
    inline bool shouldStartLoop(jsbytecode *head);
    void jsop_bindname(HandlePropertyName name);
    void jsop_setglobal(uint32_t index);
    void jsop_getprop_slow(HandlePropertyName name, bool forPrototype = false);
    void jsop_aliasedArg(unsigned i, bool get, bool poppedAfter = false);
    void jsop_aliasedVar(ScopeCoordinate sc, bool get, bool poppedAfter = false);
    void jsop_this();
    void emitReturn(FrameEntry *fe);
    void emitFinalReturn(Assembler &masm);
    void loadReturnValue(Assembler *masm, FrameEntry *fe);
    void emitReturnValue(Assembler *masm, FrameEntry *fe);
    void emitInlineReturnValue(FrameEntry *fe);
    void dispatchCall(VoidPtrStubUInt32 stub, uint32_t argc);
    void interruptCheckHelper();
    void ionCompileHelper();
    void inliningCompileHelper();
    CompileStatus methodEntryHelper();
    CompileStatus profilingPushHelper();
    void profilingPopHelper();
    void emitUncachedCall(uint32_t argc, bool callingNew);
    void checkCallApplySpeculation(uint32_t argc, FrameEntry *origCallee, FrameEntry *origThis,
                                   MaybeRegisterID origCalleeType, RegisterID origCalleeData,
                                   MaybeRegisterID origThisType, RegisterID origThisData,
                                   Jump *uncachedCallSlowRejoin, CallPatchInfo *uncachedCallPatch);
    bool inlineCallHelper(uint32_t argc, bool callingNew, FrameSize &callFrameSize);
    void fixPrimitiveReturn(Assembler *masm, FrameEntry *fe);
    bool jsop_getgname(uint32_t index);
    void jsop_getgname_slow(uint32_t index);
    bool jsop_setgname(HandlePropertyName name, bool popGuaranteed);
    void jsop_setgname_slow(HandlePropertyName name);
    void jsop_bindgname();
    void jsop_setelem_slow();
    void jsop_getelem_slow();
    bool jsop_getprop(HandlePropertyName name, JSValueType type,
                      bool typeCheck = true, bool forPrototype = false);
    bool jsop_getprop_dispatch(HandlePropertyName name);
    bool jsop_setprop(HandlePropertyName name, bool popGuaranteed);
    void jsop_setprop_slow(HandlePropertyName name);
    bool jsop_instanceof();
    bool jsop_intrinsic(HandlePropertyName name, JSValueType type);
    void jsop_name(HandlePropertyName name, JSValueType type);
    bool jsop_xname(HandlePropertyName name);
    void enterBlock(StaticBlockObject *block);
    void leaveBlock();
    void emitEval(uint32_t argc);
    bool jsop_tableswitch(jsbytecode *pc);
    Jump getNewObject(JSContext *cx, RegisterID result, JSObject *templateObject);

    /* Fast arithmetic. */
    bool jsop_binary_slow(JSOp op, VoidStub stub, JSValueType type, FrameEntry *lhs, FrameEntry *rhs);
    bool jsop_binary(JSOp op, VoidStub stub, JSValueType type, types::TypeSet *typeSet);
    void jsop_binary_full(FrameEntry *lhs, FrameEntry *rhs, JSOp op, VoidStub stub,
                          JSValueType type, bool cannotOverflow, bool ignoreOverflow);
    void jsop_binary_full_simple(FrameEntry *fe, JSOp op, VoidStub stub,
                                 JSValueType type);
    void jsop_binary_double(FrameEntry *lhs, FrameEntry *rhs, JSOp op, VoidStub stub,
                            JSValueType type);
    void slowLoadConstantDouble(Assembler &masm, FrameEntry *fe,
                                FPRegisterID fpreg);
    void maybeJumpIfNotInt32(Assembler &masm, MaybeJump &mj, FrameEntry *fe,
                             MaybeRegisterID &mreg);
    void maybeJumpIfNotDouble(Assembler &masm, MaybeJump &mj, FrameEntry *fe,
                              MaybeRegisterID &mreg);
    bool jsop_relational(JSOp op, BoolStub stub, jsbytecode *target, JSOp fused);
    bool jsop_relational_full(JSOp op, BoolStub stub, jsbytecode *target, JSOp fused);
    bool jsop_relational_double(JSOp op, BoolStub stub, jsbytecode *target, JSOp fused);
    bool jsop_relational_int(JSOp op, jsbytecode *target, JSOp fused);

    void emitLeftDoublePath(FrameEntry *lhs, FrameEntry *rhs, FrameState::BinaryAlloc &regs,
                            MaybeJump &lhsNotDouble, MaybeJump &rhsNotNumber,
                            MaybeJump &lhsUnknownDone);
    void emitRightDoublePath(FrameEntry *lhs, FrameEntry *rhs, FrameState::BinaryAlloc &regs,
                             MaybeJump &rhsNotNumber2);
    bool tryBinaryConstantFold(JSContext *cx, FrameState &frame, JSOp op,
                               FrameEntry *lhs, FrameEntry *rhs, Value *vp);

    /* Fast opcodes. */
    void jsop_bitop(JSOp op);
    bool jsop_mod();
    void jsop_neg();
    void jsop_bitnot();
    void jsop_not();
    void jsop_typeof();
    bool booleanJumpScript(JSOp op, jsbytecode *target);
    bool jsop_ifneq(JSOp op, jsbytecode *target);
    bool jsop_andor(JSOp op, jsbytecode *target);
    bool jsop_newinit();
    bool jsop_regexp();
    void jsop_initmethod();
    void jsop_initprop();
    void jsop_initelem_array();
    void jsop_setelem_dense(types::StackTypeSet::DoubleConversion conversion);
#ifdef JS_METHODJIT_TYPED_ARRAY
    void jsop_setelem_typed(int atype);
    void convertForTypedArray(int atype, ValueRemat *vr, bool *allocated);
#endif
    bool jsop_setelem(bool popGuaranteed);
    bool jsop_getelem();
    void jsop_getelem_dense(bool isPacked);
    void jsop_getelem_args();
#ifdef JS_METHODJIT_TYPED_ARRAY
    bool jsop_getelem_typed(int atype);
#endif
    void jsop_toid();
    bool isCacheableBaseAndIndex(FrameEntry *obj, FrameEntry *id);
    void jsop_stricteq(JSOp op);
    bool jsop_equality(JSOp op, BoolStub stub, jsbytecode *target, JSOp fused);
    CompileStatus jsop_equality_obj_obj(JSOp op, jsbytecode *target, JSOp fused);
    bool jsop_equality_int_string(JSOp op, BoolStub stub, jsbytecode *target, JSOp fused);
    void jsop_pos();
    void jsop_in();

    /*
     * Map a comparison op to an assembler condition; if fused with JSOP_IFEQ
     * the condition is inverted (branch taken when the comparison is false).
     */
    static inline Assembler::Condition
    GetCompareCondition(JSOp op, JSOp fused)
    {
        bool ifeq = fused == JSOP_IFEQ;
        switch (op) {
          case JSOP_GT:
            return ifeq ? Assembler::LessThanOrEqual : Assembler::GreaterThan;
          case JSOP_GE:
            return ifeq ? Assembler::LessThan : Assembler::GreaterThanOrEqual;
          case JSOP_LT:
            return ifeq ? Assembler::GreaterThanOrEqual : Assembler::LessThan;
          case JSOP_LE:
            return ifeq ? Assembler::GreaterThan : Assembler::LessThanOrEqual;
          case JSOP_STRICTEQ:
          case JSOP_EQ:
            return ifeq ? Assembler::NotEqual : Assembler::Equal;
          case JSOP_STRICTNE:
          case JSOP_NE:
            return ifeq ? Assembler::Equal : Assembler::NotEqual;
          default:
            JS_NOT_REACHED("unrecognized op");
            return Assembler::Equal;
        }
    }

    static inline Assembler::Condition
    GetStubCompareCondition(JSOp fused)
    {
        return fused == JSOP_IFEQ ? Assembler::Zero : Assembler::NonZero;
    }

    /* Fast builtins. */
    JSObject *pushedSingleton(unsigned pushed);
    CompileStatus inlineNativeFunction(uint32_t argc, bool callingNew);
    CompileStatus inlineScriptedFunction(uint32_t argc, bool callingNew);
    CompileStatus compileMathAbsInt(FrameEntry *arg);
    CompileStatus compileMathAbsDouble(FrameEntry *arg);
    CompileStatus compileMathSqrt(FrameEntry *arg);
    CompileStatus compileMathMinMaxDouble(FrameEntry *arg1, FrameEntry *arg2,
                                          Assembler::DoubleCondition cond);
    CompileStatus compileMathMinMaxInt(FrameEntry *arg1, FrameEntry *arg2,
                                       Assembler::Condition cond);
    CompileStatus compileMathPowSimple(FrameEntry *arg1, FrameEntry *arg2);
    CompileStatus compileArrayPush(FrameEntry *thisv, FrameEntry *arg,
                                   types::StackTypeSet::DoubleConversion conversion);
    CompileStatus compileArrayConcat(types::TypeSet *thisTypes, types::TypeSet *argTypes,
                                     FrameEntry *thisValue, FrameEntry *argValue);
    CompileStatus compileArrayPopShift(FrameEntry *thisv, bool isPacked, bool isArrayPop);
    CompileStatus compileArrayWithLength(uint32_t argc);
    CompileStatus compileArrayWithArgs(uint32_t argc);

    enum RoundingMode { Floor, Round };
    CompileStatus compileRound(FrameEntry *arg, RoundingMode mode);

    enum GetCharMode { GetChar, GetCharCode };
    CompileStatus compileGetChar(FrameEntry *thisValue, FrameEntry *arg, GetCharMode mode);

    CompileStatus compileStringFromCode(FrameEntry *arg);
    CompileStatus compileParseInt(JSValueType argType, uint32_t argc);

    void prepareStubCall(Uses uses);
    Call emitStubCall(void *ptr, DataLabelPtr *pinline);
};
// Given a stub call, emits the call into the inline assembly path. rejoin
// indicates how to rejoin should this call trigger expansion/discarding.
#define INLINE_STUBCALL(stub, rejoin)                                         \
    inlineStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, Uses(0))
#define INLINE_STUBCALL_USES(stub, rejoin, uses)                              \
    inlineStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, uses)

// Given a stub call, emits the call into the out-of-line assembly path.
// Unlike the INLINE_STUBCALL variant, this returns the Call offset.
#define OOL_STUBCALL(stub, rejoin)                                            \
    stubcc.emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, Uses(0))
#define OOL_STUBCALL_USES(stub, rejoin, uses)                                 \
    stubcc.emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, uses)

// Same as OOL_STUBCALL, but specifies a slot depth.
#define OOL_STUBCALL_LOCAL_SLOTS(stub, rejoin, slots)                         \
    stubcc.emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, Uses(0), (slots))

} /* namespace mjit */
} /* namespace js */

#endif /* jsjaeger_compiler_h__ */

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,254 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_valueinfo_h__ && defined JS_METHODJIT
#define jsjaeger_valueinfo_h__
#include "jsapi.h"
#include "jsnum.h"
#include "jstypes.h"
#include "methodjit/MachineRegs.h"
#include "methodjit/RematInfo.h"
#include "assembler/assembler/MacroAssembler.h"
namespace js {
namespace mjit {
/*
 * Compile-time model of one stack slot: where its type and payload currently
 * live (constant, register, memory — see the RematInfo members), what type
 * is known for it, and whether it is a copy of another entry.
 */
class FrameEntry
{
    friend class FrameState;
    friend class ImmutableSync;

  public:
    /* Accessors for entries which are known constants. */

    bool isConstant() const {
        if (isCopy())
            return false;
        return data.isConstant();
    }

    const jsval_layout &getConstant() const {
        JS_ASSERT(isConstant());
        return v_;
    }

    Value getValue() const {
        JS_ASSERT(isConstant());
        return IMPL_TO_JSVAL(v_);
    }

#if defined JS_NUNBOX32
    uint32_t getPayload() const {
        JS_ASSERT(isConstant());
        return v_.s.payload.u32;
    }
#elif defined JS_PUNBOX64
    uint64_t getPayload() const {
        JS_ASSERT(isConstant());
        return v_.asBits & JSVAL_PAYLOAD_MASK;
    }
#endif

    /* For a constant double FrameEntry, truncate to an int32_t. */
    void convertConstantDoubleOrBooleanToInt32(JSContext *cx) {
        JS_ASSERT(isConstant());
        JS_ASSERT(isType(JSVAL_TYPE_DOUBLE) || isType(JSVAL_TYPE_BOOLEAN));
        int32_t value;
        ToInt32(cx, getValue(), &value);

        Value newValue = Int32Value(value);
        setConstant(newValue);
    }

    /*
     * Accessors for entries whose type is known. Any entry can have a known
     * type, and constant entries must have one.
     */
    bool isTypeKnown() const {
        return backing()->type.isConstant();
    }

    /*
     * The known type should not be used in generated code if it is JSVAL_TYPE_DOUBLE.
     * In such cases either the value is constant, in memory or in a floating point register.
     */
    JSValueType getKnownType() const {
        JS_ASSERT(isTypeKnown());
        return backing()->knownType;
    }

#if defined JS_NUNBOX32
    JSValueTag getKnownTag() const {
        JS_ASSERT(backing()->v_.s.tag != JSVAL_TAG_CLEAR);
        return backing()->v_.s.tag;
    }
#elif defined JS_PUNBOX64
    JSValueShiftedTag getKnownTag() const {
        return JSValueShiftedTag(backing()->v_.asBits & JSVAL_TAG_MASK);
    }
#endif

    // Return true iff the type of this value is definitely known to be type_.
    bool isType(JSValueType type_) const {
        return isTypeKnown() && getKnownType() == type_;
    }

    // Return true iff the type of this value is definitely known not to be type_.
    bool isNotType(JSValueType type_) const {
        return isTypeKnown() && getKnownType() != type_;
    }

    // Return true if the type of this value is definitely type_, or is unknown
    // and thus potentially type_ at runtime.
    bool mightBeType(JSValueType type_) const {
        return !isNotType(type_);
    }

    /* Accessors for entries which are copies of other mutable entries. */

    bool isCopy() const { return !!copy; }
    bool isCopied() const { return copied != 0; }

    /* The entry that actually holds this entry's value: copyOf() for copies. */
    const FrameEntry *backing() const {
        return isCopy() ? copyOf() : this;
    }

    bool hasSameBacking(const FrameEntry *other) const {
        return backing() == other->backing();
    }

  private:
    void setType(JSValueType type_) {
        JS_ASSERT(!isCopy() && type_ != JSVAL_TYPE_UNKNOWN);
        type.setConstant();
#if defined JS_NUNBOX32
        v_.s.tag = JSVAL_TYPE_TO_TAG(type_);
#elif defined JS_PUNBOX64
        v_.asBits &= JSVAL_PAYLOAD_MASK;
        v_.asBits |= JSVAL_TYPE_TO_SHIFTED_TAG(type_);
#endif
        knownType = type_;
    }

    void track(uint32_t index) {
        copied = 0;
        copy = NULL;
        index_ = index;
        tracked = true;
    }

    /* Sever any copy relationship, decrementing the backing entry's count. */
    void clear() {
        JS_ASSERT(copied == 0);
        if (copy) {
            JS_ASSERT(copy->copied != 0);
            copy->copied--;
            copy = NULL;
        }
    }

    uint32_t trackerIndex() {
        return index_;
    }

    /*
     * Marks the FE as unsynced & invalid.
     */
    void resetUnsynced() {
        clear();
        type.unsync();
        data.unsync();
        type.invalidate();
        data.invalidate();
    }

    /*
     * Marks the FE as synced & in memory.
     */
    void resetSynced() {
        clear();
        type.setMemory();
        data.setMemory();
    }

    /*
     * Marks the FE as having a constant.
     */
    void setConstant(const Value &v) {
        clear();
        type.unsync();
        data.unsync();
        type.setConstant();
        data.setConstant();
        v_ = JSVAL_TO_IMPL(v);
        if (v.isDouble())
            knownType = JSVAL_TYPE_DOUBLE;
        else
            knownType = v.extractNonDoubleType();
    }

    FrameEntry *copyOf() const {
        JS_ASSERT(isCopy());
        JS_ASSERT_IF(!copy->temporary, copy < this);
        return copy;
    }

    /*
     * Set copy index.
     */
    void setCopyOf(FrameEntry *fe) {
        clear();
        copy = fe;
        if (fe) {
            type.invalidate();
            data.invalidate();
            fe->copied++;
        }
    }

    inline bool isTracked() const {
        return tracked;
    }

    inline void untrack() {
        tracked = false;
    }

    /* Whether the payload lives in the given (GP or FP) register. */
    inline bool dataInRegister(AnyRegisterID reg) const {
        JS_ASSERT(!copy);
        return reg.isReg()
               ? (data.inRegister() && data.reg() == reg.reg())
               : (data.inFPRegister() && data.fpreg() == reg.fpreg());
    }

  private:
    JSValueType knownType;  /* Cached type; set by setType()/setConstant(). */
    jsval_layout v_;        /* Constant value and/or known tag bits. */
    RematInfo type;         /* Location/sync state of the type component. */
    RematInfo data;         /* Location/sync state of the payload component. */
    uint32_t index_;        /* Tracker index assigned by track(). */
    FrameEntry *copy;       /* Backing entry if this is a copy, else NULL. */
    bool tracked;
    bool temporary;

    /* Number of copies of this entry. */
    uint32_t copied;

    /*
     * Offset of the last loop in which this entry was written or had a loop
     * register assigned.
     */
    uint32_t lastLoop;
};
} /* namespace mjit */
} /* namespace js */
#endif /* jsjaeger_valueinfo_h__ */

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,51 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_icchecker_h__ && defined JS_METHODJIT
#define jsjaeger_icchecker_h__
#include "assembler/assembler/MacroAssembler.h"
namespace js {
namespace mjit {
#if defined DEBUG && defined JS_CPU_ARM
/*
 * Assert that the 32-bit instruction word at |addr| matches |expected| on
 * the bits selected by |mask|.
 */
static inline void
CheckInstMask(void *addr, uint32_t mask, uint32_t expected)
{
    const uint32_t word = *static_cast<uint32_t *>(addr);
    JS_ASSERT((word & mask) == expected);
}
/*
 * Assert that |label| points at an ARM LDR instruction whose destination
 * register is |rd|.
 */
static inline void
CheckIsLDR(JSC::CodeLocationLabel label, uint8_t rd)
{
    /* The destination register must fit in its 4-bit field. */
    JS_ASSERT((rd & 0xf) == rd);
    const uint32_t ldrPattern = 0xe4100000 | (rd << 12);
    CheckInstMask(label.executableAddress(), 0xfc50f000, ldrPattern);
}
/*
 * Assert that |label| points at an ARM BLX instruction whose source
 * register is |rsrc|.
 */
static inline void
CheckIsBLX(JSC::CodeLocationLabel label, uint8_t rsrc)
{
    /* The source register must fit in its 4-bit field. */
    JS_ASSERT((rsrc & 0xf) == rsrc);
    const uint32_t blxPattern = 0xe1200030 | rsrc;
    CheckInstMask(label.executableAddress(), 0xfff000ff, blxPattern);
}
/*
 * Assert that |label| sits inside the ARM stub-call sequence:
 * LDR into ip (at -4), LDR into r8 (at 0), then BLX through r8 (at +4).
 */
static inline void
CheckIsStubCall(JSC::CodeLocationLabel label)
{
    CheckIsLDR(label.labelAtOffset(-4), JSC::ARMRegisters::ip);
    CheckIsLDR(label.labelAtOffset(0), JSC::ARMRegisters::r8);
    CheckIsBLX(label.labelAtOffset(4), JSC::ARMRegisters::r8);
}
#else
static inline void CheckIsStubCall(JSC::CodeLocationLabel label) {}
#endif
} /* namespace mjit */
} /* namespace js */
#endif

Просмотреть файл

@ -1,372 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_ic_labels_h__ && defined JS_METHODJIT
#define jsjaeger_ic_labels_h__
#include "methodjit/BaseCompiler.h"
/*
 * Befriended by the IC label structures below; its constructor (defined
 * elsewhere) presumably assigns their stored offsets — NOTE(review):
 * confirm against the implementation file.
 */
class ICOffsetInitializer {
  public:
    ICOffsetInitializer();
};
namespace js {
namespace mjit {
namespace ic {
/* GetPropCompiler */
struct GetPropLabels : MacroAssemblerTypedefs {
friend class ::ICOffsetInitializer;
    /*
     * Record the distance from the fast-path rejoin point to the inline value
     * load, so the load can be located again via getValueLoad().
     */
    void setValueLoad(MacroAssembler &masm, Label fastPathRejoin, Label fastValueLoad) {
        int offset = masm.differenceBetween(fastPathRejoin, fastValueLoad);
        inlineValueLoadOffset = offset;

        /*
         * Note: the offset between the type and data loads for x86 is asserted
         * in NunboxAssembler::loadValueWithAddressOffsetPatch.
         */
        JS_ASSERT(offset == inlineValueLoadOffset);
        (void) offset;
    }

    CodeLocationLabel getValueLoad(CodeLocationLabel fastPathRejoin) {
        return fastPathRejoin.labelAtOffset(inlineValueLoadOffset);
    }

    void setDslotsLoad(MacroAssembler &masm, Label fastPathRejoin, Label dslotsLoad) {
        int offset = masm.differenceBetween(fastPathRejoin, dslotsLoad);
        setDslotsLoadOffset(offset);
    }

    CodeLocationInstruction getDslotsLoad(CodeLocationLabel fastPathRejoin) {
        return fastPathRejoin.instructionAtOffset(getDslotsLoadOffset());
    }

    void setInlineShapeData(MacroAssembler &masm, Label shapeGuard, DataLabelPtr inlineShape) {
        int offset = masm.differenceBetween(shapeGuard, inlineShape);
        setInlineShapeOffset(offset);
    }

    CodeLocationDataLabelPtr getInlineShapeData(CodeLocationLabel fastShapeGuard) {
        return fastShapeGuard.dataLabelPtrAtOffset(getInlineShapeOffset());
    }

    /*
     * Note: on x64, the base is the inlineShapeLabel DataLabelPtr, whereas on other
     * platforms the base is the shapeGuard.
     */
    template <typename T>
    void setInlineShapeJump(MacroAssembler &masm, T base, Label afterJump) {
        setInlineShapeJumpOffset(masm.differenceBetween(base, afterJump));
    }

    CodeLocationJump getInlineShapeJump(CodeLocationLabel fastShapeGuard) {
        return fastShapeGuard.jumpAtOffset(getInlineShapeJumpOffset());
    }

    void setInlineTypeJump(MacroAssembler &masm, Label fastPathStart, Label afterTypeJump) {
        int offset = masm.differenceBetween(fastPathStart, afterTypeJump);
        setInlineTypeJumpOffset(offset);
    }

    CodeLocationJump getInlineTypeJump(CodeLocationLabel fastPathStart) {
        return fastPathStart.jumpAtOffset(getInlineTypeJumpOffset());
    }

    void setStubShapeJump(MacroAssembler &masm, Label stubStart, Label shapeJump) {
        int offset = masm.differenceBetween(stubStart, shapeJump);
        setStubShapeJumpOffset(offset);
    }

    /* Offset-based interface */

    /*
     * Each setter below re-asserts the stored value against the incoming
     * offset: presumably the fields are narrower than int, so this checks
     * the assignment did not truncate — confirm against the field decls.
     */
    void setDslotsLoadOffset(int offset) {
        dslotsLoadOffset = offset;
        JS_ASSERT(offset == dslotsLoadOffset);
    }

    void setInlineShapeOffset(int offset) {
        inlineShapeOffset = offset;
        JS_ASSERT(offset == inlineShapeOffset);
    }

    void setStubShapeJumpOffset(int offset) {
        stubShapeJumpOffset = offset;
        JS_ASSERT(offset == stubShapeJumpOffset);
    }

    int getInlineShapeJumpOffset() {
        return POST_INST_OFFSET(inlineShapeJumpOffset);
    }

    void setInlineShapeJumpOffset(int offset) {
        inlineShapeJumpOffset = offset;
        JS_ASSERT(offset == inlineShapeJumpOffset);
    }

    int getInlineTypeJumpOffset() {
        return POST_INST_OFFSET(inlineTypeJumpOffset);
    }

    void setInlineTypeJumpOffset(int offset) {
        inlineTypeJumpOffset = offset;
        JS_ASSERT(offset == inlineTypeJumpOffset);
    }

    int getInlineShapeOffset() {
        return inlineShapeOffset;
    }
int getDslotsLoadOffset() {
return dslotsLoadOffset;
}
int getStubShapeJumpOffset() {
return POST_INST_OFFSET(stubShapeJumpOffset);
}
private:
/* Offset from storeBack to beginning of 'mov dslots, addr' */
int32_t dslotsLoadOffset : 8;
/* Offset from shapeGuard to end of shape comparison. */
int32_t inlineShapeOffset : 8;
/* Offset from storeBack to end of value load. */
int32_t inlineValueLoadOffset : 8;
/*
* Offset from lastStubStart to end of shape jump.
* TODO: We can redefine the location of lastStubStart to be
* after the jump -- at which point this is always 0.
*/
int32_t stubShapeJumpOffset : 8;
/* Offset from the shape guard start to the shape guard jump. */
int32_t inlineShapeJumpOffset : 8;
/* Offset from the fast path to the type guard jump. */
int32_t inlineTypeJumpOffset : 8;
};
/* SetPropCompiler */
/*
 * Label/offset bookkeeping for SetPropCompiler inline caches.  Same scheme
 * as GetPropLabels: offsets between assembler labels are squeezed into
 * signed 8-bit bitfields, with JS_ASSERT(offset == field) guarding against
 * truncation.
 */
struct SetPropLabels : MacroAssemblerTypedefs {
    friend class ::ICOffsetInitializer;

    void setInlineValueStore(MacroAssembler &masm, Label fastPathRejoin, DataLabel32 inlineValueStore) {
        int offset = masm.differenceBetween(fastPathRejoin, inlineValueStore);
        setInlineValueStoreOffset(offset);
    }

    CodeLocationLabel getInlineValueStore(CodeLocationLabel fastPathRejoin) {
        return fastPathRejoin.labelAtOffset(getInlineValueStoreOffset());
    }

    void setInlineShapeData(MacroAssembler &masm, Label shapeGuard, DataLabelPtr inlineShapeData) {
        int offset = masm.differenceBetween(shapeGuard, inlineShapeData);
        setInlineShapeDataOffset(offset);
    }

    /* Note: unlike the getter above, this is based at fastPathStart plus the guard offset. */
    CodeLocationDataLabelPtr getInlineShapeData(CodeLocationLabel fastPathStart, int shapeGuardOffset) {
        return fastPathStart.dataLabelPtrAtOffset(shapeGuardOffset + getInlineShapeDataOffset());
    }

    void setDslotsLoad(MacroAssembler &masm, Label fastPathRejoin, Label beforeLoad) {
        int offset = masm.differenceBetween(fastPathRejoin, beforeLoad);
        setDslotsLoadOffset(offset);
    }

    CodeLocationInstruction getDslotsLoad(CodeLocationLabel fastPathRejoin, const ValueRemat &vr) {
        return fastPathRejoin.instructionAtOffset(getDslotsLoadOffset(vr));
    }

    void setInlineShapeJump(MacroAssembler &masm, Label shapeGuard, Label afterJump) {
        setInlineShapeJumpOffset(masm.differenceBetween(shapeGuard, afterJump));
    }

    CodeLocationJump getInlineShapeJump(CodeLocationLabel shapeGuard) {
        return shapeGuard.jumpAtOffset(getInlineShapeJumpOffset());
    }

    void setStubShapeJump(MacroAssembler &masm, Label stubStart, Label afterShapeJump) {
        int offset = masm.differenceBetween(stubStart, afterShapeJump);
        setStubShapeJumpOffset(offset);
    }

    CodeLocationJump getStubShapeJump(CodeLocationLabel stubStart) {
        return stubStart.jumpAtOffset(getStubShapeJumpOffset());
    }

  private:
    /* Offset-based interface. */

    void setDslotsLoadOffset(int offset) {
        dslotsLoadOffset = offset;
        JS_ASSERT(offset == dslotsLoadOffset);  /* fits in 8-bit field */
    }

    /* The ValueRemat parameter is currently unused on this path. */
    int getDslotsLoadOffset(const ValueRemat &vr) {
        (void) vr;
        return dslotsLoadOffset;
    }

    void setInlineShapeDataOffset(int offset) {
        inlineShapeDataOffset = offset;
        JS_ASSERT(offset == inlineShapeDataOffset);
    }

    void setStubShapeJumpOffset(int offset) {
        stubShapeJumpOffset = offset;
        JS_ASSERT(offset == stubShapeJumpOffset);
    }

    void setInlineValueStoreOffset(int offset) {
        inlineValueStoreOffset = offset;
        JS_ASSERT(offset == inlineValueStoreOffset);
    }

    void setInlineShapeJumpOffset(int offset) {
        inlineShapeJumpOffset = offset;
        JS_ASSERT(offset == inlineShapeJumpOffset);
    }

    int getInlineShapeJumpOffset() {
        return POST_INST_OFFSET(inlineShapeJumpOffset);
    }

    int getInlineShapeDataOffset() {
        return inlineShapeDataOffset;
    }

    int getStubShapeJumpOffset() {
        return POST_INST_OFFSET(stubShapeJumpOffset);
    }

    int getInlineValueStoreOffset() {
        return inlineValueStoreOffset;
    }

    /* Offset from storeBack to beginning of 'mov dslots, addr'. */
    int32_t dslotsLoadOffset : 8;

    /* Offset from shapeGuard to end of shape comparison. */
    int32_t inlineShapeDataOffset : 8;

    /*
     * Offset from lastStubStart to end of shape jump.
     * TODO: We can redefine the location of lastStubStart to be
     * after the jump -- at which point this is always 0.
     */
    int32_t stubShapeJumpOffset : 8;

    int32_t inlineValueStoreOffset : 8;

    /* Offset from shapeGuard to the end of the shape jump. */
    int32_t inlineShapeJumpOffset : 8;
};
/* BindNameCompiler */
/*
 * Label/offset bookkeeping for BindNameCompiler inline caches: one inline
 * jump (relative to the shape guard) and one stub jump (relative to the last
 * stub's start), each stored in a signed 8-bit bitfield.
 */
struct BindNameLabels : MacroAssemblerTypedefs {
    friend class ::ICOffsetInitializer;

    void setInlineJumpOffset(int offset) {
        inlineJumpOffset = offset;
        JS_ASSERT(offset == inlineJumpOffset);  /* fits in 8-bit field */
    }

    void setInlineJump(MacroAssembler &masm, Label shapeGuard, Jump inlineJump) {
        int offset = masm.differenceBetween(shapeGuard, inlineJump);
        setInlineJumpOffset(offset);
    }

    CodeLocationJump getInlineJump(CodeLocationLabel fastPathStart) {
        return fastPathStart.jumpAtOffset(getInlineJumpOffset());
    }

    int getInlineJumpOffset() {
        return inlineJumpOffset;
    }

    void setStubJumpOffset(int offset) {
        stubJumpOffset = offset;
        JS_ASSERT(offset == stubJumpOffset);
    }

    void setStubJump(MacroAssembler &masm, Label stubStart, Jump stubJump) {
        int offset = masm.differenceBetween(stubStart, stubJump);
        setStubJumpOffset(offset);
    }

    CodeLocationJump getStubJump(CodeLocationLabel lastStubStart) {
        return lastStubStart.jumpAtOffset(getStubJumpOffset());
    }

    int getStubJumpOffset() {
        return stubJumpOffset;
    }

  private:
    /* Offset from shapeGuard to end of shape jump. */
    int32_t inlineJumpOffset : 8;

    /* Offset from lastStubStart to end of the shape jump. */
    int32_t stubJumpOffset : 8;
};
/* ScopeNameCompiler */
/*
 * Label/offset bookkeeping for ScopeNameCompiler inline caches.  Structurally
 * identical to BindNameLabels, but the inline jump is measured from
 * fastPathStart rather than the shape guard.
 */
struct ScopeNameLabels : MacroAssemblerTypedefs {
    friend class ::ICOffsetInitializer;

    void setInlineJumpOffset(int offset) {
        inlineJumpOffset = offset;
        JS_ASSERT(offset == inlineJumpOffset);  /* fits in 8-bit field */
    }

    void setInlineJump(MacroAssembler &masm, Label fastPathStart, Jump inlineJump) {
        int offset = masm.differenceBetween(fastPathStart, inlineJump);
        setInlineJumpOffset(offset);
    }

    CodeLocationJump getInlineJump(CodeLocationLabel fastPathStart) {
        return fastPathStart.jumpAtOffset(getInlineJumpOffset());
    }

    int getInlineJumpOffset() {
        return inlineJumpOffset;
    }

    void setStubJumpOffset(int offset) {
        stubJumpOffset = offset;
        JS_ASSERT(offset == stubJumpOffset);
    }

    void setStubJump(MacroAssembler &masm, Label stubStart, Jump stubJump) {
        int offset = masm.differenceBetween(stubStart, stubJump);
        setStubJumpOffset(offset);
    }

    CodeLocationJump getStubJump(CodeLocationLabel lastStubStart) {
        return lastStubStart.jumpAtOffset(getStubJumpOffset());
    }

    int getStubJumpOffset() {
        return stubJumpOffset;
    }

  private:
    /* Offset from fastPathStart to end of shape jump. */
    int32_t inlineJumpOffset : 8;

    /* Offset from lastStubStart to end of the shape jump. */
    int32_t stubJumpOffset : 8;
};
} /* namespace ic */
} /* namespace mjit */
} /* namespace js */
#endif /* jsjaeger_ic_labels_h__ */

Просмотреть файл

@ -1,145 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_icrepatcher_h__ && defined JS_METHODJIT
#define jsjaeger_icrepatcher_h__
#include "assembler/assembler/RepatchBuffer.h"
#include "assembler/moco/MocoStubs.h"
#include "methodjit/ICChecker.h"
namespace js {
namespace mjit {
namespace ic {
/*
 * Thin wrapper over JSC::RepatchBuffer that knows the platform-specific
 * instruction layouts the method JIT emits, so IC code can be repatched
 * (stub call targets, inline value load/store offsets) after compilation.
 */
class Repatcher : public JSC::RepatchBuffer
{
    typedef JSC::CodeLocationLabel CodeLocationLabel;
    typedef JSC::CodeLocationCall CodeLocationCall;
    typedef JSC::FunctionPtr FunctionPtr;

    CodeLocationLabel label;

  public:
    explicit Repatcher(JITChunk *js)
      : JSC::RepatchBuffer(js->code), label(js->code.m_code.executableAddress())
    { }

    explicit Repatcher(const JSC::JITCode &code)
      : JSC::RepatchBuffer(code), label(code.start())
    { }

    using JSC::RepatchBuffer::relink;

    /* Patch a stub call. */
    void relink(CodeLocationCall call, FunctionPtr stub) {
#if defined JS_CPU_X64 || defined JS_CPU_X86 || defined JS_CPU_SPARC
        /* Direct call encodings: patch the call itself. */
        JSC::RepatchBuffer::relink(call, stub);
#elif defined JS_CPU_ARM
        /*
         * Stub calls on ARM look like this:
         *
         *                  ldr     ip, =stub
         * call label ->    ldr     r8, =JaegerStubVeneer
         *                  blx     r8
         *
         * ARM has to run stub calls through a veneer in order for THROW to
         * work properly. The address that must be patched is the load into
         * 'ip', not the load into 'r8'.
         */
        CheckIsStubCall(call.labelAtOffset(0));
        JSC::RepatchBuffer::relink(call.callAtOffset(-4), stub);
#elif defined JS_CPU_MIPS
        /*
         * Stub calls on MIPS look like this:
         *
         *                  lui     v0, hi(stub)
         *                  ori     v0, v0, lo(stub)
         *                  lui     t9, hi(JaegerStubVeneer)
         *                  ori     t9, t9, lo(JaegerStubVeneer)
         *                  jalr    t9
         *                  nop
         * call label ->    xxx
         *
         * MIPS has to run stub calls through a veneer in order for THROW to
         * work properly. The address that must be patched is the load into
         * 'v0', not the load into 't9'.
         */
        JSC::RepatchBuffer::relink(call.callAtOffset(-8), stub);
#else
# error
#endif
    }

    /* Patch the offset of a Value load emitted by loadValueWithAddressOffsetPatch. */
    void patchAddressOffsetForValueLoad(CodeLocationLabel label, uint32_t offset) {
#if defined JS_CPU_X64 || defined JS_CPU_ARM || defined JS_CPU_SPARC || defined JS_CPU_MIPS
        repatch(label.dataLabel32AtOffset(0), offset);
#elif defined JS_CPU_X86
        static const unsigned LOAD_TYPE_OFFSET = 6;
        static const unsigned LOAD_DATA_OFFSET = 12;

        /*
         * We have the following sequence to patch:
         *
         *      mov     <offset+4>($base), %<type>
         *      mov     <offset+0>($base), %<data>
         */
        repatch(label.dataLabel32AtOffset(LOAD_DATA_OFFSET), offset);
        repatch(label.dataLabel32AtOffset(LOAD_TYPE_OFFSET), offset + 4);
#else
# error
#endif
    }

    /* Patch the offset of a Value store; see the x86 layout notes below. */
    void patchAddressOffsetForValueStore(CodeLocationLabel label, uint32_t offset, bool typeConst) {
#if defined JS_CPU_ARM || defined JS_CPU_X64 || defined JS_CPU_SPARC || defined JS_CPU_MIPS
        (void) typeConst;
        repatch(label.dataLabel32AtOffset(0), offset);
#elif defined JS_CPU_X86
        static const unsigned STORE_TYPE_OFFSET = 6;
        static const unsigned STORE_DATA_CONST_TYPE_OFFSET = 16;
        static const unsigned STORE_DATA_TYPE_OFFSET = 12;

        /*
         * The type is stored first, then the payload. Both stores can vary in
         * size, depending on whether or not the data is a constant in the
         * instruction stream (though only the first store matters for the
         * purpose of locating both offsets for patching).
         *
         * We have one of the following sequences to patch. Offsets are located
         * before 6B into a given move instruction, but the mov instructions
         * carrying constant payloads are 10B wide overall.
         *
         * typeConst=false, dataConst=false
         *      mov     %<type>, <offset+4>($base)  ; Length is 6
         *      mov     %<data>, <offset+0>($base)  ; Offset @ len(prev) + 6 = 12
         * typeConst=true, dataConst=false
         *      mov     $<type>, <offset+4>($base)  ; Length is 10
         *      mov     %<data>, <offset+0>($base)  ; Offset @ len(prev) + 6 = 16
         * typeConst=true, dataConst=true
         *      mov     $<type>, <offset+4>($base)  ; Length is 10
         *      mov     $<data>, <offset+0>($base)  ; Offset @ len(prev) + 6 = 16
         *
         * Note that we only need to know whether type is const to determine the
         * correct patch offsets.  In all cases, the label points to the start
         * of the sequence.
         */
        repatch(label.dataLabel32AtOffset(STORE_TYPE_OFFSET), offset + 4);
        unsigned payloadOffset = typeConst ? STORE_DATA_CONST_TYPE_OFFSET : STORE_DATA_TYPE_OFFSET;
        repatch(label.dataLabel32AtOffset(payloadOffset), offset);
#else
# error
#endif
    }
};
} /* namespace ic */
} /* namespace mjit */
} /* namespace js */
#endif

Просмотреть файл

@ -1,267 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if defined JS_NUNBOX32
#include "FrameEntry.h"
#include "FrameState.h"
#include "FrameState-inl.h"
#include "ImmutableSync.h"
using namespace js;
using namespace js::mjit;
/* Construct in an unbound state; init() must be called before use. */
ImmutableSync::ImmutableSync()
  : cx(NULL), entries(NULL), frame(NULL), avail(Registers::AvailRegs), generation(0)
{
}
/* `cx` doubles as the "init() succeeded" flag guarding the entries buffer. */
ImmutableSync::~ImmutableSync()
{
    if (cx)
        js_free(entries);
}
/*
 * Bind to a frame and allocate the zero-initialized per-entry sync table.
 * Returns false on out-of-memory.
 */
bool
ImmutableSync::init(JSContext *cx, const FrameState &frame, uint32_t nentries)
{
    this->cx = cx;
    this->frame = &frame;

    /* calloc so every SyncEntry starts with generation 0 (i.e. stale). */
    entries = js_pod_calloc<SyncEntry>(nentries);
    return !!entries;
}
/*
 * Begin a new sync pass over [bottom, top] with the given free registers.
 * Bumping `generation` lazily invalidates all previous SyncEntry state
 * (see entryFor), so the table need not be cleared.
 */
void
ImmutableSync::reset(Assembler *masm, Registers avail, FrameEntry *top, FrameEntry *bottom)
{
    this->avail = avail;
    this->masm = masm;
    this->top = top;
    this->bottom = bottom;
    this->generation++;
    memset(regs, 0, sizeof(regs));
}
/*
 * Pick a register for remat use, preferring in order: a free register, a
 * register the frame does not own, a frame-owned register whose entry can
 * be marked clobbered, and as a last resort a register this sync pass
 * itself handed out earlier.
 */
inline JSC::MacroAssembler::RegisterID
ImmutableSync::doAllocReg()
{
    if (!avail.empty())
        return avail.takeAnyReg().reg();

    uint32_t lastResort = FrameState::InvalidIndex;
    uint32_t evictFromFrame = FrameState::InvalidIndex;

    /* Find something to evict. */
    for (uint32_t i = 0; i < Registers::TotalRegisters; i++) {
        RegisterID reg = RegisterID(i);
        if (!(Registers::maskReg(reg) & Registers::AvailRegs))
            continue;
        if (frame->regstate(reg).isPinned())
            continue;

        lastResort = i;

        if (!regs[i]) {
            /* If the frame does not own this register, take it! */
            FrameEntry *fe = frame->regstate(reg).usedBy();
            if (!fe)
                return reg;

            evictFromFrame = i;

            /*
             * If not copied, we can sync and not have to load again later.
             * That's about as good as it gets, so just break out now.
             */
            if (!fe->isCopied())
                break;
        }
    }

    if (evictFromFrame != FrameState::InvalidIndex) {
        RegisterID evict = RegisterID(evictFromFrame);
        FrameEntry *fe = frame->regstate(evict).usedBy();
        SyncEntry &e = entryFor(fe);

        /* Record which half (type or payload) was clobbered so later syncs reload it. */
        if (frame->regstate(evict).type() == RematInfo::TYPE) {
            JS_ASSERT(!e.typeClobbered);
            e.typeClobbered = true;
        } else {
            JS_ASSERT(!e.dataClobbered);
            e.dataClobbered = true;
        }
        return evict;
    }

    JS_ASSERT(lastResort != FrameState::InvalidIndex);
    JS_ASSERT(regs[lastResort]);

    /* Steal back a register previously handed to a sync entry. */
    SyncEntry *e = regs[lastResort];
    RegisterID reg = RegisterID(lastResort);
    if (e->hasDataReg && e->dataReg == reg) {
        e->hasDataReg = false;
    } else if (e->hasTypeReg && e->typeReg == reg) {
        e->hasTypeReg = false;
    } else {
        JS_NOT_REACHED("no way");
    }

    return reg;
}
/* Public wrapper for doAllocReg() that asserts the result is not pinned. */
JSC::MacroAssembler::RegisterID
ImmutableSync::allocReg()
{
    RegisterID reg = doAllocReg();
    JS_ASSERT(!frame->regstate(reg).isPinned());
    return reg;
}
/* Return a register to the available pool, unless the frame has it pinned. */
void
ImmutableSync::freeReg(JSC::MacroAssembler::RegisterID reg)
{
    if (!frame->regstate(reg).isPinned())
        avail.putReg(reg);
}
/*
 * Look up the SyncEntry for a frame entry, lazily resetting it if it is
 * left over from a previous sync pass (stale generation).
 */
inline ImmutableSync::SyncEntry &
ImmutableSync::entryFor(FrameEntry *fe)
{
    JS_ASSERT(fe <= top || frame->isTemporary(fe));
    SyncEntry &e = entries[fe - frame->entries];
    if (e.generation != generation)
        e.reset(generation);
    return e;
}
/* Sync one frame entry to memory, dispatching on whether it is a copy. */
void
ImmutableSync::sync(FrameEntry *fe)
{
    if (fe->isCopy())
        syncCopy(fe);
    else
        syncNormal(fe);
}
/*
 * True if the entry's type tag still needs a store.  The SyncEntry
 * parameter is unused here; registers are synced up-front.
 */
bool
ImmutableSync::shouldSyncType(FrameEntry *fe, SyncEntry &e)
{
    /* Registers are synced up-front. */
    return !fe->type.synced() && !fe->type.inRegister();
}
/*
 * True if the entry's payload still needs a store.  The SyncEntry
 * parameter is unused here; registers are synced up-front.
 */
bool
ImmutableSync::shouldSyncData(FrameEntry *fe, SyncEntry &e)
{
    /* Registers are synced up-front. */
    return !fe->data.synced() && !fe->data.inRegister();
}
/*
 * Materialize the entry's type tag in a register, reusing the frame's
 * register when still valid, or this pass's cached register, or loading
 * from the frame slot into a freshly allocated one.
 */
JSC::MacroAssembler::RegisterID
ImmutableSync::ensureTypeReg(FrameEntry *fe, SyncEntry &e)
{
    if (fe->type.inRegister() && !e.typeClobbered)
        return fe->type.reg();
    if (e.hasTypeReg)
        return e.typeReg;
    e.typeReg = allocReg();
    e.hasTypeReg = true;
    regs[e.typeReg] = &e;
    masm->loadTypeTag(frame->addressOf(fe), e.typeReg);
    return e.typeReg;
}
/* Payload counterpart of ensureTypeReg; same reuse-then-load strategy. */
JSC::MacroAssembler::RegisterID
ImmutableSync::ensureDataReg(FrameEntry *fe, SyncEntry &e)
{
    if (fe->data.inRegister() && !e.dataClobbered)
        return fe->data.reg();
    if (e.hasDataReg)
        return e.dataReg;
    e.dataReg = allocReg();
    e.hasDataReg = true;
    regs[e.dataReg] = &e;
    masm->loadPayload(frame->addressOf(fe), e.dataReg);
    return e.dataReg;
}
/*
 * Sync a copy entry by storing from its backing entry's registers into the
 * copy's own frame slot.  A known (non-double) type is cached on the
 * backing's SyncEntry so it can be stored as an immediate.
 */
void
ImmutableSync::syncCopy(FrameEntry *fe)
{
    JS_ASSERT(fe >= bottom);

    FrameEntry *backing = fe->copyOf();
    SyncEntry &e = entryFor(backing);

    JS_ASSERT(!backing->isConstant());

    Address addr = frame->addressOf(fe);

    if (fe->isTypeKnown() && !fe->isType(JSVAL_TYPE_DOUBLE) && !e.learnedType) {
        e.learnedType = true;
        e.type = fe->getKnownType();
    }

    if (!fe->data.synced())
        masm->storePayload(ensureDataReg(backing, e), addr);

    if (!fe->type.synced()) {
        if (e.learnedType)
            masm->storeTypeTag(ImmType(e.type), addr);
        else
            masm->storeTypeTag(ensureTypeReg(backing, e), addr);
    }
}
/*
 * Sync a non-copy entry: store its payload and type tag to the frame slot
 * if needed, then release any registers this pass allocated for it (or, if
 * the frame's own registers were used and remain valid, return those to
 * the available pool).
 */
void
ImmutableSync::syncNormal(FrameEntry *fe)
{
    SyncEntry &e = entryFor(fe);

    Address addr = frame->addressOf(fe);

    if (fe->isTypeKnown() && !fe->isType(JSVAL_TYPE_DOUBLE)) {
        e.learnedType = true;
        e.type = fe->getKnownType();
    }

    if (shouldSyncData(fe, e)) {
        if (fe->isConstant()) {
            /* A constant stores both halves at once; nothing else to do. */
            masm->storeValue(fe->getValue(), addr);
            return;
        }
        masm->storePayload(ensureDataReg(fe, e), addr);
    }

    if (shouldSyncType(fe, e)) {
        if (e.learnedType)
            masm->storeTypeTag(ImmType(e.type), addr);
        else
            masm->storeTypeTag(ensureTypeReg(fe, e), addr);
    }

    /* Release this pass's data register, or the frame's if still usable. */
    if (e.hasDataReg) {
        freeReg(e.dataReg);
        regs[e.dataReg] = NULL;
    } else if (!e.dataClobbered &&
               fe->data.inRegister() &&
               frame->regstate(fe->data.reg()).usedBy()) {
        freeReg(fe->data.reg());
    }

    if (e.hasTypeReg) {
        freeReg(e.typeReg);
        regs[e.typeReg] = NULL;
    } else if (!e.typeClobbered &&
               fe->type.inRegister() &&
               frame->regstate(fe->type.reg()).usedBy()) {
        freeReg(fe->type.reg());
    }
}
#endif /* JS_NUNBOX32 */

Просмотреть файл

@ -1,102 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_imm_sync_h__ && defined JS_METHODJIT && defined JS_NUNBOX32
#define jsjaeger_imm_sync_h__
#include "methodjit/MachineRegs.h"
#include "methodjit/FrameEntry.h"
#include "CodeGenIncludes.h"
namespace js {
namespace mjit {
class FrameState;
/*
* This is a structure nestled within the FrameState used for safely syncing
* registers to memory during transitions from the fast path into a slow path
* stub call. During this process, the frame itself is immutable, and we may
* run out of registers needed to remat copies.
*
* This structure maintains a mapping of the tracker used to perform ad-hoc
* register allocation.
*/
class ImmutableSync
{
    typedef JSC::MacroAssembler::RegisterID RegisterID;
    typedef JSC::MacroAssembler::Address Address;

    /* Per-FrameEntry scratch state for one sync pass. */
    struct SyncEntry {
        /*
         * NB: clobbered and sync mean the same thing: the register associated
         * in the FrameEntry is no longer valid, and has been written back.
         *
         * They are separated for readability.
         */
        uint32_t generation;   /* pass stamp; entry is stale if != current generation */
        bool dataClobbered;
        bool typeClobbered;
        bool hasDataReg;       /* this pass allocated dataReg for the payload */
        bool hasTypeReg;       /* this pass allocated typeReg for the type tag */
        bool learnedType;      /* `type` holds a known type usable as an immediate */
        RegisterID dataReg;
        RegisterID typeReg;
        JSValueType type;

        void reset(uint32_t gen) {
            dataClobbered = false;
            typeClobbered = false;
            hasDataReg = false;
            hasTypeReg = false;
            learnedType = false;
            generation = gen;
        }
    };

  public:
    ImmutableSync();
    ~ImmutableSync();
    bool init(JSContext *cx, const FrameState &frame, uint32_t nentries);

    void reset(Assembler *masm, Registers avail, FrameEntry *top, FrameEntry *bottom);
    void sync(FrameEntry *fe);

  private:
    void syncCopy(FrameEntry *fe);
    void syncNormal(FrameEntry *fe);
    RegisterID ensureDataReg(FrameEntry *fe, SyncEntry &e);
    RegisterID ensureTypeReg(FrameEntry *fe, SyncEntry &e);

    RegisterID allocReg();
    void freeReg(RegisterID reg);

    /* To be called only by allocReg. */
    RegisterID doAllocReg();

    inline SyncEntry &entryFor(FrameEntry *fe);

    bool shouldSyncType(FrameEntry *fe, SyncEntry &e);
    bool shouldSyncData(FrameEntry *fe, SyncEntry &e);

  private:
    JSContext *cx;
    SyncEntry *entries;
    const FrameState *frame;
    /* NOTE(review): never assigned by init() in this file — appears unused; confirm. */
    uint32_t nentries;
    Registers avail;
    Assembler *masm;
    /* Maps each register to the SyncEntry that currently holds it, if any. */
    SyncEntry *regs[Assembler::TotalRegisters];
    FrameEntry *top;
    FrameEntry *bottom;
    uint32_t generation;
};
} /* namespace mjit */
} /* namespace js */
#endif /* jsjaeger_imm_sync_h__ */

Просмотреть файл

@ -1,128 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_inl_frame_asm_h__ && defined JS_METHODJIT && defined JS_MONOIC
#define jsjaeger_inl_frame_asm_h__
#include "assembler/assembler/MacroAssembler.h"
#include "assembler/assembler/CodeLocation.h"
#include "methodjit/MethodJIT.h"
#include "CodeGenIncludes.h"
namespace js {
namespace mjit {
/*
 * Helper for addressing fields of a not-yet-pushed frame: produces addresses
 * relative to JSFrameReg shifted by a fixed base offset.
 */
struct AdjustedFrame {
    AdjustedFrame(uint32_t baseOffset)
      : baseOffset(baseOffset)
    { }

    uint32_t baseOffset;

    JSC::MacroAssembler::Address addrOf(uint32_t offset) {
        return JSC::MacroAssembler::Address(JSFrameReg, baseOffset + offset);
    }
};
/*
* This is used for emitting code to inline callee-side frame creation and
* should jit code equivalent to StackFrame::initCallFrameCallerHalf.
*
* Once finished, JSFrameReg is advanced to be the new fp.
*/
/*
 * This is used for emitting code to inline callee-side frame creation and
 * should jit code equivalent to StackFrame::initCallFrameCallerHalf.
 *
 * Once finished, JSFrameReg is advanced to be the new fp.
 */
class InlineFrameAssembler {
    typedef JSC::MacroAssembler::RegisterID RegisterID;
    typedef JSC::MacroAssembler::Address Address;
    typedef JSC::MacroAssembler::Imm32 Imm32;
    typedef JSC::MacroAssembler::ImmPtr ImmPtr;
    typedef JSC::MacroAssembler::DataLabelPtr DataLabelPtr;

    Assembler &masm;
    FrameSize  frameSize;       // size of the caller's frame
    RegisterID funObjReg;       // register containing the function object (callee)
    uint32_t   flags;           // frame flags

  public:
    /*
     * Register state, so consumers of this class can restrict which registers
     * can and can't be clobbered.
     */
    Registers  tempRegs;

    InlineFrameAssembler(Assembler &masm, ic::CallICInfo &ic, uint32_t flags)
      : masm(masm), flags(flags), tempRegs(Registers::AvailRegs)
    {
        frameSize = ic.frameSize;
        funObjReg = ic.funObjReg;
        tempRegs.takeReg(funObjReg);
    }

    InlineFrameAssembler(Assembler &masm, Compiler::CallGenInfo &gen, uint32_t flags)
      : masm(masm), flags(flags), tempRegs(Registers::AvailRegs)
    {
        frameSize = gen.frameSize;
        funObjReg = gen.funObjReg;
        tempRegs.takeReg(funObjReg);
    }

    /*
     * Emit the caller half of frame initialization (flags, prev fp, ncode)
     * and advance JSFrameReg to the new frame.  Returns the patchable store
     * of the return address (ncode) so callers can repatch it later.
     */
    DataLabelPtr assemble(void *ncode, jsbytecode *pc)
    {
        /* Only the CONSTRUCTING bit may be set by the caller. */
        JS_ASSERT((flags & ~StackFrame::CONSTRUCTING) == 0);

        /* Generate StackFrame::initCallFrameCallerHalf. */

        /* Get the actual flags to write; shadows the member deliberately. */
        uint32_t flags = this->flags | StackFrame::FUNCTION;
        if (frameSize.lowered(pc))
            flags |= StackFrame::LOWERED_CALL_APPLY;

        DataLabelPtr ncodePatch;
        if (frameSize.isStatic()) {
            /* Static frame: the new fp is a fixed offset from the current one. */
            uint32_t frameDepth = frameSize.staticLocalSlots();
            AdjustedFrame newfp(sizeof(StackFrame) + frameDepth * sizeof(Value));

            Address flagsAddr = newfp.addrOf(StackFrame::offsetOfFlags());
            masm.store32(Imm32(flags), flagsAddr);
            Address prevAddr = newfp.addrOf(StackFrame::offsetOfPrev());
            masm.storePtr(JSFrameReg, prevAddr);
            Address ncodeAddr = newfp.addrOf(StackFrame::offsetOfNcode());
            ncodePatch = masm.storePtrWithPatch(ImmPtr(ncode), ncodeAddr);

            masm.addPtr(Imm32(sizeof(StackFrame) + frameDepth * sizeof(Value)), JSFrameReg);
        } else {
            /*
             * If the frame size is dynamic, then the fast path generated by
             * generateFullCallStub must be used. Thus, this code is executed
             * after stubs::SplatApplyArgs has been called. SplatApplyArgs
             * stores the dynamic stack pointer (i.e., regs.sp after pushing a
             * dynamic number of arguments) to VMFrame.regs, so we just load it
             * here to get the new frame pointer.
             */
            RegisterID newfp = tempRegs.takeAnyReg().reg();
            masm.loadPtr(FrameAddress(VMFrame::offsetOfRegsSp()), newfp);

            Address flagsAddr(newfp, StackFrame::offsetOfFlags());
            masm.store32(Imm32(flags), flagsAddr);
            Address prevAddr(newfp, StackFrame::offsetOfPrev());
            masm.storePtr(JSFrameReg, prevAddr);
            Address ncodeAddr(newfp, StackFrame::offsetOfNcode());
            ncodePatch = masm.storePtrWithPatch(ImmPtr(ncode), ncodeAddr);

            masm.move(newfp, JSFrameReg);
            tempRegs.putReg(newfp);
        }

        return ncodePatch;
    }
};
} /* namespace mjit */
} /* namespace js */
#endif /* jsjaeger_inl_frame_asm_h__ */

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,135 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include "jsutil.h"
#include "MethodJIT.h"
#include "Logging.h"
#include "jsobjinlines.h"
#if defined(JS_METHODJIT_SPEW)
static bool LoggingChecked = false;
static uint32_t LoggingBits = 0;
static const char *ChannelNames[] =
{
#define _(name) #name,
JSPEW_CHAN_MAP(_)
#undef _
};
/*
 * Parse the JMFLAGS environment variable once and latch the enabled spew
 * channels into LoggingBits.  "JMFLAGS=help" prints usage and exits the
 * process.  Not MT safe; races on Logging{Checked,Bits}.
 */
void
js::JMCheckLogging()
{
    /* Not MT safe; races on Logging{Checked,Bits}. */
    if (LoggingChecked)
        return;
    LoggingChecked = true;

    const char *env = getenv("JMFLAGS");
    if (!env)
        return;

    if (strstr(env, "help")) {
        fflush(NULL);
        printf(
            "\n"
            "usage: JMFLAGS=option,option,option,... where options can be:\n"
            "\n"
            "  help          show this message\n"
            "  abort/aborts  ???\n"
            "  scripts       ???\n"
            "  profile       ???\n"
#ifdef DEBUG
            "  pcprofile     Runtime hit counts of every JS opcode executed\n"
            "  jsops         JS opcodes\n"
#endif
            "  insns         JS opcodes and generated insns\n"
            "  vmframe       VMFrame contents\n"
            "  pics          PIC patching activity\n"
            "  slowcalls     Calls to slow path functions\n"
            "  analysis      LICM and other analysis behavior\n"
            "  regalloc      Register allocation behavior\n"
            "  inlin         Call inlining behavior\n"
            "  recompile     Dynamic recompilations\n"
            "  full          everything not affecting codegen\n"
            "\n"
        );
        exit(0);
        /*NOTREACHED*/
    }

    /*
     * NOTE(review): the help text above lists "pcprofile" but no flag below
     * parses it — confirm whether that option is handled elsewhere.
     */
    if (strstr(env, "abort") || strstr(env, "aborts"))
        LoggingBits |= (1 << uint32_t(JSpew_Abort));
    if (strstr(env, "scripts"))
        LoggingBits |= (1 << uint32_t(JSpew_Scripts));
    if (strstr(env, "profile"))
        LoggingBits |= (1 << uint32_t(JSpew_Prof));
#ifdef DEBUG
    if (strstr(env, "jsops"))
        LoggingBits |= (1 << uint32_t(JSpew_JSOps));
#endif
    /* "insns" implies "jsops". (Parentheses made explicit; the original
     * grouping relied on << binding tighter than | for the same result.) */
    if (strstr(env, "insns"))
        LoggingBits |= (1 << uint32_t(JSpew_Insns)) | (1 << uint32_t(JSpew_JSOps));
    if (strstr(env, "vmframe"))
        LoggingBits |= (1 << uint32_t(JSpew_VMFrame));
    if (strstr(env, "pics"))
        LoggingBits |= (1 << uint32_t(JSpew_PICs));
    if (strstr(env, "slowcalls"))
        LoggingBits |= (1 << uint32_t(JSpew_SlowCalls));
    if (strstr(env, "analysis"))
        LoggingBits |= (1 << uint32_t(JSpew_Analysis));
    if (strstr(env, "regalloc"))
        LoggingBits |= (1 << uint32_t(JSpew_Regalloc));
    if (strstr(env, "recompile"))
        LoggingBits |= (1 << uint32_t(JSpew_Recompile));
    if (strstr(env, "inlin"))
        LoggingBits |= (1 << uint32_t(JSpew_Inlining));
    if (strstr(env, "full"))
        LoggingBits |= 0xFFFFFFFF;
}
/* RAII: while `logging` is true, force-enable every spew channel. */
js::ConditionalLog::ConditionalLog(bool logging)
  : oldBits(LoggingBits), logging(logging)
{
    if (logging)
        LoggingBits = 0xFFFFFFFF;
}
/* Restore the channel bits saved by the constructor. */
js::ConditionalLog::~ConditionalLog() {
    if (logging)
        LoggingBits = oldBits;
}
/* True if the given spew channel was enabled by JMCheckLogging. */
bool
js::IsJaegerSpewChannelActive(JaegerSpewChannel channel)
{
    JS_ASSERT(LoggingChecked);
    return !!(LoggingBits & (1 << uint32_t(channel)));
}
/*
 * printf-style logging to stderr, prefixed with the channel name; no-op
 * when the channel is disabled.
 */
void
js::JaegerSpew(JaegerSpewChannel channel, const char *fmt, ...)
{
    JS_ASSERT(LoggingChecked);

    if (!(LoggingBits & (1 << uint32_t(channel))))
        return;

    fprintf(stderr, "[jaeger] %-7s  ", ChannelNames[channel]);

    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);

    /* fprintf(stdout, "\n"); */
}
#endif

Просмотреть файл

@ -1,93 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_logging_h__
#define jsjaeger_logging_h__
#include "assembler/wtf/Platform.h"
#include "prmjtime.h"
namespace js {
/*
 * X-macro listing every spew channel; expanded below to build the enum and
 * in Logging.cpp to build the parallel ChannelNames table.
 */
#define JSPEW_CHAN_MAP(_)   \
    _(Abort)                \
    _(Scripts)              \
    _(Prof)                 \
    _(JSOps)                \
    _(Insns)                \
    _(VMFrame)              \
    _(PICs)                 \
    _(SlowCalls)            \
    _(Analysis)             \
    _(Regalloc)             \
    _(Inlining)             \
    _(Recompile)

enum JaegerSpewChannel {
#define _(name) JSpew_##name,
    JSPEW_CHAN_MAP(_)
#undef _
    JSpew_Terminator        /* count sentinel, not a real channel */
};
#ifdef JS_METHODJIT_SPEW
void JMCheckLogging();
bool IsJaegerSpewChannelActive(JaegerSpewChannel channel);
#ifdef __GNUC__
void JaegerSpew(JaegerSpewChannel channel, const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
#else
void JaegerSpew(JaegerSpewChannel channel, const char *fmt, ...);
#endif
#else
/* Spew disabled at compile time: all logging entry points collapse to no-ops. */
static inline void JMCheckLogging() {}
static inline bool IsJaegerSpewChannelActive(JaegerSpewChannel channel) { return false; }
static inline void JaegerSpew(JaegerSpewChannel channel, const char *fmt, ...) {}
#endif // JS_METHODJIT_SPEW
#if defined(JS_METHODJIT_SPEW)
/* RAII guard that force-enables all spew channels while alive (see Logging.cpp). */
struct ConditionalLog {
    uint32_t oldBits;   /* channel bits saved for restoration */
    bool logging;
    ConditionalLog(bool logging);
    ~ConditionalLog();
};
/* Simple stopwatch built on PRMJ_Now (microsecond resolution). */
struct Profiler {
    int64_t t_start;
    int64_t t_stop;

    static inline int64_t now() {
        return PRMJ_Now();
    }

    inline void start() {
        t_start = now();
    }

    inline void stop() {
        t_stop = now();
    }

    inline uint32_t time_ms() {
        return uint32_t((t_stop - t_start) / PRMJ_USEC_PER_MSEC);
    }

    inline uint32_t time_us() {
        return uint32_t(t_stop - t_start);
    }
};
#endif // JS_METHODJIT_SPEW
} // namespace js
#endif

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,355 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_loopstate_h__ && defined JS_METHODJIT
#define jsjaeger_loopstate_h__
#include "mozilla/PodOperations.h"
#include "jsanalyze.h"
#include "methodjit/Compiler.h"
namespace js {
namespace mjit {
/*
* The LoopState keeps track of register and analysis state within the loop
* currently being processed by the Compiler.
*
* There are several analyses we do that are specific to loops: loop carried
* registers, bounds check hoisting, and loop invariant code motion. Brief
* descriptions of these analyses:
*
* Loop carried registers. We allocate registers as we emit code, in a single
* forward pass over the script. Normally this would mean we need to pick the
* register allocation at the head of the loop before any of the body has been
* processed. Instead, while processing the loop body we retroactively mark
* registers as holding the payload of certain entries at the head (being
* carried around the loop), so that the head's allocation ends up holding
* registers that are likely to be used shortly. This can be done provided that
* (a) the register has not been touched since the loop head, (b) the slot
* has not been modified or separately assigned a different register, and (c)
* all prior slow path rejoins in the loop are patched with reloads of the
* register. The register allocation at the loop head must have all entries
* synced, so that prior slow path syncs do not also need patching.
*
* Bounds check hoisting. If we can determine a loop invariant test which
* implies the bounds check at one or more array accesses, we hoist that and
* check it when initially entering the loop (from JIT code or the
* interpreter) and after every stub or C++ call.
*
* Loop invariant code motion. If we can determine a computation (arithmetic,
* array slot pointer or property access) is loop invariant, we give it a slot
* on the stack and preserve its value throughout the loop. We can allocate
* and carry registers for loop invariant slots as for normal slots. These
* slots sit above the frame's normal slots, and are transient --- they are
* clobbered whenever a new frame is pushed. We thus regenerate the loop
* invariant slots after every C++ and scripted call, and avoid doing LICM on
* loops which have such calls. This has a nice property that the slots only
* need to be loop invariant wrt the side effects that happen directly in the
* loop; if C++ calls a getter which scribbles on the object properties
* involved in an 'invariant' then we will reload the invariant's new value
* after the call finishes.
*/
/* Forward declaration; defined elsewhere in the compiler (not visible here). */
struct TemporaryCopy;

/* Which flavor of array a hoisted check or invariant entry refers to. */
enum InvariantArrayKind { DENSE_ARRAY, TYPED_ARRAY };
class LoopState : public MacroAssemblerTypedefs
{
/* Compilation context; all pointers/references below are borrowed, not owned. */
JSContext *cx;
analyze::CrossScriptSSA *ssa;
JSScript *outerScript;
analyze::ScriptAnalysis *outerAnalysis;

/* Compiler and register/frame model this loop is being compiled within. */
Compiler &cc;
FrameState &frame;

/* Basic information about this loop. */
analyze::LoopAnalysis *lifetime;

/* Allocation at the head of the loop, has all loop carried variables. */
RegisterAllocation *alloc;

/*
* Set if this is not a do-while loop and the compiler has advanced past
* the loop's entry point.
*/
bool reachedEntryPoint;

/*
* Jump which initially enters the loop. The state is synced when this jump
* occurs, and needs a trampoline generated to load the right registers
* before going to entryTarget.
*/
Jump entry;

/* Registers available for loop variables. */
Registers loopRegs;

/* Whether to skip all bounds check hoisting and loop invariant code analysis. */
bool skipAnalysis;

/* Prior stub rejoins to patch when new loop registers are allocated. */
struct StubJoin {
unsigned index;
bool script;
};
Vector<StubJoin,16,CompilerAllocPolicy> loopJoins;

/* Pending loads to patch for stub rejoins. */
struct StubJoinPatch {
StubJoin join;
Address address;
AnyRegisterID reg;
};
Vector<StubJoinPatch,16,CompilerAllocPolicy> loopPatches;

/*
* Pair of a jump/label immediately after each call in the loop, to patch
* with restores of the loop invariant stack values.
*/
struct RestoreInvariantCall {
Jump jump;
Label label;
bool ool;
bool entry;
unsigned patchIndex; /* Index into Compiler's callSites. */
/* Any copies of temporaries on the stack */
Vector<TemporaryCopy> *temporaryCopies;
};
Vector<RestoreInvariantCall> restoreInvariantCalls;

/*
* Aggregate structure for all loop invariant code and hoisted checks we
* can perform. These are all stored in the same vector as they may depend
* on each other and we need to emit code restoring them in order.
*/
struct InvariantEntry {
enum EntryKind {
/*
* initializedLength(array) > value1 + value2 + constant.
* Unsigned comparison, so will fail if value + constant < 0
*/
DENSE_ARRAY_BOUNDS_CHECK,
TYPED_ARRAY_BOUNDS_CHECK,
/* value1 + constant >= 0 */
NEGATIVE_CHECK,
/* constant >= value1 + value2 */
RANGE_CHECK,
/* For dense arrays */
DENSE_ARRAY_SLOTS,
DENSE_ARRAY_LENGTH,
/* For typed arrays */
TYPED_ARRAY_SLOTS,
TYPED_ARRAY_LENGTH,
/* For lazy arguments */
INVARIANT_ARGS_BASE,
INVARIANT_ARGS_LENGTH,
/* For definite properties */
INVARIANT_PROPERTY
} kind;
/* Operands; which arm is active depends on |kind| (checks vs. slots vs. property). */
union {
struct {
uint32_t arraySlot;
uint32_t valueSlot1;
uint32_t valueSlot2;
int32_t constant;
} check;
struct {
uint32_t arraySlot;
uint32_t temporary;
} array;
struct {
uint32_t objectSlot;
uint32_t propertySlot;
uint32_t temporary;
jsid id;
} property;
} u;
InvariantEntry() { mozilla::PodZero(this); }
bool isBoundsCheck() const {
return kind == DENSE_ARRAY_BOUNDS_CHECK || kind == TYPED_ARRAY_BOUNDS_CHECK;
}
/* Whether this entry is any kind of hoisted test (vs. an invariant value). */
bool isCheck() const {
return isBoundsCheck() || kind == NEGATIVE_CHECK || kind == RANGE_CHECK;
}
};
Vector<InvariantEntry, 4, CompilerAllocPolicy> invariantEntries;

/* Redundancy test between two entries; defined out of line — see LoopState.cpp. */
static inline bool entryRedundant(const InvariantEntry &e0, const InvariantEntry &e1);
bool checkRedundantEntry(const InvariantEntry &entry);

/* Helpers used while building invariantEntries (definitions out of line). */
bool loopInvariantEntry(uint32_t slot);
bool addHoistedCheck(InvariantArrayKind arrayKind, uint32_t arraySlot,
uint32_t valueSlot1, uint32_t valueSlot2, int32_t constant);
void addNegativeCheck(uint32_t valueSlot, int32_t constant);
void addRangeCheck(uint32_t valueSlot1, uint32_t valueSlot2, int32_t constant);
bool hasTestLinearRelationship(uint32_t slot);

bool hasInvariants() { return !invariantEntries.empty(); }
void restoreInvariants(jsbytecode *pc, Assembler &masm,
Vector<TemporaryCopy> *temporaryCopies, Vector<Jump> *jumps);

public:

/* Outer loop to this one, in case of loop nesting. */
LoopState *outer;

/* Offset from the outermost frame at which temporaries should be allocated. */
uint32_t temporariesStart;

LoopState(JSContext *cx, analyze::CrossScriptSSA *ssa,
Compiler *cc, FrameState *frame);
bool init(jsbytecode *head, Jump entry, jsbytecode *entryTarget);

/* Called as compilation advances; flips reachedEntryPoint at the entry pc. */
void setOuterPC(jsbytecode *pc)
{
if (uint32_t(pc - outerScript->code) == lifetime->entry && lifetime->entry != lifetime->head)
reachedEntryPoint = true;
}

bool generatingInvariants() { return !skipAnalysis; }

/* Add a call with trailing jump/label, after which invariants need to be restored. */
void addInvariantCall(Jump jump, Label label, bool ool, bool entry, unsigned patchIndex, Uses uses);

/* Bytecode offsets of the loop's key points (from the loop analysis). */
uint32_t headOffset() { return lifetime->head; }
uint32_t getLoopRegs() { return loopRegs.freeMask; }
Jump entryJump() { return entry; }
uint32_t entryOffset() { return lifetime->entry; }
uint32_t backedgeOffset() { return lifetime->backedge; }

/* Whether the payload of slot is carried around the loop in a register. */
bool carriesLoopReg(FrameEntry *fe) { return alloc->hasAnyReg(frame.entrySlot(fe)); }

void setLoopReg(AnyRegisterID reg, FrameEntry *fe);

void clearLoopReg(AnyRegisterID reg)
{
/*
* Mark reg as having been modified since the start of the loop; it
* cannot subsequently be marked to carry a register around the loop.
*/
JS_ASSERT(loopRegs.hasReg(reg) == alloc->loop(reg));
if (loopRegs.hasReg(reg)) {
loopRegs.takeReg(reg);
alloc->setUnassigned(reg);
JaegerSpew(JSpew_Regalloc, "clearing loop register %s\n", reg.name());
}
}

void addJoin(unsigned index, bool script);
void clearLoopRegisters();

void flushLoop(StubCompiler &stubcc);

/*
* These should only be used for entries which are known to be dense arrays
* (if they are objects at all).
*/
bool hoistArrayLengthCheck(InvariantArrayKind arrayKind,
const analyze::CrossSSAValue &obj,
const analyze::CrossSSAValue &index);
FrameEntry *invariantArraySlots(const analyze::CrossSSAValue &obj);

/* Methods for accesses on lazy arguments. */
bool hoistArgsLengthCheck(const analyze::CrossSSAValue &index);
FrameEntry *invariantArguments();

FrameEntry *invariantLength(const analyze::CrossSSAValue &obj);
FrameEntry *invariantProperty(const analyze::CrossSSAValue &obj, jsid id);

/* Whether a binary or inc/dec op's result cannot overflow. */
bool cannotIntegerOverflow(const analyze::CrossSSAValue &pushed);

/*
* Whether integer overflow in addition or negative zeros in multiplication
* at a binary op can be safely ignored.
*/
bool ignoreIntegerOverflow(const analyze::CrossSSAValue &pushed);

private:
/* Analysis information for the loop. */

/*
* Any inequality known to hold at the head of the loop. This has the
* form 'lhs <= rhs + constant' or 'lhs >= rhs + constant', depending on
* lessEqual. The lhs may be modified within the loop body (the test is
* invalid afterwards), and the rhs is invariant. This information is only
* valid if the LHS/RHS are known integers.
*/
enum { UNASSIGNED = UINT32_MAX };
uint32_t testLHS;
uint32_t testRHS;
int32_t testConstant;
bool testLessEqual;

/*
* A variable which will be incremented or decremented exactly once in each
* iteration of the loop. The offset of the operation is indicated, which
* may or may not run after the initial entry into the loop.
*/
struct Increment {
uint32_t slot;
uint32_t offset;
};
Vector<Increment, 4, CompilerAllocPolicy> increments;

/* It is unknown which arrays grow or which objects are modified in this loop. */
bool unknownModset;

/*
* Arrays which might grow during this loop. This is a guess, and may
* underapproximate the actual set of such arrays.
*/
Vector<types::TypeObject *, 4, CompilerAllocPolicy> growArrays;

/* Properties which might be modified during this loop. */
struct ModifiedProperty {
types::TypeObject *object;
jsid id;
};
Vector<ModifiedProperty, 4, CompilerAllocPolicy> modifiedProperties;

/*
* Whether this loop only performs integer and double arithmetic and dense
* array accesses. Integer overflows in this loop which only flow to bitops
* can be ignored.
*/
bool constrainedLoop;

/* Note: parameter |frame| shadows the member of the same name. */
void analyzeLoopBody(unsigned frame);

bool definiteArrayAccess(const analyze::SSAValue &obj, const analyze::SSAValue &index);

/* Modset bookkeeping used by the body analysis (definitions out of line). */
bool addGrowArray(types::TypeObject *object);
bool addModifiedProperty(types::TypeObject *object, jsid id);
bool hasGrowArray(types::TypeObject *object);
bool hasModifiedProperty(types::TypeObject *object, jsid id);
uint32_t getIncrement(uint32_t slot);
int32_t adjustConstantForIncrement(jsbytecode *pc, uint32_t slot);

bool getEntryValue(const analyze::CrossSSAValue &v, uint32_t *pslot, int32_t *pconstant);
bool computeInterval(const analyze::CrossSSAValue &v, int32_t *pmin, int32_t *pmax);
bool valueFlowsToBitops(const analyze::SSAValue &v);
};
} /* namespace mjit */
} /* namespace js */
#endif /* jsjaeger_loopstate_h__ */

Просмотреть файл

@ -1,589 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_regstate_h__ && defined JS_METHODJIT
#define jsjaeger_regstate_h__
#include "mozilla/Util.h"
#include "assembler/assembler/MacroAssembler.h"
namespace js {
namespace mjit {
/* Common handling for both general purpose and floating point registers. */
/* Common handling for both general purpose and floating point registers. */
struct AnyRegisterID {
/*
* Encoded register index: GPRs occupy [0, TotalRegisters), FP registers
* are stored offset by TotalRegisters, and unsigned(-1) means "unset".
*/
unsigned reg_;

AnyRegisterID()
: reg_((unsigned)-1)
{ pin(); }

AnyRegisterID(const AnyRegisterID &o)
: reg_(o.reg_)
{ pin(); }

AnyRegisterID(JSC::MacroAssembler::RegisterID reg)
: reg_((unsigned)reg)
{ pin(); }

AnyRegisterID(JSC::MacroAssembler::FPRegisterID reg)
: reg_(JSC::MacroAssembler::TotalRegisters + (unsigned)reg)
{ pin(); }

/* Inverse of the encoding above; definitions follow the Registers struct. */
static inline AnyRegisterID fromRaw(unsigned reg);

inline JSC::MacroAssembler::RegisterID reg();
inline JSC::MacroAssembler::FPRegisterID fpreg();

bool isSet() { return reg_ != unsigned(-1); }
bool isReg() { return reg_ < JSC::MacroAssembler::TotalRegisters; }
bool isFPReg() { return isSet() && !isReg(); }

/* Human-readable register name, for spew. */
inline const char * name();

private:
unsigned * pin() {
/*
* Workaround for apparent compiler bug in GCC 4.2. If GCC thinks that reg_
* cannot escape then it compiles isReg() and other accesses to reg_ incorrectly.
* Do not simplify this: every constructor calls pin() to force the escape.
*/
static unsigned *v;
v = &reg_;
return v;
}
};
/*
 * Register-set bookkeeping for the method JIT: per-platform definitions of
 * the available general purpose and floating point registers, the native
 * ABI argument/return registers, and a small free-mask abstraction used by
 * the register allocator.  A Registers value is just a 32-bit free mask
 * (GPRs in the low bits, FP registers shifted up by TotalRegisters).
 */
struct Registers {
    /* General purpose registers. */
    static const uint32_t TotalRegisters = JSC::MacroAssembler::TotalRegisters;

    /* Native calling conventions stub calls may be set up with. */
    enum CallConvention {
        NormalCall,
        FastCall
    };

    typedef JSC::MacroAssembler::RegisterID RegisterID;

    // Homed and scratch registers for working with Values on x64.
#if defined(JS_CPU_X64)
    static const RegisterID TypeMaskReg = JSC::X86Registers::r13;
    static const RegisterID PayloadMaskReg = JSC::X86Registers::r14;
    static const RegisterID ValueReg = JSC::X86Registers::r10;
    static const RegisterID ScratchReg = JSC::X86Registers::r11;
#endif

    // Register that homes the current JSStackFrame.
#if defined(JS_CPU_X86)
    static const RegisterID JSFrameReg = JSC::X86Registers::ebp;
#elif defined(JS_CPU_X64)
    static const RegisterID JSFrameReg = JSC::X86Registers::ebx;
#elif defined(JS_CPU_ARM)
    static const RegisterID JSFrameReg = JSC::ARMRegisters::r10;
#elif defined(JS_CPU_SPARC)
    static const RegisterID JSFrameReg = JSC::SparcRegisters::l0;
#elif defined(JS_CPU_MIPS)
    static const RegisterID JSFrameReg = JSC::MIPSRegisters::s0;
#endif

    /* Return register and ABI argument registers, per platform. */
#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
    static const RegisterID ReturnReg = JSC::X86Registers::eax;
# if defined(JS_CPU_X86) || defined(_WIN64)
    static const RegisterID ArgReg0 = JSC::X86Registers::ecx;
    static const RegisterID ArgReg1 = JSC::X86Registers::edx;
#  if defined(JS_CPU_X64)
    static const RegisterID ArgReg2 = JSC::X86Registers::r8;
    static const RegisterID ArgReg3 = JSC::X86Registers::r9;
#  endif
# else
    static const RegisterID ArgReg0 = JSC::X86Registers::edi;
    static const RegisterID ArgReg1 = JSC::X86Registers::esi;
    static const RegisterID ArgReg2 = JSC::X86Registers::edx;
    static const RegisterID ArgReg3 = JSC::X86Registers::ecx;
# endif
#elif JS_CPU_ARM
    static const RegisterID ReturnReg = JSC::ARMRegisters::r0;
    static const RegisterID ArgReg0 = JSC::ARMRegisters::r0;
    static const RegisterID ArgReg1 = JSC::ARMRegisters::r1;
    static const RegisterID ArgReg2 = JSC::ARMRegisters::r2;
#elif JS_CPU_SPARC
    static const RegisterID ReturnReg = JSC::SparcRegisters::o0;
    static const RegisterID ArgReg0 = JSC::SparcRegisters::o0;
    static const RegisterID ArgReg1 = JSC::SparcRegisters::o1;
    static const RegisterID ArgReg2 = JSC::SparcRegisters::o2;
    static const RegisterID ArgReg3 = JSC::SparcRegisters::o3;
    static const RegisterID ArgReg4 = JSC::SparcRegisters::o4;
    static const RegisterID ArgReg5 = JSC::SparcRegisters::o5;
#elif JS_CPU_MIPS
    static const RegisterID ReturnReg = JSC::MIPSRegisters::v0;
    static const RegisterID ArgReg0 = JSC::MIPSRegisters::a0;
    static const RegisterID ArgReg1 = JSC::MIPSRegisters::a1;
    static const RegisterID ArgReg2 = JSC::MIPSRegisters::a2;
    static const RegisterID ArgReg3 = JSC::MIPSRegisters::a3;
#endif

    static const RegisterID StackPointer = JSC::MacroAssembler::stackPointerRegister;

    /* Bitmask helpers over general purpose RegisterIDs. */
    static inline uint32_t maskReg(RegisterID reg) {
        return (1 << reg);
    }

    static inline uint32_t mask2Regs(RegisterID reg1, RegisterID reg2) {
        return maskReg(reg1) | maskReg(reg2);
    }

    static inline uint32_t mask3Regs(RegisterID reg1, RegisterID reg2, RegisterID reg3) {
        return maskReg(reg1) | maskReg(reg2) | maskReg(reg3);
    }

    /* Caller-save (temp) and callee-save registers usable by the allocator. */
#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
    static const uint32_t TempRegs =
          (1 << JSC::X86Registers::eax)
# if defined(JS_CPU_X86)
        | (1 << JSC::X86Registers::ebx)
# endif
        | (1 << JSC::X86Registers::ecx)
        | (1 << JSC::X86Registers::edx)
# if defined(JS_CPU_X64)
        | (1 << JSC::X86Registers::r8)
        | (1 << JSC::X86Registers::r9)
#  if !defined(_WIN64)
        | (1 << JSC::X86Registers::esi)
        | (1 << JSC::X86Registers::edi)
#  endif
# endif
        ;

# if defined(JS_CPU_X64)
    static const uint32_t SavedRegs =
        /* r11 is scratchRegister, used by JSC. */
          (1 << JSC::X86Registers::r12)
        // r13 is TypeMaskReg.
        // r14 is PayloadMaskReg.
        | (1 << JSC::X86Registers::r15)
#  if defined(_WIN64)
        | (1 << JSC::X86Registers::esi)
        | (1 << JSC::X86Registers::edi)
#  endif
# else
    static const uint32_t SavedRegs =
          (1 << JSC::X86Registers::esi)
        | (1 << JSC::X86Registers::edi)
# endif
        ;

# if defined(JS_CPU_X86)
    /* x86 can only address the low byte of esi/edi/ebp/esp's ancestors. */
    static const uint32_t SingleByteRegs = (TempRegs | SavedRegs) &
        ~((1 << JSC::X86Registers::esi) |
          (1 << JSC::X86Registers::edi) |
          (1 << JSC::X86Registers::ebp) |
          (1 << JSC::X86Registers::esp));
# elif defined(JS_CPU_X64)
    static const uint32_t SingleByteRegs = TempRegs | SavedRegs;
# endif
#elif defined(JS_CPU_ARM)
    static const uint32_t TempRegs =
          (1 << JSC::ARMRegisters::r0)
        | (1 << JSC::ARMRegisters::r1)
        | (1 << JSC::ARMRegisters::r2);
    // r3 is reserved as a scratch register for the assembler.
    // r12 is IP, and is used for stub calls.

    static const uint32_t SavedRegs =
          (1 << JSC::ARMRegisters::r4)
        | (1 << JSC::ARMRegisters::r5)
        | (1 << JSC::ARMRegisters::r6)
        | (1 << JSC::ARMRegisters::r7)
        // r8 is reserved as a scratch register for the assembler.
        | (1 << JSC::ARMRegisters::r9);
    // r10 is reserved for JSFrameReg.
    // r13 is SP and must always point to VMFrame whilst in generated code.
    // r14 is LR and is used for return sequences.
    // r15 is PC (program counter).

    static const uint32_t SingleByteRegs = TempRegs | SavedRegs;
#elif defined(JS_CPU_SPARC)
    static const uint32_t TempRegs =
          (1 << JSC::SparcRegisters::o0)
        | (1 << JSC::SparcRegisters::o1)
        | (1 << JSC::SparcRegisters::o2)
        | (1 << JSC::SparcRegisters::o3)
        | (1 << JSC::SparcRegisters::o4)
        | (1 << JSC::SparcRegisters::o5);

    static const uint32_t SavedRegs =
          (1 << JSC::SparcRegisters::l2)
        | (1 << JSC::SparcRegisters::l3)
        | (1 << JSC::SparcRegisters::l4)
        | (1 << JSC::SparcRegisters::l5)
        | (1 << JSC::SparcRegisters::l6)
        | (1 << JSC::SparcRegisters::l7);

    static const uint32_t SingleByteRegs = TempRegs | SavedRegs;
#elif defined(JS_CPU_MIPS)
    static const uint32_t TempRegs =
          (1 << JSC::MIPSRegisters::at)
        | (1 << JSC::MIPSRegisters::v0)
        | (1 << JSC::MIPSRegisters::v1)
        | (1 << JSC::MIPSRegisters::a0)
        | (1 << JSC::MIPSRegisters::a1)
        | (1 << JSC::MIPSRegisters::a2)
        | (1 << JSC::MIPSRegisters::a3)
        | (1 << JSC::MIPSRegisters::t5)
        | (1 << JSC::MIPSRegisters::t6)
        | (1 << JSC::MIPSRegisters::t7);
    /* t0-t4,t9 is reserved as a scratch register for the assembler.
       We don't use t8 ($24), as we limit ourselves within $0 to $23 to
       leave the bitmask for 8 FP registers. */

    static const uint32_t SavedRegs =
          (1 << JSC::MIPSRegisters::s1)
        | (1 << JSC::MIPSRegisters::s2)
        | (1 << JSC::MIPSRegisters::s3)
        | (1 << JSC::MIPSRegisters::s4)
        | (1 << JSC::MIPSRegisters::s5)
        | (1 << JSC::MIPSRegisters::s6)
        | (1 << JSC::MIPSRegisters::s7);
    // s0 is reserved for JSFrameReg.

    static const uint32_t SingleByteRegs = TempRegs | SavedRegs;
#else
# error "Unsupported platform"
#endif

    /* Every GPR the allocator may hand out. */
    static const uint32_t AvailRegs = SavedRegs | TempRegs;

    static bool isAvail(RegisterID reg) {
        uint32_t mask = maskReg(reg);
        return bool(mask & AvailRegs);
    }

    static bool isSaved(RegisterID reg) {
        uint32_t mask = maskReg(reg);
        JS_ASSERT(mask & AvailRegs);
        return bool(mask & SavedRegs);
    }

    /* Number of arguments passed in registers under |convention|. */
    static inline uint32_t numArgRegs(CallConvention convention) {
#if defined(JS_CPU_X86)
# if defined(JS_NO_FASTCALL)
        return 0;
# else
        return (convention == FastCall) ? 2 : 0;
# endif
#elif defined(JS_CPU_X64)
# ifdef _WIN64
        return 4;
# else
        return 6;
# endif
#elif defined(JS_CPU_ARM)
        return 4;
#elif defined(JS_CPU_SPARC)
        return 6;
#elif defined(JS_CPU_MIPS)
        return 4;
#endif
    }

    /*
     * Store in *reg the ABI register carrying argument |i| under |conv|.
     * Returns false when that argument is not passed in a register.
     */
    static inline bool regForArg(CallConvention conv, uint32_t i, RegisterID *reg) {
#if defined(JS_CPU_X86)
        static const RegisterID regs[] = {
            JSC::X86Registers::ecx,
            JSC::X86Registers::edx
        };
# if defined(JS_NO_FASTCALL)
        return false;
# else
        if (conv == NormalCall)
            return false;
# endif
#elif defined(JS_CPU_X64)
# ifdef _WIN64
        static const RegisterID regs[] = {
            JSC::X86Registers::ecx,
            JSC::X86Registers::edx,
            JSC::X86Registers::r8,
            JSC::X86Registers::r9
        };
# else
        static const RegisterID regs[] = {
            JSC::X86Registers::edi,
            JSC::X86Registers::esi,
            JSC::X86Registers::edx,
            JSC::X86Registers::ecx,
            JSC::X86Registers::r8,
            JSC::X86Registers::r9
        };
# endif
#elif defined(JS_CPU_ARM)
        static const RegisterID regs[] = {
            JSC::ARMRegisters::r0,
            JSC::ARMRegisters::r1,
            JSC::ARMRegisters::r2,
            JSC::ARMRegisters::r3
        };
#elif defined(JS_CPU_SPARC)
        static const RegisterID regs[] = {
            JSC::SparcRegisters::o0,
            JSC::SparcRegisters::o1,
            JSC::SparcRegisters::o2,
            JSC::SparcRegisters::o3,
            JSC::SparcRegisters::o4,
            JSC::SparcRegisters::o5
        };
#elif defined(JS_CPU_MIPS)
        static const RegisterID regs[] = {
            JSC::MIPSRegisters::a0,
            JSC::MIPSRegisters::a1,
            JSC::MIPSRegisters::a2,
            JSC::MIPSRegisters::a3,
        };
#endif
        JS_ASSERT(numArgRegs(conv) == mozilla::ArrayLength(regs));
        /*
         * Bug fix: this previously tested |i > ArrayLength(regs)|, so
         * i == ArrayLength(regs) would read one element past the end of
         * |regs| before returning.  Use >= to reject it up front.
         */
        if (i >= mozilla::ArrayLength(regs))
            return false;
        *reg = regs[i];
        return true;
    }

    /* Floating point registers. */

    typedef JSC::MacroAssembler::FPRegisterID FPRegisterID;

#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
#ifdef _WIN64
    /* xmm0-xmm5 are scratch register on Win64 ABI */
    static const uint32_t TotalFPRegisters = 5;
    static const FPRegisterID FPConversionTemp = JSC::X86Registers::xmm5;
#else
    static const uint32_t TotalFPRegisters = 7;
    static const FPRegisterID FPConversionTemp = JSC::X86Registers::xmm7;
#endif
    static const uint32_t TempFPRegs = (
          (1 << JSC::X86Registers::xmm0)
        | (1 << JSC::X86Registers::xmm1)
        | (1 << JSC::X86Registers::xmm2)
        | (1 << JSC::X86Registers::xmm3)
        | (1 << JSC::X86Registers::xmm4)
#ifndef _WIN64
        | (1 << JSC::X86Registers::xmm5)
        | (1 << JSC::X86Registers::xmm6)
#endif
        ) << TotalRegisters;
#elif defined(JS_CPU_ARM)
    static const uint32_t TotalFPRegisters = 3;
    static const uint32_t TempFPRegs = (
          (1 << JSC::ARMRegisters::d0)
        | (1 << JSC::ARMRegisters::d1)
        | (1 << JSC::ARMRegisters::d2)
        ) << TotalRegisters;
    static const FPRegisterID FPConversionTemp = JSC::ARMRegisters::d3;
#elif defined(JS_CPU_SPARC)
    static const uint32_t TotalFPRegisters = 8;
    static const uint32_t TempFPRegs = (uint32_t)(
          (1 << JSC::SparcRegisters::f0)
        | (1 << JSC::SparcRegisters::f2)
        | (1 << JSC::SparcRegisters::f4)
        | (1 << JSC::SparcRegisters::f6)
        ) << TotalRegisters;
    static const FPRegisterID FPConversionTemp = JSC::SparcRegisters::f8;
#elif defined(JS_CPU_MIPS)
    /* TotalRegisters is 24, so TotalFPRegisters can be 8 to have a 32-bit
       bit mask.
       Note that the O32 ABI can access only even FP registers. */
    static const uint32_t TotalFPRegisters = 8;
    static const uint32_t TempFPRegs = (uint32_t)(
          (1 << JSC::MIPSRegisters::f0)
        | (1 << JSC::MIPSRegisters::f2)
        | (1 << JSC::MIPSRegisters::f4)
        | (1 << JSC::MIPSRegisters::f6)
        ) << TotalRegisters;
    // f16 is reserved as a scratch register for the assembler.
    static const FPRegisterID FPConversionTemp = JSC::MIPSRegisters::f18;
#else
# error "Unsupported platform"
#endif

    /* Temp reg that can be clobbered when setting up a fallible fast or ABI call. */
#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
    static const RegisterID ClobberInCall = JSC::X86Registers::ecx;
#elif defined(JS_CPU_ARM)
    static const RegisterID ClobberInCall = JSC::ARMRegisters::r2;
#elif defined(JS_CPU_SPARC)
    static const RegisterID ClobberInCall = JSC::SparcRegisters::l1;
#elif defined(JS_CPU_MIPS)
    static const RegisterID ClobberInCall = JSC::MIPSRegisters::at;
#endif

    static const uint32_t AvailFPRegs = TempFPRegs;

    static inline uint32_t maskReg(FPRegisterID reg) {
        return (1 << reg) << TotalRegisters;
    }

    /* Common code. */

    static const uint32_t TotalAnyRegisters = TotalRegisters + TotalFPRegisters;
    static const uint32_t TempAnyRegs = TempRegs | TempFPRegs;
    static const uint32_t AvailAnyRegs = AvailRegs | AvailFPRegs;

    static inline uint32_t maskReg(AnyRegisterID reg) {
        return (1 << reg.reg_);
    }

    /* Get a register which is not live before a FASTCALL. */
    static inline RegisterID tempCallReg() {
        Registers regs(TempRegs);
        regs.takeReg(Registers::ArgReg0);
        regs.takeReg(Registers::ArgReg1);
        return regs.takeAnyReg().reg();
    }

    /* Get a register which is not live before a normal ABI call with at most four args. */
    static inline Registers tempCallRegMask() {
        Registers regs(AvailRegs);
#ifndef JS_CPU_X86
        regs.takeReg(ArgReg0);
        regs.takeReg(ArgReg1);
        regs.takeReg(ArgReg2);
#if defined(JS_CPU_SPARC) || defined(JS_CPU_X64)
        regs.takeReg(ArgReg3);
#endif
#endif
        return regs;
    }

    Registers(uint32_t freeMask)
      : freeMask(freeMask)
    { }

    Registers(const Registers &other)
      : freeMask(other.freeMask)
    { }

    Registers & operator =(const Registers &other)
    {
        freeMask = other.freeMask;
        return *this;
    }

    /* Whether no register in |mask| is free. */
    bool empty(uint32_t mask) const {
        return !(freeMask & mask);
    }

    bool empty() const {
        return !freeMask;
    }

    /* Highest-numbered free register in |mask|, left in the set. */
    AnyRegisterID peekReg(uint32_t mask) {
        JS_ASSERT(!empty(mask));
        unsigned ireg;
        JS_FLOOR_LOG2(ireg, freeMask & mask);
        return AnyRegisterID::fromRaw(ireg);
    }

    AnyRegisterID peekReg() {
        return peekReg(freeMask);
    }

    /* Remove and return a free register in |mask|. */
    AnyRegisterID takeAnyReg(uint32_t mask) {
        AnyRegisterID reg = peekReg(mask);
        takeReg(reg);
        return reg;
    }

    AnyRegisterID takeAnyReg() {
        return takeAnyReg(freeMask);
    }

    bool hasReg(AnyRegisterID reg) const {
        return !!(freeMask & (1 << reg.reg_));
    }

    bool hasRegInMask(uint32_t mask) const {
        return !!(freeMask & mask);
    }

    bool hasAllRegs(uint32_t mask) const {
        return (freeMask & mask) == mask;
    }

    void putRegUnchecked(AnyRegisterID reg) {
        freeMask |= (1 << reg.reg_);
    }

    void putReg(AnyRegisterID reg) {
        JS_ASSERT(!hasReg(reg));
        putRegUnchecked(reg);
    }

    void takeReg(AnyRegisterID reg) {
        JS_ASSERT(hasReg(reg));
        takeRegUnchecked(reg);
    }

    void takeRegUnchecked(AnyRegisterID reg) {
        freeMask &= ~(1 << reg.reg_);
    }

    bool operator ==(const Registers &other) {
        return freeMask == other.freeMask;
    }

    /* Bitmask of currently-free registers (GPRs low, FPRs shifted up). */
    uint32_t freeMask;
};
/* Convenience alias so mjit code can say JSFrameReg without qualification. */
static const JSC::MacroAssembler::RegisterID JSFrameReg = Registers::JSFrameReg;

/* Rebuild an AnyRegisterID from its raw encoded index (see AnyRegisterID::reg_). */
AnyRegisterID
AnyRegisterID::fromRaw(unsigned reg_)
{
JS_ASSERT(reg_ < Registers::TotalAnyRegisters);
AnyRegisterID reg;
reg.reg_ = reg_;
return reg;
}

/* Decode as a general purpose register; asserts this is not an FP register. */
JSC::MacroAssembler::RegisterID
AnyRegisterID::reg()
{
JS_ASSERT(reg_ < Registers::TotalRegisters);
return (JSC::MacroAssembler::RegisterID) reg_;
}

/* Decode as an FP register by undoing the TotalRegisters offset. */
JSC::MacroAssembler::FPRegisterID
AnyRegisterID::fpreg()
{
JS_ASSERT(reg_ >= Registers::TotalRegisters &&
reg_ < Registers::TotalAnyRegisters);
return (JSC::MacroAssembler::FPRegisterID) (reg_ - Registers::TotalRegisters);
}

/* Register name for spew; only x86/x64 and ARM have name tables here. */
const char *
AnyRegisterID::name()
{
#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
return isReg() ? JSC::X86Registers::nameIReg(reg()) : JSC::X86Registers::nameFPReg(fpreg());
#elif defined(JS_CPU_ARM)
return isReg() ? JSC::ARMAssembler::nameGpReg(reg()) : JSC::ARMAssembler::nameFpRegD(fpreg());
#else
return "???";
#endif
}
} /* namespace mjit */
} /* namespace js */
#endif /* jsjaeger_regstate_h__ */

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,302 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_mono_ic_h__ && defined JS_METHODJIT && defined JS_MONOIC
#define jsjaeger_mono_ic_h__
#include "assembler/assembler/MacroAssembler.h"
#include "assembler/assembler/CodeLocation.h"
#include "assembler/moco/MocoStubs.h"
#include "methodjit/MethodJIT.h"
#include "CodeGenIncludes.h"
#include "methodjit/ICRepatcher.h"
namespace js {
namespace mjit {
/*
 * Compact description of the frame pushed for a call at an IC site: either
 * a "static" call whose frame depth and argc were known at compile time, or
 * a "dynamic" (apply-style) call whose argc is only known at runtime.
 */
class FrameSize
{
    /* Non-zero frame depth marks a static call; 0 marks a dynamic one. */
    uint32_t frameDepth_ : 16;
    uint32_t argc_;

  public:
    /* Initialize for a call site with compile-time-known depth and argc. */
    void initStatic(uint32_t frameDepth, uint32_t argc) {
        JS_ASSERT(frameDepth > 0);
        frameDepth_ = frameDepth;
        argc_ = argc;
    }

    /* Initialize for a call whose argc is only known at runtime. */
    void initDynamic() {
        frameDepth_ = 0;
        argc_ = -1;  /* quiet gcc */
    }

    bool isStatic() const {
        return frameDepth_ > 0;
    }

    bool isDynamic() const {
        return frameDepth_ == 0;
    }

    uint32_t staticLocalSlots() const {
        JS_ASSERT(isStatic());
        return frameDepth_;
    }

    uint32_t staticArgc() const {
        JS_ASSERT(isStatic());
        return argc_;
    }

    /* Argument count, reading the VMFrame when it is only known dynamically. */
    uint32_t getArgc(VMFrame &f) const {
        return isStatic() ? staticArgc() : f.u.call.dynamicArgc;
    }

    /*
     * Whether this call was "lowered": dynamic, or its argc differs from the
     * argc encoded at |pc|.  A redundant non-const overload with identical
     * behavior (!isStatic() is the same test as isDynamic()) was removed;
     * this const version is callable on const and non-const objects alike.
     */
    bool lowered(jsbytecode *pc) const {
        return isDynamic() || staticArgc() != GET_ARGC(pc);
    }

    /* Rejoin state to resume at after this call's stub completes. */
    RejoinState rejoinState(jsbytecode *pc, bool native) {
        if (isStatic()) {
            if (staticArgc() == GET_ARGC(pc))
                return native ? REJOIN_NATIVE : REJOIN_CALL_PROLOGUE;
            /* A lowered static call pushes one argument fewer than GET_ARGC. */
            JS_ASSERT(staticArgc() == GET_ARGC(pc) - 1);
            return native ? REJOIN_NATIVE_LOWERED : REJOIN_CALL_PROLOGUE_LOWERED_CALL;
        }
        return native ? REJOIN_NATIVE_LOWERED : REJOIN_CALL_PROLOGUE_LOWERED_APPLY;
    }
};
namespace ic {
/*
 * State shared by the get/set global-name inline caches: where the inline
 * fast path begins and where the out-of-line stub is called.
 */
struct GlobalNameIC
{
typedef JSC::MacroAssembler::RegisterID RegisterID;

JSC::CodeLocationLabel fastPathStart;
JSC::CodeLocationCall slowPathCall;

/*
* - ARM and x64 always emit exactly one instruction which needs to be
* patched. On ARM, the label points to the patched instruction, whilst
* on x64 it points to the instruction after it.
* - For x86, the label "load" points to the start of the load/store
* sequence, which may consist of one or two "mov" instructions. Because
* of this, x86 is the only platform which requires non-trivial patching
* code.
*/
int32_t loadStoreOffset   : 15;
int32_t shapeOffset : 15;
};
/* GETGNAME IC: needs no state beyond the common GlobalNameIC fields. */
struct GetGlobalNameIC : public GlobalNameIC
{
};
/* SETGNAME IC: adds the out-of-line path and RHS value description. */
struct SetGlobalNameIC : public GlobalNameIC
{
JSC::CodeLocationLabel slowPathStart;

/* SET only, if we had to generate an out-of-line path. */
int32_t inlineShapeJump : 10; /* Offset into inline path for shape jump. */
bool objConst : 1; /* True if the object is constant. */
RegisterID objReg : 5; /* Register for object, if objConst is false. */
RegisterID shapeReg : 5; /* Register for shape; volatile. */
int32_t fastRejoinOffset : 16; /* Offset from fastPathStart to rejoin. */

/* SET only. */
ValueRemat vr; /* RHS value. */

/* Rewrite the inline shape guard to test against |shape|. */
void patchInlineShapeGuard(Repatcher &repatcher, Shape *shape);
};
/* Slow-path stubs invoked from JIT code when a global-name IC misses. */
void JS_FASTCALL GetGlobalName(VMFrame &f, ic::GetGlobalNameIC *ic);
void JS_FASTCALL SetGlobalName(VMFrame &f, ic::SetGlobalNameIC *ic);
/* IC state for an equality comparison fused with a branch. */
struct EqualityICInfo {
typedef JSC::MacroAssembler::RegisterID RegisterID;

JSC::CodeLocationLabel stubEntry;
JSC::CodeLocationCall stubCall;
BoolStub stub;
/* Branch targets for the taken and fall-through cases. */
JSC::CodeLocationLabel target;
JSC::CodeLocationLabel fallThrough;
JSC::CodeLocationJump jumpToStub;
/* Rematerialization info for the left and right operands. */
ValueRemat lvr, rvr;
bool generated : 1;
JSC::MacroAssembler::RegisterID tempReg : 5;
Assembler::Condition cond;
};
/* Slow-path stub for the equality IC; returns the comparison result. */
JSBool JS_FASTCALL Equality(VMFrame &f, ic::EqualityICInfo *ic);
/* See MonoIC.cpp, CallCompiler for more information on call ICs. */
struct CallICInfo {
    typedef JSC::MacroAssembler::RegisterID RegisterID;

    /* Linked list entry for all ICs guarding on the same JIT entry point in fastGuardedObject. */
    JSCList links;

    enum PoolIndex {
        Pool_ScriptStub,
        Pool_ClosureStub,
        Total_Pools
    };

    // Executable pools backing generated stubs; freed via releasePool().
    JSC::ExecutablePool *pools[Total_Pools];

    /* Used for rooting and reification. */
    JSObject *fastGuardedObject;
    JSObject *fastGuardedNative;

    /* Return site for scripted calls at this site, with PC and inlining state. */
    CallSite *call;

    // Argument-count information for this call site.
    FrameSize frameSize;

    /* Label to the function object identity guard. */
    JSC::CodeLocationLabel funGuardLabel;

    /* Function object identity guard. */
    JSC::CodeLocationDataLabelPtr funGuard;

    /* Starting point for all slow call paths. */
    JSC::CodeLocationLabel slowPathStart;

    /* Inline to OOL jump, redirected by stubs. */
    JSC::CodeLocationJump funJump;

    /*
     * Target of the above jump, remembered so that if we need to generate a
     * callsite clone stub we can redirect to the original funJump target.
     */
    JSC::CodeLocationLabel funJumpTarget;

    /*
     * If an Ion stub has been generated, its guard may be linked to another
     * stub. The guard location is stored in this label.
     */
    bool hasIonStub_;
    JSC::JITCode lastOolCode_;
    JSC::CodeLocationJump lastOolJump_;

    /* Offset to inline scripted call, from funGuard. */
    uint32_t hotJumpOffset : 16;
    uint32_t joinPointOffset : 16;

    /* Out of line slow call. */
    uint32_t oolCallOffset : 16;

    /* Jump/rejoin to patch for out-of-line scripted calls. */
    uint32_t oolJumpOffset : 16;

    /* Label for out-of-line call to IC function. */
    uint32_t icCallOffset : 16;

    /* Offset for deep-fun check to rejoin at. */
    uint32_t hotPathOffset : 16;

    /* Join point for all slow call paths. */
    uint32_t slowJoinOffset : 16;

    /* Join point for Ion calls. */
    uint32_t ionJoinOffset : 16;

    RegisterID funObjReg : 5;   // Register holding the callee function object.
    bool hit : 1;               // Whether this site has executed at least once.
    bool hasJsFunCheck : 1;     // Whether a closure stub's function check is in place.
    bool typeMonitored : 1;     // Whether argument types are monitored at this site.

    // Release and clear the executable pool at |index|, if present.
    inline void releasePool(PoolIndex index) {
        if (pools[index]) {
            pools[index]->release();
            pools[index] = NULL;
        }
    }

    // Drop the object-identity guarded stub: free its pool, clear the
    // guarded object, and unlink this IC from the shared guard list.
    inline void purgeGuardedObject() {
        JS_ASSERT(fastGuardedObject);
        releasePool(CallICInfo::Pool_ClosureStub);
        hasJsFunCheck = false;
        fastGuardedObject = NULL;
        JS_REMOVE_LINK(&links);
    }

    // Whether a methodjit scripted-call stub exists.
    bool hasJMStub() const {
        return !!pools[Pool_ScriptStub];
    }

    // Whether an Ion call stub has been generated.
    bool hasIonStub() const {
        return hasIonStub_;
    }

    bool hasStubOolJump() const {
        return hasIonStub();
    }

    // Location of the out-of-line call into the IC update function.
    JSC::CodeLocationLabel icCall() {
        return slowPathStart.labelAtOffset(icCallOffset);
    }

    JSC::CodeLocationJump oolJump() {
        return slowPathStart.jumpAtOffset(oolJumpOffset);
    }

    // The jump to patch when chaining another stub: the last stub's recorded
    // jump if an Ion stub exists, otherwise the inline OOL jump.
    JSC::CodeLocationJump lastOolJump() {
        if (hasStubOolJump())
            return lastOolJump_;
        return oolJump();
    }

    JSC::JITCode lastOolCode() {
        JS_ASSERT(hasStubOolJump());
        return lastOolCode_;
    }

    void updateLastOolJump(JSC::CodeLocationJump jump, JSC::JITCode code) {
        lastOolJump_ = jump;
        lastOolCode_ = code;
    }

    JSC::CodeLocationLabel nativeRejoin() {
        return slowPathStart.labelAtOffset(slowJoinOffset);
    }

    JSC::CodeLocationLabel ionJoinPoint() {
        return funGuard.labelAtOffset(ionJoinOffset);
    }

    // Unlink all generated stubs and repatch the inline path back to its
    // initial (unhit) state.
    inline void reset(Repatcher &repatcher) {
        if (fastGuardedObject) {
            repatcher.repatch(funGuard, NULL);
            repatcher.relink(funJump, slowPathStart);
            purgeGuardedObject();
        }
        if (fastGuardedNative) {
            repatcher.relink(funJump, slowPathStart);
            fastGuardedNative = NULL;
        }
        if (pools[Pool_ScriptStub] || hasIonStub()) {
            repatcher.relink(oolJump(), icCall());
            releasePool(Pool_ScriptStub);
        }
        hit = false;
        hasIonStub_ = false;
    }
};
void * JS_FASTCALL New(VMFrame &f, ic::CallICInfo *ic);
void * JS_FASTCALL Call(VMFrame &f, ic::CallICInfo *ic);
void * JS_FASTCALL NativeNew(VMFrame &f, ic::CallICInfo *ic);
void * JS_FASTCALL NativeCall(VMFrame &f, ic::CallICInfo *ic);
JSBool JS_FASTCALL SplatApplyArgs(VMFrame &f);
void GenerateArgumentCheckStub(VMFrame &f);
} /* namespace ic */
} /* namespace mjit */
} /* namespace js */
#endif /* jsjaeger_mono_ic_h__ */

Просмотреть файл

@ -1,513 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_assembler_h__ && defined JS_METHODJIT && defined JS_NUNBOX32
#define jsjaeger_assembler_h__
#include "assembler/assembler/MacroAssembler.h"
#include "methodjit/CodeGenIncludes.h"
#include "methodjit/RematInfo.h"
namespace js {
namespace mjit {
/*
 * 32-bit immediate wrapping a raw JSValueTag. Don't use ImmTag directly;
 * use ImmType below, which validates the type being encoded.
 */
struct ImmTag : JSC::MacroAssembler::Imm32
{
    ImmTag(JSValueTag tag) : Imm32(int32_t(tag)) { }
};
struct ImmType : ImmTag
{
ImmType(JSValueType type)
: ImmTag(JSVAL_TYPE_TO_TAG(type))
{
JS_ASSERT(type > JSVAL_TYPE_DOUBLE);
}
};
/* 32-bit immediate for the payload half of a nunbox Value. */
struct ImmPayload : JSC::MacroAssembler::Imm32
{
    ImmPayload(uint32_t word) : Imm32(word) { }
};
/*
 * Assembler helpers for the 32-bit "nunbox" Value layout (JS_NUNBOX32): a
 * jsval is a 32-bit type tag and a 32-bit payload in adjacent words. This
 * class provides typed loads/stores of the two halves, tag tests, and the
 * patchable load/store sequences used by the inline caches.
 */
class NunboxAssembler : public JSC::MacroAssembler
{
  public:
#ifdef IS_BIG_ENDIAN
    static const uint32_t PAYLOAD_OFFSET = 4;
    static const uint32_t TAG_OFFSET = 0;
#else
    static const uint32_t PAYLOAD_OFFSET = 0;
    static const uint32_t TAG_OFFSET = 4;
#endif

  public:
    static const JSC::MacroAssembler::Scale JSVAL_SCALE = JSC::MacroAssembler::TimesEight;

    // Address of the payload word of the Value at |address|.
    Address payloadOf(Address address) {
        return Address(address.base, address.offset + PAYLOAD_OFFSET);
    }

    BaseIndex payloadOf(BaseIndex address) {
        return BaseIndex(address.base, address.index, address.scale, address.offset + PAYLOAD_OFFSET);
    }

    // Address of the type-tag word of the Value at |address|.
    Address tagOf(Address address) {
        return Address(address.base, address.offset + TAG_OFFSET);
    }

    BaseIndex tagOf(BaseIndex address) {
        return BaseIndex(address.base, address.index, address.scale, address.offset + TAG_OFFSET);
    }

    // Load fixed slot |slot| of |objReg| into (typeReg, dataReg), ordering
    // the loads so that a typeReg aliasing objReg is clobbered last.
    void loadInlineSlot(RegisterID objReg, uint32_t slot,
                        RegisterID typeReg, RegisterID dataReg) {
        Address address(objReg, JSObject::getFixedSlotOffset(slot));
        if (objReg == typeReg) {
            loadPayload(address, dataReg);
            loadTypeTag(address, typeReg);
        } else {
            loadTypeTag(address, typeReg);
            loadPayload(address, dataReg);
        }
    }

    template <typename T>
    void loadTypeTag(T address, RegisterID reg) {
        load32(tagOf(address), reg);
    }

    template <typename T>
    void storeTypeTag(ImmTag imm, T address) {
        store32(imm, tagOf(address));
    }

    template <typename T>
    void storeTypeTag(RegisterID reg, T address) {
        store32(reg, tagOf(address));
    }

    template <typename T>
    void loadPayload(T address, RegisterID reg) {
        load32(payloadOf(address), reg);
    }

    template <typename T>
    void storePayload(RegisterID reg, T address) {
        store32(reg, payloadOf(address));
    }

    template <typename T>
    void storePayload(ImmPayload imm, T address) {
        store32(imm, payloadOf(address));
    }

    bool addressUsesRegister(BaseIndex address, RegisterID reg) {
        return (address.base == reg) || (address.index == reg);
    }

    bool addressUsesRegister(Address address, RegisterID reg) {
        return address.base == reg;
    }

    /* Loads type first, then payload, returning label after type load. */
    template <typename T>
    Label loadValueAsComponents(T address, RegisterID type, RegisterID payload) {
        JS_ASSERT(!addressUsesRegister(address, type));
        loadTypeTag(address, type);
        Label l = label();
        loadPayload(address, payload);
        return l;
    }

    // Materialize the constant |val| as immediate tag/payload moves.
    void loadValueAsComponents(const Value &val, RegisterID type, RegisterID payload) {
        jsval_layout jv = JSVAL_TO_IMPL(val);
        move(ImmTag(jv.s.tag), type);
        move(Imm32(jv.s.payload.u32), payload);
    }

    void loadValuePayload(const Value &val, RegisterID payload) {
        jsval_layout jv = JSVAL_TO_IMPL(val);
        move(Imm32(jv.s.payload.u32), payload);
    }

    /*
     * Load a (64b) js::Value from 'address' into 'type' and 'payload', and
     * return a label which can be used by
     * ICRepatcher::patchAddressOffsetForValueLoad to patch the address'
     * offset.
     *
     * The data register is guaranteed to be clobbered last. (This makes the
     * base register for the address reusable as 'dreg'.)
     */
    Label loadValueWithAddressOffsetPatch(Address address, RegisterID treg, RegisterID dreg) {
        JS_ASSERT(address.base != treg); /* treg is clobbered first. */

        Label start = label();
#if defined JS_CPU_X86
        /*
         * On x86 there are two loads to patch and they both encode the offset
         * in-line.
         */
        loadTypeTag(address, treg);
        DBGLABEL_NOMASM(endType);
        loadPayload(address, dreg);
        DBGLABEL_NOMASM(endPayload);
        JS_ASSERT(differenceBetween(start, endType) == 6);
        JS_ASSERT(differenceBetween(endType, endPayload) == 6);
        return start;
#elif defined JS_CPU_ARM || defined JS_CPU_SPARC
        /*
         * On ARM, the first instruction loads the offset from a literal pool, so the label
         * returned points at that instruction.
         */
        DataLabel32 load = load64WithAddressOffsetPatch(address, treg, dreg);
        JS_ASSERT(differenceBetween(start, load) == 0);
        (void) load;
        return start;
#elif defined JS_CPU_MIPS
        /*
         * On MIPS there are LUI/ORI to patch.
         */
        load64WithPatch(address, treg, dreg, TAG_OFFSET, PAYLOAD_OFFSET);
        return start;
#endif
    }

    /*
     * Store a (64b) js::Value from type |treg| and payload |dreg| into |address|, and
     * return a label which can be used by
     * ICRepatcher::patchAddressOffsetForValueStore to patch the address'
     * offset.
     */
    DataLabel32 storeValueWithAddressOffsetPatch(RegisterID treg, RegisterID dreg, Address address) {
#if defined JS_CPU_X86
        /*
         * On x86 there are two stores to patch and they both encode the offset
         * in-line.
         */
        DataLabel32 start = dataLabel32();
        storeTypeTag(treg, address);
        DBGLABEL_NOMASM(endType);
        storePayload(dreg, address);
        DBGLABEL_NOMASM(endPayload);
        JS_ASSERT(differenceBetween(start, endType) == 6);
        JS_ASSERT(differenceBetween(endType, endPayload) == 6);
        return start;
#elif defined JS_CPU_ARM || defined JS_CPU_SPARC
        return store64WithAddressOffsetPatch(treg, dreg, address);
#elif defined JS_CPU_MIPS
        /*
         * On MIPS there are LUI/ORI to patch.
         */
        DataLabel32 start = dataLabel32();
        store64WithPatch(address, treg, dreg, TAG_OFFSET, PAYLOAD_OFFSET);
        return start;
#endif
    }

    /* Overloaded for storing a constant type. */
    DataLabel32 storeValueWithAddressOffsetPatch(ImmType type, RegisterID dreg, Address address) {
#if defined JS_CPU_X86
        DataLabel32 start = dataLabel32();
        storeTypeTag(type, address);
        DBGLABEL_NOMASM(endType);
        storePayload(dreg, address);
        DBGLABEL_NOMASM(endPayload);
        JS_ASSERT(differenceBetween(start, endType) == 10);
        JS_ASSERT(differenceBetween(endType, endPayload) == 6);
        return start;
#elif defined JS_CPU_ARM || defined JS_CPU_SPARC
        return store64WithAddressOffsetPatch(type, dreg, address);
#elif defined JS_CPU_MIPS
        /*
         * On MIPS there are LUI/ORI to patch.
         */
        DataLabel32 start = dataLabel32();
        store64WithPatch(address, type, dreg, TAG_OFFSET, PAYLOAD_OFFSET);
        return start;
#endif
    }

    /* Overloaded for storing constant type and data. */
    DataLabel32 storeValueWithAddressOffsetPatch(const Value &v, Address address) {
        jsval_layout jv = JSVAL_TO_IMPL(v);
        ImmTag type(jv.s.tag);
        Imm32 payload(jv.s.payload.u32);
#if defined JS_CPU_X86
        DataLabel32 start = dataLabel32();
        store32(type, tagOf(address));
        DBGLABEL_NOMASM(endType);
        store32(payload, payloadOf(address));
        DBGLABEL_NOMASM(endPayload);
        JS_ASSERT(differenceBetween(start, endType) == 10);
        JS_ASSERT(differenceBetween(endType, endPayload) == 10);
        return start;
#elif defined JS_CPU_ARM || defined JS_CPU_SPARC
        return store64WithAddressOffsetPatch(type, payload, address);
#elif defined JS_CPU_MIPS
        /*
         * On MIPS there are LUI/ORI to patch.
         */
        DataLabel32 start = dataLabel32();
        store64WithPatch(address, type, payload, TAG_OFFSET, PAYLOAD_OFFSET);
        return start;
#endif
    }

    /* Overloaded for store with value remat info. */
    DataLabel32 storeValueWithAddressOffsetPatch(const ValueRemat &vr, Address address) {
        JS_ASSERT(!vr.isFPRegister());
        if (vr.isConstant()) {
            return storeValueWithAddressOffsetPatch(vr.value(), address);
        } else if (vr.isTypeKnown()) {
            ImmType type(vr.knownType());
            RegisterID data(vr.dataReg());
            return storeValueWithAddressOffsetPatch(type, data, address);
        } else {
            RegisterID type(vr.typeReg());
            RegisterID data(vr.dataReg());
            return storeValueWithAddressOffsetPatch(type, data, address);
        }
    }

    /*
     * Stores type first, then payload.
     */
    template <typename T>
    Label storeValue(const Value &v, T address) {
        jsval_layout jv = JSVAL_TO_IMPL(v);
        store32(ImmTag(jv.s.tag), tagOf(address));
        Label l = label();
        store32(Imm32(jv.s.payload.u32), payloadOf(address));
        return l;
    }

    template <typename T>
    void storeValueFromComponents(RegisterID type, RegisterID payload, T address) {
        storeTypeTag(type, address);
        storePayload(payload, address);
    }

    template <typename T>
    void storeValueFromComponents(ImmType type, RegisterID payload, T address) {
        storeTypeTag(type, address);
        storePayload(payload, address);
    }

    // Store a rematerialized value, choosing the appropriate constant,
    // FP-register, or (type, payload) register form.
    template <typename T>
    Label storeValue(const ValueRemat &vr, T address) {
        if (vr.isConstant()) {
            return storeValue(vr.value(), address);
        } else if (vr.isFPRegister()) {
            Label l = label();
            storeDouble(vr.fpReg(), address);
            return l;
        } else {
            if (vr.isTypeKnown())
                storeTypeTag(ImmType(vr.knownType()), address);
            else
                storeTypeTag(vr.typeReg(), address);
            Label l = label();
            storePayload(vr.dataReg(), address);
            return l;
        }
    }

    // Jump taken when the Value at |address| is the magic "hole" value.
    template <typename T>
    Jump guardNotHole(T address) {
        return branch32(Equal, tagOf(address), ImmType(JSVAL_TYPE_MAGIC));
    }

    // Private pointers live in the payload word.
    void loadPrivate(Address privAddr, RegisterID to) {
        loadPtr(payloadOf(privAddr), to);
    }

    void loadObjPrivate(RegisterID base, RegisterID to, uint32_t nfixed) {
        Address priv(base, JSObject::getPrivateDataOffset(nfixed));
        loadPtr(priv, to);
    }

    Jump testNull(Condition cond, RegisterID reg) {
        return branch32(cond, reg, ImmTag(JSVAL_TAG_NULL));
    }

    Jump testNull(Condition cond, Address address) {
        return branch32(cond, tagOf(address), ImmTag(JSVAL_TAG_NULL));
    }

    Jump testUndefined(Condition cond, RegisterID reg) {
        return branch32(cond, reg, ImmTag(JSVAL_TAG_UNDEFINED));
    }

    Jump testUndefined(Condition cond, Address address) {
        return branch32(cond, tagOf(address), ImmTag(JSVAL_TAG_UNDEFINED));
    }

    Jump testInt32(Condition cond, RegisterID reg) {
        return branch32(cond, reg, ImmTag(JSVAL_TAG_INT32));
    }

    Jump testInt32(Condition cond, Address address) {
        return branch32(cond, tagOf(address), ImmTag(JSVAL_TAG_INT32));
    }

    // A number has a tag at or below JSVAL_TAG_INT32, so remap the
    // equality condition to an unsigned comparison.
    Jump testNumber(Condition cond, RegisterID reg) {
        cond = (cond == Equal) ? BelowOrEqual : Above;
        return branch32(cond, reg, ImmTag(JSVAL_TAG_INT32));
    }

    Jump testNumber(Condition cond, Address address) {
        cond = (cond == Equal) ? BelowOrEqual : Above;
        return branch32(cond, tagOf(address), ImmTag(JSVAL_TAG_INT32));
    }

    // Primitives are all tags below JSVAL_TAG_OBJECT.
    Jump testPrimitive(Condition cond, RegisterID reg) {
        cond = (cond == NotEqual) ? AboveOrEqual : Below;
        return branch32(cond, reg, ImmTag(JSVAL_TAG_OBJECT));
    }

    Jump testPrimitive(Condition cond, Address address) {
        cond = (cond == NotEqual) ? AboveOrEqual : Below;
        return branch32(cond, tagOf(address), ImmTag(JSVAL_TAG_OBJECT));
    }

    Jump testObject(Condition cond, RegisterID reg) {
        return branch32(cond, reg, ImmTag(JSVAL_TAG_OBJECT));
    }

    Jump testObject(Condition cond, Address address) {
        return branch32(cond, tagOf(address), ImmTag(JSVAL_TAG_OBJECT));
    }

    Jump testGCThing(RegisterID reg) {
        return branch32(AboveOrEqual, reg, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
    }

    Jump testGCThing(Address address) {
        return branch32(AboveOrEqual, tagOf(address), ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
    }

    // Doubles are the tags strictly below JSVAL_TAG_CLEAR.
    Jump testDouble(Condition cond, RegisterID reg) {
        Condition opcond;
        if (cond == Equal)
            opcond = Below;
        else
            opcond = AboveOrEqual;
        return branch32(opcond, reg, ImmTag(JSVAL_TAG_CLEAR));
    }

    Jump testDouble(Condition cond, Address address) {
        Condition opcond;
        if (cond == Equal)
            opcond = Below;
        else
            opcond = AboveOrEqual;
        return branch32(opcond, tagOf(address), ImmTag(JSVAL_TAG_CLEAR));
    }

    Jump testBoolean(Condition cond, RegisterID reg) {
        return branch32(cond, reg, ImmTag(JSVAL_TAG_BOOLEAN));
    }

    Jump testBoolean(Condition cond, Address address) {
        return branch32(cond, tagOf(address), ImmTag(JSVAL_TAG_BOOLEAN));
    }

    Jump testMagic(Condition cond, RegisterID reg) {
        return branch32(cond, reg, ImmTag(JSVAL_TAG_MAGIC));
    }

    Jump testString(Condition cond, RegisterID reg) {
        return branch32(cond, reg, ImmTag(JSVAL_TAG_STRING));
    }

    Jump testString(Condition cond, Address address) {
        return branch32(cond, tagOf(address), ImmTag(JSVAL_TAG_STRING));
    }

    Jump testPrivate(Condition cond, Address address, void *ptr) {
        return branchPtr(cond, address, ImmPtr(ptr));
    }

    // Append jumps to |mismatches| that are taken when the Values at |one|
    // and |two| differ bitwise. Clobbers T0 and T1.
    void compareValue(Address one, Address two, RegisterID T0, RegisterID T1,
                      Vector<Jump> *mismatches) {
        loadValueAsComponents(one, T0, T1);
        mismatches->append(branch32(NotEqual, T0, tagOf(two)));
        mismatches->append(branch32(NotEqual, T1, payloadOf(two)));
    }

#ifdef JS_CPU_X86
    // Pack the 32-bit halves (lo, hi) into the XMM register |fpReg|.
    void fastLoadDouble(RegisterID lo, RegisterID hi, FPRegisterID fpReg) {
        if (MacroAssemblerX86Common::getSSEState() >= HasSSE4_1) {
            m_assembler.movd_rr(lo, fpReg);
            m_assembler.pinsrd_rr(hi, fpReg);
        } else {
            m_assembler.movd_rr(lo, fpReg);
            m_assembler.movd_rr(hi, Registers::FPConversionTemp);
            m_assembler.unpcklps_rr(Registers::FPConversionTemp, fpReg);
        }
    }
#endif

    // Split the double in |srcDest| into its (typeReg, dataReg) halves.
    void breakDouble(FPRegisterID srcDest, RegisterID typeReg, RegisterID dataReg) {
#ifdef JS_CPU_X86
        // Move the low 32-bits of the 128-bit XMM register into dataReg.
        // Then, right shift the 128-bit XMM register by 4 bytes.
        // Finally, move the new low 32-bits of the 128-bit XMM register into typeReg.
        m_assembler.movd_rr(srcDest, dataReg);
        m_assembler.psrldq_rr(srcDest, 4);
        m_assembler.movd_rr(srcDest, typeReg);
#elif defined JS_CPU_SPARC
        breakDoubleTo32(srcDest, typeReg, dataReg);
#elif defined JS_CPU_ARM
        // Yes, we are backwards from SPARC.
        fastStoreDouble(srcDest, dataReg, typeReg);
#elif defined JS_CPU_MIPS
#if defined(IS_LITTLE_ENDIAN)
        fastStoreDouble(srcDest, dataReg, typeReg);
#else
        fastStoreDouble(srcDest, typeReg, dataReg);
#endif
#else
        JS_NOT_REACHED("implement this - push double, pop pop is easiest");
#endif
    }

    // Load *dp into |dest| via a pointer immediate; clobbers |scratch|.
    void loadStaticDouble(const double *dp, FPRegisterID dest, RegisterID scratch) {
        move(ImmPtr(dp), scratch);
        loadDouble(Address(scratch), dest);
    }

    // Load a dense-array element, optionally emitting a hole check; the
    // returned jump (valid only when |holeCheck|) is taken on a hole.
    template <typename T>
    Jump fastArrayLoadSlot(T address, bool holeCheck,
                           MaybeRegisterID typeReg, RegisterID dataReg)
    {
        Jump notHole;
        if (typeReg.isSet()) {
            loadTypeTag(address, typeReg.reg());
            if (holeCheck)
                notHole = branch32(Equal, typeReg.reg(), ImmType(JSVAL_TYPE_MAGIC));
        } else if (holeCheck) {
            notHole = branch32(Equal, tagOf(address), ImmType(JSVAL_TYPE_MAGIC));
        }
        loadPayload(address, dataReg);
        return notHole;
    }
};
typedef NunboxAssembler ValueAssembler;
} /* namespace mjit */
} /* namespace js */
#endif

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,528 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_poly_ic_h__ && defined JS_METHODJIT
#define jsjaeger_poly_ic_h__
#include "jscntxt.h"
#include "assembler/assembler/MacroAssembler.h"
#include "assembler/assembler/CodeLocation.h"
#include "js/Vector.h"
#include "methodjit/MethodJIT.h"
#include "methodjit/ICRepatcher.h"
#include "BaseAssembler.h"
#include "RematInfo.h"
#include "BaseCompiler.h"
#include "methodjit/ICLabels.h"
#include "assembler/moco/MocoStubs.h"
namespace js {
namespace mjit {
namespace ic {
/* Maximum number of stubs for a given callsite. */
static const uint32_t MAX_PIC_STUBS = 16;
static const uint32_t MAX_GETELEM_IC_STUBS = 17;
// Outcome of a property lookup attempted while updating an IC.
enum LookupStatus {
    Lookup_Error = 0,    // The lookup failed with an error; propagate failure.
    Lookup_Uncacheable,  // Lookup succeeded but no stub can be attached.
    Lookup_Cacheable,    // Lookup succeeded and a stub may be attached.
    Lookup_NoProperty    // Lookup found no such property.
};
// Common state for all property ICs: the code locations of the inline fast
// path, its rejoin point, and the slow path, plus stub bookkeeping flags.
struct BaseIC : public MacroAssemblerTypedefs {
    // Address of inline fast-path.
    CodeLocationLabel fastPathStart;

    // Address to rejoin to the fast-path.
    CodeLocationLabel fastPathRejoin;

    // Start of the slow path.
    CodeLocationLabel slowPathStart;

    // Slow path stub call.
    CodeLocationCall slowPathCall;

    // Offset from start of stub to jump target of second shape guard as Nitro
    // asm data location. This is 0 if there is only one shape guard in the
    // last stub.
    int32_t secondShapeGuard;

    // Whether or not the callsite has been hit at least once.
    bool hit : 1;
    bool slowCallPatched : 1;

    // Whether getter/setter hooks can be called from IC stubs.
    bool canCallHook : 1;

    // Whether a type barrier is in place for the result of the op.
    bool forcedTypeBarrier : 1;

    // Whether this IC has been disabled.
    bool disabled : 1;

    // Number of stubs generated.
    uint32_t stubsGenerated : 5;

    // Defined in PolyIC.cpp.
    bool shouldUpdate(VMFrame &f);
    void spew(VMFrame &f, const char *event, const char *reason);
    LookupStatus disable(VMFrame &f, const char *reason, void *stub);
    void updatePCCounters(VMFrame &f, Assembler &masm);

  protected:
    // Restore patching/bookkeeping state to that of a freshly-created IC.
    void reset() {
        hit = false;
        slowCallPatched = false;
        forcedTypeBarrier = false;
        disabled = false;
        stubsGenerated = 0;
        secondShapeGuard = 0;
    }
};
// BaseIC extended with ownership of the executable pools its stubs were
// generated into, stored space-efficiently (see the union comment below).
class BasePolyIC : public BaseIC {
    typedef Vector<JSC::ExecutablePool *, 2, SystemAllocPolicy> ExecPoolVector;

    // ExecutablePools that IC stubs were generated into.  Very commonly (eg.
    // 99.5% of BasePolyICs) there are 0 or 1, and there are lots of
    // BasePolyICs, so we space-optimize for that case.  If the bottom bit of
    // the pointer is 0, execPool should be used, and it will be NULL (for 0
    // pools) or non-NULL (for 1 pool).  If the bottom bit of the
    // pointer is 1, taggedExecPools should be used, but only after de-tagging
    // (for 2 or more pools).
    union {
        JSC::ExecutablePool *execPool;      // valid when bottom bit is a 0
        ExecPoolVector *taggedExecPools;    // valid when bottom bit is a 1
    } u;

    // The low pointer bit distinguishes a single pool from a pool vector.
    static bool isTagged(void *p) {
        return !!(intptr_t(p) & 1);
    }

    static ExecPoolVector *tag(ExecPoolVector *p) {
        JS_ASSERT(!isTagged(p));
        return (ExecPoolVector *)(intptr_t(p) | 1);
    }

    static ExecPoolVector *detag(ExecPoolVector *p) {
        JS_ASSERT(isTagged(p));
        return (ExecPoolVector *)(intptr_t(p) & ~1);
    }

    bool areZeroPools()     { return !u.execPool; }
    bool isOnePool()        { return u.execPool && !isTagged(u.execPool); }
    bool areMultiplePools() { return isTagged(u.taggedExecPools); }

    ExecPoolVector *multiplePools() {
        JS_ASSERT(areMultiplePools());
        return detag(u.taggedExecPools);
    }

  public:
    // Take ownership of |pool|, upgrading to a heap vector when a second
    // pool is added. Returns false on OOM (ownership not taken).
    bool addPool(JSContext *cx, JSC::ExecutablePool *pool) {
        if (areZeroPools()) {
            u.execPool = pool;
            return true;
        }
        if (isOnePool()) {
            JSC::ExecutablePool *oldPool = u.execPool;
            JS_ASSERT(!isTagged(oldPool));
            ExecPoolVector *execPools = js_new<ExecPoolVector>(SystemAllocPolicy());
            if (!execPools)
                return false;
            if (!execPools->append(oldPool) || !execPools->append(pool)) {
                js_delete(execPools);
                return false;
            }
            u.taggedExecPools = tag(execPools);
            return true;
        }
        return multiplePools()->append(pool);
    }

  protected:
    // Release every owned pool (and the vector, if any), then reset base state.
    void reset() {
        BaseIC::reset();
        if (areZeroPools()) {
            // Common case: do nothing.
        } else if (isOnePool()) {
            u.execPool->release();
            u.execPool = NULL;
        } else {
            ExecPoolVector *execPools = multiplePools();
            for (size_t i = 0; i < execPools->length(); i++)
                (*execPools)[i]->release();
            js_delete(execPools);
            u.execPool = NULL;
        }
        JS_ASSERT(areZeroPools());
    }
};
// IC for element gets (see GetElement below), covering dense-array,
// typed-array and string-keyed property paths.
struct GetElementIC : public BasePolyIC {
    // On stub entry:
    //   If hasInlineTypeCheck() is true, and inlineTypeCheckPatched is false,
    //     - typeReg contains the type of the |id| parameter.
    //   If hasInlineTypeCheck() is true, and inlineTypeCheckPatched is true,
    //     - typeReg contains the shape of |objReg| iff typeRegHasBaseShape
    //       is true.
    //   Otherwise, typeReg is garbage.
    //
    // On stub exit, typeReg must contain the type of the result value.
    RegisterID typeReg : 5;

    // On stub entry, objReg contains the object pointer for the |obj| parameter.
    // On stub exit, objReg must contain the payload of the result value.
    RegisterID objReg : 5;

    // Offset from the fast path to the inline type check.
    // This is only set if hasInlineTypeCheck() is true.
    unsigned inlineTypeGuard : 8;

    // Offset from the fast path to the inline shape guard. This is always
    // set; if |id| is known to not be int32, then it's an unconditional
    // jump to the slow path.
    unsigned inlineShapeGuard : 8;

    // This is usable if hasInlineTypeGuard() returns true, which implies
    // that a dense array fast path exists. The inline type guard serves as
    // the head of the chain of all string-based element stubs.
    bool inlineTypeGuardPatched : 1;

    // This is always usable, and specifies whether the inline shape guard
    // has been patched. If hasInlineTypeGuard() is true, it guards against
    // a dense array, and guarantees the inline type guard has passed.
    // Otherwise, there is no inline type guard, and the shape guard is just
    // an unconditional jump.
    bool inlineShapeGuardPatched : 1;

    ////////////////////////////////////////////
    // State for string-based property stubs. //
    ////////////////////////////////////////////

    // True if typeReg is guaranteed to have the shape of objReg.
    bool typeRegHasBaseShape : 1;

    // These offsets are used for string-key dependent stubs, such as named
    // property accesses. They are separated from the int-key dependent stubs,
    // in order to guarantee that the id type needs only one guard per type.
    int32_t atomGuard : 8;          // optional, non-zero if present
    int32_t firstShapeGuard : 11;   // always set
    int32_t secondShapeGuard : 11;  // optional, non-zero if present

    bool hasLastStringStub : 1;
    JITCode lastStringStub;

    // A limited ValueRemat instance. It may contains either:
    //  1) A constant, or
    //  2) A known type and data reg, or
    //  3) A data reg.
    // The sync bits are not set, and the type reg is never set and should not
    // be used, as it is encapsulated more accurately in |typeReg|. Also, note
    // carefully that the data reg is immutable.
    ValueRemat idRemat;

    bool hasInlineTypeGuard() const {
        return !idRemat.isTypeKnown();
    }
    bool shouldPatchInlineTypeGuard() {
        return hasInlineTypeGuard() && !inlineTypeGuardPatched;
    }
    bool shouldPatchUnconditionalShapeGuard() {
        // The shape guard is only unconditional if the type is known to not
        // be an int32.
        if (idRemat.isTypeKnown() && idRemat.knownType() != JSVAL_TYPE_INT32)
            return !inlineShapeGuardPatched;
        return false;
    }

    // Defined in PolyIC.cpp.
    void purge(Repatcher &repatcher);
    LookupStatus update(VMFrame &f, HandleObject obj, HandleValue v, HandleId id, MutableHandleValue vp);
    LookupStatus attachGetProp(VMFrame &f, HandleObject obj, HandleValue v, HandlePropertyName name,
                               MutableHandleValue vp);
    LookupStatus attachTypedArray(VMFrame &f, HandleObject obj, HandleValue v, HandleId id,
                                  MutableHandleValue vp);
    LookupStatus disable(VMFrame &f, const char *reason);
    LookupStatus error(JSContext *cx);
    bool shouldUpdate(VMFrame &f);

  protected:
    // Reset patching state to that of a fresh IC.
    void reset() {
        BasePolyIC::reset();
        inlineTypeGuardPatched = false;
        inlineShapeGuardPatched = false;
        typeRegHasBaseShape = false;
        hasLastStringStub = false;
    }
};
// IC for element stores (see SetElement below), covering dense-array and
// typed-array paths, including an out-of-line hole stub.
struct SetElementIC : public BaseIC {
    // On stub entry:
    //   objReg contains the payload of the |obj| parameter.
    // On stub exit:
    //   objReg may be clobbered.
    RegisterID objReg : 5;

    // Information on how to rematerialize |objReg|.
    int32_t objRemat : MIN_STATE_REMAT_BITS;

    // Offset from the start of the fast path to the inline shape guard.
    unsigned inlineShapeGuard : 6;

    // True if the shape guard has been patched; false otherwise.
    bool inlineShapeGuardPatched : 1;

    // Offset from the start of the fast path to the inline hole guard.
    unsigned inlineHoleGuard : 8;

    // True if the capacity guard has been patched; false otherwise.
    bool inlineHoleGuardPatched : 1;

    // True if this is from a strict-mode script.
    bool strictMode : 1;

    // A bitmask of registers that are volatile and must be preserved across
    // stub calls inside the IC.
    uint32_t volatileMask;

    // If true, then keyValue contains a constant index value >= 0. Otherwise,
    // keyReg contains a dynamic integer index in any range.
    bool hasConstantKey : 1;
    union {
        RegisterID keyReg;
        int32_t keyValue;
    };

    // Rematerialize information about the value being stored.
    ValueRemat vr;

    // Optional executable pool for the out-of-line hole stub.
    JSC::ExecutablePool *execPool;

    // Defined in PolyIC.cpp.
    void purge(Repatcher &repatcher);
    LookupStatus attachTypedArray(VMFrame &f, JSObject *obj, int32_t key);
    LookupStatus update(VMFrame &f, const Value &objval, const Value &idval);
    LookupStatus disable(VMFrame &f, const char *reason);
    LookupStatus error(JSContext *cx);
    bool shouldUpdate(VMFrame &f);

  protected:
    // Release the hole stub's pool (if any) and reset patching state.
    void reset() {
        BaseIC::reset();
        if (execPool) {
            execPool->release();
            execPool = NULL;
        }
        inlineShapeGuardPatched = false;
        inlineHoleGuardPatched = false;
    }
};
// Polymorphic inline cache for a named-property operation; the ops covered
// are enumerated by Kind below.
struct PICInfo : public BasePolyIC {
    PICInfo() { reset(); }

    // Operation this is a PIC for.
    enum Kind
#ifdef _MSC_VER
    : uint8_t
#endif
    {
        GET,        // JSOP_GETPROP
        SET,        // JSOP_SETPROP, JSOP_SETNAME
        NAME,       // JSOP_NAME
        BIND,       // JSOP_BINDNAME
        XNAME       // JSOP_GETXPROP
    };

    // |get| is active for GET kinds, |vr| for SET kinds.
    union {
        struct {
            RegisterID typeReg : 5;   // reg used for checking type
            bool hasTypeCheck : 1;    // type check and reg are present

            // Reverse offset from slowPathStart to the type check slow path.
            int32_t typeCheckOffset;
        } get;
        ValueRemat vr;
    } u;

    // Address of the start of the last generated stub, if any. Note that this
    // does not correctly overlay with the allocated memory; it does however
    // overlay the portion that may need to be patched, which is good enough.
    JITCode lastStubStart;

    // Return the start address of the last path in this PIC, which is the
    // inline path if no stubs have been generated yet.
    CodeLocationLabel lastPathStart() {
        if (!stubsGenerated)
            return fastPathStart;
        return CodeLocationLabel(lastStubStart.start());
    }

    CodeLocationLabel getFastShapeGuard() {
        return fastPathStart.labelAtOffset(shapeGuard);
    }

    CodeLocationLabel getSlowTypeCheck() {
        JS_ASSERT(isGet());
        return slowPathStart.labelAtOffset(u.get.typeCheckOffset);
    }

    // Return a JITCode block corresponding to the code memory to attach a
    // new stub to.
    JITCode lastCodeBlock(JITChunk *chunk) {
        if (!stubsGenerated)
            return JITCode(chunk->code.m_code.executableAddress(), chunk->code.m_size);
        return lastStubStart;
    }

    void updateLastPath(LinkerHelper &linker, Label label) {
        CodeLocationLabel loc = linker.locationOf(label);
        lastStubStart = JITCode(loc.executableAddress(), linker.size());
    }

    Kind kind : 3;

    // True if register R holds the base object shape along exits from the
    // last stub.
    bool shapeRegHasBaseShape : 1;

    // If set, at least one lookup was uncacheable (no stub was generated).
    bool hadUncacheable : 1;

    // State flags.
    bool inlinePathPatched : 1;   // inline path has been patched

    RegisterID shapeReg : 5;      // also the out type reg
    RegisterID objReg : 5;        // also the out data reg

    // Whether type properties need to be updated to reflect generated stubs.
    bool typeMonitored : 1;

    // For GET caches, whether the access may use the property cache.
    bool cached : 1;

    // Offset from start of fast path to initial shape guard.
    uint32_t shapeGuard;

    inline bool isSet() const {
        return kind == SET;
    }
    inline bool isGet() const {
        return kind == GET;
    }
    inline bool isBind() const {
        return kind == BIND;
    }
    inline bool isScopeName() const {
        return kind == NAME || kind == XNAME;
    }
    inline RegisterID typeReg() {
        JS_ASSERT(isGet());
        return u.get.typeReg;
    }
    inline bool hasTypeCheck() {
        JS_ASSERT(isGet());
        return u.get.hasTypeCheck;
    }
    inline bool shapeNeedsRemat() {
        return !shapeRegHasBaseShape;
    }

    // Kind-specific label sets; the active member is selected by |kind|.
    union {
        GetPropLabels getPropLabels_;
        SetPropLabels setPropLabels_;
        BindNameLabels bindNameLabels_;
        ScopeNameLabels scopeNameLabels_;
    };

    void setLabels(const ic::GetPropLabels &labels) {
        JS_ASSERT(isGet());
        getPropLabels_ = labels;
    }
    void setLabels(const ic::SetPropLabels &labels) {
        JS_ASSERT(isSet());
        setPropLabels_ = labels;
    }
    void setLabels(const ic::BindNameLabels &labels) {
        JS_ASSERT(kind == BIND);
        bindNameLabels_ = labels;
    }
    void setLabels(const ic::ScopeNameLabels &labels) {
        JS_ASSERT(kind == NAME || kind == XNAME);
        scopeNameLabels_ = labels;
    }

    GetPropLabels &getPropLabels() {
        JS_ASSERT(isGet());
        return getPropLabels_;
    }
    SetPropLabels &setPropLabels() {
        JS_ASSERT(isSet());
        return setPropLabels_;
    }
    BindNameLabels &bindNameLabels() {
        JS_ASSERT(kind == BIND);
        return bindNameLabels_;
    }
    ScopeNameLabels &scopeNameLabels() {
        JS_ASSERT(kind == NAME || kind == XNAME);
        return scopeNameLabels_;
    }

    // Where in the script did we generate this PIC?
    jsbytecode *pc;

    // Index into the script's atom table.
    PropertyName *name;

  private:
    Shape *inlinePathShape_;

  public:
    void purge(Repatcher &repatcher);

    void setInlinePathShape(Shape *shape) {
        JS_ASSERT(!inlinePathShape_);
        inlinePathShape_ = shape;
    }

    // The single shape this PIC guards on, or NULL once it has been
    // disabled, seen an uncacheable lookup, or generated stubs.
    Shape *getSingleShape() {
        if (disabled || hadUncacheable || stubsGenerated > 0)
            return NULL;
        return inlinePathShape_;
    }

  protected:
    // Reset the data members to the state of a fresh PIC before any patching
    // or stub generation was done.
    void reset() {
        BasePolyIC::reset();
        inlinePathPatched = false;
        shapeRegHasBaseShape = true;
        hadUncacheable = false;
        inlinePathShape_ = NULL;
    }
};
#ifdef JS_POLYIC
void JS_FASTCALL GetProp(VMFrame &f, ic::PICInfo *);
void JS_FASTCALL SetPropOrName(VMFrame &f, ic::PICInfo *);
void JS_FASTCALL Name(VMFrame &f, ic::PICInfo *);
void JS_FASTCALL XName(VMFrame &f, ic::PICInfo *);
void JS_FASTCALL BindName(VMFrame &f, ic::PICInfo *);
void JS_FASTCALL GetElement(VMFrame &f, ic::GetElementIC *);
template <JSBool strict> void JS_FASTCALL SetElement(VMFrame &f, ic::SetElementIC *);
#endif
} /* namespace ic */
} /* namespace mjit */
} /* namespace js */
#endif /* jsjaeger_poly_ic_h__ */

Просмотреть файл

@ -1,411 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_assembler64_h__ && defined JS_METHODJIT && defined JS_PUNBOX64
#define jsjaeger_assembler64_h__
#include "assembler/assembler/MacroAssembler.h"
#include "js/Value.h"
#include "methodjit/MachineRegs.h"
#include "methodjit/RematInfo.h"
namespace js {
namespace mjit {
// 64-bit immediate: smuggles the raw bits through the assembler's ImmPtr.
struct Imm64 : JSC::MacroAssembler::ImmPtr
{
    Imm64(uint64_t u)
      : ImmPtr((const void *)u)
    { }
};
/* Tag stored in shifted format. */
struct ImmTag : JSC::MacroAssembler::ImmPtr
{
    // shtag is a JSValueShiftedTag: the tag already shifted into the high
    // bits of a 64-bit punboxed Value word.
    ImmTag(JSValueShiftedTag shtag)
      : ImmPtr((const void *)shtag)
    { }
};
// Shifted-tag immediate built from a JSValueType. Doubles are excluded:
// they occupy a tag range rather than a single shifted tag.
struct ImmType : ImmTag
{
    ImmType(JSValueType type)
      : ImmTag(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type)))
    {
        JS_ASSERT(type > JSVAL_TYPE_DOUBLE);
    }
};
// Immediate holding only the payload bits of a punboxed Value.
struct ImmPayload : Imm64
{
    ImmPayload(uint64_t payload)
      : Imm64(payload)
    { }
};
/*
 * Assembler helpers for the 64-bit "punbox" Value layout: a jsval fits in a
 * single machine word, with the type tag in the high bits (shifted format,
 * JSVAL_TAG_MASK) and the payload in the low bits (JSVAL_PAYLOAD_MASK).
 * Methods that need a scratch word clobber Registers::ValueReg.
 */
class PunboxAssembler : public JSC::MacroAssembler
{
  public:
    // Payload shares the Value word, so it lives at offset 0.
    static const uint32_t PAYLOAD_OFFSET = 0;

    // Values are 8 bytes: indexed Value accesses scale by 8.
    static const JSC::MacroAssembler::Scale JSVAL_SCALE = JSC::MacroAssembler::TimesEight;

    // On punbox the payload address is the Value address itself.
    template <typename T>
    T payloadOf(T address) {
        return address;
    }

    template <typename T>
    T valueOf(T address) {
        return address;
    }

    // Load the fixed slot 'slot' of the object in objReg, split into
    // type/payload registers.
    void loadInlineSlot(RegisterID objReg, uint32_t slot,
                        RegisterID typeReg, RegisterID dataReg) {
        Address address(objReg, JSObject::getFixedSlotOffset(slot));
        loadValueAsComponents(address, typeReg, dataReg);
    }

    // Load a whole Value word.
    template <typename T>
    void loadValue(T address, RegisterID dst) {
        loadPtr(address, dst);
    }

    // Mask a Value register down to its shifted type tag.
    void convertValueToType(RegisterID val) {
        andPtr(Registers::TypeMaskReg, val);
    }

    // Mask a Value register down to its payload bits.
    void convertValueToPayload(RegisterID val) {
        andPtr(Registers::PayloadMaskReg, val);
    }

    // Returns a label after the one Value load.
    template <typename T>
    Label loadValueAsComponents(T address, RegisterID type, RegisterID payload) {
        loadValue(address, type);
        Label l = label();

        // payload = value & PAYLOAD_MASK; type = value ^ payload (tag bits).
        move(Registers::PayloadMaskReg, payload);
        andPtr(type, payload);
        xorPtr(payload, type);

        return l;
    }

    // Constant variant: split the tag/payload at compile time.
    void loadValueAsComponents(const Value &val, RegisterID type, RegisterID payload) {
        uint64_t bits = JSVAL_TO_IMPL(val).asBits;
        move(Imm64(bits & JSVAL_TAG_MASK), type);
        move(Imm64(bits & JSVAL_PAYLOAD_MASK), payload);
    }

    void loadValuePayload(const Value &val, RegisterID payload) {
        move(Imm64(JSVAL_TO_IMPL(val).asBits & JSVAL_PAYLOAD_MASK), payload);
    }

    /*
     * Load a (64b) js::Value from 'address' into 'type' and 'payload', and
     * return a label which can be used by
     * Repatcher::patchAddressOffsetForValue to patch the address offset.
     */
    Label loadValueWithAddressOffsetPatch(Address address, RegisterID type, RegisterID payload) {
        return loadValueAsComponents(address, type, payload);
    }

    // Recombine type and payload into ValueReg and store the whole word.
    template <typename T>
    void storeValueFromComponents(RegisterID type, RegisterID payload, T address) {
        move(type, Registers::ValueReg);
        orPtr(payload, Registers::ValueReg);
        storeValue(Registers::ValueReg, address);
    }

    template <typename T>
    void storeValueFromComponents(ImmTag type, RegisterID payload, T address) {
        move(type, Registers::ValueReg);
        orPtr(payload, Registers::ValueReg);
        storeValue(Registers::ValueReg, address);
    }

    /*
     * Store a (64b) js::Value from 'type' and 'payload' into 'address', and
     * return a label which can be used by
     * Repatcher::patchAddressOffsetForValueStore to patch the address offset.
     */
    DataLabel32 storeValueWithAddressOffsetPatch(RegisterID type, RegisterID payload, Address address) {
        move(type, Registers::ValueReg);
        orPtr(payload, Registers::ValueReg);
        return storePtrWithAddressOffsetPatch(Registers::ValueReg, address);
    }

    /* Overload for constant type. */
    DataLabel32 storeValueWithAddressOffsetPatch(ImmTag type, RegisterID payload, Address address) {
        move(type, Registers::ValueReg);
        orPtr(payload, Registers::ValueReg);
        return storePtrWithAddressOffsetPatch(Registers::ValueReg, address);
    }

    /* Overload for constant type and constant data. */
    DataLabel32 storeValueWithAddressOffsetPatch(const Value &v, Address address) {
        move(ImmPtr(JSVAL_TO_IMPL(v).asPtr), Registers::ValueReg);
        return storePtrWithAddressOffsetPatch(Registers::ValueReg, valueOf(address));
    }

    /* Overloaded for store with value remat info. */
    DataLabel32 storeValueWithAddressOffsetPatch(const ValueRemat &vr, Address address) {
        JS_ASSERT(!vr.isFPRegister());
        if (vr.isConstant()) {
            return storeValueWithAddressOffsetPatch(vr.value(), address);
        } else if (vr.isTypeKnown()) {
            ImmType type(vr.knownType());
            RegisterID data(vr.dataReg());
            return storeValueWithAddressOffsetPatch(type, data, address);
        } else {
            RegisterID type(vr.typeReg());
            RegisterID data(vr.dataReg());
            return storeValueWithAddressOffsetPatch(type, data, address);
        }
    }

    // Load just the (shifted) type tag of a Value.
    template <typename T>
    void loadTypeTag(T address, RegisterID reg) {
        loadValue(address, reg);
        convertValueToType(reg);
    }

    // Store a new type tag, preserving the existing payload. Clobbers ValueReg.
    template <typename T>
    void storeTypeTag(ImmTag imm, T address) {
        loadPayload(address, Registers::ValueReg);
        orPtr(imm, Registers::ValueReg);
        storePtr(Registers::ValueReg, valueOf(address));
    }

    template <typename T>
    void storeTypeTag(RegisterID reg, T address) {
        /* The type tag must be stored in shifted format. */
        loadPayload(address, Registers::ValueReg);
        orPtr(reg, Registers::ValueReg);
        storePtr(Registers::ValueReg, valueOf(address));
    }

    // Load just the payload bits of a Value.
    template <typename T>
    void loadPayload(T address, RegisterID reg) {
        loadValue(address, reg);
        convertValueToPayload(reg);
    }

    // Store a new payload, preserving the existing type tag. Clobbers ValueReg.
    template <typename T>
    void storePayload(RegisterID reg, T address) {
        /* Not for doubles. */
        loadTypeTag(address, Registers::ValueReg);
        orPtr(reg, Registers::ValueReg);
        storePtr(Registers::ValueReg, valueOf(address));
    }

    template <typename T>
    void storePayload(ImmPayload imm, T address) {
        /* Not for doubles. */
        storePtr(imm, valueOf(address));
    }

    template <typename T>
    void storeValue(RegisterID reg, T address) {
        storePtr(reg, valueOf(address));
    }

    template <typename T>
    void storeValue(const Value &v, T address) {
        storePtr(Imm64(JSVAL_TO_IMPL(v).asBits), valueOf(address));
    }

    // Store a rematerializable value, choosing the cheapest form available.
    template <typename T>
    void storeValue(const ValueRemat &vr, T address) {
        if (vr.isConstant())
            storeValue(vr.value(), address);
        else if (vr.isFPRegister())
            storeDouble(vr.fpReg(), address);
        else if (vr.isTypeKnown())
            storeValueFromComponents(ImmType(vr.knownType()), vr.dataReg(), address);
        else
            storeValueFromComponents(vr.typeReg(), vr.dataReg(), address);
    }

    // Branch taken when the slot holds the magic "hole" value.
    template <typename T>
    Jump guardNotHole(T address) {
        loadTypeTag(address, Registers::ValueReg);
        return branchPtr(Equal, Registers::ValueReg, ImmType(JSVAL_TYPE_MAGIC));
    }

    // Private pointers are stored in shifted form; the left shift undoes it.
    void loadPrivate(Address privAddr, RegisterID to) {
        loadPtr(privAddr, to);
        lshiftPtr(Imm32(1), to);
    }

    void loadObjPrivate(RegisterID base, RegisterID to, uint32_t nfixed) {
        Address priv(base, JSObject::getPrivateDataOffset(nfixed));
        loadPtr(priv, to);
    }

    Jump testNull(Condition cond, RegisterID reg) {
        return branchPtr(cond, reg, ImmTag(JSVAL_SHIFTED_TAG_NULL));
    }

    Jump testNull(Condition cond, Address address) {
        loadValue(address, Registers::ValueReg);
        return testNull(cond, Registers::ValueReg);
    }

    Jump testUndefined(Condition cond, RegisterID reg) {
        return branchPtr(cond, reg, ImmTag(JSVAL_SHIFTED_TAG_UNDEFINED));
    }

    Jump testUndefined(Condition cond, Address address) {
        loadValue(address, Registers::ValueReg);
        return testUndefined(cond, Registers::ValueReg);
    }

    Jump testInt32(Condition cond, RegisterID reg) {
        return branchPtr(cond, reg, ImmTag(JSVAL_SHIFTED_TAG_INT32));
    }

    Jump testInt32(Condition cond, Address address) {
        loadTypeTag(address, Registers::ValueReg);
        return testInt32(cond, Registers::ValueReg);
    }

    // Number tags sit below an exclusive upper bound, so equality becomes an
    // unsigned range check against that bound.
    Jump testNumber(Condition cond, RegisterID reg) {
        cond = (cond == Equal) ? Below : AboveOrEqual;
        return branchPtr(cond, reg,
                         ImmTag(JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_NUMBER_SET));
    }

    Jump testNumber(Condition cond, Address address) {
        loadValue(address, Registers::ValueReg);
        return testNumber(cond, Registers::ValueReg);
    }

    // Same range-check trick for the primitive tag set.
    Jump testPrimitive(Condition cond, RegisterID reg) {
        cond = (cond == Equal) ? Below : AboveOrEqual;
        return branchPtr(cond, reg,
                         ImmTag(JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_PRIMITIVE_SET));
    }

    Jump testPrimitive(Condition cond, Address address) {
        loadValue(address, Registers::ValueReg);
        return testPrimitive(cond, Registers::ValueReg);
    }

    // The object tag is the highest, so "is object" is an AboveOrEqual check.
    Jump testObject(Condition cond, RegisterID reg) {
        cond = (cond == Equal) ? AboveOrEqual : Below;
        return branchPtr(cond, reg, ImmTag(JSVAL_SHIFTED_TAG_OBJECT));
    }

    Jump testObject(Condition cond, Address address) {
        loadValue(address, Registers::ValueReg);
        return testObject(cond, Registers::ValueReg);
    }

    Jump testGCThing(RegisterID reg) {
        return branchPtr(AboveOrEqual, reg, ImmTag(JSVAL_LOWER_INCL_SHIFTED_TAG_OF_GCTHING_SET));
    }

    Jump testGCThing(Address address) {
        loadValue(address, Registers::ValueReg);
        return branchPtr(AboveOrEqual, Registers::ValueReg,
                         ImmTag(JSVAL_LOWER_INCL_SHIFTED_TAG_OF_GCTHING_SET));
    }

    // Doubles occupy everything at or below the maximum double tag.
    Jump testDouble(Condition cond, RegisterID reg) {
        cond = (cond == Equal) ? BelowOrEqual : Above;
        return branchPtr(cond, reg, ImmTag(JSVAL_SHIFTED_TAG_MAX_DOUBLE));
    }

    Jump testDouble(Condition cond, Address address) {
        loadValue(address, Registers::ValueReg);
        return testDouble(cond, Registers::ValueReg);
    }

    Jump testBoolean(Condition cond, RegisterID reg) {
        return branchPtr(cond, reg, ImmTag(JSVAL_SHIFTED_TAG_BOOLEAN));
    }

    Jump testBoolean(Condition cond, Address address) {
        loadTypeTag(address, Registers::ValueReg);
        return testBoolean(cond, Registers::ValueReg);
    }

    Jump testMagic(Condition cond, RegisterID reg) {
        return branchPtr(cond, reg, ImmTag(JSVAL_SHIFTED_TAG_MAGIC));
    }

    Jump testString(Condition cond, RegisterID reg) {
        return branchPtr(cond, reg, ImmTag(JSVAL_SHIFTED_TAG_STRING));
    }

    Jump testString(Condition cond, Address address) {
        loadTypeTag(address, Registers::ValueReg);
        return testString(cond, Registers::ValueReg);
    }

    // Compare a stored Value against the private-value encoding of 'ptr'.
    Jump testPrivate(Condition cond, Address address, void *ptr) {
        uint64_t valueBits = PrivateValue(ptr).asRawBits();
        return branchPtr(cond, address, ImmPtr((void *) valueBits));
    }

    // Single 64-bit compare suffices: equal Values have equal words.
    void compareValue(Address one, Address two, RegisterID T0, RegisterID T1,
                      Vector<Jump> *mismatches) {
        loadValue(one, T0);
        mismatches->append(branchPtr(NotEqual, T0, two));
    }

    // Move raw double bits into typeReg, then split into tag/payload
    // (same mask/xor dance as loadValueAsComponents).
    void breakDouble(FPRegisterID srcDest, RegisterID typeReg, RegisterID dataReg) {
        m_assembler.movq_rr(srcDest, typeReg);
        move(Registers::PayloadMaskReg, dataReg);
        andPtr(typeReg, dataReg);
        xorPtr(dataReg, typeReg);
    }

    // Recombine tag/payload and move the word into an FP register.
    void fastLoadDouble(RegisterID dataReg, RegisterID typeReg, FPRegisterID fpReg) {
        move(typeReg, Registers::ValueReg);
        orPtr(dataReg, Registers::ValueReg);
        m_assembler.movq_rr(Registers::ValueReg, fpReg);
    }

    // Materialize *dp via its bit pattern; the union avoids aliasing UB.
    void loadStaticDouble(const double *dp, FPRegisterID dest, RegisterID scratch) {
        union DoublePun {
            double d;
            uint64_t u;
        } pun;
        pun.d = *dp;
        move(ImmPtr(reinterpret_cast<void*>(pun.u)), scratch);
        m_assembler.movq_rr(scratch, dest);
    }

    // Load an array slot, optionally branching on the hole value. The
    // returned Jump is meaningful only when holeCheck is true.
    template <typename T>
    Jump fastArrayLoadSlot(T address, bool holeCheck,
                           MaybeRegisterID typeReg, RegisterID dataReg)
    {
        Jump notHole;
        if (typeReg.isSet()) {
            loadValueAsComponents(address, typeReg.reg(), dataReg);
            if (holeCheck)
                notHole = branchPtr(Equal, typeReg.reg(), ImmType(JSVAL_TYPE_MAGIC));
        } else {
            if (holeCheck) {
                loadTypeTag(address, Registers::ValueReg);
                notHole = branchPtr(Equal, Registers::ValueReg, ImmType(JSVAL_TYPE_MAGIC));
            }
            loadPayload(address, dataReg);
        }
        return notHole;
    }
};
typedef PunboxAssembler ValueAssembler;
} /* namespace mjit */
} /* namespace js */
#endif

Просмотреть файл

@ -1,372 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jsjaeger_remat_h__ && defined JS_METHODJIT
#define jsjaeger_remat_h__
#include "jscntxt.h"
#include "MachineRegs.h"
#include "assembler/assembler/MacroAssembler.h"
#include "vm/Stack.h"
namespace js {
namespace mjit {
// Lightweight, union-able components of FrameEntry.
struct StateRemat {
    typedef JSC::MacroAssembler::RegisterID RegisterID;
    typedef JSC::MacroAssembler::Address Address;

    // Sentinel offset marking a compile-time constant; chosen below any
    // encodable argument-slot offset so it cannot collide with case (1) below.
    static const int32_t CONSTANT = -int(UINT16_LIMIT * sizeof(Value));

    // This union encodes the fastest rematerialization of a non-constant
    // value. The |offset| field can be used to recover information
    // without this struct's helpers:
    //  1) A value in (CONSTANT, 0) is an argument slot.
    //  2) A value in [0, fp) is a register ID.
    //  3) A value in [fp, inf) is a local slot.
    union {
        RegisterID reg_;
        int32_t offset_;
    };

    // Rebuild from the packed int32 form produced by toInt32().
    static StateRemat FromInt32(int32_t i32) {
        StateRemat sr;
        sr.offset_ = i32;
        return sr;
    }

    static StateRemat FromRegister(RegisterID reg) {
        StateRemat sr;
        sr.reg_ = reg;
        JS_ASSERT(sr.inRegister());
        return sr;
    }

    // Only frame-relative addresses can be encoded (base must be JSFrameReg).
    static StateRemat FromAddress(Address address) {
        JS_ASSERT(address.base == JSFrameReg);
        StateRemat sr;
        sr.offset_ = address.offset;
        JS_ASSERT(sr.inMemory());
        return sr;
    }

    // Minimum number of bits needed to compactly store the int32_t
    // representation in a struct or union. This prevents bloating the IC
    // structs by an extra 8 bytes in some cases. 16 bits are needed to encode
    // the largest local:
    //   ((UINT16_LIMIT - 1) * sizeof(Value) + sizeof(StackFrame),
    // And an extra bit for the sign on arguments.
#define MIN_STATE_REMAT_BITS 21

    bool isConstant() const { return offset_ == CONSTANT; }
    bool inRegister() const { return offset_ >= 0 &&
                                     offset_ <= int32_t(JSC::MacroAssembler::TotalRegisters); }
    bool inMemory() const {
        return offset_ >= int32_t(sizeof(StackFrame)) ||
               offset_ < 0;
    }

    // Packed form, suitable for storage in a MIN_STATE_REMAT_BITS bitfield.
    int32_t toInt32() const { return offset_; }
    Address address() const {
        JS_ASSERT(inMemory());
        return Address(JSFrameReg, offset_);
    }
    RegisterID reg() const {
        JS_ASSERT(inRegister());
        return reg_;
    }
};
/* Lightweight version of FrameEntry. */
/*
 * Lightweight version of FrameEntry: describes a Value as either a constant,
 * a double in an FP register, or a type/payload pair each encoded as a
 * packed StateRemat. Which union arm is active is determined by the
 * isConstant_/isFPRegister_ flags (and u.s.isTypeKnown_ within 's').
 */
struct ValueRemat {
    typedef JSC::MacroAssembler::RegisterID RegisterID;
    typedef JSC::MacroAssembler::FPRegisterID FPRegisterID;
    union {
        struct {
            union {
                int32_t typeRemat_;       // packed StateRemat for the type
                JSValueType knownType_;   // or the statically-known type
            } type;
            // Packed StateRemat for the payload; see MIN_STATE_REMAT_BITS.
            int32_t dataRemat_  : MIN_STATE_REMAT_BITS;
            bool isTypeKnown_   : 1;
        } s;
        jsval v_;            // active when isConstant_
        FPRegisterID fpreg_; // active when isFPRegister_
    } u;
    bool isConstant_    : 1;
    bool isFPRegister_  : 1;
    bool isDataSynced   : 1;
    bool isTypeSynced   : 1;

    static ValueRemat FromConstant(const Value &v) {
        ValueRemat vr;
        vr.isConstant_ = true;
        vr.isFPRegister_ = false;
        vr.u.v_ = v;
        return vr;
    }
    static ValueRemat FromFPRegister(FPRegisterID fpreg) {
        ValueRemat vr;
        vr.isConstant_ = false;
        vr.isFPRegister_ = true;
        vr.u.fpreg_ = fpreg;
        return vr;
    }
    static ValueRemat FromKnownType(JSValueType type, RegisterID dataReg) {
        ValueRemat vr;
        vr.isConstant_ = false;
        vr.isFPRegister_ = false;
        vr.u.s.type.knownType_ = type;
        vr.u.s.isTypeKnown_ = true;
        vr.u.s.dataRemat_ = StateRemat::FromRegister(dataReg).toInt32();

        // Assert bitfields are okay.
        JS_ASSERT(vr.dataReg() == dataReg);
        return vr;
    }
    static ValueRemat FromRegisters(RegisterID typeReg, RegisterID dataReg) {
        ValueRemat vr;
        vr.isConstant_ = false;
        vr.isFPRegister_ = false;
        vr.u.s.isTypeKnown_ = false;
        vr.u.s.type.typeRemat_ = StateRemat::FromRegister(typeReg).toInt32();
        vr.u.s.dataRemat_ = StateRemat::FromRegister(dataReg).toInt32();

        // Assert bitfields are okay.
        JS_ASSERT(vr.dataReg() == dataReg);
        JS_ASSERT(vr.typeReg() == typeReg);
        return vr;
    }

    FPRegisterID fpReg() const {
        JS_ASSERT(isFPRegister());
        return u.fpreg_;
    }
    RegisterID dataReg() const {
        JS_ASSERT(!isConstant() && !isFPRegister());
        return dataRemat().reg();
    }
    RegisterID typeReg() const {
        JS_ASSERT(!isTypeKnown());
        return typeRemat().reg();
    }

    bool isConstant() const { return isConstant_; }
    bool isFPRegister() const { return isFPRegister_; }
    // Constants and FP doubles always have a known type.
    bool isTypeKnown() const { return isConstant() || isFPRegister() || u.s.isTypeKnown_; }

    StateRemat dataRemat() const {
        JS_ASSERT(!isConstant());
        return StateRemat::FromInt32(u.s.dataRemat_);
    }
    StateRemat typeRemat() const {
        JS_ASSERT(!isTypeKnown());
        return StateRemat::FromInt32(u.s.type.typeRemat_);
    }

    Value value() const {
        JS_ASSERT(isConstant());
        return u.v_;
    }
    JSValueType knownType() const {
        JS_ASSERT(isTypeKnown());
        if (isConstant()) {
            const Value v = value();
            if (v.isDouble())
                return JSVAL_TYPE_DOUBLE;
            return v.extractNonDoubleType();
        }
        if (isFPRegister())
            return JSVAL_TYPE_DOUBLE;
        return u.s.type.knownType_;
    }
    bool isType(JSValueType type_) const {
        return isTypeKnown() && knownType() == type_;
    }
};
/*
* Describes how to rematerialize a value during compilation.
*/
struct RematInfo {
    typedef JSC::MacroAssembler::RegisterID RegisterID;
    typedef JSC::MacroAssembler::FPRegisterID FPRegisterID;

    enum SyncState {
        SYNCED,
        UNSYNCED
    };

    enum RematType {
        TYPE,
        DATA
    };

    /* Physical location. */
    enum PhysLoc {
        /*
         * Backing bits are in memory. No fast remat.
         */
        PhysLoc_Memory = 0,

        /* Backing bits are known at compile time. */
        PhysLoc_Constant,

        /* Backing bits are in a general purpose register. */
        PhysLoc_Register,

        /* Backing bits are part of a floating point register. */
        PhysLoc_FPRegister,

        /* Backing bits are invalid/unknown. */
        PhysLoc_Invalid
    };

    void setRegister(RegisterID reg) {
        reg_ = reg;
        location_ = PhysLoc_Register;
    }

    RegisterID reg() const {
        JS_ASSERT(inRegister());
        return reg_;
    }

    void setFPRegister(FPRegisterID reg) {
        fpreg_ = reg;
        location_ = PhysLoc_FPRegister;
    }

    FPRegisterID fpreg() const {
        JS_ASSERT(inFPRegister());
        return fpreg_;
    }

    // Memory-backed values are by definition in sync with their slot.
    void setMemory() {
        location_ = PhysLoc_Memory;
        sync_ = SYNCED;
    }

    // In DEBUG builds poison the location so stale uses assert; in release
    // builds this is a no-op.
#ifdef DEBUG
    void invalidate() {
        location_ = PhysLoc_Invalid;
    }
#else
    void invalidate() {}
#endif

    void setConstant() { location_ = PhysLoc_Constant; }

    bool isConstant() const {
        JS_ASSERT(location_ != PhysLoc_Invalid);
        return location_ == PhysLoc_Constant;
    }

    bool inRegister() const {
        JS_ASSERT(location_ != PhysLoc_Invalid);
        return location_ == PhysLoc_Register;
    }

    bool inFPRegister() const {
        JS_ASSERT(location_ != PhysLoc_Invalid);
        return location_ == PhysLoc_FPRegister;
    }

    bool inMemory() const {
        JS_ASSERT(location_ != PhysLoc_Invalid);
        return location_ == PhysLoc_Memory;
    }

    bool synced() const { return sync_ == SYNCED; }
    void sync() {
        JS_ASSERT(!synced());
        sync_ = SYNCED;
    }
    void unsync() {
        sync_ = UNSYNCED;
    }

    // Copy location from another RematInfo. Copying reg_ covers both union
    // arms; the static assert guarantees they are the same size.
    void inherit(const RematInfo &other) {
        JS_STATIC_ASSERT(sizeof(RegisterID) == sizeof(FPRegisterID));
        reg_ = other.reg_;
        location_ = other.location_;
    }

  private:
    union {
        /* Set if location is PhysLoc_Register. */
        RegisterID reg_;

        /*
         * Set if location is PhysLoc_FPRegister. This must be the data for a FE,
         * and the known type is JSVAL_TYPE_DOUBLE.
         */
        FPRegisterID fpreg_;
    };

    /* Remat source. */
    PhysLoc location_;

    /* Sync state. */
    SyncState sync_;
};
// Optional wrapper around a register ID: either holds a register or is empty.
// T is one of the assembler's register-ID enums.
template <class T>
class MaybeRegister {
  public:
    // Empty: no register assigned yet.
    MaybeRegister()
      : value_((T)0), hasValue_(false)
    { }

    // Wrap an existing register.
    MaybeRegister(T reg)
      : value_(reg), hasValue_(true)
    { }

    // Fetch the register; only legal when isSet().
    inline T reg() const {
        JS_ASSERT(hasValue_);
        return value_;
    }

    // Assign (or reassign) the wrapped register.
    inline void setReg(T r) {
        value_ = r;
        hasValue_ = true;
    }

    // Whether a register is currently held.
    inline bool isSet() const {
        return hasValue_;
    }

    MaybeRegister<T> &operator =(const MaybeRegister<T> &other) {
        hasValue_ = other.hasValue_;
        value_ = other.value_;
        return *this;
    }

    MaybeRegister<T> &operator =(T r) {
        setReg(r);
        return *this;
    }

  private:
    T value_;        // meaningful only when hasValue_ is true
    bool hasValue_;  // whether a register has been assigned
};
typedef MaybeRegister<JSC::MacroAssembler::RegisterID> MaybeRegisterID;
typedef MaybeRegister<JSC::MacroAssembler::FPRegisterID> MaybeFPRegisterID;
class MaybeJump {
typedef JSC::MacroAssembler::Jump Jump;
public:
MaybeJump()
: set(false)
{ }
inline Jump getJump() const { JS_ASSERT(set); return jump; }
inline Jump get() const { JS_ASSERT(set); return jump; }
inline void setJump(const Jump &j) { jump = j; set = true; }
inline bool isSet() const { return set; }
inline MaybeJump &operator=(Jump j) { setJump(j); return *this; }
private:
Jump jump;
bool set;
};
} /* namespace mjit */
} /* namespace js */
#endif

Просмотреть файл

@ -1,478 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifdef JS_METHODJIT
#include "mozilla/DebugOnly.h"
#include "Retcon.h"
#include "MethodJIT.h"
#include "Compiler.h"
#include "StubCalls.h"
#include "jsdbgapi.h"
#include "jsnum.h"
#include "assembler/assembler/LinkBuffer.h"
#include "assembler/assembler/RepatchBuffer.h"
#include "jscntxtinlines.h"
#include "jsinterpinlines.h"
using namespace js;
using namespace js::mjit;
using mozilla::DebugOnly;
namespace js {
namespace mjit {
// Redirect a patched return location: scripted call sites resume through
// JaegerInterpolineScripted, everything else through JaegerInterpoline. The
// frame's rejoin state records where the interpreter should pick up.
static inline void
SetRejoinState(StackFrame *fp, const CallSite &site, void **location)
{
    if (site.rejoin == REJOIN_SCRIPTED) {
        fp->setRejoin(ScriptedRejoin(site.pcOffset));
        *location = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpolineScripted);
    } else {
        fp->setRejoin(StubRejoin(site.rejoin));
        *location = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
    }
}
// Whether 'location' is the return address for call site 'site' in code
// starting at codeStart. On ARM the stored return address may be offset by
// 4 bytes from the recorded site, so that offset is accepted too.
static inline bool
CallsiteMatches(uint8_t *codeStart, const CallSite &site, void *location)
{
    if (codeStart + site.codeOffset == location)
        return true;

#ifdef JS_CPU_ARM
    if (codeStart + site.codeOffset + 4 == location)
        return true;
#endif

    return false;
}
// Find the call site in 'chunk' whose return address is *location and
// redirect it to the interpoline via SetRejoinState. The site must exist;
// failing to find one is a hard error.
void
Recompiler::patchCall(JITChunk *chunk, StackFrame *fp, void **location)
{
    uint8_t* codeStart = (uint8_t *)chunk->code.m_code.executableAddress();

    CallSite *callSites_ = chunk->callSites();
    for (uint32_t i = 0; i < chunk->nCallSites; i++) {
        if (CallsiteMatches(codeStart, callSites_[i], *location)) {
            // Only outer-frame call sites are patched here.
            JS_ASSERT(callSites_[i].inlineIndex == analyze::CrossScriptSSA::OUTER_FRAME);
            SetRejoinState(fp, callSites_[i], location);
            return;
        }
    }

    JS_NOT_REACHED("failed to find call site");
}
void
Recompiler::patchNative(JSRuntime *rt, JITChunk *chunk, StackFrame *fp,
                        jsbytecode *pc, RejoinState rejoin)
{
    /*
     * There is a native call or getter IC at pc which triggered recompilation.
     * The recompilation could have been triggered either by the native call
     * itself, or by a SplatApplyArgs preparing for the native call. Either
     * way, we don't want to patch up the call, but will instead steal the pool
     * for the IC so it doesn't get freed with the JITChunk, and patch up the
     * jump at the end to go to the interpoline.
     *
     * When doing this, we do not reset the IC itself; there may be other
     * native calls from this chunk on the stack and we need to find and patch
     * all live stubs before purging the chunk's caches.
     */
    fp->setRejoin(StubRejoin(rejoin));

    /* :XXX: We might crash later if this fails. */
    rt->jaegerRuntime().orphanedNativeFrames.append(fp);

    DebugOnly<bool> found = false;

    /*
     * Find and patch all native call stubs attached to the given PC. There may
     * be multiple ones for getter stubs attached to e.g. a GETELEM.
     */
    for (unsigned i = 0; i < chunk->nativeCallStubs.length(); i++) {
        NativeCallStub &stub = chunk->nativeCallStubs[i];
        if (stub.pc != pc)
            continue;

        found = true;

        /* Check for pools that were already patched. */
        if (!stub.pool)
            continue;

        /* Patch the native fallthrough to go to the interpoline. */
        {
#if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
            /* Win64 needs stack adjustment */
            void *interpoline = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpolinePatched);
#else
            void *interpoline = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
#endif
            uint8_t *start = (uint8_t *)stub.jump.executableAddress();
            JSC::RepatchBuffer repatch(JSC::JITCode(start - 32, 64));
#ifdef JS_CPU_X64
            repatch.repatch(stub.jump, interpoline);
#else
            repatch.relink(stub.jump, JSC::CodeLocationLabel(interpoline));
#endif
        }

        /* :XXX: We leak the pool if this fails. Oh well. */
        rt->jaegerRuntime().orphanedNativePools.append(stub.pool);

        /* Mark as stolen in case there are multiple calls on the stack. */
        stub.pool = NULL;
    }

    JS_ASSERT(found);
}
// Patch a single VMFrame so that, when control returns to it, execution
// resumes in the interpreter rather than in (discarded) JIT code for
// 'script'. Handles native-call rejoins, stub-call rejoins, and plain
// scripted returns separately.
void
Recompiler::patchFrame(JSRuntime *rt, VMFrame *f, JSScript *script)
{
    /*
     * Check if the VMFrame returns directly into the script's jitcode. This
     * depends on the invariant that f->fp() reflects the frame at the point
     * where the call occurred, regardless of any frames which were pushed
     * inside the call.
     */
    StackFrame *fp = f->fp();
    void **addr = f->returnAddressLocation();
    RejoinState rejoin = (RejoinState) f->stubRejoin;
    if (rejoin == REJOIN_NATIVE ||
        rejoin == REJOIN_NATIVE_LOWERED ||
        rejoin == REJOIN_NATIVE_GETTER) {
        /* Native call. */
        if (fp->script() == script) {
            patchNative(rt, fp->jit()->chunk(f->regs.pc), fp, f->regs.pc, rejoin);
            f->stubRejoin = REJOIN_NATIVE_PATCHED;
        }
    } else if (rejoin == REJOIN_NATIVE_PATCHED) {
        /* Already patched, don't do anything. */
    } else if (rejoin) {
        /* Recompilation triggered by CompileFunction. */
        if (fp->script() == script) {
            fp->setRejoin(StubRejoin(rejoin));
            *addr = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
            f->stubRejoin = 0;
        }
    } else {
        // No stub rejoin: the return address may point into any of the
        // script's four JIT variants (constructing x barriers); check each.
        for (int constructing = 0; constructing <= 1; constructing++) {
            for (int barriers = 0; barriers <= 1; barriers++) {
                JITScript *jit = script->getJIT((bool) constructing, (bool) barriers);
                if (jit) {
                    JITChunk *chunk = jit->findCodeChunk(*addr);
                    if (chunk)
                        patchCall(chunk, fp, addr);
                }
            }
        }
    }
}
// Materialize the chain of inlined frames ending at 'inner', outermost
// first (via recursion on inner->parent), and return the innermost
// StackFrame. Each expanded parent is set to rejoin through the scripted
// interpoline at its call pc.
StackFrame *
Recompiler::expandInlineFrameChain(StackFrame *outer, InlineFrame *inner)
{
    StackFrame *parent;
    if (inner->parent)
        parent = expandInlineFrameChain(outer, inner->parent);
    else
        parent = outer;

    JaegerSpew(JSpew_Recompile, "Expanding inline frame\n");

    // The inlined frame's slots live at a fixed depth within the outer frame.
    StackFrame *fp = (StackFrame *) ((uint8_t *)outer + sizeof(Value) * inner->depth);
    fp->initInlineFrame(inner->fun, parent, inner->parentpc);
    uint32_t pcOffset = inner->parentpc - parent->script()->code;

    void **location = fp->addressOfNativeReturnAddress();
    *location = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpolineScripted);
    parent->setRejoin(ScriptedRejoin(pcOffset));

    return fp;
}
/*
* Whether a given return address for a frame indicates it returns directly
* into JIT code.
*/
// True when 'data' is a return address into JIT code proper, i.e. not NULL
// (interpreted frame) and not one of the trampoline/interpoline entry points.
static inline bool
JITCodeReturnAddress(void *data)
{
    return data != NULL  /* frame is interpreted */
        && data != JS_FUNC_TO_DATA_PTR(void *, JaegerTrampolineReturn)
        && data != JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline)
#if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
        && data != JS_FUNC_TO_DATA_PTR(void *, JaegerInterpolinePatched)
#endif
        && data != JS_FUNC_TO_DATA_PTR(void *, JaegerInterpolineScripted);
}
/*
* Expand all inlined frames within fp per 'inlined' and update next and regs
* to refer to the new innermost frame.
*/
// Expand the inlined frames within 'fp' described by 'inlined', fixing up
// the VMFrame 'f' and the next-outer frame 'next' (if any) to reference the
// newly materialized frames.
void
Recompiler::expandInlineFrames(Zone *zone,
                               StackFrame *fp, mjit::CallSite *inlined,
                               StackFrame *next, VMFrame *f)
{
    JS_ASSERT_IF(next, next->prev() == fp && next->prevInline() == inlined);

    /*
     * Treat any frame expansion as a recompilation event, so that f.jit() is
     * stable if no recompilations have occurred.
     */
    for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
        comp->types.frameExpansions++;

    jsbytecode *pc = next ? next->prevpc() : f->regs.pc;
    JITChunk *chunk = fp->jit()->chunk(pc);

    /*
     * Patch the VMFrame's return address if it is returning at the given inline site.
     * Note there is no worry about handling a native or CompileFunction call here,
     * as such IC stubs are not generated within inline frames.
     */
    void **frameAddr = f->returnAddressLocation();
    uint8_t* codeStart = (uint8_t *)chunk->code.m_code.executableAddress();

    InlineFrame *inner = &chunk->inlineFrames()[inlined->inlineIndex];
    jsbytecode *innerpc = inner->fun->nonLazyScript()->code + inlined->pcOffset;

    StackFrame *innerfp = expandInlineFrameChain(fp, inner);

    /* Check if the VMFrame returns into the inlined frame. */
    if (f->stubRejoin && f->fp() == fp) {
        /* The VMFrame is calling CompileFunction. */
        JS_ASSERT(f->stubRejoin != REJOIN_NATIVE &&
                  f->stubRejoin != REJOIN_NATIVE_LOWERED &&
                  f->stubRejoin != REJOIN_NATIVE_GETTER &&
                  f->stubRejoin != REJOIN_NATIVE_PATCHED);
        innerfp->setRejoin(StubRejoin((RejoinState) f->stubRejoin));
        *frameAddr = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
        f->stubRejoin = 0;
    }
    if (CallsiteMatches(codeStart, *inlined, *frameAddr)) {
        /* The VMFrame returns directly into the expanded frame. */
        SetRejoinState(innerfp, *inlined, frameAddr);
    }

    if (f->fp() == fp) {
        JS_ASSERT(f->regs.inlined() == inlined);
        f->regs.expandInline(innerfp, innerpc);
    }

    /*
     * Note: unlike the case for recompilation, during frame expansion we don't
     * need to worry about the next VMFrame holding a reference to the inlined
     * frame in its entryncode. entryncode is non-NULL only if the next frame's
     * code was discarded and has executed via the Interpoline, which can only
     * happen after all inline frames have been expanded.
     */

    if (next) {
        next->resetInlinePrev(innerfp, innerpc);
        void **addr = next->addressOfNativeReturnAddress();
        if (JITCodeReturnAddress(*addr)) {
            innerfp->setRejoin(ScriptedRejoin(inlined->pcOffset));
            *addr = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpolineScripted);
        }
    }
}
// Expand every inlined frame on the stack for the given zone: walk all
// active VMFrames and, within each, every StackFrame pair with an inline
// call site between them.
void
ExpandInlineFrames(Zone *zone)
{
    JSRuntime *rt = zone->rt;
    if (!rt->hasJaegerRuntime())
        return;

    for (VMFrame *f = rt->jaegerRuntime().activeFrame(); f != NULL; f = f->previous) {
        if (f->entryfp->compartment()->zone() != zone)
            continue;

        // The innermost frame's inline state lives in the VMFrame's regs.
        if (f->regs.inlined())
            mjit::Recompiler::expandInlineFrames(zone, f->fp(), f->regs.inlined(), NULL, f);

        StackFrame *end = f->entryfp->prev();
        StackFrame *next = NULL;
        for (StackFrame *fp = f->fp(); fp != end; fp = fp->prev()) {
            if (!next) {
                next = fp;
                continue;
            }
            mjit::CallSite *inlined;
            next->prevpc(&inlined);
            if (inlined) {
                mjit::Recompiler::expandInlineFrames(zone, fp, inlined, next, f);
                fp = next;
                next = NULL;
            } else {
                // Frames below an already-expanded frame need no more work.
                if (fp->downFramesExpanded())
                    break;
                next = fp;
            }
            fp->setDownFramesExpanded();
        }
    }
}
// Force every JIT frame in the zone back into the interpreter: expand
// inline frames, patch each VMFrame's return path, clear stale ncode
// values, and finally purge caches of chunks with patched native frames.
void
ClearAllFrames(Zone *zone)
{
    JSRuntime *rt = zone->rt;
    if (!rt->hasJaegerRuntime())
        return;

    ExpandInlineFrames(zone);

    for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
        comp->types.recompilations++;

    for (VMFrame *f = rt->jaegerRuntime().activeFrame();
         f != NULL;
         f = f->previous)
    {
        if (f->entryfp->compartment()->zone() != zone)
            continue;

        Recompiler::patchFrame(rt, f, f->fp()->script());

        // Clear ncode values from all frames associated with the VMFrame.
        // Patching the VMFrame's return address will cause all its frames to
        // finish in the interpreter, unless the interpreter enters one of the
        // intermediate frames at a loop boundary (where EnterMethodJIT will
        // overwrite ncode). However, leaving stale values for ncode in stack
        // frames can confuse the recompiler, which may see the VMFrame before
        // it has resumed execution.
        for (StackFrame *fp = f->fp(); fp != f->entryfp; fp = fp->prev())
            fp->setNativeReturnAddress(NULL);
    }

    // Purge all ICs in chunks for which we patched any native frames, see patchNative.
    for (VMFrame *f = rt->jaegerRuntime().activeFrame();
         f != NULL;
         f = f->previous)
    {
        if (f->entryfp->compartment()->zone() != zone)
            continue;

        JS_ASSERT(f->stubRejoin != REJOIN_NATIVE &&
                  f->stubRejoin != REJOIN_NATIVE_LOWERED &&
                  f->stubRejoin != REJOIN_NATIVE_GETTER);
        if (f->stubRejoin == REJOIN_NATIVE_PATCHED && f->jit() && f->chunk())
            f->chunk()->purgeCaches();
    }
}
/*
* Recompilation can be triggered either by the debugger (turning debug mode on for
* a script or setting/clearing a trap), or by dynamic changes in type information
* from type inference. When recompiling we don't immediately recompile the JIT
* code, but destroy the old code and remove all references to the code, including
* those from active stack frames. Things to do:
*
* - Purge scripted call inline caches calling into the script.
*
* - For frames with an ncode return address in the original script, redirect
* to the interpoline.
*
* - For VMFrames with a stub call return address in the original script,
* redirect to the interpoline.
*
* - For VMFrames whose entryncode address (the value of entryfp->ncode before
* being clobbered with JaegerTrampolineReturn) is in the original script,
* redirect that entryncode to the interpoline.
*/
// Remove all stack references to 'script's JIT code ahead of recompilation:
// patch return addresses at registered call sites to the interpoline, then
// purge caches of chunks whose native frames were patched.
void
Recompiler::clearStackReferences(FreeOp *fop, JSScript *script)
{
    JS_ASSERT(script->hasMJITInfo());

    JaegerSpew(JSpew_Recompile, "recompiling script (file \"%s\") (line \"%d\") (length \"%d\") (usecount=\"%d\")\n",
               script->filename(), script->lineno, script->length, (int) script->getUseCount());

    JSCompartment *comp = script->compartment();
    types::AutoEnterAnalysis enter(fop, comp);

    /*
     * The strategy for this goes as follows:
     *
     * 1) Scan the stack, looking at all return addresses that could go into JIT
     *    code.
     * 2) If an address corresponds to a call site registered by |callSite| during
     *    the last compilation, patch it to go to the interpoline.
     * 3) Purge the old compiled state.
     */

    // Find all JIT'd stack frames to account for return addresses that will
    // need to be patched after recompilation.
    for (VMFrame *f = fop->runtime()->jaegerRuntime().activeFrame();
         f != NULL;
         f = f->previous)
    {
        if (f->entryfp->compartment() != comp)
            continue;

        // Scan all frames owned by this VMFrame.
        StackFrame *end = f->entryfp->prev();
        StackFrame *next = NULL;
        for (StackFrame *fp = f->fp(); fp != end; fp = fp->prev()) {
            if (fp->script() != script) {
                next = fp;
                continue;
            }

            if (next) {
                // check for a scripted call returning into the recompiled script.
                // this misses scanning the entry fp, which cannot return directly
                // into JIT code.
                void **addr = next->addressOfNativeReturnAddress();

                if (JITCodeReturnAddress(*addr)) {
                    JITChunk *chunk = fp->jit()->findCodeChunk(*addr);
                    patchCall(chunk, fp, addr);
                }
            }

            next = fp;
        }

        patchFrame(comp->rt, f, script);
    }

    comp->types.recompilations++;

    // Purge all ICs in chunks for which we patched any native frames, see patchNative.
    for (VMFrame *f = fop->runtime()->jaegerRuntime().activeFrame();
         f != NULL;
         f = f->previous)
    {
        if (f->fp()->script() == script) {
            JS_ASSERT(f->stubRejoin != REJOIN_NATIVE &&
                      f->stubRejoin != REJOIN_NATIVE_LOWERED &&
                      f->stubRejoin != REJOIN_NATIVE_GETTER);
            if (f->stubRejoin == REJOIN_NATIVE_PATCHED && f->jit() && f->chunk())
                f->chunk()->purgeCaches();
        }
    }
}
} /* namespace mjit */
} /* namespace js */
#endif /* JS_METHODJIT */

Просмотреть файл

@ -1,59 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* Retroactive continuity ("retcon") refers to the retroactive modification
* or reinterpretation of established facts.
*/
#if !defined jsjaeger_retcon_h__ && defined JS_METHODJIT
#define jsjaeger_retcon_h__
#include "jscntxt.h"
#include "jsscript.h"
#include "MethodJIT.h"
#include "Compiler.h"
namespace js {
namespace mjit {
/*
* This class is responsible for sanely destroying a JITed script while frames
* for it are still on the stack, removing all references in the world to it
* and patching up those existing frames to go into the interpreter. If you
* ever change the code associated with a JSScript, or otherwise would cause
* existing JITed code to be incorrect, you /must/ use this to invalidate the
* JITed code, fixing up the stack in the process.
*/
class Recompiler {
  public:
    // Clear all uses of compiled code for script on the stack. This must be
    // followed by destroying all JIT code for the script.
    static void
    clearStackReferences(FreeOp *fop, JSScript *script);

    // Expand frames inlined by the JIT back into distinct StackFrames.
    // NOTE(review): exact semantics of |inlined|/|next| not visible here —
    // confirm against the definition in Retcon.cpp.
    static void
    expandInlineFrames(JS::Zone *zone, StackFrame *fp, mjit::CallSite *inlined,
                       StackFrame *next, VMFrame *f);

    // Patch a single VMFrame so it no longer returns into |script|'s code.
    static void patchFrame(JSRuntime *rt, VMFrame *f, JSScript *script);

  private:
    // Redirect a scripted-call return address at |location| to the interpoline.
    static void patchCall(JITChunk *chunk, StackFrame *fp, void **location);

    // Patch a frame stopped at a native call; see patchNative in Retcon.cpp.
    static void patchNative(JSRuntime *rt, JITChunk *chunk, StackFrame *fp,
                            jsbytecode *pc, RejoinState rejoin);

    // Helper for expandInlineFrames: materialize |inner|'s chain under |outer|.
    static StackFrame *
    expandInlineFrameChain(StackFrame *outer, InlineFrame *inner);
};
} /* namespace mjit */
} /* namespace js */
#endif

Просмотреть файл

@ -1,60 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jslogic_h_inl__ && defined JS_METHODJIT
#define jslogic_h_inl__
#include "methodjit/StubCalls.h"
namespace js {
namespace mjit {
/*
 * Arrange for the current VMFrame to unwind through the throwpoline: the
 * frame's return address slot is overwritten so that returning from the
 * active stub lands in JaegerThrowpoline.
 */
static inline void
ThrowException(VMFrame &f)
{
    *f.returnAddressLocation() = JS_FUNC_TO_DATA_PTR(void *, JaegerThrowpoline);
}
#define THROW() do { mjit::ThrowException(f); return; } while (0)
#define THROWV(v) do { mjit::ThrowException(f); return v; } while (0)
/*
 * Report a "not defined" error for |atom|. Best-effort: if the atom cannot
 * be converted to a printable byte string, no error is reported.
 */
static inline void
ReportAtomNotDefined(JSContext *cx, JSAtom *atom)
{
    JSAutoByteString printable;
    if (!js_AtomToPrintableString(cx, atom, &printable))
        return;
    js_ReportIsNotDefined(cx, printable.ptr());
}
/*
 * Resolve the callee function for an uncached call.
 *
 * Returns false only on error (e.g. failed delazification or clone);
 * returns true otherwise, including when the callee is not a function
 * object at all (in which case the caller must check |fun|).
 */
inline bool
stubs::UncachedCallResult::setFunction(JSContext *cx, CallArgs &args,
                                       HandleScript callScript, jsbytecode *callPc)
{
    // Callee is not a function object: succeed without resolving further.
    if (!IsFunctionObject(args.calleev(), fun.address()))
        return true;

    // Ensure the callee has a script so nonLazyScript() below is safe.
    if (fun->isInterpretedLazy() && !fun->getOrCreateScript(cx))
        return false;

    if (cx->typeInferenceEnabled() && fun->isInterpreted() &&
        fun->nonLazyScript()->shouldCloneAtCallsite)
    {
        // Substitute the callsite-specific clone for the callee, keeping the
        // original around so callers can tell a clone was made.
        original = fun;
        fun = CloneFunctionAtCallsite(cx, original, callScript, callPc);
        if (!fun)
            return false;
        // The stack must see the clone as the actual callee.
        args.setCallee(ObjectValue(*fun));
    }

    return true;
}
} /* namespace mjit */
} /* namespace js */
#endif /* jslogic_h_inl__ */

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,218 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined jslogic_h__ && defined JS_METHODJIT
#define jslogic_h__
#include "jsfuninlines.h"
#include "MethodJIT.h"
namespace js {
namespace mjit {
namespace stubs {
typedef enum JSTrapType {
JSTRAP_NONE = 0,
JSTRAP_TRAP = 1,
JSTRAP_SINGLESTEP = 2
} JSTrapType;
void JS_FASTCALL This(VMFrame &f);
void JS_FASTCALL NewInitArray(VMFrame &f, uint32_t count);
void JS_FASTCALL NewInitObject(VMFrame &f, JSObject *base);
void JS_FASTCALL Trap(VMFrame &f, uint32_t trapTypes);
void JS_FASTCALL DebuggerStatement(VMFrame &f, jsbytecode *pc);
void JS_FASTCALL Interrupt(VMFrame &f, jsbytecode *pc);
void JS_FASTCALL TriggerIonCompile(VMFrame &f);
void JS_FASTCALL RecompileForInline(VMFrame &f);
void JS_FASTCALL InitElem(VMFrame &f);
void JS_FASTCALL InitProp(VMFrame &f, PropertyName *name);
void JS_FASTCALL HitStackQuota(VMFrame &f);
void * JS_FASTCALL FixupArity(VMFrame &f, uint32_t argc);
void * JS_FASTCALL CompileFunction(VMFrame &f, uint32_t argc);
void JS_FASTCALL SlowNew(VMFrame &f, uint32_t argc);
void JS_FASTCALL SlowCall(VMFrame &f, uint32_t argc);
void * JS_FASTCALL UncachedNew(VMFrame &f, uint32_t argc);
void * JS_FASTCALL UncachedCall(VMFrame &f, uint32_t argc);
void * JS_FASTCALL UncachedLoweredCall(VMFrame &f, uint32_t argc);
void JS_FASTCALL Eval(VMFrame &f, uint32_t argc);
void JS_FASTCALL ScriptDebugPrologue(VMFrame &f);
void JS_FASTCALL ScriptDebugEpilogue(VMFrame &f);
void JS_FASTCALL ScriptProbeOnlyPrologue(VMFrame &f);
void JS_FASTCALL ScriptProbeOnlyEpilogue(VMFrame &f);
/*
* Result struct for UncachedXHelper.
*
* These functions can have one of two results:
*
* (1) The function was executed in the interpreter. Then all fields
* are NULL except unjittable.
*
* (2) The function was not executed, and the function has been compiled
* to JM native code. Then all fields are non-NULL.
*/
struct UncachedCallResult {
    RootedFunction fun;        // callee function
    RootedFunction original;   // NULL if fun is not a callsite clone, else
                               // points to the original function.
    void           *codeAddr;  // code address of compiled callee function
    bool           unjittable; // did we try to JIT and fail?

    // Rooted members need the context; scalar members are set via init().
    UncachedCallResult(JSContext *cx) : fun(cx), original(cx) {}

    // Reset all fields to the "nothing resolved yet" state.
    void init() {
        fun = NULL;
        original = NULL;
        codeAddr = NULL;
        unjittable = false;
    }

    // Resolve |fun| (and possibly |original|) from the call's arguments;
    // defined in StubCalls-inl.h.
    inline bool setFunction(JSContext *cx, CallArgs &args,
                            HandleScript callScript, jsbytecode *callPc);
};
/*
* Helper functions for stubs and IC functions for calling functions.
* These functions either execute the function, return a native code
* pointer that can be used to call the function, or throw.
*/
void UncachedCallHelper(VMFrame &f, uint32_t argc, bool lowered, UncachedCallResult &ucr);
void UncachedNewHelper(VMFrame &f, uint32_t argc, UncachedCallResult &ucr);
void JS_FASTCALL CreateThis(VMFrame &f, JSObject *proto);
void JS_FASTCALL Throw(VMFrame &f);
void * JS_FASTCALL TableSwitch(VMFrame &f, jsbytecode *origPc);
void JS_FASTCALL BindName(VMFrame &f, PropertyName *name);
JSObject * JS_FASTCALL BindGlobalName(VMFrame &f);
void JS_FASTCALL SetName(VMFrame &f, PropertyName *name);
void JS_FASTCALL IntrinsicName(VMFrame &f, PropertyName *name);
void JS_FASTCALL Name(VMFrame &f);
void JS_FASTCALL GetProp(VMFrame &f, PropertyName *name);
void JS_FASTCALL GetPropNoCache(VMFrame &f, PropertyName *name);
void JS_FASTCALL SetProp(VMFrame &f, PropertyName *name);
void JS_FASTCALL GetElem(VMFrame &f);
template<JSBool strict> void JS_FASTCALL SetElem(VMFrame &f);
void JS_FASTCALL ToId(VMFrame &f);
void JS_FASTCALL ImplicitThis(VMFrame &f, PropertyName *name);
template <JSBool strict> void JS_FASTCALL DelProp(VMFrame &f, PropertyName *name);
template <JSBool strict> void JS_FASTCALL DelElem(VMFrame &f);
void JS_FASTCALL DelName(VMFrame &f, PropertyName *name);
JSBool JS_FASTCALL In(VMFrame &f);
void JS_FASTCALL DefVarOrConst(VMFrame &f, PropertyName *name);
void JS_FASTCALL SetConst(VMFrame &f, PropertyName *name);
template<JSBool strict> void JS_FASTCALL DefFun(VMFrame &f, JSFunction *fun);
void JS_FASTCALL RegExp(VMFrame &f, JSObject *regex);
JSObject * JS_FASTCALL Lambda(VMFrame &f, JSFunction *fun);
JSObject * JS_FASTCALL FlatLambda(VMFrame &f, JSFunction *fun);
void JS_FASTCALL Arguments(VMFrame &f);
void JS_FASTCALL EnterBlock(VMFrame &f, JSObject *obj);
void JS_FASTCALL LeaveBlock(VMFrame &f);
JSBool JS_FASTCALL LessThan(VMFrame &f);
JSBool JS_FASTCALL LessEqual(VMFrame &f);
JSBool JS_FASTCALL GreaterThan(VMFrame &f);
JSBool JS_FASTCALL GreaterEqual(VMFrame &f);
JSBool JS_FASTCALL Equal(VMFrame &f);
JSBool JS_FASTCALL NotEqual(VMFrame &f);
void JS_FASTCALL BitOr(VMFrame &f);
void JS_FASTCALL BitXor(VMFrame &f);
void JS_FASTCALL BitAnd(VMFrame &f);
void JS_FASTCALL BitNot(VMFrame &f);
void JS_FASTCALL Lsh(VMFrame &f);
void JS_FASTCALL Rsh(VMFrame &f);
void JS_FASTCALL Ursh(VMFrame &f);
void JS_FASTCALL Add(VMFrame &f);
void JS_FASTCALL Sub(VMFrame &f);
void JS_FASTCALL Mul(VMFrame &f);
void JS_FASTCALL Div(VMFrame &f);
void JS_FASTCALL Mod(VMFrame &f);
void JS_FASTCALL Neg(VMFrame &f);
void JS_FASTCALL Pos(VMFrame &f);
void JS_FASTCALL Not(VMFrame &f);
void JS_FASTCALL StrictEq(VMFrame &f);
void JS_FASTCALL StrictNe(VMFrame &f);
void JS_FASTCALL Iter(VMFrame &f, uint32_t flags);
void JS_FASTCALL IterNext(VMFrame &f);
JSBool JS_FASTCALL IterMore(VMFrame &f);
void JS_FASTCALL EndIter(VMFrame &f);
JSBool JS_FASTCALL ValueToBoolean(VMFrame &f);
JSString * JS_FASTCALL TypeOf(VMFrame &f);
JSBool JS_FASTCALL InstanceOf(VMFrame &f);
void JS_FASTCALL FastInstanceOf(VMFrame &f);
/*
* Helper for triggering recompilation should a name read miss a type barrier,
* produce undefined or -0.
*/
void JS_FASTCALL TypeBarrierHelper(VMFrame &f, uint32_t which);
void JS_FASTCALL TypeBarrierReturn(VMFrame &f, Value *vp);
void JS_FASTCALL NegZeroHelper(VMFrame &f);
void JS_FASTCALL StubTypeHelper(VMFrame &f, int32_t which);
void JS_FASTCALL CheckArgumentTypes(VMFrame &f);
#ifdef DEBUG
void JS_FASTCALL AssertArgumentTypes(VMFrame &f);
#endif
void JS_FASTCALL MissedBoundsCheckEntry(VMFrame &f);
void JS_FASTCALL MissedBoundsCheckHead(VMFrame &f);
void * JS_FASTCALL InvariantFailure(VMFrame &f, void *repatchCode);
template <bool strict> int32_t JS_FASTCALL ConvertToTypedInt(JSContext *cx, Value *vp);
void JS_FASTCALL ConvertToTypedFloat(JSContext *cx, Value *vp);
void JS_FASTCALL Exception(VMFrame &f);
void JS_FASTCALL StrictEvalPrologue(VMFrame &f);
void JS_FASTCALL HeavyweightFunctionPrologue(VMFrame &f);
void JS_FASTCALL AnyFrameEpilogue(VMFrame &f);
void JS_FASTCALL Epilogue(VMFrame &f);
JSObject * JS_FASTCALL
NewDenseUnallocatedArray(VMFrame &f, uint32_t length);
void JS_FASTCALL ArrayConcatTwoArrays(VMFrame &f);
void JS_FASTCALL ArrayShift(VMFrame &f);
void JS_FASTCALL WriteBarrier(VMFrame &f, Value *addr);
void JS_FASTCALL GCThingWriteBarrier(VMFrame &f, Value *addr);
void JS_FASTCALL CrossChunkShim(VMFrame &f, void *edge);
} /* namespace stubs */
/*
* If COND is true, return A; otherwise, return B. This allows us to choose between
* function template instantiations without running afoul of C++'s overload resolution
* rules. (Try simplifying, and you'll either see the problem --- or have found a
* better solution!)
*/
/*
 * Select between two function pointers of identical type at runtime.
 * Returns |a| when |cond| is true, otherwise |b|. Exists so callers can
 * pick between template instantiations without tripping over C++ overload
 * resolution.
 */
template<typename FuncPtr>
inline FuncPtr FunctionTemplateConditional(bool cond, FuncPtr a, FuncPtr b) {
    if (cond)
        return a;
    return b;
}
}} /* namespace stubs,mjit,js */
extern "C" void *
js_InternalThrow(js::VMFrame &f);
extern "C" void *
js_InternalInterpret(void *returnData, void *returnType, void *returnReg, js::VMFrame &f);
#endif /* jslogic_h__ */

Просмотреть файл

@ -1,231 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "StubCalls.h"
#include "StubCompiler.h"
#include "Compiler.h"
#include "assembler/assembler/LinkBuffer.h"
#include "FrameState-inl.h"
using namespace js;
using namespace mjit;
/*
 * Slow-path compiler attached to a fast-path mjit::Compiler |cc|.
 * generation starts at 1 with lastGeneration 0 so the first syncExit does
 * not emit a jump over previously merged code (see syncExit/leave).
 */
StubCompiler::StubCompiler(JSContext *cx, mjit::Compiler &cc, FrameState &frame)
  : cx(cx),
    cc(cc),
    frame(frame),
    masm(&cc.sps, &cc.PC),
    generation(1),
    lastGeneration(0),
    exits(CompilerAllocPolicy(cx, cc)),
    joins(CompilerAllocPolicy(cx, cc)),
    scriptJoins(CompilerAllocPolicy(cx, cc)),
    jumpList(SystemAllocPolicy())
{
#ifdef DEBUG
    // Tag this assembler's spew output as slow-path code.
    masm.setSpewPath(true);
#endif
}
/*
 * Queue a fast-path jump |j| to be bound to slow-path label |L|; the actual
 * linking happens later in fixCrossJumps, once both buffers are laid out.
 */
void
StubCompiler::linkExitDirect(Jump j, Label L)
{
    CrossPatch exit(j, L);
    exits.append(exit);
}
/*
 * Emit slow-path merge code that syncs the frame state for a slow call
 * using |uses| values from the top of the stack, returning the label at
 * which the merge code begins.
 */
JSC::MacroAssembler::Label
StubCompiler::syncExit(Uses uses)
{
    JaegerSpew(JSpew_Insns, " ---- BEGIN SLOW MERGE CODE ---- \n");

    // If a previous exit in this same generation already emitted merge code,
    // jump over it so the two merges don't fall into each other; the jump is
    // resolved in leave().
    if (lastGeneration == generation) {
        Jump j2 = masm.jump();
        jumpList.append(j2);
    }

    Label l = masm.label();
    frame.sync(masm, uses);
    lastGeneration = generation;

    JaegerSpew(JSpew_Insns, " ---- END SLOW MERGE CODE ---- \n");
    return l;
}
/*
 * Like syncExit, but immediately follows the merge code with an
 * unconditional jump (resolved in leave()), and bumps the generation so
 * the next sync/link does not emit a redundant skip-jump.
 */
JSC::MacroAssembler::Label
StubCompiler::syncExitAndJump(Uses uses)
{
    Label l = syncExit(uses);
    Jump j2 = masm.jump();
    jumpList.append(j2);

    /* Suppress jumping on next sync/link. */
    generation++;

    return l;
}
// Link an exit from the fast path to a slow path. This does two main things:
// (a) links the given jump to the slow path, and (b) generates a prolog for
// the slow path that syncs frame state for a slow call that uses |uses|
// values from the top of the stack.
//
// The return value is the label for the start of the merge code. This is
// the correct place to jump to in order to execute the slow path being
// generated here.
//
// Note 1: Slow path generation is interleaved with fast path generation, but
// the slow path goes into a separate buffer. The slow path code is appended
// to the fast path code to keep it nearby in code memory.
//
// Note 2: A jump from the fast path to the slow path is called an "exit".
// A jump from the slow path to the fast path is called a "rejoin".
/*
 * Link fast-path exit |j| to freshly emitted slow-path merge code that
 * syncs |uses| stack values, and return the label of that merge code —
 * the correct target for executing this slow path.
 */
JSC::MacroAssembler::Label
StubCompiler::linkExit(Jump j, Uses uses)
{
    Label mergeStart = syncExit(uses);
    linkExitDirect(j, mergeStart);
    return mergeStart;
}
// Special version of linkExit that is used when there is a JavaScript
// control-flow branch after the slow path. Our compilation strategy
// requires the JS frame to be fully materialized in memory across branches.
// This function does a linkExit and also fully materializes the frame.
/*
 * Variant of linkExit used when a JS control-flow branch follows the slow
 * path: syncing every frame slot fully materializes the frame in memory,
 * as required across branches.
 */
void
StubCompiler::linkExitForBranch(Jump j)
{
    Label target = syncExit(Uses(frame.frameSlots()));
    linkExitDirect(j, target);
}
/*
 * Finish the current slow-path section: bind all pending skip-jumps
 * (queued by syncExit/syncExitAndJump) to the current position and start
 * a new generation for subsequent exits.
 */
void
StubCompiler::leave()
{
    JaegerSpew(JSpew_Insns, " ---- BEGIN SLOW LEAVE CODE ---- \n");
    for (size_t i = 0; i < jumpList.length(); i++)
        jumpList[i].linkTo(masm.label(), &masm);
    jumpList.clear();
    generation++;
    JaegerSpew(JSpew_Insns, " ---- END SLOW LEAVE CODE ---- \n");
}
/*
 * Emit slow-path code that merges register state back (respecting
 * |changes| slots that must not be reloaded) and jumps to the fast path
 * at the compiler's current label. Registers the join with the active
 * loop, if any, so loop invariants can be restored.
 */
void
StubCompiler::rejoin(Changes changes)
{
    JaegerSpew(JSpew_Insns, " ---- BEGIN SLOW RESTORE CODE ---- \n");

    frame.merge(masm, changes);

    unsigned index = crossJump(masm.jump(), cc.getLabel());
    if (cc.loop)
        cc.loop->addJoin(index, false);

    JaegerSpew(JSpew_Insns, " ---- END SLOW RESTORE CODE ---- \n");
}
/*
 * Wire an existing slow-path jump |j| back to the fast path at the
 * compiler's current label.
 */
void
StubCompiler::linkRejoin(Jump j)
{
    Label fastTarget = cc.getLabel();
    crossJump(j, fastTarget);
}
typedef JSC::MacroAssembler::RegisterID RegisterID;
typedef JSC::MacroAssembler::ImmPtr ImmPtr;
typedef JSC::MacroAssembler::Imm32 Imm32;
typedef JSC::MacroAssembler::DataLabelPtr DataLabelPtr;
/*
 * Convenience overload: emit a stub call at the frame's current total
 * depth. See the four-argument overload for the full behavior.
 */
JSC::MacroAssembler::Call
StubCompiler::emitStubCall(void *ptr, RejoinState rejoin, Uses uses)
{
    int32_t depth = frame.totalDepth();
    return emitStubCall(ptr, rejoin, uses, depth);
}
/*
 * Emit a fallible VM call to stub |ptr| from slow-path code, recording a
 * call site (for debugging and recompilation) and, inside loops that
 * generate invariants, a hook for restoring loop invariants. |slots| is
 * the stack depth to report for the call.
 */
JSC::MacroAssembler::Call
StubCompiler::emitStubCall(void *ptr, RejoinState rejoin, Uses uses, int32_t slots)
{
    JaegerSpew(JSpew_Insns, " ---- BEGIN SLOW CALL CODE ---- \n");
    masm.bumpStubCount(cc.script_, cc.PC, Registers::tempCallReg());
    DataLabelPtr inlinePatch;
    Call cl = masm.fallibleVMCall(cx->typeInferenceEnabled(),
                                  ptr, cc.outerPC(), &inlinePatch, slots);
    JaegerSpew(JSpew_Insns, " ---- END SLOW CALL CODE ---- \n");

    /* Add the call site for debugging and recompilation. */
    Compiler::InternalCallSite site(masm.callReturnOffset(cl),
                                    cc.inlineIndex(), cc.inlinePC(),
                                    rejoin, true);
    site.inlinePatch = inlinePatch;

    /* Add a hook for restoring loop invariants if necessary. */
    if (cc.loop && cc.loop->generatingInvariants()) {
        site.loopJumpLabel = masm.label();
        Jump j = masm.jump();
        Label l = masm.label();
        /* MissedBoundsCheck* are not actually called, so f.regs need to be written before InvariantFailure. */
        bool entry = (ptr == JS_FUNC_TO_DATA_PTR(void *, stubs::MissedBoundsCheckEntry))
                  || (ptr == JS_FUNC_TO_DATA_PTR(void *, stubs::MissedBoundsCheckHead));
        cc.loop->addInvariantCall(j, l, true, entry, cc.callSites.length(), uses);
    }

    cc.addCallSite(site);
    return cl;
}
/*
 * Resolve all recorded cross-buffer patches after final code layout:
 * fast-path exits are linked into the slow buffer, and slow-path joins
 * (both label-based and bytecode-based) are linked back into the fast
 * buffer. |ncode| is the start of the combined code, with the slow path
 * beginning at |offset| within the |total| bytes.
 */
void
StubCompiler::fixCrossJumps(uint8_t *ncode, size_t offset, size_t total)
{
    JSC::LinkBuffer fast(ncode, total, JSC::JAEGER_CODE);
    JSC::LinkBuffer slow(ncode + offset, total - offset, JSC::JAEGER_CODE);

    // Fast-path exits jump into slow-path merge code.
    for (size_t i = 0; i < exits.length(); i++)
        fast.link(exits[i].from, slow.locationOf(exits[i].to));

    // Joins whose target is a bytecode pc: resolve through the compiler.
    for (size_t i = 0; i < scriptJoins.length(); i++) {
        const CrossJumpInScript &cj = scriptJoins[i];
        slow.link(cj.from, fast.locationOf(cc.labelOf(cj.pc, cj.inlineIndex)));
    }

    // Joins whose fast-path label was already known when recorded.
    for (size_t i = 0; i < joins.length(); i++)
        slow.link(joins[i].from, fast.locationOf(joins[i].to));
}
/*
 * Record a slow->fast patch from jump |j| to fast-path label |L|. Returns
 * the entry's index in |joins| so callers (e.g. patchJoin) can repatch it
 * later.
 */
unsigned
StubCompiler::crossJump(Jump j, Label L)
{
    unsigned index = joins.length();
    joins.append(CrossPatch(j, L));
    return index;
}
/*
 * Record a slow-path jump to bytecode |target| in the current script.
 * If the target's label is already known, register it as an ordinary
 * join; otherwise queue it as a script join to be resolved at link time.
 * Returns false on OOM appending to scriptJoins.
 */
bool
StubCompiler::jumpInScript(Jump j, jsbytecode *target)
{
    if (cc.knownJump(target)) {
        unsigned index = crossJump(j, cc.labelOf(target, cc.inlineIndex()));
        if (cc.loop)
            cc.loop->addJoin(index, false);
    } else {
        if (!scriptJoins.append(CrossJumpInScript(j, target, cc.inlineIndex())))
            return false;
        if (cc.loop)
            cc.loop->addJoin(scriptJoins.length() - 1, true);
    }
    return true;
}
/*
 * Repatch join |i| (from scriptJoins when |script| is true, else joins) so
 * that before rejoining the fast path it first reloads the value at
 * |address| into |reg| (payload for a GPR, double for an FP register).
 * The original jump is redirected here and replaced by a fresh jump that
 * continues to the join's recorded target.
 */
void
StubCompiler::patchJoin(unsigned i, bool script, Assembler::Address address, AnyRegisterID reg)
{
    Jump &j = script ? scriptJoins[i].from : joins[i].from;
    j.linkTo(masm.label(), &masm);

    if (reg.isReg())
        masm.loadPayload(address, reg.reg());
    else
        masm.loadDouble(address, reg.fpreg());

    j = masm.jump();
}

Просмотреть файл

@ -1,115 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined(jsstub_compiler_h__) && defined(JS_METHODJIT)
#define jsstub_compiler_h__
#include "jscntxt.h"
#include "MethodJIT.h"
#include "methodjit/FrameState.h"
#include "CodeGenIncludes.h"
namespace js {
namespace mjit {
class Compiler;
/*
 * Compiles out-of-line "slow path" code interleaved with the fast path
 * produced by mjit::Compiler. Slow-path code lives in a separate buffer
 * (masm) that is appended after the fast path; cross-buffer jumps are
 * recorded and resolved later by fixCrossJumps.
 */
class StubCompiler
{
    typedef JSC::MacroAssembler::Call Call;
    typedef JSC::MacroAssembler::Jump Jump;
    typedef JSC::MacroAssembler::Label Label;

    // A recorded jump in one buffer targeting a label in the other; linked
    // once final code layout is known.
    struct CrossPatch {
        CrossPatch(Jump from, Label to)
          : from(from), to(to)
        { }

        Jump from;
        Label to;
    };

    // A slow-path jump whose target is a bytecode pc (label not yet known);
    // resolved through the compiler at link time.
    struct CrossJumpInScript {
        CrossJumpInScript(Jump from, jsbytecode *pc, uint32_t inlineIndex)
          : from(from), pc(pc), inlineIndex(inlineIndex)
        { }

        Jump from;
        jsbytecode *pc;
        uint32_t inlineIndex;
    };

    JSContext *cx;
    Compiler &cc;          // fast-path compiler this slow path belongs to
    FrameState &frame;

  public:
    Assembler masm;        // out-of-line (slow path) assembly buffer

  private:
    // Generation counters coordinating syncExit/leave: a fresh generation
    // suppresses the jump emitted over a previous merge block.
    uint32_t generation;
    uint32_t lastGeneration;

    Vector<CrossPatch, 64, mjit::CompilerAllocPolicy> exits;       // fast -> slow
    Vector<CrossPatch, 64, mjit::CompilerAllocPolicy> joins;       // slow -> fast
    Vector<CrossJumpInScript, 64, mjit::CompilerAllocPolicy> scriptJoins; // slow -> bytecode target
    Vector<Jump, 8, SystemAllocPolicy> jumpList;                   // pending skip-jumps, bound in leave()

  public:
    StubCompiler(JSContext *cx, mjit::Compiler &cc, FrameState &frame);

    // Size in bytes of the slow-path code emitted so far.
    size_t size() {
        return masm.size();
    }

    // Raw slow-path code buffer.
    uint8_t *buffer() {
        return masm.buffer();
    }

    /*
     * Force a frame sync and return a label before the syncing code.
     * A Jump may bind to the label with leaveExitDirect().
     */
    JSC::MacroAssembler::Label syncExit(Uses uses);

    /*
     * Sync the exit, and state that code will be immediately outputted
     * to the out-of-line buffer.
     */
    JSC::MacroAssembler::Label syncExitAndJump(Uses uses);

    /* Exits from the fast path into the slow path. */
    JSC::MacroAssembler::Label linkExit(Jump j, Uses uses);
    void linkExitForBranch(Jump j);
    void linkExitDirect(Jump j, Label L);

    void leave();
    // NOTE(review): declared but no definition appears in the visible part
    // of StubCompiler.cpp — confirm where (or whether) it is defined.
    void leaveWithDepth(uint32_t depth);

    /*
     * Rejoins slow-path code back to the fast-path. The invalidation param
     * specifies how many stack slots below sp must not be reloaded from
     * registers.
     */
    void rejoin(Changes changes);
    void linkRejoin(Jump j);

    /* Finish all native code patching. */
    void fixCrossJumps(uint8_t *ncode, size_t offset, size_t total);
    bool jumpInScript(Jump j, jsbytecode *target);
    unsigned crossJump(Jump j, Label l);

    Call emitStubCall(void *ptr, RejoinState rejoin, Uses uses);
    Call emitStubCall(void *ptr, RejoinState rejoin, Uses uses, int32_t slots);

    // Redirect join |i| to first reload |address| into |reg|; see .cpp.
    void patchJoin(unsigned i, bool script, Assembler::Address address, AnyRegisterID reg);
};
} /* namespace mjit */
} /* namespace js */
#endif /* jsstub_compiler_h__ */

Просмотреть файл

@ -1,124 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "TrampolineCompiler.h"
#include "StubCalls.h"
#include "assembler/assembler/LinkBuffer.h"
#include "assembler/jit/ExecutableAllocator.h"
namespace js {
namespace mjit {
#define CHECK_RESULT(x) if (!(x)) return false
#define COMPILE(which, pool, how) CHECK_RESULT(compileTrampoline(&(which), &pool, how))
#define RELEASE(which, pool) JS_BEGIN_MACRO \
which = NULL; \
if (pool) \
pool->release(); \
pool = NULL; \
JS_END_MACRO
typedef JSC::MacroAssembler::Address Address;
typedef JSC::MacroAssembler::Label Label;
typedef JSC::MacroAssembler::Jump Jump;
typedef JSC::MacroAssembler::ImmPtr ImmPtr;
typedef JSC::MacroAssembler::Imm32 Imm32;
typedef JSC::MacroAssembler::Address Address;
/*
 * Compile all trampolines into executable pools via the COMPILE macro
 * (which returns false from this function on failure). The "fast"
 * force-return variant exists only on configurations listed in the #if.
 */
bool
TrampolineCompiler::compile()
{
#ifdef JS_METHODJIT_SPEW
    JMCheckLogging();
#endif

    COMPILE(trampolines->forceReturn, trampolines->forceReturnPool, generateForceReturn);
#if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
    COMPILE(trampolines->forceReturnFast, trampolines->forceReturnFastPool, generateForceReturnFast);
#endif

    return true;
}
/*
 * Release all trampolines owned by |tramps|: the RELEASE macro nulls each
 * code pointer and drops its executable pool.
 */
void
TrampolineCompiler::release(Trampolines *tramps)
{
    RELEASE(tramps->forceReturn, tramps->forceReturnPool);
#if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
    RELEASE(tramps->forceReturnFast, tramps->forceReturnFastPool);
#endif
}
/*
 * Generate one trampoline: run |generator| into a fresh assembler, link
 * the result into an executable pool (*poolp), and store the entry point
 * in *where. Returns false if generation or linking fails.
 */
bool
TrampolineCompiler::compileTrampoline(Trampolines::TrampolinePtr *where,
                                      JSC::ExecutablePool **poolp, TrampolineGenerator generator)
{
    Assembler masm;

    Label entry = masm.label();
    CHECK_RESULT(generator(masm));
    JS_ASSERT(entry.isSet());

    bool ok;
    JSC::LinkBuffer buffer(&masm, execAlloc, poolp, &ok, JSC::JAEGER_CODE);
    if (!ok)
        return false;
    masm.finalize(buffer);
    // The entry point is the generated code's base plus the entry label's
    // offset within the assembler buffer.
    uint8_t *result = (uint8_t*)buffer.finalizeCodeAddendum().dataLocation();
    *where = JS_DATA_TO_FUNC_PTR(Trampolines::TrampolinePtr, result + masm.distanceOf(entry));

    return true;
}
/*
* This is shamelessly copied from emitReturn, but with several changes:
* - There was always at least one inline call.
* - We don't know if there are activation objects or a script with nesting
* state whose active frames need adjustment, so we always stub the epilogue.
* - We don't know where we came from, so we don't know frame depth or PC.
* - There is no stub buffer.
*/
/*
 * Emit the force-return trampoline body: run the frame epilogue through a
 * stub call, load the frame's return value (or undefined when none is
 * set), and jump to the caller through the frame's saved ncode. See the
 * comment above for how this differs from the compiler's emitReturn.
 */
bool
TrampolineCompiler::generateForceReturn(Assembler &masm)
{
    /* The JSStackFrame register may have been clobbered while returning, reload it. */
    masm.loadPtr(FrameAddress(VMFrame::offsetOfFp), JSFrameReg);

    /* Perform the frame epilogue. */
    masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::AnyFrameEpilogue), NULL, NULL, 0);

    /* Store any known return value */
    masm.loadValueAsComponents(UndefinedValue(), JSReturnReg_Type, JSReturnReg_Data);
    // Skip the rval load below when the frame has no return value set.
    Jump rvalClear = masm.branchTest32(Assembler::Zero,
                                       FrameFlagsAddress(), Imm32(StackFrame::HAS_RVAL));
    Address rvalAddress(JSFrameReg, StackFrame::offsetOfReturnValue());
    masm.loadValueAsComponents(rvalAddress, JSReturnReg_Type, JSReturnReg_Data);
    rvalClear.linkTo(masm.label(), &masm);

    /* Return to the caller */
    masm.loadPtr(Address(JSFrameReg, StackFrame::offsetOfNcode()), Registers::ReturnReg);
    masm.jump(Registers::ReturnReg);
    return true;
}
#if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
/*
 * Variant of generateForceReturn for ABIs where stub calls leave extra
 * argument space on the stack: pop that space before emitting the common
 * force-return sequence.
 */
bool
TrampolineCompiler::generateForceReturnFast(Assembler &masm)
{
#ifdef _WIN64
    masm.addPtr(Imm32(32), Registers::StackPointer);
#else
    // In case of no fast call, when we change the return address,
    // we need to make sure add esp by 8.
    masm.addPtr(Imm32(16), Registers::StackPointer);
#endif
    return generateForceReturn(masm);
}
#endif
} /* namespace mjit */
} /* namespace js */

Просмотреть файл

@ -1,47 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined trampolines_h__ && defined JS_METHODJIT
#define trampolines_h__
#include "assembler/jit/ExecutableAllocator.h"
#include "methodjit/CodeGenIncludes.h"
namespace js {
namespace mjit {
/*
 * Generates the small fixed assembly trampolines (force-return and its
 * fast variant) into executable memory owned by |tramps|.
 */
class TrampolineCompiler
{
    // A generator emits one trampoline's body into |masm|.
    typedef bool (*TrampolineGenerator)(Assembler &masm);

  public:
    TrampolineCompiler(JSC::ExecutableAllocator *alloc, Trampolines *tramps)
      : execAlloc(alloc), trampolines(tramps)
    { }

    // Compile all trampolines; false on failure.
    bool compile();

    // Free the code pools held by |tramps|.
    static void release(Trampolines *tramps);

  private:
    // Generate one trampoline into *pool and store its entry in *where.
    bool compileTrampoline(Trampolines::TrampolinePtr *where, JSC::ExecutablePool **pool,
                           TrampolineGenerator generator);

    /* Generators for trampolines. */
    static bool generateForceReturn(Assembler &masm);
#if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
    static bool generateForceReturnFast(Assembler &masm);
#endif

    JSC::ExecutableAllocator *execAlloc;
    Trampolines *trampolines;
};
} /* namespace mjit */
} /* namespace js */
#endif

Просмотреть файл

@ -1,310 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jstypes.h"
/*
* The MIPS VMFrame is 112 bytes as follows.
*
* 108 [ unused4 ] For alignment.
* 104 [ ra ]
* 100 [ gp ] If PIC code is generated, we will save gp.
* 96 [ s7 ]
* 92 [ s6 ]
* 88 [ s5 ]
* 84 [ s4 ]
* 80 [ s3 ]
* 76 [ s2 ]
* 72 [ s1 ]
* 68 [ s0 ]
* 64 [ stubRejoin ]
* 60 [ entrycode ]
* 56 [ entryfp ]
* 52 [ stkLimit ]
* 48 [ cx ]
* 44 [ regs.fp_ ]
* 40 [ regs.inlined_]
* 36 [ regs.pc ]
* 32 [ regs.sp ]
* 28 [ scratch ]
* 24 [ previous ]
* 20 [ args.ptr2 ] [ dynamicArgc ] (union)
* 16 [ args.ptr ] [ lazyArgsObj ] (union)
* 12 [ unused3 ] O32 ABI, space for a3 (used in callee)
* 8 [ unused2 ] O32 ABI, space for a2 (used in callee)
* 4 [ unused1 ] O32 ABI, space for a1 (used in callee)
* 0 [ unused0 ] O32 ABI, space for a0 (used in callee)
*/
/*
 * JaegerThrowpoline: landing pad for exceptions thrown from JIT code.
 * Calls js_InternalThrow with the VMFrame (sp/$29) as its argument; if it
 * returns a non-zero address, control jumps there (a scripted handler).
 * Otherwise the VMFrame is popped, callee-saved registers are restored,
 * and 0 is returned to represent an unhandled exception. Stack offsets
 * follow the 112-byte MIPS VMFrame layout documented at the top of this
 * file.
 */
asm (
    ".text" "\n"
    ".align 2" "\n"
    ".set noreorder" "\n"
    ".set nomacro" "\n"
    ".set nomips16" "\n"
    ".globl JaegerThrowpoline" "\n"
    ".ent JaegerThrowpoline" "\n"
    ".type JaegerThrowpoline,@function" "\n"
    "JaegerThrowpoline:" "\n"
#if defined(__PIC__)
    "lw $28,100($29)" "\n"
    "la $25,js_InternalThrow" "\n"
    ".reloc 1f,R_MIPS_JALR,js_InternalThrow" "\n"
    "1: jalr $25" "\n"
    "move $4,$29 # set up a0" "\n"
#else
    "jal js_InternalThrow" "\n"
    "move $4,$29 # set up a0" "\n"
#endif
    "beq $2,$0,1f" "\n"
    "nop" "\n"
    "jr $2 # jump to a scripted handler" "\n"
    "nop" "\n"
    "1:" "\n"
#if defined(__PIC__)
    "lw $28,100($29)" "\n"
    "la $25,PopActiveVMFrame" "\n"
    ".reloc 1f,R_MIPS_JALR,PopActiveVMFrame" "\n"
    "1: jalr $25" "\n"
    "move $4,$29 # set up a0" "\n"
#else
    "jal PopActiveVMFrame" "\n"
    "move $4,$29 # set up a0" "\n"
#endif
    "lw $31,104($29)" "\n"
#if defined(__PIC__)
    "lw $28,100($29)" "\n"
#endif
    "lw $23,96($29)" "\n"
    "lw $22,92($29)" "\n"
    "lw $21,88($29)" "\n"
    "lw $20,84($29)" "\n"
    "lw $19,80($29)" "\n"
    "lw $18,76($29)" "\n"
    "lw $17,72($29)" "\n"
    "lw $16,68($29)" "\n"
    "li $2,0 # return 0 to represent an unhandled exception." "\n"
    "jr $31" "\n"
    "addiu $29,$29,112" "\n"
    ".set reorder" "\n"
    ".set macro" "\n"
    ".end JaegerThrowpoline" "\n"
    ".size JaegerThrowpoline,.-JaegerThrowpoline" "\n"
);
/*
 * JaegerTrampoline: C++ -> JIT entry point. Builds the 112-byte MIPS
 * VMFrame (callee-saved registers plus the fields annotated below, per
 * the layout comment at the top of this file), preserves fp/code in
 * s0/s1 across the PushActiveVMFrame call, then jumps to the compiled
 * code. Arguments arrive in the O32 registers: a0=cx, a1=fp, a2=code,
 * a3=stackLimit (per the stores below).
 */
asm (
    ".text" "\n"
    ".align 2" "\n"
    ".set noreorder" "\n"
    ".set nomacro" "\n"
    ".set nomips16" "\n"
    ".globl JaegerTrampoline" "\n"
    ".ent JaegerTrampoline" "\n"
    ".type JaegerTrampoline,@function" "\n"
    "JaegerTrampoline:" "\n"
#if defined(__PIC__)
    "lui $28,%hi(_gp_disp)" "\n"
    "addiu $28,$28,%lo(_gp_disp)" "\n"
    "addu $28,$28,$25" "\n"
#endif
    "addiu $29,$29,-112" "\n"
    "sw $31,104($29)" "\n"
#if defined(__PIC__)
    "sw $28,100($29)" "\n"
#endif
    "sw $23,96($29)" "\n"
    "sw $22,92($29)" "\n"
    "sw $21,88($29)" "\n"
    "sw $20,84($29)" "\n"
    "sw $19,80($29)" "\n"
    "sw $18,76($29)" "\n"
    "sw $17,72($29)" "\n"
    "sw $16,68($29)" "\n"
    "sw $0,64($29) # stubRejoin" "\n"
    "sw $5,60($29) # entrycode" "\n"
    "sw $5,56($29) # entryfp" "\n"
    "sw $7,52($29) # stackLimit" "\n"
    "sw $4,48($29) # cx" "\n"
    "sw $5,44($29) # regs.fp" "\n"
    "move $16,$5 # preserve fp to s0" "\n"
    "move $17,$6 # preserve code to s1" "\n"
#if defined(__PIC__)
    "la $25,PushActiveVMFrame" "\n"
    ".reloc 1f,R_MIPS_JALR,PushActiveVMFrame" "\n"
    "1: jalr $25" "\n"
    "move $4,$29 # set up a0" "\n"
#else
    "jal PushActiveVMFrame" "\n"
    "move $4,$29 # set up a0" "\n"
#endif
    "move $25,$17 # move code to $25" "\n"
    "jr $25 # jump to the compiled JavaScript Function" "\n"
    "nop" "\n"
    ".set reorder" "\n"
    ".set macro" "\n"
    ".end JaegerTrampoline" "\n"
    ".size JaegerTrampoline,.-JaegerTrampoline" "\n"
);
/*
 * JaegerTrampolineReturn: JIT -> C++ return path. Stores the return value
 * components into fp->rval (s0/$16 holds fp; word order depends on
 * endianness), pops the VMFrame via PopActiveVMFrame, restores the
 * callee-saved registers saved by JaegerTrampoline, and returns 1 to
 * signal successful completion.
 *
 * Fix: corrected the misspelling "ture" -> "true" in the assembly comment
 * on the "li $2,1" line; assembler comments do not affect generated code.
 */
asm (
    ".text" "\n"
    ".align 2" "\n"
    ".set noreorder" "\n"
    ".set nomacro" "\n"
    ".set nomips16" "\n"
    ".globl JaegerTrampolineReturn" "\n"
    ".ent JaegerTrampolineReturn" "\n"
    ".type JaegerTrampolineReturn,@function" "\n"
    "JaegerTrampolineReturn:" "\n"
#if defined(IS_LITTLE_ENDIAN)
    "sw $4,28($16) # a0: fp->rval type for LITTLE-ENDIAN" "\n"
    "sw $6,24($16) # a2: fp->rval data for LITTLE-ENDIAN" "\n"
#else
    "sw $4,24($16) # a0: fp->rval type for BIG-ENDIAN" "\n"
    "sw $6,28($16) # a2: fp->rval data for BIG-ENDIAN" "\n"
#endif
#if defined(__PIC__)
    "lw $28,100($29)" "\n"
    "la $25,PopActiveVMFrame" "\n"
    ".reloc 1f,R_MIPS_JALR,PopActiveVMFrame" "\n"
    "1: jalr $25" "\n"
    "move $4,$29 # set up a0" "\n"
#else
    "jal PopActiveVMFrame" "\n"
    "move $4,$29 # set up a0" "\n"
#endif
    "lw $31,104($29)" "\n"
#if defined(__PIC__)
    "lw $28,100($29)" "\n"
#endif
    "lw $23,96($29)" "\n"
    "lw $22,92($29)" "\n"
    "lw $21,88($29)" "\n"
    "lw $20,84($29)" "\n"
    "lw $19,80($29)" "\n"
    "lw $18,76($29)" "\n"
    "lw $17,72($29)" "\n"
    "lw $16,68($29)" "\n"
    "li $2,1 # return true to indicate successful completion" "\n"
    "jr $31" "\n"
    "addiu $29,$29,112" "\n"
    ".set reorder" "\n"
    ".set macro" "\n"
    ".end JaegerTrampolineReturn" "\n"
    ".size JaegerTrampolineReturn,.-JaegerTrampolineReturn" "\n"
);
// JaegerStubVeneer (MIPS): small veneer that calls the target address passed
// in $2 (moved into $25 to match the call convention used elsewhere in this
// file), preserving $31 across the call. Allocates 24 bytes: 16 for the o32
// outgoing-argument area, 4 for alignment, 4 for $31. Written under
// ".set noreorder", so delay slots are filled explicitly with "nop".
asm (
".text" "\n"
".align 2" "\n"
".set noreorder" "\n"
".set nomacro" "\n"
".set nomips16" "\n"
".globl JaegerStubVeneer" "\n"
".ent JaegerStubVeneer" "\n"
".type JaegerStubVeneer,@function" "\n"
"JaegerStubVeneer:" "\n"
"addiu $29,$29,-24 # Need 16 (a0-a3) + 4 (align) + 4 ($31) bytes" "\n"
"sw $31,20($29) # Store $31 to 20($29)" "\n"
"move $25,$2 # the target address is passed from $2" "\n"
"jalr $25" "\n"
"nop" "\n"
"lw $31,20($29)" "\n"
"jr $31" "\n"
"addiu $29,$29,24" "\n"
".set reorder" "\n"
".set macro" "\n"
".end JaegerStubVeneer" "\n"
".size JaegerStubVeneer,.-JaegerStubVeneer" "\n"
);
// JaegerInterpolineScripted (MIPS): rejoin path used when returning from a
// scripted call. Loads f->prev_ into $16 (s0) and branches to
// JaegerInterpoline; the store updating f->regs->fp_ executes in the branch
// delay slot (".set noreorder" is in effect).
asm (
".text" "\n"
".align 2" "\n"
".set noreorder" "\n"
".set nomacro" "\n"
".set nomips16" "\n"
".globl JaegerInterpolineScripted" "\n"
".ent JaegerInterpolineScripted" "\n"
".type JaegerInterpolineScripted,@function" "\n"
"JaegerInterpolineScripted:" "\n"
"lw $16,16($16) # Load f->prev_" "\n"
"b JaegerInterpoline" "\n"
"sw $16,44($29) # Update f->regs->fp_" "\n"
".set reorder" "\n"
".set macro" "\n"
".end JaegerInterpolineScripted" "\n"
".size JaegerInterpolineScripted,.-JaegerInterpolineScripted" "\n"
);
// JaegerInterpoline (MIPS): rejoin-into-interpreter path. Shuffles the
// return type/data/register into the argument registers, calls
// js_InternalInterpret(returnData, returnType, returnReg, f); if it returns
// a code address, jumps there, otherwise pops the active VMFrame, restores
// callee-saved registers and returns 0 (failure) from the trampoline.
// Fix: comment typo "sctrach" -> "scratch".
asm (
".text" "\n"
".align 2" "\n"
".set noreorder" "\n"
".set nomacro" "\n"
".set nomips16" "\n"
".globl JaegerInterpoline" "\n"
".ent JaegerInterpoline" "\n"
".type JaegerInterpoline,@function" "\n"
"JaegerInterpoline:" "\n"
"move $5,$4 # returntype" "\n"
"move $4,$6 # returnData" "\n"
"move $6,$2 # returnReg" "\n"
#if defined(__PIC__)
"lw $28,100($29)" "\n"
"la $25,js_InternalInterpret" "\n"
".reloc 1f,R_MIPS_JALR,js_InternalInterpret" "\n"
"1: jalr $25" "\n"
"move $7,$29 # f" "\n"
#else
"jal js_InternalInterpret" "\n"
"move $7,$29 # f" "\n"
#endif
"lw $16,44($29) # Load f->regs->fp_ to s0" "\n"
#if defined(IS_LITTLE_ENDIAN)
"lw $4,28($16) # a0: fp->rval type for LITTLE-ENDIAN" "\n"
"lw $6,24($16) # a2: fp->rval data for LITTLE-ENDIAN" "\n"
#else
"lw $4,24($16) # a0: fp->rval type for BIG-ENDIAN" "\n"
"lw $6,28($16) # a2: fp->rval data for BIG-ENDIAN" "\n"
#endif
"lw $5,28($29) # Load scratch -> argc" "\n"
"beq $2,$0,1f" "\n"
"nop" "\n"
"jr $2" "\n"
"nop" "\n"
"1:" "\n"
#if defined(__PIC__)
"lw $28,100($29)" "\n"
"la $25,PopActiveVMFrame" "\n"
".reloc 1f,R_MIPS_JALR,PopActiveVMFrame" "\n"
"1: jalr $25" "\n"
"move $4,$29 # set up a0" "\n"
#else
"jal PopActiveVMFrame" "\n"
"move $4,$29 # set up a0" "\n"
#endif
"lw $31,104($29)" "\n"
#if defined(__PIC__)
"lw $28,100($29)" "\n"
#endif
"lw $23,96($29)" "\n"
"lw $22,92($29)" "\n"
"lw $21,88($29)" "\n"
"lw $20,84($29)" "\n"
"lw $19,80($29)" "\n"
"lw $18,76($29)" "\n"
"lw $17,72($29)" "\n"
"lw $16,68($29)" "\n"
"li $2,0 # return 0" "\n"
"jr $31" "\n"
"addiu $29,$29,112" "\n"
".set reorder" "\n"
".set macro" "\n"
".end JaegerInterpoline" "\n"
".size JaegerInterpoline,.-JaegerInterpoline" "\n"
);

Просмотреть файл

@ -1,229 +0,0 @@
; -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
; This Source Code Form is subject to the terms of the Mozilla Public
; License, v. 2.0. If a copy of the MPL was not distributed with this
; file, You can obtain one at http://mozilla.org/MPL/2.0/.
extern js_InternalThrow:PROC
extern PushActiveVMFrame:PROC
extern PopActiveVMFrame:PROC
extern js_InternalInterpret:PROC
.CODE
; JSBool JaegerTrampoline(JSContext *cx, StackFrame *fp, void *code,
; Value *stackLimit, void *safePoint);
; JaegerTrampoline: C++ -> JIT entry point (Win64 MASM). Saves all Win64
; callee-saved GPRs and xmm6-xmm15 with SEH unwind annotations, builds the
; VMFrame on the stack, calls PushActiveVMFrame, then jumps to the JIT code
; pointer stored at the top of the frame.
JaegerTrampoline PROC FRAME
push rbp
.PUSHREG rbp
mov rbp, rsp
.SETFRAME rbp, 0
push r12
.PUSHREG r12
push r13
.PUSHREG r13
push r14
.PUSHREG r14
push r15
.PUSHREG r15
push rdi
.PUSHREG rdi
push rsi
.PUSHREG rsi
push rbx
.PUSHREG rbx
; 10 16-byte slots for xmm6-xmm15 plus 8 bytes to keep 16-byte alignment.
sub rsp, 16*10+8
.ALLOCSTACK 168
; .SAVEXMM128 only supports 16 byte alignment offset
movdqa xmmword ptr [rsp], xmm6
.SAVEXMM128 xmm6, 0
movdqa xmmword ptr [rsp+16], xmm7
.SAVEXMM128 xmm7, 16
movdqa xmmword ptr [rsp+16*2], xmm8
.SAVEXMM128 xmm8, 32
movdqa xmmword ptr [rsp+16*3], xmm9
.SAVEXMM128 xmm9, 48
movdqa xmmword ptr [rsp+16*4], xmm10
.SAVEXMM128 xmm10, 64
movdqa xmmword ptr [rsp+16*5], xmm11
.SAVEXMM128 xmm11, 80
movdqa xmmword ptr [rsp+16*6], xmm12
.SAVEXMM128 xmm12, 96
movdqa xmmword ptr [rsp+16*7], xmm13
.SAVEXMM128 xmm13, 112
movdqa xmmword ptr [rsp+16*8], xmm14
.SAVEXMM128 xmm14, 128
movdqa xmmword ptr [rsp+16*9], xmm15
.SAVEXMM128 xmm15, 144
; stack alignment for Win64 ABI
sub rsp, 8
.ALLOCSTACK 8
.ENDPROLOG
; Load mask registers (used below in JaegerInterpoline to mask the rval
; type/payload words).
mov r13, 0ffff800000000000h
mov r14, 7fffffffffffh
; Build the JIT frame.
; rcx = cx
; rdx = fp
; r9 = inlineCallCount
; fp must go into rbx
push 0 ; stubRejoin
push rdx ; entry code
push rdx ; entryFp
push r9 ; inlineCallCount
push rcx ; cx
push rdx ; fp
mov rbx, rdx
; Space for the rest of the VMFrame.
sub rsp, 28h
; This is actually part of the VMFrame.
mov r10, [rbp+8*5+8]
push r10
; Set cx->regs and set the active frame. Save r8 and align frame in one
push r8
mov rcx, rsp
sub rsp, 20h
call PushActiveVMFrame
add rsp, 20h
; Jump into the JIT code.
jmp qword ptr [rsp]
JaegerTrampoline ENDP
; void JaegerTrampolineReturn();
; JaegerTrampolineReturn: epilogue for a successful return from JIT code.
; rdi/rsi carry the rval type/payload words; rbx holds fp (see the frame
; build in JaegerTrampoline above).
JaegerTrampolineReturn PROC FRAME
.ENDPROLOG
; Combine the two rval words and store to fp->rval at offset 30h.
or rsi, rdi
mov qword ptr [rbx+30h], rsi
; Call PopActiveVMFrame(frame) with 20h of Win64 shadow space.
sub rsp, 20h
lea rcx, [rsp+20h]
call PopActiveVMFrame
; Unwind the whole trampoline frame, then restore xmm6-xmm15 from just
; below the new rsp, followed by the callee-saved GPRs.
add rsp, 68h+20h+8+16*10+8
movdqa xmm6, xmmword ptr [rsp-16*10-8]
movdqa xmm7, xmmword ptr [rsp-16*9-8]
movdqa xmm8, xmmword ptr [rsp-16*8-8]
movdqa xmm9, xmmword ptr [rsp-16*7-8]
movdqa xmm10, xmmword ptr [rsp-16*6-8]
movdqa xmm11, xmmword ptr [rsp-16*5-8]
movdqa xmm12, xmmword ptr [rsp-16*4-8]
movdqa xmm13, xmmword ptr [rsp-16*3-8]
movdqa xmm14, xmmword ptr [rsp-16*2-8]
movdqa xmm15, xmmword ptr [rsp-16*1-8]
pop rbx
pop rsi
pop rdi
pop r15
pop r14
pop r13
pop r12
pop rbp
; Return 1 (success).
mov rax, 1
ret
JaegerTrampolineReturn ENDP
; void JaegerThrowpoline()
; JaegerThrowpoline: entered when JIT code throws. Calls js_InternalThrow;
; if it returns a code address, resume there, otherwise pop the active
; VMFrame, restore registers and return 0 (failure).
JaegerThrowpoline PROC FRAME
.ENDPROLOG
; For Windows x64 stub calls, we pad the stack by 32 before
; calling, so we must account for that here. See doStubCall.
lea rcx, [rsp+20h]
call js_InternalThrow
test rax, rax
je throwpoline_exit
add rsp, 20h
jmp rax
throwpoline_exit:
lea rcx, [rsp+20h]
call PopActiveVMFrame
; Same frame teardown as JaegerTrampolineReturn, but returning 0.
add rsp, 68h+20h+8+16*10+8
movdqa xmm6, xmmword ptr [rsp-16*10-8]
movdqa xmm7, xmmword ptr [rsp-16*9-8]
movdqa xmm8, xmmword ptr [rsp-16*8-8]
movdqa xmm9, xmmword ptr [rsp-16*7-8]
movdqa xmm10, xmmword ptr [rsp-16*6-8]
movdqa xmm11, xmmword ptr [rsp-16*5-8]
movdqa xmm12, xmmword ptr [rsp-16*4-8]
movdqa xmm13, xmmword ptr [rsp-16*3-8]
movdqa xmm14, xmmword ptr [rsp-16*2-8]
movdqa xmm15, xmmword ptr [rsp-16*1-8]
pop rbx
pop rsi
pop rdi
pop r15
pop r14
pop r13
pop r12
pop rbp
xor rax, rax
ret
JaegerThrowpoline ENDP
; JaegerInterpoline: rejoin-into-interpreter path. Passes the return
; type/data/register plus the VMFrame to js_InternalInterpret; jumps to the
; address it returns, or unwinds the trampoline frame and returns 0.
JaegerInterpoline PROC FRAME
.ENDPROLOG
mov rcx, rdi
mov rdx, rsi
lea r9, [rsp+20h]
mov r8, rax
call js_InternalInterpret
mov rbx, qword ptr [rsp+38h+20h] ; Load Frame
; r13/r14 are the masks loaded by JaegerTrampoline.
mov rsi, qword ptr [rbx+30h] ; Load rval payload
and rsi, r14 ; Mask rval payload
mov rdi, qword ptr [rbx+30h] ; Load rval type
and rdi, r13 ; Mask rval type
mov rcx, qword ptr [rsp+18h+20h] ; Load scratch -> argc
test rax, rax
je interpoline_exit
add rsp, 20h
jmp rax
interpoline_exit:
lea rcx, [rsp+20h]
call PopActiveVMFrame
; Same frame teardown as JaegerTrampolineReturn, but returning 0.
add rsp, 68h+20h+8+16*10+8
movdqa xmm6, xmmword ptr [rsp-16*10-8]
movdqa xmm7, xmmword ptr [rsp-16*9-8]
movdqa xmm8, xmmword ptr [rsp-16*8-8]
movdqa xmm9, xmmword ptr [rsp-16*7-8]
movdqa xmm10, xmmword ptr [rsp-16*6-8]
movdqa xmm11, xmmword ptr [rsp-16*5-8]
movdqa xmm12, xmmword ptr [rsp-16*4-8]
movdqa xmm13, xmmword ptr [rsp-16*3-8]
movdqa xmm14, xmmword ptr [rsp-16*2-8]
movdqa xmm15, xmmword ptr [rsp-16*1-8]
pop rbx
pop rsi
pop rdi
pop r15
pop r14
pop r13
pop r12
pop rbp
xor rax, rax
ret
JaegerInterpoline ENDP
; JaegerInterpolineScripted: rejoin path for returns from a scripted call.
; Pops back to the previous frame (rbx holds fp) before falling into
; JaegerInterpoline.
JaegerInterpolineScripted PROC FRAME
.ENDPROLOG
mov rbx, qword ptr [rbx+20h] ; Load prev
mov qword ptr [rsp+38h], rbx ; fp -> regs.fp
sub rsp, 20h
jmp JaegerInterpoline
JaegerInterpolineScripted ENDP
; JaegerInterpolinePatched: reserves the 20h of stack that JaegerInterpoline
; expects to discard, then tail-jumps to it.
JaegerInterpolinePatched PROC FRAME
sub rsp, 20h
.ALLOCSTACK 32
.ENDPROLOG
jmp JaegerInterpoline
JaegerInterpolinePatched ENDP
END

Просмотреть файл

@ -1,252 +0,0 @@
# -*- Mode: C++# tab-width: 4# indent-tabs-mode: nil# c-basic-offset: 4 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
.extern js_InternalThrow
.extern PushActiveVMFrame
.extern PopActiveVMFrame
.extern js_InternalInterpret
.text
.intel_syntax noprefix
# JSBool JaegerTrampoline(JSContext *cx, StackFrame *fp, void *code,
# Value *stackLimit, void *safePoint)#
# JaegerTrampoline: C++ -> JIT entry point (GAS/Intel-syntax port of the
# Win64 MASM trampoline). Saves the Win64 callee-saved GPRs and xmm6-xmm15,
# builds the VMFrame, calls PushActiveVMFrame, then jumps to the JIT code
# pointer at the top of the frame. The MASM unwind directives survive here
# only as comments; this build emits no SEH unwind info.
.globl JaegerTrampoline
.def JaegerTrampoline
.scl 3
.type 46
.endef
JaegerTrampoline:
push rbp
# .PUSHREG rbp
mov rbp, rsp
# .SETFRAME rbp, 0
push r12
# .PUSHREG r12
push r13
# .PUSHREG r13
push r14
# .PUSHREG r14
push r15
# .PUSHREG r15
push rdi
# .PUSHREG rdi
push rsi
# .PUSHREG rsi
push rbx
# .PUSHREG rbx
sub rsp, 16*10+8
# .ALLOCSTACK 168
# .SAVEXMM128 only supports 16 byte alignment offset
movdqa xmmword ptr [rsp], xmm6
# .SAVEXMM128 xmm6, 0
movdqa xmmword ptr [rsp+16], xmm7
# .SAVEXMM128 xmm7, 16
movdqa xmmword ptr [rsp+16*2], xmm8
# .SAVEXMM128 xmm8, 32
movdqa xmmword ptr [rsp+16*3], xmm9
# .SAVEXMM128 xmm9, 48
movdqa xmmword ptr [rsp+16*4], xmm10
# .SAVEXMM128 xmm10, 64
movdqa xmmword ptr [rsp+16*5], xmm11
# .SAVEXMM128 xmm11, 80
movdqa xmmword ptr [rsp+16*6], xmm12
# .SAVEXMM128 xmm12, 96
movdqa xmmword ptr [rsp+16*7], xmm13
# .SAVEXMM128 xmm13, 112
movdqa xmmword ptr [rsp+16*8], xmm14
# .SAVEXMM128 xmm14, 128
movdqa xmmword ptr [rsp+16*9], xmm15
# .SAVEXMM128 xmm15, 144
# stack alignment for Win64 ABI
sub rsp, 8
# .ALLOCSTACK 8
# .ENDPROLOG
# Load mask registers (used by JaegerInterpoline to mask rval type/payload)
mov r13, 0xffff800000000000
mov r14, 0x7fffffffffff
# Build the JIT frame.
# rcx = cx
# rdx = fp
# r9 = inlineCallCount
# fp must go into rbx
push 0 # stubRejoin
push rdx # entry code
push rdx # entryFp
push r9 # inlineCallCount
push rcx # cx
push rdx # fp
mov rbx, rdx
# Space for the rest of the VMFrame.
sub rsp, 0x28
# This is actually part of the VMFrame.
mov r10, [rbp+8*5+8]
push r10
# Set cx->regs and set the active frame. Save r8 and align frame in one
push r8
mov rcx, rsp
sub rsp, 0x20
call PushActiveVMFrame
add rsp, 0x20
# Jump into the JIT code.
jmp qword ptr [rsp]
# void JaegerTrampolineReturn()
# Epilogue for a successful return from JIT code: combines the rval type
# (rdi) and payload (rsi) words, stores them to fp->rval (rbx holds fp),
# pops the active VMFrame, restores xmm6-xmm15 and the callee-saved GPRs,
# and returns 1 (success).
.globl JaegerTrampolineReturn
.def JaegerTrampolineReturn
.scl 3
.type 46
.endef
JaegerTrampolineReturn:
# .ENDPROLOG
or rsi, rdi
mov qword ptr [rbx + 0x30], rsi
sub rsp, 0x20
lea rcx, [rsp+0x20]
call PopActiveVMFrame
add rsp, 0x68+0x20+8+16*10+8
movdqa xmm6, xmmword ptr [rsp-16*10-8]
movdqa xmm7, xmmword ptr [rsp-16*9-8]
movdqa xmm8, xmmword ptr [rsp-16*8-8]
movdqa xmm9, xmmword ptr [rsp-16*7-8]
movdqa xmm10, xmmword ptr [rsp-16*6-8]
movdqa xmm11, xmmword ptr [rsp-16*5-8]
movdqa xmm12, xmmword ptr [rsp-16*4-8]
movdqa xmm13, xmmword ptr [rsp-16*3-8]
movdqa xmm14, xmmword ptr [rsp-16*2-8]
movdqa xmm15, xmmword ptr [rsp-16*1-8]
pop rbx
pop rsi
pop rdi
pop r15
pop r14
pop r13
pop r12
pop rbp
mov rax, 1
ret
# void JaegerThrowpoline()
# Entered when JIT code throws: calls js_InternalThrow; if it returns a code
# address, resume there, otherwise pop the active VMFrame, restore registers
# and return 0 (failure).
.globl JaegerThrowpoline
# Fix: the .def record below previously named JaegerTrampoline (copy/paste
# error); the COFF symbol record must describe the symbol defined here.
.def JaegerThrowpoline
.scl 3
.type 46
.endef
JaegerThrowpoline:
# .ENDPROLOG
# For Windows x64 stub calls, we pad the stack by 32 before
# calling, so we must account for that here. See doStubCall.
lea rcx, [rsp+0x20]
call js_InternalThrow
test rax, rax
je throwpoline_exit
add rsp, 0x20
jmp rax
throwpoline_exit:
lea rcx, [rsp+0x20]
call PopActiveVMFrame
# Same frame teardown as JaegerTrampolineReturn, but returning 0.
add rsp, 0x68+0x20+8+16*10+8
movdqa xmm6, xmmword ptr [rsp-16*10-8]
movdqa xmm7, xmmword ptr [rsp-16*9-8]
movdqa xmm8, xmmword ptr [rsp-16*8-8]
movdqa xmm9, xmmword ptr [rsp-16*7-8]
movdqa xmm10, xmmword ptr [rsp-16*6-8]
movdqa xmm11, xmmword ptr [rsp-16*5-8]
movdqa xmm12, xmmword ptr [rsp-16*4-8]
movdqa xmm13, xmmword ptr [rsp-16*3-8]
movdqa xmm14, xmmword ptr [rsp-16*2-8]
movdqa xmm15, xmmword ptr [rsp-16*1-8]
pop rbx
pop rsi
pop rdi
pop r15
pop r14
pop r13
pop r12
pop rbp
xor rax, rax
ret
# JaegerInterpoline: rejoin-into-interpreter path. Passes the return
# type/data/register plus the VMFrame to js_InternalInterpret; jumps to the
# address it returns, or unwinds the trampoline frame and returns 0.
.globl JaegerInterpoline
.def JaegerInterpoline
.scl 3
.type 46
.endef
JaegerInterpoline:
#.ENDPROLOG
mov rcx, rdi
mov rdx, rsi
lea r9, [rsp+0x20]
mov r8, rax
call js_InternalInterpret
mov rbx, qword ptr [rsp+0x38+0x20] # Load Frame
# r13/r14 are the masks loaded by JaegerTrampoline.
mov rsi, qword ptr [rbx+0x30] # Load rval payload
and rsi, r14 # Mask rval payload
mov rdi, qword ptr [rbx+0x30] # Load rval type
and rdi, r13 # Mask rval type
mov rcx, qword ptr [rsp+0x18+0x20] # Load scratch -> argc
test rax, rax
je interpoline_exit
add rsp, 0x20
jmp rax
interpoline_exit:
lea rcx, [rsp+0x20]
call PopActiveVMFrame
# Same frame teardown as JaegerTrampolineReturn, but returning 0.
add rsp, 0x68+0x20+8+16*10+8
movdqa xmm6, xmmword ptr [rsp-16*10-8]
movdqa xmm7, xmmword ptr [rsp-16*9-8]
movdqa xmm8, xmmword ptr [rsp-16*8-8]
movdqa xmm9, xmmword ptr [rsp-16*7-8]
movdqa xmm10, xmmword ptr [rsp-16*6-8]
movdqa xmm11, xmmword ptr [rsp-16*5-8]
movdqa xmm12, xmmword ptr [rsp-16*4-8]
movdqa xmm13, xmmword ptr [rsp-16*3-8]
movdqa xmm14, xmmword ptr [rsp-16*2-8]
movdqa xmm15, xmmword ptr [rsp-16*1-8]
pop rbx
pop rsi
pop rdi
pop r15
pop r14
pop r13
pop r12
pop rbp
xor rax, rax
ret
# JaegerInterpolineScripted: rejoin path for returns from a scripted call.
# Pops back to the previous frame (rbx holds fp) before falling into
# JaegerInterpoline.
.globl JaegerInterpolineScripted
.def JaegerInterpolineScripted
.scl 3
.type 46
.endef
JaegerInterpolineScripted:
#.ENDPROLOG
mov rbx, qword ptr [rbx+0x20] # Load prev
mov qword ptr [rsp+0x38], rbx # fp -> regs.fp
sub rsp, 0x20
jmp JaegerInterpoline
# JaegerInterpolinePatched: reserves the 0x20 of stack that JaegerInterpoline
# expects to discard, then tail-jumps to it.
.globl JaegerInterpolinePatched
.def JaegerInterpolinePatched
.scl 3
.type 46
.endef
JaegerInterpolinePatched:
sub rsp, 0x20
#.ALLOCSTACK 32
#.ENDPROLOG
jmp JaegerInterpoline

Просмотреть файл

@ -1,137 +0,0 @@
/ -*- Mode: C++/ tab-width: 4/ indent-tabs-mode: nil/ c-basic-offset: 4 -*-
/ This Source Code Form is subject to the terms of the Mozilla Public
/ License, v. 2.0. If a copy of the MPL was not distributed with this
/ file, You can obtain one at http://mozilla.org/MPL/2.0/.
.text
/ JSBool JaegerTrampoline(JSContext *cx, StackFrame *fp, void *code,
/ Value *stackLimit)
/ JaegerTrampoline: C++ -> JIT entry point (SysV AMD64). Saves callee-saved
/ GPRs, loads the value type/payload mask registers, builds the VMFrame,
/ calls PushActiveVMFrame, then jumps to the JIT code pointer at the top of
/ the frame.
.global JaegerTrampoline
.type JaegerTrampoline, @function
JaegerTrampoline:
/* Prologue. */
pushq %rbp
movq %rsp, %rbp
/* Save non-volatile registers. */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
/* Load mask registers. */
movq $0xFFFF800000000000, %r13
movq $0x00007FFFFFFFFFFF, %r14
/* Build the JIT frame.
* rdi = cx
* rsi = fp
* rcx = inlineCallCount
* fp must go into rbx
*/
pushq $0x0 /* stubRejoin */
pushq %rsi /* entry code */
pushq %rsi /* entryfp */
pushq %rcx /* inlineCallCount */
pushq %rdi /* cx */
pushq %rsi /* fp */
movq %rsi, %rbx
/* Space for the rest of the VMFrame. */
subq $0x28, %rsp
/* This is actually part of the VMFrame. */
pushq %r8
/* Set cx->regs and set the active frame. Save rdx and align frame in one. */
pushq %rdx
movq %rsp, %rdi
call PushActiveVMFrame
/* Jump into the JIT'd code. */
jmp *0(%rsp)
.size JaegerTrampoline, . - JaegerTrampoline
/ void JaegerTrampolineReturn()
/ Epilogue for a successful return from JIT code: combines the rval type
/ (rdi) and payload (rsi) words, stores them to fp->rval (rbx holds fp),
/ pops the active VMFrame, restores callee-saved registers and returns 1.
.global JaegerTrampolineReturn
.type JaegerTrampolineReturn, @function
JaegerTrampolineReturn:
or %rdi, %rsi
/ Fix: this store previously named %rsx, which is not an x86-64 register
/ and cannot assemble. The combined rval lives in %rsi (destination of the
/ "or" above), matching "mov qword ptr [rbx+30h], rsi" in the MASM version.
movq %rsi, 0x30(%rbx)
movq %rsp, %rdi
call PopActiveVMFrame
addq $0x68, %rsp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
movq $1, %rax
ret
.size JaegerTrampolineReturn, . - JaegerTrampolineReturn
/ void *JaegerThrowpoline(js::VMFrame *vmFrame)
/ Entered when JIT code throws: calls js_InternalThrow with the VMFrame; if
/ it returns a code address, resume there, otherwise pop the active VMFrame,
/ restore registers and return 0 (failure).
.global JaegerThrowpoline
.type JaegerThrowpoline, @function
JaegerThrowpoline:
movq %rsp, %rdi
call js_InternalThrow
testq %rax, %rax
je throwpoline_exit
jmp *%rax
throwpoline_exit:
movq %rsp, %rdi
call PopActiveVMFrame
addq $0x68, %rsp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
xorq %rax,%rax
ret
.size JaegerThrowpoline, . - JaegerThrowpoline
/ void JaegerInterpoline()
/ Rejoin-into-interpreter path: hands the VMFrame and return register to
/ js_InternalInterpret, reloads fp->rval (masked with the r13/r14 masks set
/ up by JaegerTrampoline), then either jumps to the returned address or
/ unwinds the trampoline frame and returns 0.
.global JaegerInterpoline
.type JaegerInterpoline, @function
JaegerInterpoline:
movq %rsp, %rcx
movq %rax, %rdx
call js_InternalInterpret
movq 0x38(%rsp), %rbx /* Load frame */
movq 0x30(%rbx), %rsi /* Load rval payload */
and %r14, %rsi /* Mask rval payload */
movq 0x30(%rbx), %rdi /* Load rval type */
and %r13, %rdi /* Mask rval type */
movq 0x18(%rsp), %rcx /* Load scratch -> argc */
testq %rax, %rax
je interpoline_exit
jmp *%rax
interpoline_exit:
movq %rsp, %rdi
call PopActiveVMFrame
addq $0x68, %rsp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
xorq %rax,%rax
ret
.size JaegerInterpoline, . - JaegerInterpoline
/ void JaegerInterpolineScripted()
/ Rejoin path for returns from a scripted call: pops back to the previous
/ frame (rbx holds fp) before falling into JaegerInterpoline.
.global JaegerInterpolineScripted
.type JaegerInterpolineScripted, @function
JaegerInterpolineScripted:
movq 0x20(%rbx), %rbx /* load prev */
movq %rbx, 0x38(%rsp) /* update regs.fp in the VMFrame */
jmp JaegerInterpoline
.size JaegerInterpolineScripted, . - JaegerInterpolineScripted

Просмотреть файл

@ -1,146 +0,0 @@
/ -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
/ This Source Code Form is subject to the terms of the Mozilla Public
/ License, v. 2.0. If a copy of the MPL was not distributed with this
/ file, You can obtain one at http://mozilla.org/MPL/2.0/.
.text
/ JSBool JaegerTrampoline(JSContext *cx, StackFrame *fp, void *code,
/ Value *stackLimit)
/ JaegerTrampoline: C++ -> JIT entry point (Solaris/Sun Studio x86). Saves
/ callee-saved registers, pushes the VMFrame fields from the stack-passed
/ arguments, calls PushActiveVMFrame (cdecl - no fastcall here), then jumps
/ to the JIT code pointer.
.global JaegerTrampoline
.type JaegerTrampoline, @function
JaegerTrampoline:
/* Prologue. */
pushl %ebp
movl %esp, %ebp
/* Save non-volatile registers. */
pushl %esi
pushl %edi
pushl %ebx
/* Build the JIT frame. Push fields in order, */
/* then align the stack to form esp == VMFrame. */
movl 12(%ebp), %ebx /* load fp */
pushl %ebx /* unused1 */
pushl %ebx /* unused0 */
pushl $0x0 /* stubRejoin */
pushl %ebx /* entry code */
pushl %ebx /* entryfp */
pushl 20(%ebp) /* stackLimit */
pushl 8(%ebp) /* cx */
pushl %ebx /* fp */
subl $0x1C, %esp
/* Jump into the JIT'd code. */
/* No fastcall for Sun Studio. */
pushl %esp
call PushActiveVMFrame
popl %edx
movl 28(%esp), %ebp /* load fp for JIT code */
jmp *88(%esp)
.size JaegerTrampoline, . - JaegerTrampoline
/ void JaegerTrampolineReturn()
/ Epilogue for a successful return from JIT code: stores the rval words
/ (%esi at 0x18, %edi at 0x1C of fp - the same offsets JaegerInterpoline
/ reloads below), pops the active VMFrame, restores registers, returns 1.
.global JaegerTrampolineReturn
.type JaegerTrampolineReturn, @function
JaegerTrampolineReturn:
movl %esi, 0x18(%ebp)
movl %edi, 0x1C(%ebp)
movl %esp, %ebp
addl $0x48, %ebp
pushl %esp
call PopActiveVMFrame
addl $0x40, %esp
popl %ebx
popl %edi
popl %esi
popl %ebp
movl $1, %eax
ret
.size JaegerTrampolineReturn, . - JaegerTrampolineReturn
/ void *JaegerThrowpoline(js::VMFrame *vmFrame)
/ Entered when JIT code throws: calls js_InternalThrow (cdecl, stack arg);
/ if it returns a code address, resume there, otherwise pop the active
/ VMFrame, restore registers and return 0 (failure).
.global JaegerThrowpoline
.type JaegerThrowpoline, @function
JaegerThrowpoline:
/* For Sun Studio there is no fast call. */
/* We add the stack by 16 before. */
addl $0x10, %esp
/* Align the stack to 16 bytes. */
pushl %esp
pushl (%esp)
pushl (%esp)
pushl (%esp)
call js_InternalThrow
/* Bump the stack by 0x2c, as in the basic trampoline, but */
/* also one more word to clean up the stack for jsl_InternalThrow,*/
/* and another to balance the alignment above. */
addl $0x10, %esp
testl %eax, %eax
je throwpoline_exit
jmp *%eax
throwpoline_exit:
pushl %esp
call PopActiveVMFrame
addl $0x40, %esp
popl %ebx
popl %edi
popl %esi
popl %ebp
xorl %eax, %eax
ret
.size JaegerThrowpoline, . - JaegerThrowpoline
/ void JaegerInterpoline()
/ Rejoin-into-interpreter path: calls js_InternalInterpret with the return
/ data/type/register and the VMFrame (all on the stack - no fastcall),
/ reloads fp->rval, then either jumps to the returned address or unwinds
/ and returns 0.
.global JaegerInterpoline
.type JaegerInterpoline, @function
JaegerInterpoline:
/* For Sun Studio there is no fast call. */
/* We add the stack by 16 before. */
addl $0x10, %esp
/* Align the stack to 16 bytes. */
pushl %esp
pushl %eax
pushl %edi
pushl %esi
call js_InternalInterpret
addl $0x10, %esp
movl 0x1C(%esp), %ebp /* Load frame */
movl 0x18(%ebp), %esi /* Load rval payload */
movl 0x1C(%ebp), %edi /* Load rval type */
movl 0xC(%esp), %ecx /* Load scratch -> argc, for any scripted call */
testl %eax, %eax
je interpoline_exit
jmp *%eax
interpoline_exit:
pushl %esp
call PopActiveVMFrame
addl $0x40, %esp
popl %ebx
popl %edi
popl %esi
popl %ebp
xorl %eax, %eax
ret
.size JaegerInterpoline, . - JaegerInterpoline
/ void JaegerInterpolineScripted()
/ Rejoin path for returns from a scripted call: pops back to the previous
/ frame and updates regs.fp before falling into JaegerInterpoline.
.global JaegerInterpolineScripted
.type JaegerInterpolineScripted, @function
JaegerInterpolineScripted:
movl 0x10(%ebp), %ebp /* load prev */
movl %ebp, 0x1C(%esp) /* update regs.fp */
subl $0x10, %esp
jmp JaegerInterpoline
.size JaegerInterpolineScripted, . - JaegerInterpolineScripted
/ void JaegerInterpolinePatched()
/ Reserves the 0x10 of stack that JaegerInterpoline immediately discards,
/ then tail-jumps to it.
.global JaegerInterpolinePatched
.type JaegerInterpolinePatched, @function
JaegerInterpolinePatched:
subl $0x10, %esp
jmp JaegerInterpoline
.size JaegerInterpolinePatched, . - JaegerInterpolinePatched

Просмотреть файл

@ -1,132 +0,0 @@
! -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
! This Source Code Form is subject to the terms of the Mozilla Public
! License, v. 2.0. If a copy of the MPL was not distributed with this
! file, You can obtain one at http://mozilla.org/MPL/2.0/.
.text
! JSBool JaegerTrampoline(JSContext *cx, JSStackFrame *fp, void *code,
! , uintptr_t inlineCallCount)
! JaegerTrampoline (SPARC): C++ -> JIT entry point. Uses a register window
! (save) for the frame, stores the VMFrame fields below %fp, calls
! PushActiveVMFrame, then jumps to the code pointer in %i2. Instructions
! after "call"/"jmp" execute in the delay slot.
.global JaegerTrampoline
.type JaegerTrampoline, #function
JaegerTrampoline:
save %sp,-168,%sp
st %i1, [%fp - 36] ! fp
st %i0, [%fp - 32] ! cx
st %i3, [%fp - 28] ! stackLimit
st %i1, [%fp - 24] ! entryFp
st %i1, [%fp - 20] ! entry code
st %g0, [%fp - 16] ! stubRejoin
call PushActiveVMFrame
mov %sp, %o0
ld [%fp - 36], %l0 ! fp
jmp %i2
st %i7, [%fp - 12] ! return address
.size JaegerTrampoline, . - JaegerTrampoline
! void JaegerTrampolineReturn()
! Epilogue for a successful return from JIT code: stores the rval words
! held in %l2/%l3 into fp->rval (%l0 holds fp), pops the active VMFrame,
! restores the saved return address and returns 1.
.global JaegerTrampolineReturn
.type JaegerTrampolineReturn, #function
JaegerTrampolineReturn:
st %l2, [%l0 + 0x18] /* fp->rval type */
st %l3, [%l0 + 0x1c] /* fp->rval data */
call PopActiveVMFrame
mov %sp, %o0
ld [%fp - 12], %i7 ! return address
mov 1, %i0
ret
restore
.size JaegerTrampolineReturn, . - JaegerTrampolineReturn
! void *JaegerThrowpoline(js::VMFrame *vmFrame)
! Entered when JIT code throws: calls js_InternalThrow with the VMFrame; if
! it returns a code address, jump there. Otherwise flush the register
! windows ("ta 3"), reload the locals/ins saved in the frame, pop the
! active VMFrame and return 0 (failure).
.global JaegerThrowpoline
.type JaegerThrowpoline, #function
JaegerThrowpoline:
call js_InternalThrow
mov %sp,%o0
tst %o0
be throwpoline_exit
nop
jmp %o0
nop
throwpoline_exit:
ta 3
mov %sp, %o2
mov %fp, %o3
ldd [%o2 + (0*8)], %l0
ldd [%o2 + (1*8)], %l2
ldd [%o2 + (2*8)], %l4
ldd [%o2 + (3*8)], %l6
ldd [%o2 + (4*8)], %i0
ldd [%o2 + (5*8)], %i2
ldd [%o2 + (6*8)], %i4
ldd [%o2 + (7*8)], %i6
ld [%o3 - 12], %i7 ! return address
mov %o2, %sp
call PopActiveVMFrame
mov %sp, %o0
clr %i0
ret
restore
.size JaegerThrowpoline, . - JaegerThrowpoline
! void JaegerInterpolineScripted()
! Rejoin path for returns from a scripted call: pops back to the previous
! frame and updates f->regs->fp_ before entering JaegerInterpoline.
.global JaegerInterpolineScripted
.type JaegerInterpolineScripted, #function
JaegerInterpolineScripted:
ld [%l0 + 0x10], %l0 /* Load f->prev_ */
st %l0, [%fp - 36] /* Update f->regs->fp_ */
ba interpoline_enter
nop
.size JaegerInterpolineScripted, . - JaegerInterpolineScripted
! void JaegerInterpoline()
! Rejoin-into-interpreter path: calls js_InternalInterpret with the return
! data/type/register and the VMFrame, reloads fp->rval, then either jumps
! to the returned address or flushes register windows, restores state and
! returns 0.
.global JaegerInterpoline
.type JaegerInterpoline, #function
JaegerInterpoline:
interpoline_enter:
mov %o0,%o2
mov %l3,%o0
mov %l2,%o1
call js_InternalInterpret
mov %sp,%o3
ld [%fp - 36], %l0
ld [%l0 + 0x18], %l2 /* fp->rval type */
ld [%l0 + 0x1c], %l3 /* fp->rval data */
ld [%fp - 48], %l4
tst %o0
be interpoline_exit
nop
jmp %o0
nop
interpoline_exit:
ta 3
mov %sp, %o2
mov %fp, %o3
ldd [%o2 + (0*8)], %l0
ldd [%o2 + (1*8)], %l2
ldd [%o2 + (2*8)], %l4
ldd [%o2 + (3*8)], %l6
ldd [%o2 + (4*8)], %i0
ldd [%o2 + (5*8)], %i2
ldd [%o2 + (6*8)], %i4
ldd [%o2 + (7*8)], %i6
ld [%o3 - 12], %i7 ! return address
mov %o2, %sp
call PopActiveVMFrame
mov %sp, %o0
clr %i0
ret
restore
.size JaegerInterpoline, . - JaegerInterpoline
! void JaegerStubVeneer()
! Minimal veneer for stub calls: calls the target address held in %i0, then
! reloads a continuation address from the frame and jumps to it. The "nop"s
! fill the call/jmp delay slots.
.global JaegerStubVeneer
.type JaegerStubVeneer, #function
JaegerStubVeneer:
call %i0
nop
ld [%fp - 8], %g2
jmp %g2
nop
.size JaegerStubVeneer, . - JaegerStubVeneer

Просмотреть файл

@ -1,409 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef js_typedarray_ic_h___
#define js_typedarray_ic_h___
#include "jscntxt.h"
#include "jstypedarray.h"
#include "vm/NumericConversions.h"
#include "jsnuminlines.h"
#include "jstypedarrayinlines.h"
namespace js {
namespace mjit {
#ifdef JS_METHODJIT_TYPED_ARRAY
typedef JSC::MacroAssembler::RegisterID RegisterID;
typedef JSC::MacroAssembler::FPRegisterID FPRegisterID;
typedef JSC::MacroAssembler::Jump Jump;
typedef JSC::MacroAssembler::Imm32 Imm32;
typedef JSC::MacroAssembler::ImmDouble ImmDouble;
// Fold a value whose type is statically known into a double constant ahead
// of a float-typed-array store. Objects/undefined become NaN and null
// becomes +0 even when the remat is not a compile-time constant; the other
// types are folded only when |vr| is constant. Returns false only when a
// string -> number conversion fails; otherwise true (including all of the
// "nothing to fold" cases).
static inline bool
ConstantFoldForFloatArray(JSContext *cx, ValueRemat *vr)
{
    if (!vr->isTypeKnown())
        return true;

    switch (vr->knownType()) {
      case JSVAL_TYPE_OBJECT:
      case JSVAL_TYPE_UNDEFINED:
        // Objects and undefined coerce to NaN.
        *vr = ValueRemat::FromConstant(DoubleValue(js_NaN));
        return true;
      case JSVAL_TYPE_NULL:
        // Null converts to +0.
        *vr = ValueRemat::FromConstant(DoubleValue(0));
        return true;
      default:
        break;
    }

    // Everything below needs an actual constant that isn't already a double.
    if (!vr->isConstant() || vr->knownType() == JSVAL_TYPE_DOUBLE)
        return true;

    double d = 0;
    const Value v = vr->value();
    if (v.isString()) {
        if (!StringToNumberType<double>(cx, v.toString(), &d))
            return false;
    } else if (v.isBoolean()) {
        d = v.toBoolean() ? 1 : 0;
    } else if (v.isInt32()) {
        d = v.toInt32();
    } else {
        JS_NOT_REACHED("unknown constant type");
    }
    *vr = ValueRemat::FromConstant(DoubleValue(d));
    return true;
}
// Fold a value whose type is statically known into an int32 constant ahead
// of an integer-typed-array store. Objects, undefined and null all fold to
// 0 (objects/undefined coerce to NaN, which coerces to 0) even when the
// remat is not a compile-time constant. For Uint8Clamped arrays the folded
// value is clamped. Returns false only when a string -> number conversion
// fails; otherwise true.
static inline bool
ConstantFoldForIntArray(JSContext *cx, JSObject *tarray, ValueRemat *vr)
{
    if (!vr->isTypeKnown())
        return true;

    switch (vr->knownType()) {
      case JSVAL_TYPE_OBJECT:
      case JSVAL_TYPE_UNDEFINED:
      case JSVAL_TYPE_NULL:
        *vr = ValueRemat::FromConstant(Int32Value(0));
        return true;
      default:
        break;
    }

    if (!vr->isConstant())
        return true;

    const bool clamp = TypedArray::type(tarray) == js::TypedArray::TYPE_UINT8_CLAMPED;

    // Convert from string to double first (see bug 624483).
    Value v = vr->value();
    if (v.isString()) {
        double d;
        if (!StringToNumberType<double>(cx, v.toString(), &d))
            return false;
        v.setNumber(d);
    }

    int32_t folded = 0;
    if (v.isDouble()) {
        folded = clamp ? ClampDoubleToUint8(v.toDouble()) : ToInt32(v.toDouble());
    } else if (v.isInt32()) {
        folded = clamp ? ClampIntForUint8Array(v.toInt32()) : v.toInt32();
    } else if (v.isBoolean()) {
        folded = v.toBoolean() ? 1 : 0;
    } else {
        JS_NOT_REACHED("unknown constant type");
    }
    *vr = ValueRemat::FromConstant(Int32Value(folded));
    return true;
}
// Generate code that will ensure a dynamically typed value, pinned in |vr|,
// can be stored in an integer typed array. If any sort of conversion is
// required, |dataReg| will be clobbered by a new value. |saveMask| is
// used to ensure that |dataReg| (and volatile registers) are preserved
// across any conversion process.
static void
GenConversionForIntArray(Assembler &masm, JSObject *tarray, const ValueRemat &vr,
uint32_t saveMask)
{
if (vr.isConstant()) {
// Constants are always folded to ints up-front.
JS_ASSERT(vr.knownType() == JSVAL_TYPE_INT32);
return;
}
// Emit the out-of-line conversion only when the value might not already be
// an int32; a known-int32 remat needs (at most) the clamping below.
if (!vr.isTypeKnown() || vr.knownType() != JSVAL_TYPE_INT32) {
// If a conversion is necessary, save registers now.
MaybeJump checkInt32;
if (!vr.isTypeKnown())
checkInt32 = masm.testInt32(Assembler::Equal, vr.typeReg());
// Store the value to convert.
StackMarker vp = masm.allocStack(sizeof(Value), sizeof(double));
masm.storeValue(vr, masm.addressOfExtra(vp));
// Preserve volatile registers.
PreserveRegisters saveForCall(masm);
saveForCall.preserve(saveMask & Registers::TempRegs);
// Call the stub ConvertToTypedInt<clamped>(cx, vp); the template flag
// selects Uint8Clamped semantics.
masm.setupABICall(Registers::FastCall, 2);
masm.storeArg(0, masm.vmFrameOffset(offsetof(VMFrame, cx)));
masm.storeArgAddr(1, masm.addressOfExtra(vp));
typedef int32_t (JS_FASTCALL *Int32CxVp)(JSContext *, Value *);
Int32CxVp stub;
if (TypedArray::type(tarray) == js::TypedArray::TYPE_UINT8_CLAMPED)
stub = stubs::ConvertToTypedInt<true>;
else
stub = stubs::ConvertToTypedInt<false>;
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, stub), false);
// Move the converted result into the value's data register.
if (vr.dataReg() != Registers::ReturnReg)
masm.move(Registers::ReturnReg, vr.dataReg());
saveForCall.restore();
masm.freeStack(vp);
// Already-int32 values jump here, past the conversion call.
if (checkInt32.isSet())
checkInt32.get().linkTo(masm.label(), &masm);
}
// Performing clamping, if needed.
if (TypedArray::type(tarray) == js::TypedArray::TYPE_UINT8_CLAMPED)
masm.clampInt32ToUint8(vr.dataReg());
}
// Generate code that will ensure a dynamically typed value, pinned in |vr|,
// can be stored in an integer typed array. saveMask| is used to ensure that
// |dataReg| (and volatile registers) are preserved across any conversion
// process.
//
// Constants are left untouched. Any other value is placed into destReg.
static void
GenConversionForFloatArray(Assembler &masm, JSObject *tarray, const ValueRemat &vr,
FPRegisterID destReg, uint32_t saveMask)
{
if (vr.isConstant()) {
// Constants are always folded to doubles up-front.
JS_ASSERT(vr.knownType() == JSVAL_TYPE_DOUBLE);
return;
}
// Fast-path, if the value is a double, skip converting.
MaybeJump isDouble;
if (!vr.isTypeKnown())
isDouble = masm.testDouble(Assembler::Equal, vr.typeReg());
// If the value is an integer, inline the conversion.
MaybeJump skip1, skip2;
if (!vr.isTypeKnown() || vr.knownType() == JSVAL_TYPE_INT32) {
MaybeJump isNotInt32;
if (!vr.isTypeKnown())
isNotInt32 = masm.testInt32(Assembler::NotEqual, vr.typeReg());
masm.convertInt32ToDouble(vr.dataReg(), destReg);
if (isNotInt32.isSet()) {
// Converted inline; jump over the generic-call path below.
skip1 = masm.jump();
isNotInt32.get().linkTo(masm.label(), &masm);
}
}
// Generate a generic conversion call, if not known to be int32_t or double.
if (!vr.isTypeKnown() ||
(vr.knownType() != JSVAL_TYPE_INT32 &&
vr.knownType() != JSVAL_TYPE_DOUBLE)) {
// Store this value, which is also an outparam.
StackMarker vp = masm.allocStack(sizeof(Value), sizeof(double));
masm.storeValue(vr, masm.addressOfExtra(vp));
// Preserve volatile registers, and make the call.
PreserveRegisters saveForCall(masm);
saveForCall.preserve(saveMask & Registers::TempRegs);
masm.setupABICall(Registers::FastCall, 2);
masm.storeArg(0, masm.vmFrameOffset(offsetof(VMFrame, cx)));
masm.storeArgAddr(1, masm.addressOfExtra(vp));
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, stubs::ConvertToTypedFloat), false);
saveForCall.restore();
// Load the value from the outparam, then pop the stack.
masm.loadDouble(masm.addressOfExtra(vp), destReg);
masm.freeStack(vp);
skip2 = masm.jump();
}
if (isDouble.isSet())
isDouble.get().linkTo(masm.label(), &masm);
// If it's possible the value was already a double, load it directly
// from registers (the known type is distinct from typeReg, which has
// 32-bits of the 64-bit double).
if (!vr.isTypeKnown() || vr.knownType() == JSVAL_TYPE_DOUBLE)
masm.fastLoadDouble(vr.dataReg(), vr.typeReg(), destReg);
// At this point, all loads into xmm1 are complete.
if (skip1.isSet())
skip1.get().linkTo(masm.label(), &masm);
if (skip2.isSet())
skip2.get().linkTo(masm.label(), &masm);
// Float32 arrays store single precision; narrow the double in place.
if (TypedArray::type(tarray) == js::TypedArray::TYPE_FLOAT32)
masm.convertDoubleToFloat(destReg, destReg);
}
// Emit code to store the value |vrIn| into the element of the typed array
// |tarray| addressed by |address|.
//
// Integer-family arrays may require an int32 conversion call, clamping
// (UINT8_CLAMPED only) and/or moving the RHS into a single-byte-addressable
// register; float arrays convert the RHS to double through a temporary FP
// register (and down to float for FLOAT32 arrays inside the conversion
// helper). |saveMask| names the live volatile registers that must survive
// any conversion call.
//
// Returns false only on OOM during constant folding of a constant RHS.
template <typename T>
static bool
StoreToTypedArray(JSContext *cx, Assembler &masm, JSObject *tarray, T address,
                  const ValueRemat &vrIn, uint32_t saveMask)
{
    // Work on a copy: the code below may re-home or constant-fold the RHS.
    ValueRemat vr = vrIn;

    uint32_t type = TypedArray::type(tarray);
    switch (type) {
      case js::TypedArray::TYPE_INT8:
      case js::TypedArray::TYPE_UINT8:
      case js::TypedArray::TYPE_UINT8_CLAMPED:
      case js::TypedArray::TYPE_INT16:
      case js::TypedArray::TYPE_UINT16:
      case js::TypedArray::TYPE_INT32:
      case js::TypedArray::TYPE_UINT32:
      {
        // Fold a constant RHS down to an int32 now (can OOM).
        if (!ConstantFoldForIntArray(cx, tarray, &vr))
            return false;

        PreserveRegisters saveRHS(masm);
        PreserveRegisters saveLHS(masm);

        // There are three tricky situations to handle:
        //
        //   (1) The RHS needs conversion. saveMask will be stomped, and
        //       the RHS may need to be stomped.
        //   (2) The RHS may need to be clamped, which clobbers it.
        //   (3) The RHS may need to be in a single-byte register.
        //
        // In all of these cases, we try to find a free register that can be
        // used to mutate the RHS. Failing that, we evict an existing volatile
        // register.
        //
        // Note that we are careful to preserve the RHS before saving registers
        // for the conversion call. This is because the object and key may be
        // in temporary registers, and we want to restore those without killing
        // the mutated RHS.
        bool singleByte = (type == js::TypedArray::TYPE_INT8 ||
                           type == js::TypedArray::TYPE_UINT8 ||
                           type == js::TypedArray::TYPE_UINT8_CLAMPED);
        bool mayNeedConversion = (!vr.isTypeKnown() || vr.knownType() != JSVAL_TYPE_INT32);
        bool mayNeedClamping = !vr.isConstant() && (type == js::TypedArray::TYPE_UINT8_CLAMPED);
        bool needsSingleByteReg = singleByte &&
                                  !vr.isConstant() &&
                                  !(Registers::SingleByteRegs & Registers::maskReg(vr.dataReg()));
        bool rhsIsMutable = !vr.isConstant() && !(saveMask & Registers::maskReg(vr.dataReg()));

        if (((mayNeedConversion || mayNeedClamping) && !rhsIsMutable) || needsSingleByteReg) {
            // First attempt to find a free temporary register that:
            //   - is compatible with the RHS constraints
            //   - won't clobber the key, object, or RHS type regs
            //   - is temporary, but
            //   - is not in saveMask, which contains live volatile registers.
            uint32_t allowMask = Registers::AvailRegs;
            if (singleByte)
                allowMask &= Registers::SingleByteRegs;

            // Create a mask of registers we absolutely cannot clobber.
            uint32_t pinned = Assembler::maskAddress(address);
            if (!vr.isTypeKnown())
                pinned |= Registers::maskReg(vr.typeReg());

            Registers avail = allowMask & ~(pinned | saveMask);

            RegisterID newReg;
            if (!avail.empty()) {
                newReg = avail.takeAnyReg().reg();
            } else {
                // If no registers meet the ideal set, relax a constraint and spill.
                avail = allowMask & ~pinned;

                if (!avail.empty()) {
                    newReg = avail.takeAnyReg().reg();
                    saveRHS.preserve(Registers::maskReg(newReg));
                } else {
                    // Oh no! *All* single byte registers are pinned. This
                    // sucks. We'll swap the type and data registers in |vr|
                    // and unswap them later.

                    // If |vr|'s registers are part of the address, swapping is
                    // going to cause problems during the store.
                    uint32_t vrRegs = Registers::mask2Regs(vr.dataReg(), vr.typeReg());
                    uint32_t lhsMask = vrRegs & Assembler::maskAddress(address);

                    // We'll also need to save any of the registers which won't
                    // be restored via |lhsMask| above.
                    uint32_t rhsMask = vrRegs & ~lhsMask;

                    // Push them, but get the order right. We'll pop LHS first.
                    saveRHS.preserve(rhsMask);
                    saveLHS.preserve(lhsMask);

                    // Don't store/restore registers if we don't have to.
                    saveMask &= ~lhsMask;

                    // Actually perform the swap; |vr| is updated to match the
                    // new physical locations of its type and data halves.
                    masm.swap(vr.typeReg(), vr.dataReg());
                    vr = ValueRemat::FromRegisters(vr.dataReg(), vr.typeReg());
                    newReg = vr.dataReg();
                }

                // Now, make sure the new register is not in the saveMask,
                // so it won't get restored right after the call.
                saveMask &= ~Registers::maskReg(newReg);
            }

            if (vr.dataReg() != newReg)
                masm.move(vr.dataReg(), newReg);

            // Update |vr| so later code sees the RHS in its new register.
            if (vr.isTypeKnown())
                vr = ValueRemat::FromKnownType(vr.knownType(), newReg);
            else
                vr = ValueRemat::FromRegisters(vr.typeReg(), newReg);
        }

        // Convert (and clamp, if needed) the RHS to an int32 in place.
        GenConversionForIntArray(masm, tarray, vr, saveMask);

        // Restore the registers in |address|. |GenConversionForIntArray| won't
        // restore them because we told it not to by fiddling with |saveMask|.
        saveLHS.restore();

        if (vr.isConstant())
            masm.storeToTypedIntArray(type, Imm32(vr.value().toInt32()), address);
        else
            masm.storeToTypedIntArray(type, vr.dataReg(), address);

        // Note that this will finish restoring the damage from the
        // earlier register swap.
        saveRHS.restore();
        break;
      }

      case js::TypedArray::TYPE_FLOAT32:
      case js::TypedArray::TYPE_FLOAT64: {
        /*
         * Use a temporary for conversion. Inference is disabled, so no FP
         * registers are live.
         */
        Registers regs(Registers::TempFPRegs);
        FPRegisterID temp = regs.takeAnyReg().fpreg();

        // Fold a constant RHS down to a double now (can OOM).
        if (!ConstantFoldForFloatArray(cx, &vr))
            return false;

        GenConversionForFloatArray(masm, tarray, vr, temp, saveMask);

        if (vr.isConstant())
            masm.storeToTypedFloatArray(type, ImmDouble(vr.value().toDouble()), address);
        else
            masm.storeToTypedFloatArray(type, temp, address);
        break;
      }
    }

    return true;
}
#endif /* JS_METHODJIT_TYPED_ARRAY */
} /* namespace mjit */
} /* namespace js */
#endif /* js_typedarray_ic_h___ */

Просмотреть файл

@@ -45,7 +45,6 @@
#include "builtin/TestingFunctions.h"
#include "frontend/BytecodeEmitter.h"
#include "frontend/Parser.h"
#include "methodjit/MethodJIT.h"
#include "vm/Shape.h"
#include "prmjtime.h"

Просмотреть файл

@@ -21,7 +21,6 @@
#include "frontend/BytecodeCompiler.h"
#include "frontend/BytecodeEmitter.h"
#include "gc/Marking.h"
#include "methodjit/Retcon.h"
#include "ion/BaselineJIT.h"
#include "js/Vector.h"
@@ -237,13 +236,6 @@ BreakpointSite::BreakpointSite(JSScript *script, jsbytecode *pc)
void
BreakpointSite::recompile(FreeOp *fop)
{
#ifdef JS_METHODJIT
if (script->hasMJITInfo()) {
mjit::Recompiler::clearStackReferences(fop, script);
mjit::ReleaseScriptCode(fop, script);
}
#endif
#ifdef JS_ION
if (script->hasBaselineScript())
script->baselineScript()->toggleDebugTraps(script, pc);

Просмотреть файл

@@ -27,10 +27,6 @@
#include "vm/RegExpObject-inl.h"
#include "vm/RegExpStatics-inl.h"
#ifdef JS_METHODJIT
#include "methodjit/Retcon.h"
#endif
using namespace js;
JSObject *

Просмотреть файл

@@ -104,7 +104,7 @@ inline bool
RegExpShared::isJITRuntimeEnabled(JSContext *cx)
{
#if ENABLE_YARR_JIT
# if defined(ANDROID) && defined(JS_METHODJIT)
# if defined(ANDROID)
return !cx->jitIsBroken;
# else
return true;

Просмотреть файл

@@ -9,9 +9,6 @@
#include "jsnum.h"
#include "jsscript.h"
#include "methodjit/MethodJIT.h"
#include "methodjit/Compiler.h"
#include "vm/SPSProfiler.h"
#include "vm/StringBuffer.h"
@@ -40,12 +37,6 @@ SPSProfiler::~SPSProfiler()
for (ProfileStringMap::Enum e(strings); !e.empty(); e.popFront())
js_free(const_cast<char *>(e.front().value));
}
#ifdef JS_METHODJIT
if (jminfo.initialized()) {
for (JITInfoMap::Enum e(jminfo); !e.empty(); e.popFront())
js_delete(e.front().value);
}
#endif
}
void
@@ -252,188 +243,6 @@ SPSProfiler::allocProfileString(JSContext *cx, JSScript *script, JSFunction *may
return cstr;
}
#ifdef JS_METHODJIT
// Local shorthand: JMChunkInfo is declared nested inside SPSProfiler.
typedef SPSProfiler::JMChunkInfo JMChunkInfo;

// Snapshot one frame's inline ("main") and out-of-line ("stub") code
// bounds for the chunk. For inline frames these bounds start out relative
// to the main streams and are made absolute later by registerMJITCode().
JMChunkInfo::JMChunkInfo(mjit::JSActiveFrame *frame,
                         mjit::PCLengthEntry *pcLengths,
                         mjit::JITChunk *chunk)
  : mainStart(frame->mainCodeStart),
    mainEnd(frame->mainCodeEnd),
    stubStart(frame->stubCodeStart),
    stubEnd(frame->stubCodeEnd),
    pcLengths(pcLengths),
    chunk(chunk)
{}
// Translate an instruction pointer inside JM-generated code for |script|
// back to the bytecode it was compiled from, or NULL if the ip is not in
// any registered IC or chunk for that script.
jsbytecode*
SPSProfiler::ipToPC(JSScript *script, size_t ip)
{
    // No JM code was ever registered: nothing to translate.
    if (!jminfo.initialized())
        return NULL;

    JITInfoMap::Ptr entry = jminfo.lookup(script);
    if (!entry)
        return NULL;
    JMScriptInfo *scriptInfo = entry->value;

    /* ICs are cheap to test: each covers a single [base, base + size) range. */
    for (size_t idx = 0; idx < scriptInfo->ics.length(); idx++) {
        const ICInfo &ic = scriptInfo->ics[idx];
        if (ic.base <= ip && ip < ic.base + ic.size)
            return ic.pc;
    }

    /* Fall back to scanning every compiled chunk registered for the script. */
    for (size_t idx = 0; idx < scriptInfo->chunks.length(); idx++) {
        if (jsbytecode *pc = scriptInfo->chunks[idx].convert(script, ip))
            return pc;
    }

    return NULL;
}
// Map |ip| to a bytecode in |script| by walking the per-opcode length
// table for whichever stream (inline or out-of-line) contains it.
// Returns NULL when |ip| lies outside both code ranges.
jsbytecode*
JMChunkInfo::convert(JSScript *script, size_t ip)
{
    // Decide which stream |ip| falls into; the inline range wins if both
    // somehow matched (they should be disjoint).
    bool inMain = mainStart <= ip && ip < mainEnd;
    bool inStub = stubStart <= ip && ip < stubEnd;
    if (!inMain && !inStub)
        return NULL;

    size_t streamBase = inMain ? mainStart : stubStart;

    // Accumulate opcode widths until we pass |ip|; the opcode we stopped
    // on is the one the ip belongs to.
    size_t offset = 0;
    uint32_t idx;
    for (idx = 0; idx < script->length - 1; idx++) {
        offset += (uint32_t) (inMain ? pcLengths[idx].inlineLength
                                     : pcLengths[idx].stubLength);
        if (streamBase + offset > ip)
            break;
    }
    return &script->code[idx];
}
// Record the code ranges of a freshly compiled JM chunk — the outer
// script plus every script inlined into it — so ipToPC() can translate
// instruction pointers back to bytecode. Returns false on OOM.
bool
SPSProfiler::registerMJITCode(mjit::JITChunk *chunk,
                              mjit::JSActiveFrame *outerFrame,
                              mjit::JSActiveFrame **inlineFrames)
{
    // Lazily create the script -> info map on first registration.
    if (!jminfo.initialized() && !jminfo.init(100))
        return false;
    JS_ASSERT(chunk->pcLengths != NULL);

    JMChunkInfo *info = registerScript(outerFrame, chunk->pcLengths, chunk);
    if (!info)
        return false;

    /*
     * The pcLengths array has entries for both the outerFrame's script and also
     * all of the inlineFrames' scripts. The layout is something like:
     *
     *    [ outerFrame info ] [ inline frame 1 ] [ inline frame 2 ] ...
     *
     * This local pcLengths pointer tracks the position of each inline frame's
     * pcLengths array. Each section of the array has length script->length for
     * the corresponding script for that frame.
     */
    mjit::PCLengthEntry *pcLengths = chunk->pcLengths + outerFrame->script->length;
    for (unsigned i = 0; i < chunk->nInlineFrames; i++) {
        JMChunkInfo *child = registerScript(inlineFrames[i], pcLengths, chunk);
        if (!child)
            return false;

        /*
         * When JM tells us about new code, each inline ActiveFrame only has the
         * start/end listed relative to the start of the main instruction
         * streams. This is corrected here so the addresses listed on the
         * JMChunkInfo structure are absolute and can be tested directly.
         */
        child->mainStart += info->mainStart;
        child->mainEnd += info->mainStart;
        child->stubStart += info->stubStart;
        child->stubEnd += info->stubStart;

        // Advance past this inline frame's section of the lengths array.
        pcLengths += inlineFrames[i]->script->length;
    }

    return true;
}
// Associate one (frame, chunk) pair with frame->script in the jminfo map,
// creating the script's entry on first use. Returns a pointer to the
// freshly appended JMChunkInfo record, or NULL on OOM.
//
// Fix: the original leaked |info| when jminfo.add() failed — the fresh
// JMScriptInfo was neither owned by the map nor freed. It is now deleted
// on that path.
JMChunkInfo*
SPSProfiler::registerScript(mjit::JSActiveFrame *frame,
                            mjit::PCLengthEntry *entries,
                            mjit::JITChunk *chunk)
{
    /*
     * An inlined script could possibly be compiled elsewhere as not having been
     * inlined, so each JSScript* must be associated with a list of chunks
     * instead of just one. Also, our script may already be in the map.
     */
    JITInfoMap::AddPtr ptr = jminfo.lookupForAdd(frame->script);
    JMScriptInfo *info;
    if (ptr) {
        info = ptr->value;
        JS_ASSERT(info->chunks.length() > 0);
    } else {
        info = rt->new_<JMScriptInfo>();
        if (info == NULL)
            return NULL;
        if (!jminfo.add(ptr, frame->script, info)) {
            // The map rejected the entry (OOM): we still own |info|.
            js_delete(info);
            return NULL;
        }
    }

    if (!info->chunks.append(JMChunkInfo(frame, entries, chunk)))
        return NULL;
    // Hand back the record just appended so the caller can fix up bounds.
    return info->chunks.end() - 1;
}
// Record the code range [base, base + size) of an inline cache compiled
// for (script, pc). The script must already have been registered via
// registerMJITCode(). Returns false on OOM.
bool
SPSProfiler::registerICCode(mjit::JITChunk *chunk,
                            JSScript *script, jsbytecode *pc,
                            void *base, size_t size)
{
    JS_ASSERT(jminfo.initialized());

    JITInfoMap::Ptr entry = jminfo.lookup(script);
    JS_ASSERT(entry);
    return entry->value->ics.append(ICInfo(base, size, pc));
}
// Forget all ip -> pc bookkeeping for |chunk| when its code is discarded:
// the outer script (jscr->script) and every script inlined into the chunk.
// |address| is unused here; the signature mirrors the JM discard hook.
void
SPSProfiler::discardMJITCode(mjit::JITScript *jscr,
                             mjit::JITChunk *chunk, void* address)
{
    // Nothing was ever registered, so there is nothing to discard.
    if (!jminfo.initialized())
        return;

    unregisterScript(jscr->script, chunk);
    for (unsigned i = 0; i < chunk->nInlineFrames; i++)
        unregisterScript(chunk->inlineFrames()[i].fun->nonLazyScript(), chunk);
}
// Remove the record tying |script| to |chunk|. When that was the script's
// last chunk, the whole map entry is reclaimed.
void
SPSProfiler::unregisterScript(JSScript *script, mjit::JITChunk *chunk)
{
    JITInfoMap::Ptr entry = jminfo.lookup(script);
    if (!entry)
        return;

    JMScriptInfo *scriptInfo = entry->value;

    // Drop the first (and only expected) record matching |chunk|.
    for (JMChunkInfo *it = scriptInfo->chunks.begin();
         it != scriptInfo->chunks.end();
         it++)
    {
        if (it->chunk == chunk) {
            scriptInfo->chunks.erase(it);
            break;
        }
    }

    // No chunks left: the entry is dead, so free it.
    if (scriptInfo->chunks.length() == 0) {
        jminfo.remove(entry);
        js_delete(scriptInfo);
    }
}
#endif
SPSEntryMarker::SPSEntryMarker(JSRuntime *rt
MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
: profiler(&rt->spsProfiler)

Просмотреть файл

@@ -109,15 +109,6 @@ namespace js {
class ProfileEntry;
#ifdef JS_METHODJIT
namespace mjit {
struct JITChunk;
struct JITScript;
struct JSActiveFrame;
struct PCLengthEntry;
}
#endif
typedef HashMap<JSScript*, const char*, DefaultHasher<JSScript*>, SystemAllocPolicy>
ProfileStringMap;
@@ -190,77 +181,7 @@ class SPSProfiler
void enterNative(const char *string, void *sp);
void exitNative() { pop(); }
#ifdef JS_METHODJIT
// Bookkeeping for one inline cache: the generated code occupies
// [base, base + size) and was compiled for bytecode |pc|.
struct ICInfo
{
    size_t base;       // start address of the IC code
    size_t size;       // length of the IC code in bytes
    jsbytecode *pc;    // bytecode the IC belongs to

    ICInfo(void *base, size_t size, jsbytecode *pc)
      : base(size_t(base)), size(size), pc(pc)
    {}
};
// Code-range record for one JM chunk: the inline ("main") and out-of-line
// ("stub") instruction-stream bounds, plus the per-opcode length table
// used to translate an instruction pointer back to a bytecode offset.
struct JMChunkInfo
{
    size_t mainStart;               // bounds for the inline code
    size_t mainEnd;
    size_t stubStart;               // bounds of the ool code
    size_t stubEnd;
    mjit::PCLengthEntry *pcLengths; // pcLengths for this chunk
    mjit::JITChunk *chunk;          // stored to test when removing

    JMChunkInfo(mjit::JSActiveFrame *frame,
                mjit::PCLengthEntry *pcLengths,
                mjit::JITChunk *chunk);

    // Translate |ip| to a pc in |script|, or NULL if |ip| lies outside
    // both code ranges of this chunk.
    jsbytecode *convert(JSScript *script, size_t ip);
};
// Everything registered for a single JSScript: its inline caches and all
// chunks it was compiled into (a script may appear in several chunks).
struct JMScriptInfo
{
    Vector<ICInfo, 0, SystemAllocPolicy> ics;
    Vector<JMChunkInfo, 1, SystemAllocPolicy> chunks;
};
typedef HashMap<JSScript*, JMScriptInfo*, DefaultHasher<JSScript*>,
SystemAllocPolicy> JITInfoMap;
/*
* This is the mapping which facilitates translation from an ip to a
* jsbytecode*. The mapping is from a JSScript* to a set of chunks and ics
* which are associated with the script. This way lookup/translation doesn't
* have to do something like iterate the entire map.
*
* Each IC is easy to test because they all have only one pc associated with
* them, and the range is easy to check. The main chunks of code are a bit
* harder because there are both the inline and out of line streams which
* need to be tested. Each of these streams is described by the pcLengths
* array stored within each chunk. This array describes the width of each
* opcode of the corresponding JSScript, and has the same number of entries
* as script->length.
*/
JITInfoMap jminfo;
bool registerMJITCode(mjit::JITChunk *chunk,
mjit::JSActiveFrame *outerFrame,
mjit::JSActiveFrame **inlineFrames);
void discardMJITCode(mjit::JITScript *jscr,
mjit::JITChunk *chunk, void* address);
bool registerICCode(mjit::JITChunk *chunk, JSScript *script, jsbytecode* pc,
void *start, size_t size);
jsbytecode *ipToPC(JSScript *script, size_t ip);
private:
JMChunkInfo *registerScript(mjit::JSActiveFrame *frame,
mjit::PCLengthEntry *lenths,
mjit::JITChunk *chunk);
void unregisterScript(JSScript *script, mjit::JITChunk *chunk);
public:
#else
jsbytecode *ipToPC(JSScript *script, size_t ip) { return NULL; }
#endif
void setProfilingStack(ProfileEntry *stack, uint32_t *size, uint32_t max);
const char *profileString(JSContext *cx, JSScript *script, JSFunction *maybeFun);

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше