Roll V8 back to 3.9.24.31
Parent: 569acea0ee
Commit: 940a6863ea

@@ -23,7 +23,6 @@ Daniel James <dnljms@gmail.com>
Dineel D Sule <dsule@codeaurora.org>
Erich Ocean <erich.ocean@me.com>
Fedor Indutny <fedor@indutny.com>
Filipe David Manana <fdmanana@gmail.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>

@@ -1,163 +1,3 @@
2012-05-03: Version 3.10.8

        Enabled MIPS cross-compilation.

        Ensured reload of elements pointer in StoreFastDoubleElement stub.
        (Chromium issue 125515)

        Fixed corner cases in truncation behavior when storing to
        TypedArrays. (issue 2110)

        Fixed failure to properly recognize and report out-of-memory
        conditions when allocating code space pages. (Chromium issue
        118625)

        Fixed idle notifications to perform a round of incremental GCs
        after context disposal. (issue 2107)

        Fixed preparser for try statement. (issue 2109)

        Performance and stability improvements on all platforms.


2012-04-30: Version 3.10.7

        Performance and stability improvements on all platforms.


2012-04-26: Version 3.10.6

        Fixed some bugs in accessing details of the last regexp match.

        Fixed source property of empty RegExp objects. (issue 1982)

        Enabled inlining some V8 API functions.

        Performance and stability improvements on all platforms.


2012-04-23: Version 3.10.5

        Put new global var semantics behind a flag until WebKit tests are
        cleaned up.

        Enabled stepping into callback passed to builtins.
        (Chromium issue 109564)

        Performance and stability improvements on all platforms.


2012-04-19: Version 3.10.4

        Fixed issues when stressing compaction with WeakMaps.

        Fixed missing GVN flag for new-space promotion. (Chromium issue 123919)

        Simplify invocation sequence at monomorphic function invocation sites.
        (issue 2079)

        Performance and stability improvements on all platforms.


2012-04-17: Version 3.10.3

        Fixed several bugs in heap profiles (including issue 2078).

        Throw syntax errors on illegal escape sequences.

        Implemented rudimentary module linking (behind --harmony flag)

        Implemented ES5 erratum: Global declarations should shadow
        inherited properties.

        Made handling of const more consistent when combined with 'eval'
        and 'with'.

        Fixed V8 on MinGW-x64 (issue 2026).

        Performance and stability improvements on all platforms.


2012-04-13: Version 3.10.2

        Fixed native ARM build (issues 1744, 539)

        Return LOOKUP variable instead of CONTEXT for non-context allocated
        outer scope parameters (Chromium issue 119609).

        Fixed regular and ElementsKind transitions interfering with each other
        (Chromium issue 122271).

        Improved performance of keyed loads/stores which have a HeapNumber
        index (issues 1388, 1295).

        Fixed WeakMap processing for evacuation candidates (issue 2060).

        Bailout on possible direct eval calls (Chromium issue 122681).

        Do not assume that names of function expressions are context-allocated
        (issue 2051).

        Performance and stability improvements on all platforms.


2012-04-10: Version 3.10.1

        Fixed bug with arguments object in inlined functions (issue 2045).

        Fixed performance bug with lazy initialization (Chromium issue
        118686).

        Added support for Mac OS X 64bit builds with GYP.
        (Patch contributed by Filipe David Manana <fdmanana@gmail.com>)

        Fixed bug with hidden properties (issue 2034).

        Fixed a performance bug when reloading pages (Chromium issue 117767,
        V8 issue 1902).

        Fixed bug when optimizing throw in top-level code (issue 2054).

        Fixed two bugs with array literals (issue 2055, Chromium issue 121407).

        Fixed bug with Math.min/Math.max with NaN inputs (issue 2056).

        Fixed a bug with the new runtime profiler (Chromium issue 121147).

        Fixed compilation of V8 using uClibc.

        Optimized boot-up memory use.

        Optimized regular expressions.


2012-03-30: Version 3.10.0

        Fixed store IC writability check in strict mode
        (Chromium issue 120099).

        Resynchronize timers if the Windows system time was changed.
        (Chromium issue 119815)

        Removed "-mfloat-abi=hard" from host compiler cflags when building for
        hardfp ARM
        (https://code.google.com/p/chrome-os-partner/issues/detail?id=8539)

        Fixed edge case for case independent regexp character classes
        (issue 2032).

        Reset function info counters after context disposal.
        (Chromium issue 117767, V8 issue 1902)

        Fixed missing write barrier in CopyObjectToObjectElements.
        (Chromium issue 119926)

        Fixed missing bounds check in HasElementImpl.
        (Chromium issue 119925)

        Performance and stability improvements on all platforms.


2012-03-23: Version 3.9.24

        Activated count-based profiler for ARM.

@@ -137,6 +137,12 @@ ENVFILE = $(OUTDIR)/environment
# Target definitions. "all" is the default.
all: $(MODES)

# Special target for the buildbots to use. Depends on $(OUTDIR)/Makefile
# having been created before.
buildbot:
	$(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \
	        builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)"

# Compile targets. MODES and ARCHES are convenience targets.
.SECONDEXPANSION:
$(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))

@@ -144,21 +150,21 @@ $(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
$(ARCHES): $(addprefix $$@.,$(MODES))

# Defines how to build a particular target (e.g. ia32.release).
$(BUILDS): $(OUTDIR)/Makefile.$$(basename $$@)
	@$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
$(BUILDS): $(OUTDIR)/Makefile-$$(basename $$@)
	@$(MAKE) -C "$(OUTDIR)" -f Makefile-$(basename $@) \
	        CXX="$(CXX)" LINK="$(LINK)" \
	        BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
	                    python -c "print raw_input().capitalize()") \
	        builddir="$(shell pwd)/$(OUTDIR)/$@"

native: $(OUTDIR)/Makefile.native
	@$(MAKE) -C "$(OUTDIR)" -f Makefile.native \
native: $(OUTDIR)/Makefile-native
	@$(MAKE) -C "$(OUTDIR)" -f Makefile-native \
	        CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
	        builddir="$(shell pwd)/$(OUTDIR)/$@"

# TODO(jkummerow): add "android.debug" when we need it.
android android.release: $(OUTDIR)/Makefile.android
	@$(MAKE) -C "$(OUTDIR)" -f Makefile.android \
android android.release: $(OUTDIR)/Makefile-android
	@$(MAKE) -C "$(OUTDIR)" -f Makefile-android \
	        CXX="$(ANDROID_TOOL_PREFIX)-g++" \
	        AR="$(ANDROID_TOOL_PREFIX)-ar" \
	        RANLIB="$(ANDROID_TOOL_PREFIX)-ranlib" \

@@ -191,40 +197,55 @@ native.check: native
	--arch-and-mode=. $(TESTFLAGS)

# Clean targets. You can clean each architecture individually, or everything.
$(addsuffix .clean,$(ARCHES)) android.clean:
	rm -f $(OUTDIR)/Makefile.$(basename $@)
$(addsuffix .clean,$(ARCHES)):
	rm -f $(OUTDIR)/Makefile-$(basename $@)
	rm -rf $(OUTDIR)/$(basename $@).release
	rm -rf $(OUTDIR)/$(basename $@).debug
	find $(OUTDIR) -regex '.*\(host\|target\).$(basename $@)\.mk' -delete
	find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete

native.clean:
	rm -f $(OUTDIR)/Makefile.native
	rm -f $(OUTDIR)/Makefile-native
	rm -rf $(OUTDIR)/native
	find $(OUTDIR) -regex '.*\(host\|target\).native\.mk' -delete
	find $(OUTDIR) -regex '.*\(host\|target\)-native\.mk' -delete

clean: $(addsuffix .clean,$(ARCHES)) native.clean android.clean
android.clean:
	rm -f $(OUTDIR)/Makefile-android
	rm -rf $(OUTDIR)/android.release
	find $(OUTDIR) -regex '.*\(host\|target\)-android\.mk' -delete

clean: $(addsuffix .clean,$(ARCHES)) native.clean

# GYP file generation targets.
MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
$(MAKEFILES): $(GYPFILES) $(ENVFILE)
	GYP_GENERATORS=make \
$(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE)
	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
	              -Ibuild/standalone.gypi --depth=. \
	              -Dv8_target_arch=$(subst .,,$(suffix $@)) \
	              -S.$(subst .,,$(suffix $@)) $(GYPFLAGS)
	              -Ibuild/standalone.gypi --depth=. -Dtarget_arch=ia32 \
	              -S-ia32 $(GYPFLAGS)

$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
	GYP_GENERATORS=make \
$(OUTDIR)/Makefile-x64: $(GYPFILES) $(ENVFILE)
	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
	              -Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
	              -Ibuild/standalone.gypi --depth=. -Dtarget_arch=x64 \
	              -S-x64 $(GYPFLAGS)

$(OUTDIR)/Makefile.android: $(GYPFILES) $(ENVFILE) build/android.gypi \
$(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE) build/armu.gypi
	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
	              -Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \
	              -S-arm $(GYPFLAGS)

$(OUTDIR)/Makefile-mips: $(GYPFILES) $(ENVFILE) build/mipsu.gypi
	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
	              -Ibuild/standalone.gypi --depth=. -Ibuild/mipsu.gypi \
	              -S-mips $(GYPFLAGS)

$(OUTDIR)/Makefile-native: $(GYPFILES) $(ENVFILE)
	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
	              -Ibuild/standalone.gypi --depth=. -S-native $(GYPFLAGS)

$(OUTDIR)/Makefile-android: $(GYPFILES) $(ENVFILE) build/android.gypi \
                            must-set-ANDROID_NDK_ROOT
	GYP_GENERATORS=make \
	CC="${ANDROID_TOOL_PREFIX}-gcc" \
	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
	              -Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
	              -S.android $(GYPFLAGS)
	              -S-android $(GYPFLAGS)

must-set-ANDROID_NDK_ROOT:
ifndef ANDROID_NDK_ROOT

@@ -240,10 +261,9 @@ $(ENVFILE): $(ENVFILE).new

# Stores current GYPFLAGS in a file.
$(ENVFILE).new:
	@mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS)" > $(ENVFILE).new; \
	echo "CXX=$(CXX)" >> $(ENVFILE).new
	@mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS)" > $(ENVFILE).new;

# Dependencies.
dependencies:
	svn checkout --force http://gyp.googlecode.com/svn/trunk build/gyp \
	--revision 1282
	--revision 1026

@@ -1601,4 +1601,17 @@ except:
  pass


def WarnAboutDeprecation():
  print """
#######################################################
#  WARNING: Building V8 with SCons is deprecated and  #
#  will not work much longer. Please switch to using  #
#  the GYP-based build now. Instructions are at       #
#  http://code.google.com/p/v8/wiki/BuildingWithGYP.  #
#######################################################
"""

WarnAboutDeprecation()
import atexit
atexit.register(WarnAboutDeprecation)
Build()

@@ -0,0 +1,36 @@
# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

{
  'variables': {
    'target_arch': 'ia32',
    'v8_target_arch': 'arm',
    'armv7': 1,
    'arm_neon': 0,
    'arm_fpu': 'vfpv3',
  },
}

@@ -142,10 +142,8 @@
            'USE_EABI_HARDFLOAT=1',
            'CAN_USE_VFP_INSTRUCTIONS',
          ],
          'target_conditions': [
            ['_toolset=="target"', {
              'cflags': ['-mfloat-abi=hard',],
            }],
          'cflags': [
            '-mfloat-abi=hard',
          ],
        }, {
          'defines': [

@@ -173,11 +171,8 @@
        'defines': [
          'V8_TARGET_ARCH_MIPS',
        ],
        'variables': {
          'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips-" && echo "yes" || echo "no")',
        },
        'conditions': [
          ['mipscompiler=="yes"', {
        [ 'target_arch=="mips"', {
          'target_conditions': [
            ['_toolset=="target"', {
              'cflags': ['-EL'],

@@ -241,19 +236,6 @@
          ],
        }],
      ],
    }, { # Section for OS=="mac".
      'conditions': [
        ['target_arch=="ia32"', {
          'xcode_settings': {
            'ARCHS': ['i386'],
          }
        }],
        ['target_arch=="x64"', {
          'xcode_settings': {
            'ARCHS': ['x86_64'],
          }
        }],
      ],
    }],
    ['v8_use_liveobjectlist=="true"', {
      'defines': [

@@ -280,16 +262,19 @@
      },
    },
  }],
  ['OS=="win" and v8_target_arch=="x64"', {
    'msvs_settings': {
      'VCLinkerTool': {
        'StackReserveSize': '2097152',
      },
    },
  }],
  ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
     or OS=="netbsd"', {
    'conditions': [
      [ 'v8_target_arch!="x64"', {
        # Pass -m32 to the compiler iff it understands the flag.
        'variables': {
          'm32flag': '<!((echo | $(echo ${CXX:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
        },
        'cflags': [ '<(m32flag)' ],
        'ldflags': [ '<(m32flag)' ],
      [ 'target_arch=="ia32"', {
        'cflags': [ '-m32' ],
        'ldflags': [ '-m32' ],
      }],
      [ 'v8_no_strict_aliasing==1', {
        'cflags': [ '-fno-strict-aliasing' ],

@@ -322,10 +307,6 @@
        },
        'VCLinkerTool': {
          'LinkIncremental': '2',
          # For future reference, the stack size needs to be increased
          # when building for Windows 64-bit, otherwise some test cases
          # can cause stack overflow.
          # 'StackReserveSize': '297152',
        },
      },
      'conditions': [

@@ -336,7 +317,7 @@
        'cflags': [ '-I/usr/pkg/include' ],
      }],
      ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
        'cflags': [ '-Wno-unused-parameter',
        'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                    '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
      }],
    ],

@@ -407,12 +388,7 @@
        'VCLinkerTool': {
          'LinkIncremental': '1',
          'OptimizeReferences': '2',
          'OptimizeForWindows98': '1',
          'EnableCOMDATFolding': '2',
          # For future reference, the stack size needs to be
          # increased when building for Windows 64-bit, otherwise
          # some test cases can cause stack overflow.
          # 'StackReserveSize': '297152',
        },
      },
    }], # OS=="win"

@@ -1,6 +1,6 @@
#!/usr/bin/python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Copyright 2010 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:

@@ -38,6 +38,11 @@ import sys
script_dir = os.path.dirname(__file__)
v8_root = os.path.normpath(os.path.join(script_dir, os.pardir))

if __name__ == '__main__':
  os.chdir(v8_root)
  script_dir = os.path.dirname(__file__)
  v8_root = '.'

sys.path.insert(0, os.path.join(v8_root, 'tools'))
import utils

@@ -93,7 +98,7 @@ def additional_include_files(args=[]):
      result.append(path)

  # Always include standalone.gypi
  AddInclude(os.path.join(script_dir, 'standalone.gypi'))
  AddInclude(os.path.join(v8_root, 'build', 'standalone.gypi'))

  # Optionally add supplemental .gypi files if present.
  supplements = glob.glob(os.path.join(v8_root, '*', 'supplement.gypi'))

@@ -135,7 +140,10 @@ if __name__ == '__main__':
    # path separators even on Windows due to the use of shlex.split().
    args.extend(shlex.split(gyp_file))
  else:
    args.append(os.path.join(script_dir, 'all.gyp'))
    # Note that this must not start with "./" or things break.
    # So we rely on having done os.chdir(v8_root) above and use the
    # relative path.
    args.append(os.path.join('build', 'all.gyp'))

  args.extend(['-I' + i for i in additional_include_files(args)])

@@ -156,28 +164,6 @@

  # Generate for the architectures supported on the given platform.
  gyp_args = list(args)
  target_arch = None
  for p in gyp_args:
    if p.find('-Dtarget_arch=') == 0:
      target_arch = p
  if target_arch is None:
    gyp_args.append('-Dtarget_arch=ia32')
    if utils.GuessOS() == 'linux':
      gyp_args.append('-S.ia32')
  gyp_args.append('--generator-output=out')
  run_gyp(gyp_args)

  if utils.GuessOS() == 'linux':
    gyp_args = list(args)
    gyp_args.append('-Dtarget_arch=x64')
    gyp_args.append('-S.x64')
    run_gyp(gyp_args)

    gyp_args = list(args)
    gyp_args.append('-Dv8_target_arch=arm')
    gyp_args.append('-S.arm')
    run_gyp(gyp_args)

    gyp_args = list(args)
    gyp_args.append('-Dv8_target_arch=mips')
    gyp_args.append('-S.mips')
    run_gyp(gyp_args)

@@ -0,0 +1,33 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

{
  'variables': {
    'target_arch': 'ia32',
    'v8_target_arch': 'mips',
  },
}

@@ -71,10 +71,6 @@
        'want_separate_host_toolset': 0,
      }],
    ],
    # Default ARM variable settings.
    'armv7%': 1,
    'arm_neon%': 0,
    'arm_fpu%': 'vfpv3',
  },
  'target_defaults': {
    'default_configuration': 'Debug',

@@ -169,6 +165,9 @@
      },
    }], # OS=="win"
    ['OS=="mac"', {
      'xcode_settings': {
        'SYMROOT': '<(DEPTH)/xcodebuild',
      },
      'target_defaults': {
        'xcode_settings': {
          'ALWAYS_SEARCH_USER_PATHS': 'NO',

@@ -188,6 +187,7 @@
        'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES', # -Wnewline-eof
        'MACOSX_DEPLOYMENT_TARGET': '10.4', # -mmacosx-version-min=10.4
        'PREBINDING': 'NO', # No -Wl,-prebind
        'SYMROOT': '<(DEPTH)/xcodebuild',
        'USE_HEADERMAP': 'NO',
        'OTHER_CFLAGS': [
          '-fno-strict-aliasing',

@@ -64,7 +64,6 @@
 */
namespace v8 {

typedef uint32_t SnapshotObjectId;

/**
 * CpuProfileNode represents a node in a call graph.

@@ -275,7 +274,7 @@ class V8EXPORT HeapGraphNode {
   * Returns node id. For the same heap object, the id remains the same
   * across all snapshots.
   */
  SnapshotObjectId GetId() const;
  uint64_t GetId() const;

  /** Returns node's own size, in bytes. */
  int GetSelfSize() const;

@@ -339,7 +338,7 @@ class V8EXPORT HeapSnapshot {
  const HeapGraphNode* GetRoot() const;

  /** Returns a node by its id. */
  const HeapGraphNode* GetNodeById(SnapshotObjectId id) const;
  const HeapGraphNode* GetNodeById(uint64_t id) const;

  /** Returns total nodes count in the snapshot. */
  int GetNodesCount() const;

@@ -347,9 +346,6 @@ class V8EXPORT HeapSnapshot {
  /** Returns a node by index. */
  const HeapGraphNode* GetNode(int index) const;

  /** Returns a max seen JS object Id. */
  SnapshotObjectId GetMaxSnapshotJSObjectId() const;

  /**
   * Deletes the snapshot and removes it from HeapProfiler's list.
   * All pointers to nodes, edges and paths previously returned become

@@ -368,20 +364,16 @@ class V8EXPORT HeapSnapshot {
   * with the following structure:
   *
   *  {
   *    snapshot: {
   *      title: "...",
   *      uid: nnn,
   *      meta: { meta-info },
   *      node_count: nnn,
   *      edge_count: nnn
   *    },
   *    nodes: [nodes array],
   *    edges: [edges array],
   *    strings: [strings array]
   *    snapshot: {title: "...", uid: nnn},
   *    nodes: [
   *      meta-info (JSON string),
   *      nodes themselves
   *    ],
   *    strings: [strings]
   *  }
   *
   * Nodes reference strings, other nodes, and edges by their indexes
   * in corresponding arrays.
   * Outgoing node links are stored after each node. Nodes reference strings
   * and other nodes by their indexes in corresponding arrays.
   */
  void Serialize(OutputStream* stream, SerializationFormat format) const;
};

@@ -412,19 +404,6 @@ class V8EXPORT HeapProfiler {
  /** Returns a profile by uid. */
  static const HeapSnapshot* FindSnapshot(unsigned uid);

  /**
   * Returns SnapshotObjectId for a heap object referenced by |value| if
   * it has been seen by the heap profiler, kUnknownObjectId otherwise.
   */
  static SnapshotObjectId GetSnapshotObjectId(Handle<Value> value);

  /**
   * A constant for invalid SnapshotObjectId. GetSnapshotObjectId will return
   * it in case heap profiler cannot find id for the object passed as
   * parameter. HeapSnapshot::GetNodeById will always return NULL for such id.
   */
  static const SnapshotObjectId kUnknownObjectId = 0;

  /**
   * Takes a heap snapshot and returns it. Title may be an empty string.
   * See HeapSnapshot::Type for types description.

@@ -434,33 +413,6 @@ class V8EXPORT HeapProfiler {
      HeapSnapshot::Type type = HeapSnapshot::kFull,
      ActivityControl* control = NULL);

  /**
   * Starts tracking of heap objects population statistics. After calling
   * this method, all heap objects relocations done by the garbage collector
   * are being registered.
   */
  static void StartHeapObjectsTracking();

  /**
   * Adds a new time interval entry to the aggregated statistics array. The
   * time interval entry contains information on the current heap objects
   * population size. The method also updates aggregated statistics and
   * reports updates for all previous time intervals via the OutputStream
   * object. Updates on each time interval are provided as a stream of the
   * HeapStatsUpdate structure instances.
   *
   * StartHeapObjectsTracking must be called before the first call to this
   * method.
   */
  static void PushHeapObjectsStats(OutputStream* stream);

  /**
   * Stops tracking of heap objects population statistics, cleans up all
   * collected data. StartHeapObjectsTracking must be called again prior to
   * calling PushHeapObjectsStats next time.
   */
  static void StopHeapObjectsTracking();

  /**
   * Deletes all snapshots taken. All previously returned pointers to
   * snapshots and their contents become invalid after this call.

@@ -558,19 +510,6 @@ class V8EXPORT RetainedObjectInfo { // NOLINT
};


/**
 * A struct for exporting HeapStats data from V8, using "push" model.
 * See HeapProfiler::PushHeapObjectsStats.
 */
struct HeapStatsUpdate {
  HeapStatsUpdate(uint32_t index, uint32_t count, uint32_t size)
    : index(index), count(count), size(size) { }
  uint32_t index; // Index of the time interval that was changed.
  uint32_t count; // New value of count field for the interval with this index.
  uint32_t size; // New value of size field for the interval with this index.
};


} // namespace v8

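For context, the heap-stats push API that this rollback removes (HeapStatsUpdate, StartHeapObjectsTracking, PushHeapObjectsStats, and OutputStream::WriteHeapStatsChunk) was consumed on the embedder side roughly as in this minimal sketch; StatsSink is a hypothetical embedder class, and the sketch assumes the 3.10 v8-profiler.h shown above:

// Minimal sketch, assuming the 3.10 API above; not part of the diff.
class StatsSink : public v8::OutputStream {
 public:
  virtual void EndOfStream() { }
  virtual WriteResult WriteAsciiChunk(char* data, int size) {
    return kContinue;  // heap snapshot JSON chunks arrive here
  }
  virtual WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* data,
                                          int count) {
    // Each entry carries (index, count, size) for one changed time interval.
    return kContinue;
  }
};

// Usage:
//   v8::HeapProfiler::StartHeapObjectsTracking();
//   StatsSink sink;
//   v8::HeapProfiler::PushHeapObjectsStats(&sink);  // stream of updates
//   v8::HeapProfiler::StopHeapObjectsTracking();
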
@@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:

@@ -107,7 +107,6 @@ class Data;
class AccessorInfo;
class StackTrace;
class StackFrame;
class Isolate;

namespace internal {

@@ -863,13 +862,13 @@ class Value : public Data {
   * Returns true if this value is the undefined value. See ECMA-262
   * 4.3.10.
   */
  inline bool IsUndefined() const;
  V8EXPORT bool IsUndefined() const;

  /**
   * Returns true if this value is the null value. See ECMA-262
   * 4.3.11.
   */
  inline bool IsNull() const;
  V8EXPORT bool IsNull() const;

  /**
   * Returns true if this value is true.

@@ -983,11 +982,7 @@
  V8EXPORT bool StrictEquals(Handle<Value> that) const;

 private:
  inline bool QuickIsUndefined() const;
  inline bool QuickIsNull() const;
  inline bool QuickIsString() const;
  V8EXPORT bool FullIsUndefined() const;
  V8EXPORT bool FullIsNull() const;
  V8EXPORT bool FullIsString() const;
};

@@ -1084,7 +1079,6 @@ class String : public Primitive {
   * A zero length string.
   */
  V8EXPORT static v8::Local<v8::String> Empty();
  inline static v8::Local<v8::String> Empty(Isolate* isolate);

  /**
   * Returns true if the string is external

@@ -1242,7 +1236,8 @@
   * this function should not otherwise delete or modify the resource. Neither
   * should the underlying buffer be deallocated or modified except through the
   * destructor of the external string resource.
   */ V8EXPORT static Local<String> NewExternal(
   */
  V8EXPORT static Local<String> NewExternal(
      ExternalAsciiStringResource* resource);

  /**

@@ -1973,13 +1968,10 @@ class Arguments {
  inline Local<Object> Holder() const;
  inline bool IsConstructCall() const;
  inline Local<Value> Data() const;
  inline Isolate* GetIsolate() const;

 private:
  static const int kIsolateIndex = 0;
  static const int kDataIndex = -1;
  static const int kCalleeIndex = -2;
  static const int kHolderIndex = -3;
  static const int kDataIndex = 0;
  static const int kCalleeIndex = -1;
  static const int kHolderIndex = -2;

  friend class ImplementationUtilities;
  inline Arguments(internal::Object** implicit_args,

@@ -2001,11 +1993,9 @@ class V8EXPORT AccessorInfo {
 public:
  inline AccessorInfo(internal::Object** args)
      : args_(args) { }
  inline Isolate* GetIsolate() const;
  inline Local<Value> Data() const;
  inline Local<Object> This() const;
  inline Local<Object> Holder() const;

 private:
  internal::Object** args_;
};

@@ -2562,11 +2552,6 @@ Handle<Primitive> V8EXPORT Null();
Handle<Boolean> V8EXPORT True();
Handle<Boolean> V8EXPORT False();

inline Handle<Primitive> Undefined(Isolate* isolate);
inline Handle<Primitive> Null(Isolate* isolate);
inline Handle<Boolean> True(Isolate* isolate);
inline Handle<Boolean> False(Isolate* isolate);


/**
 * A set of constraints that specifies the limits of the runtime's memory use.

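For context, the isolate-aware handle accessors declared just above (added in 3.10, removed by this rollback) let an embedder skip the thread-local Isolate::Current() lookup that the legacy exported forms perform. A minimal sketch, assuming the 3.10 declarations:

// Minimal sketch, assuming the 3.10 declarations shown above.
void UseConstants(v8::Isolate* isolate) {
  v8::Handle<v8::Primitive> u = v8::Undefined(isolate);  // isolate-aware
  v8::Handle<v8::Boolean> t = v8::True(isolate);
  v8::Handle<v8::Primitive> u2 = v8::Undefined();  // legacy, TLS lookup
}
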
@@ -2817,13 +2802,13 @@ class V8EXPORT Isolate {
  /**
   * Associate embedder-specific data with the isolate
   */
  inline void SetData(void* data);
  void SetData(void* data);

  /**
   * Retrieve embedder-specific data from the isolate.
   * Retrive embedder-specific data from the isolate.
   * Returns NULL if SetData has never been called.
   */
  inline void* GetData();
  void* GetData();

 private:
  Isolate();

@@ -3168,8 +3153,7 @@ class V8EXPORT V8 {
   * that is kept alive by JavaScript objects.
   * \returns the adjusted value.
   */
  static intptr_t AdjustAmountOfExternalAllocatedMemory(
      intptr_t change_in_bytes);
  static int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);

  /**
   * Suspends recording of tick samples in the profiler.

@@ -3751,12 +3735,6 @@ class V8EXPORT Locker {
};


/**
 * A struct for exporting HeapStats data from V8, using "push" model.
 */
struct HeapStatsUpdate;


/**
 * An interface for exporting data from V8, using "push" model.
 */

@@ -3782,14 +3760,6 @@ class V8EXPORT OutputStream { // NOLINT
   * will not be called in case writing was aborted.
   */
  virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
  /**
   * Writes the next chunk of heap stats data into the stream. Writing
   * can be stopped by returning kAbort as function result. EndOfStream
   * will not be called in case writing was aborted.
   */
  virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
    return kAbort;
  };
};


@@ -3878,6 +3848,18 @@ const uintptr_t kEncodablePointerMask =
    PlatformSmiTagging::kEncodablePointerMask;
const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;

template <size_t ptr_size> struct InternalConstants;

// Internal constants for 32-bit systems.
template <> struct InternalConstants<4> {
  static const int kStringResourceOffset = 3 * kApiPointerSize;
};

// Internal constants for 64-bit systems.
template <> struct InternalConstants<8> {
  static const int kStringResourceOffset = 3 * kApiPointerSize;
};

/**
 * This class exports constants and functionality from within v8 that
 * is necessary to implement inline functions in the v8 api. Don't

@@ -3889,31 +3871,18 @@ class Internals {
  // the implementation of v8.
  static const int kHeapObjectMapOffset = 0;
  static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
  static const int kStringResourceOffset = 3 * kApiPointerSize;
  static const int kStringResourceOffset =
      InternalConstants<kApiPointerSize>::kStringResourceOffset;

  static const int kOddballKindOffset = 3 * kApiPointerSize;
  static const int kForeignAddressOffset = kApiPointerSize;
  static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
  static const int kFullStringRepresentationMask = 0x07;
  static const int kExternalTwoByteRepresentationTag = 0x02;

  static const int kIsolateStateOffset = 0;
  static const int kIsolateEmbedderDataOffset = 1 * kApiPointerSize;
  static const int kIsolateRootsOffset = 3 * kApiPointerSize;
  static const int kUndefinedValueRootIndex = 5;
  static const int kNullValueRootIndex = 7;
  static const int kTrueValueRootIndex = 8;
  static const int kFalseValueRootIndex = 9;
  static const int kEmptySymbolRootIndex = 128;

  static const int kJSObjectType = 0xaa;
  static const int kFirstNonstringType = 0x80;
  static const int kOddballType = 0x82;
  static const int kForeignType = 0x85;

  static const int kUndefinedOddballKind = 5;
  static const int kNullOddballKind = 3;

  static inline bool HasHeapObjectTag(internal::Object* value) {
    return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
            kHeapObjectTag);

@@ -3933,11 +3902,6 @@ class Internals {
    return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
  }

  static inline int GetOddballKind(internal::Object* obj) {
    typedef internal::Object O;
    return SmiValue(ReadField<O*>(obj, kOddballKindOffset));
  }

  static inline void* GetExternalPointerFromSmi(internal::Object* value) {
    const uintptr_t address = reinterpret_cast<uintptr_t>(value);
    return reinterpret_cast<void*>(address >> kPointerToSmiShift);

@@ -3958,28 +3922,6 @@ class Internals {
    return representation == kExternalTwoByteRepresentationTag;
  }

  static inline bool IsInitialized(v8::Isolate* isolate) {
    uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateStateOffset;
    return *reinterpret_cast<int*>(addr) == 1;
  }

  static inline void SetEmbedderData(v8::Isolate* isolate, void* data) {
    uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
        kIsolateEmbedderDataOffset;
    *reinterpret_cast<void**>(addr) = data;
  }

  static inline void* GetEmbedderData(v8::Isolate* isolate) {
    uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
        kIsolateEmbedderDataOffset;
    return *reinterpret_cast<void**>(addr);
  }

  static inline internal::Object** GetRoot(v8::Isolate* isolate, int index) {
    uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateRootsOffset;
    return reinterpret_cast<internal::Object**>(addr + index * kApiPointerSize);
  }

  template <typename T>
  static inline T ReadField(Object* ptr, int offset) {
    uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;

@@ -4106,11 +4048,6 @@ Local<Value> Arguments::Data() const {
}


Isolate* Arguments::GetIsolate() const {
  return *reinterpret_cast<Isolate**>(&implicit_args_[kIsolateIndex]);
}


bool Arguments::IsConstructCall() const {
  return is_construct_call_;
}

@@ -4223,15 +4160,6 @@ String* String::Cast(v8::Value* value) {
}


Local<String> String::Empty(Isolate* isolate) {
  typedef internal::Object* S;
  typedef internal::Internals I;
  if (!I::IsInitialized(isolate)) return Empty();
  S* slot = I::GetRoot(isolate, I::kEmptySymbolRootIndex);
  return Local<String>(reinterpret_cast<String*>(slot));
}


String::ExternalStringResource* String::GetExternalStringResource() const {
  typedef internal::Object O;
  typedef internal::Internals I;

@@ -4250,42 +4178,6 @@ String::ExternalStringResource* String::GetExternalStringResource() const {
}


bool Value::IsUndefined() const {
#ifdef V8_ENABLE_CHECKS
  return FullIsUndefined();
#else
  return QuickIsUndefined();
#endif
}

bool Value::QuickIsUndefined() const {
  typedef internal::Object O;
  typedef internal::Internals I;
  O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
  if (!I::HasHeapObjectTag(obj)) return false;
  if (I::GetInstanceType(obj) != I::kOddballType) return false;
  return (I::GetOddballKind(obj) == I::kUndefinedOddballKind);
}


bool Value::IsNull() const {
#ifdef V8_ENABLE_CHECKS
  return FullIsNull();
#else
  return QuickIsNull();
#endif
}

bool Value::QuickIsNull() const {
  typedef internal::Object O;
  typedef internal::Internals I;
  O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
  if (!I::HasHeapObjectTag(obj)) return false;
  if (I::GetInstanceType(obj) != I::kOddballType) return false;
  return (I::GetOddballKind(obj) == I::kNullOddballKind);
}


bool Value::IsString() const {
#ifdef V8_ENABLE_CHECKS
  return FullIsString();

@@ -4391,11 +4283,6 @@ External* External::Cast(v8::Value* value) {
}


Isolate* AccessorInfo::GetIsolate() const {
  return *reinterpret_cast<Isolate**>(&args_[-3]);
}


Local<Value> AccessorInfo::Data() const {
  return Local<Value>(reinterpret_cast<Value*>(&args_[-2]));
}

@@ -4411,54 +4298,6 @@ Local<Object> AccessorInfo::Holder() const {
}


Handle<Primitive> Undefined(Isolate* isolate) {
  typedef internal::Object* S;
  typedef internal::Internals I;
  if (!I::IsInitialized(isolate)) return Undefined();
  S* slot = I::GetRoot(isolate, I::kUndefinedValueRootIndex);
  return Handle<Primitive>(reinterpret_cast<Primitive*>(slot));
}


Handle<Primitive> Null(Isolate* isolate) {
  typedef internal::Object* S;
  typedef internal::Internals I;
  if (!I::IsInitialized(isolate)) return Null();
  S* slot = I::GetRoot(isolate, I::kNullValueRootIndex);
  return Handle<Primitive>(reinterpret_cast<Primitive*>(slot));
}


Handle<Boolean> True(Isolate* isolate) {
  typedef internal::Object* S;
  typedef internal::Internals I;
  if (!I::IsInitialized(isolate)) return True();
  S* slot = I::GetRoot(isolate, I::kTrueValueRootIndex);
  return Handle<Boolean>(reinterpret_cast<Boolean*>(slot));
}


Handle<Boolean> False(Isolate* isolate) {
  typedef internal::Object* S;
  typedef internal::Internals I;
  if (!I::IsInitialized(isolate)) return False();
  S* slot = I::GetRoot(isolate, I::kFalseValueRootIndex);
  return Handle<Boolean>(reinterpret_cast<Boolean*>(slot));
}


void Isolate::SetData(void* data) {
  typedef internal::Internals I;
  I::SetEmbedderData(this, data);
}


void* Isolate::GetData() {
  typedef internal::Internals I;
  return I::GetEmbedderData(this);
}


/**
 * \example shell.cc
 * A simple shell that takes a list of expressions on the

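For context, the Isolate embedder-data accessors whose linkage changes above (inline reads in 3.10 versus plain exported calls in 3.9.24) are used identically from the embedder's side. A minimal sketch, with MyEmbedderState as a hypothetical type:

// Minimal usage sketch; MyEmbedderState is a hypothetical embedder type.
struct MyEmbedderState { int request_count; };

void AttachState(v8::Isolate* isolate, MyEmbedderState* state) {
  isolate->SetData(state);  // a single void* slot per isolate
}

MyEmbedderState* StateOf(v8::Isolate* isolate) {
  // Returns NULL if SetData has never been called (see header comment above).
  return static_cast<MyEmbedderState*>(isolate->GetData());
}
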
@@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:

@@ -434,9 +434,9 @@ v8::Handle<v8::String> ReadLine() {
  }
  if (res == NULL) {
    v8::Handle<v8::Primitive> t = v8::Undefined();
    return v8::Handle<v8::String>(v8::String::Cast(*t));
    return reinterpret_cast<v8::Handle<v8::String>&>(t);
  }
  // Remove newline char
  // remove newline char
  for (char* pos = buffer; *pos != '\0'; pos++) {
    if (*pos == '\n') {
      *pos = '\0';

@@ -1,4 +1,4 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:

@@ -48,12 +48,6 @@
      'sources': [
        'process.cc',
      ],
    },
    {
      'target_name': 'lineprocessor',
      'sources': [
        'lineprocessor.cc',
      ],
    }
  ],
}

@@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:

@@ -67,20 +67,17 @@ static bool run_shell;
int main(int argc, char* argv[]) {
  v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
  run_shell = (argc == 1);
  int result;
  {
    v8::HandleScope handle_scope;
    v8::Persistent<v8::Context> context = CreateShellContext();
    if (context.IsEmpty()) {
      printf("Error creating context\n");
      return 1;
    }
    context->Enter();
    result = RunMain(argc, argv);
    if (run_shell) RunShell(context);
    context->Exit();
    context.Dispose();
  v8::HandleScope handle_scope;
  v8::Persistent<v8::Context> context = CreateShellContext();
  if (context.IsEmpty()) {
    printf("Error creating context\n");
    return 1;
  }
  context->Enter();
  int result = RunMain(argc, argv);
  if (run_shell) RunShell(context);
  context->Exit();
  context.Dispose();
  v8::V8::Dispose();
  return result;
}

@@ -512,16 +512,6 @@ void RegisteredExtension::Register(RegisteredExtension* that) {
}


void RegisteredExtension::UnregisterAll() {
  RegisteredExtension* re = first_extension_;
  while (re != NULL) {
    RegisteredExtension* next = re->next();
    delete re;
    re = next;
  }
}


void RegisterExtension(Extension* that) {
  RegisteredExtension* extension = new RegisteredExtension(that);
  RegisteredExtension::Register(extension);

@@ -2101,21 +2091,17 @@ bool StackFrame::IsConstructor() const {

// --- D a t a ---

bool Value::FullIsUndefined() const {
bool Value::IsUndefined() const {
  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUndefined()")) {
    return false;
  }
  bool result = Utils::OpenHandle(this)->IsUndefined();
  ASSERT_EQ(result, QuickIsUndefined());
  return result;
  return Utils::OpenHandle(this)->IsUndefined();
}


bool Value::FullIsNull() const {
bool Value::IsNull() const {
  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNull()")) return false;
  bool result = Utils::OpenHandle(this)->IsNull();
  ASSERT_EQ(result, QuickIsNull());
  return result;
  return Utils::OpenHandle(this)->IsNull();
}

@@ -2813,13 +2799,9 @@ bool v8::Object::ForceDelete(v8::Handle<Value> key) {
  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
  i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);

  // When deleting a property on the global object using ForceDelete
  // deoptimize all functions as optimized code does not check for the hole
  // value with DontDelete properties. We have to deoptimize all contexts
  // because of possible cross-context inlined functions.
  if (self->IsJSGlobalProxy() || self->IsGlobalObject()) {
    i::Deoptimizer::DeoptimizeAll();
  }
  // When turning on access checks for a global object deoptimize all functions
  // as optimized code does not always handle access checks.
  i::Deoptimizer::DeoptimizeGlobalObject(*self);

  EXCEPTION_PREAMBLE(isolate);
  i::Handle<i::Object> obj = i::ForceDeleteProperty(self, key_obj);

@@ -4630,9 +4612,7 @@ void* External::Value() const {

Local<String> v8::String::Empty() {
  i::Isolate* isolate = i::Isolate::Current();
  if (!EnsureInitializedForIsolate(isolate, "v8::String::Empty()")) {
    return v8::Local<String>();
  }
  EnsureInitializedForIsolate(isolate, "v8::String::Empty()");
  LOG_API(isolate, "String::Empty()");
  return Utils::ToLocal(isolate->factory()->empty_symbol());
}

@@ -5218,7 +5198,7 @@ void V8::AddImplicitReferences(Persistent<Object> parent,
}


intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) {
int V8::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
  i::Isolate* isolate = i::Isolate::Current();
  if (IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
    return 0;

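For context, AdjustAmountOfExternalAllocatedMemory, narrowed above from intptr_t back to int by the rollback, is how an embedder tells the GC about memory retained outside the V8 heap. A minimal sketch; the buffer and its size are arbitrary:

// Minimal sketch: account for an external buffer kept alive by a JS object.
const int kExternalSize = 1 << 20;
char* buffer = new char[kExternalSize];
v8::V8::AdjustAmountOfExternalAllocatedMemory(kExternalSize);
// ... later, when the JS object dies and the buffer is freed:
delete[] buffer;
v8::V8::AdjustAmountOfExternalAllocatedMemory(-kExternalSize);
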
@@ -5398,6 +5378,17 @@ void Isolate::Exit() {
}


void Isolate::SetData(void* data) {
  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
  isolate->SetData(data);
}

void* Isolate::GetData() {
  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
  return isolate->GetData();
}


String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
    : str_(NULL), length_(0) {
  i::Isolate* isolate = i::Isolate::Current();

@@ -5997,7 +5988,7 @@ Handle<Value> HeapGraphEdge::GetName() const {
const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
  i::Isolate* isolate = i::Isolate::Current();
  IsDeadCheck(isolate, "v8::HeapGraphEdge::GetFromNode");
  const i::HeapEntry* from = ToInternal(this)->from();
  const i::HeapEntry* from = ToInternal(this)->From();
  return reinterpret_cast<const HeapGraphNode*>(from);
}

@@ -6031,7 +6022,7 @@ Handle<String> HeapGraphNode::GetName() const {
}


SnapshotObjectId HeapGraphNode::GetId() const {
uint64_t HeapGraphNode::GetId() const {
  i::Isolate* isolate = i::Isolate::Current();
  IsDeadCheck(isolate, "v8::HeapGraphNode::GetId");
  return ToInternal(this)->id();

@@ -6146,11 +6137,11 @@ const HeapGraphNode* HeapSnapshot::GetRoot() const {
}


const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const {
const HeapGraphNode* HeapSnapshot::GetNodeById(uint64_t id) const {
  i::Isolate* isolate = i::Isolate::Current();
  IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
  return reinterpret_cast<const HeapGraphNode*>(
      ToInternal(this)->GetEntryById(id));
      ToInternal(this)->GetEntryById(static_cast<i::SnapshotObjectId>(id)));
}

@@ -6169,13 +6160,6 @@ const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
}


SnapshotObjectId HeapSnapshot::GetMaxSnapshotJSObjectId() const {
  i::Isolate* isolate = i::Isolate::Current();
  IsDeadCheck(isolate, "v8::HeapSnapshot::GetMaxSnapshotJSObjectId");
  return ToInternal(this)->max_snapshot_js_object_id();
}


void HeapSnapshot::Serialize(OutputStream* stream,
                             HeapSnapshot::SerializationFormat format) const {
  i::Isolate* isolate = i::Isolate::Current();

@@ -6217,14 +6201,6 @@ const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
}


SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Value> value) {
  i::Isolate* isolate = i::Isolate::Current();
  IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotObjectId");
  i::Handle<i::Object> obj = Utils::OpenHandle(*value);
  return i::HeapProfiler::GetSnapshotObjectId(obj);
}


const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
                                               HeapSnapshot::Type type,
                                               ActivityControl* control) {

@@ -6244,27 +6220,6 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
}


void HeapProfiler::StartHeapObjectsTracking() {
  i::Isolate* isolate = i::Isolate::Current();
  IsDeadCheck(isolate, "v8::HeapProfiler::StartHeapObjectsTracking");
  i::HeapProfiler::StartHeapObjectsTracking();
}


void HeapProfiler::StopHeapObjectsTracking() {
  i::Isolate* isolate = i::Isolate::Current();
  IsDeadCheck(isolate, "v8::HeapProfiler::StopHeapObjectsTracking");
  i::HeapProfiler::StopHeapObjectsTracking();
}


void HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
  i::Isolate* isolate = i::Isolate::Current();
  IsDeadCheck(isolate, "v8::HeapProfiler::PushHeapObjectsStats");
  return i::HeapProfiler::PushHeapObjectsStats(stream);
}


void HeapProfiler::DeleteAllSnapshots() {
  i::Isolate* isolate = i::Isolate::Current();
  IsDeadCheck(isolate, "v8::HeapProfiler::DeleteAllSnapshots");

@@ -6312,11 +6267,7 @@ static void SetFlagsFromString(const char* flags) {

void Testing::PrepareStressRun(int run) {
  static const char* kLazyOptimizations =
      "--prepare-always-opt "
      "--max-inlined-source-size=999999 "
      "--max-inlined-nodes=999999 "
      "--max-inlined-nodes-cumulative=999999 "
      "--noalways-opt";
      "--prepare-always-opt --nolimit-inlining --noalways-opt";
  static const char* kForcedOptimizations = "--always-opt";

  // If deoptimization stressed turn on frequent deoptimization. If no value

@@ -146,7 +146,6 @@ class RegisteredExtension {
 public:
  explicit RegisteredExtension(Extension* extension);
  static void Register(RegisteredExtension* that);
  static void UnregisterAll();
  Extension* extension() { return extension_; }
  RegisteredExtension* next() { return next_; }
  RegisteredExtension* next_auto() { return next_auto_; }

@@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:

@@ -40,17 +40,14 @@ class ImplementationUtilities {
  }

  // Packs additional parameters for the NewArguments function. |implicit_args|
  // is a pointer to the last element of 4-elements array controlled by GC.
  // is a pointer to the last element of 3-elements array controlled by GC.
  static void PrepareArgumentsData(internal::Object** implicit_args,
                                   internal::Isolate* isolate,
                                   internal::Object* data,
                                   internal::JSFunction* callee,
                                   internal::Object* holder) {
    implicit_args[v8::Arguments::kDataIndex] = data;
    implicit_args[v8::Arguments::kCalleeIndex] = callee;
    implicit_args[v8::Arguments::kHolderIndex] = holder;
    implicit_args[v8::Arguments::kIsolateIndex] =
        reinterpret_cast<internal::Object*>(isolate);
  }

  static v8::Arguments NewArguments(internal::Object** implicit_args,

@@ -58,8 +55,6 @@ class ImplementationUtilities {
                                    bool is_construct_call) {
    ASSERT(implicit_args[v8::Arguments::kCalleeIndex]->IsJSFunction());
    ASSERT(implicit_args[v8::Arguments::kHolderIndex]->IsHeapObject());
    // The implicit isolate argument is not tagged and looks like a SMI.
    ASSERT(implicit_args[v8::Arguments::kIsolateIndex]->IsSmi());

    return v8::Arguments(implicit_args, argv, argc, is_construct_call);
  }

@@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:

@@ -91,11 +91,9 @@ class CustomArguments : public Relocatable {
                  Object* data,
                  Object* self,
                  JSObject* holder) : Relocatable(isolate) {
    ASSERT(reinterpret_cast<Object*>(isolate)->IsSmi());
    values_[3] = self;
    values_[2] = holder;
    values_[1] = data;
    values_[0] = reinterpret_cast<Object*>(isolate);
    values_[2] = self;
    values_[1] = holder;
    values_[0] = data;
  }

  inline explicit CustomArguments(Isolate* isolate) : Relocatable(isolate) {

@@ -108,9 +106,8 @@ class CustomArguments : public Relocatable {

  void IterateInstance(ObjectVisitor* v);
  Object** end() { return values_ + ARRAY_SIZE(values_) - 1; }

 private:
  Object* values_[4];
  Object* values_[3];
};

@@ -5169,9 +5169,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
    __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
    __ b(ne, &call);
    // Patch the receiver on the stack with the global receiver object.
    __ ldr(r3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
    __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
    __ str(r3, MemOperand(sp, argc_ * kPointerSize));
    __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
    __ str(r2, MemOperand(sp, argc_ * kPointerSize));
    __ bind(&call);
  }

@@ -5179,13 +5179,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
  // r1: pushed function (to be verified)
  __ JumpIfSmi(r1, &non_function);
  // Get the map of the function object.
  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
  __ b(ne, &slow);

  if (RecordCallTarget()) {
    GenerateRecordCallTarget(masm);
  }

  // Fast-case: Invoke the function now.
  // r1: pushed function
  ParameterCount actual(argc_);

@@ -5209,17 +5205,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {

  // Slow-case: Non-function called.
  __ bind(&slow);
  if (RecordCallTarget()) {
    // If there is a call target cache, mark it megamorphic in the
    // non-function case. MegamorphicSentinel is an immortal immovable
    // object (undefined) so no write barrier is needed.
    ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
              masm->isolate()->heap()->undefined_value());
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
  }
  // Check for function proxy.
  __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
  __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
  __ b(ne, &non_function);
  __ push(r1); // put proxy as additional argument
  __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));

@@ -5886,12 +5873,36 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  // r2: result string length
  __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
  __ cmp(r2, Operand(r4, ASR, 1));
  // Return original string.
  __ b(eq, &return_r0);
  // Longer than original string's length or negative: unsafe arguments.
  __ b(hi, &runtime);
  // Shorter than original string's length: an actual substring.

  Label result_longer_than_two;
  // Check for special case of two character ASCII string, in which case
  // we do a lookup in the symbol table first.
  __ cmp(r2, Operand(2));
  __ b(gt, &result_longer_than_two);
  __ b(lt, &runtime);

  __ JumpIfInstanceTypeIsNotSequentialAscii(r1, r1, &runtime);

  // Get the two characters forming the sub string.
  __ add(r0, r0, Operand(r3));
  __ ldrb(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
  __ ldrb(r4, FieldMemOperand(r0, SeqAsciiString::kHeaderSize + 1));

  // Try to lookup two character string in symbol table.
  Label make_two_character_string;
  StringHelper::GenerateTwoCharacterSymbolTableProbe(
      masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
  __ jmp(&return_r0);

  // r2: result string length.
  // r3: two characters combined into halfword in little endian byte order.
  __ bind(&make_two_character_string);
  __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
  __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
  __ jmp(&return_r0);

  __ bind(&result_longer_than_two);
  // Deal with different string types: update the index if necessary
  // and put the underlying string into r5.
  // r0: original string

@@ -112,6 +112,13 @@ class JumpPatchSite BASE_EMBEDDED {
 };
 
 
+// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
+int FullCodeGenerator::self_optimization_header_size() {
+  UNREACHABLE();
+  return 24;
+}
+
+
 // Generate code for a JS function. On entry to the function the receiver
 // and arguments have been pushed on the stack left to right. The actual
 // argument count matches the formal parameter count expected by the
@@ -268,11 +275,11 @@ void FullCodeGenerator::Generate() {
   // For named function expressions, declare the function name as a
   // constant.
   if (scope()->is_function_scope() && scope()->function() != NULL) {
-    VariableDeclaration* function = scope()->function();
-    ASSERT(function->proxy()->var()->mode() == CONST ||
-           function->proxy()->var()->mode() == CONST_HARMONY);
-    ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
-    VisitVariableDeclaration(function);
+    VariableProxy* proxy = scope()->function();
+    ASSERT(proxy->var()->mode() == CONST ||
+           proxy->var()->mode() == CONST_HARMONY);
+    ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
+    EmitDeclaration(proxy, proxy->var()->mode(), NULL);
   }
   VisitDeclarations(scope()->declarations());
 }
@@ -782,51 +789,62 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
 }
 
 
-void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
-  // The variable in the declaration always resides in the current function
-  // context.
-  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
-  if (FLAG_debug_code) {
-    // Check that we're not inside a with or catch context.
-    __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
-    __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
-    __ Check(ne, "Declaration in with context.");
-    __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
-    __ Check(ne, "Declaration in catch context.");
-  }
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(
-    VariableDeclaration* declaration) {
+void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
+                                        VariableMode mode,
+                                        FunctionLiteral* function) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
-  VariableProxy* proxy = declaration->proxy();
-  VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+  bool binding_needs_init = (function == NULL) &&
+      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      globals_->Add(variable->name());
-      globals_->Add(variable->binding_needs_init()
-                        ? isolate()->factory()->the_hole_value()
-                        : isolate()->factory()->undefined_value());
+      ++global_count_;
      break;
 
     case Variable::PARAMETER:
    case Variable::LOCAL:
-      if (hole_init) {
-        Comment cmnt(masm_, "[ VariableDeclaration");
+      if (function != NULL) {
+        Comment cmnt(masm_, "[ Declaration");
+        VisitForAccumulatorValue(function);
+        __ str(result_register(), StackOperand(variable));
+      } else if (binding_needs_init) {
+        Comment cmnt(masm_, "[ Declaration");
        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
        __ str(ip, StackOperand(variable));
      }
      break;
 
    case Variable::CONTEXT:
-      if (hole_init) {
-        Comment cmnt(masm_, "[ VariableDeclaration");
-        EmitDebugCheckDeclarationContext(variable);
+      // The variable in the decl always resides in the current function
+      // context.
+      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+      if (FLAG_debug_code) {
+        // Check that we're not inside a with or catch context.
+        __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
+        __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
+        __ Check(ne, "Declaration in with context.");
+        __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
+        __ Check(ne, "Declaration in catch context.");
+      }
+      if (function != NULL) {
+        Comment cmnt(masm_, "[ Declaration");
+        VisitForAccumulatorValue(function);
+        __ str(result_register(), ContextOperand(cp, variable->index()));
+        int offset = Context::SlotOffset(variable->index());
+        // We know that we have written a function, which is not a smi.
+        __ RecordWriteContextSlot(cp,
+                                  offset,
+                                  result_register(),
+                                  r2,
+                                  kLRHasBeenSaved,
+                                  kDontSaveFPRegs,
+                                  EMIT_REMEMBERED_SET,
+                                  OMIT_SMI_CHECK);
+        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      } else if (binding_needs_init) {
+        Comment cmnt(masm_, "[ Declaration");
        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
        __ str(ip, ContextOperand(cp, variable->index()));
        // No write barrier since the_hole_value is in old space.
@@ -835,11 +853,13 @@ void FullCodeGenerator::VisitVariableDeclaration(
      break;
 
    case Variable::LOOKUP: {
-      Comment cmnt(masm_, "[ VariableDeclaration");
+      Comment cmnt(masm_, "[ Declaration");
      __ mov(r2, Operand(variable->name()));
      // Declaration nodes are always introduced in one of four modes.
-      ASSERT(mode == VAR || mode == LET ||
-             mode == CONST || mode == CONST_HARMONY);
+      ASSERT(mode == VAR ||
+             mode == CONST ||
+             mode == CONST_HARMONY ||
+             mode == LET);
      PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
          ? READ_ONLY : NONE;
      __ mov(r1, Operand(Smi::FromInt(attr)));
@@ -847,7 +867,11 @@ void FullCodeGenerator::VisitVariableDeclaration(
      // Note: For variables we must not push an initial value (such as
      // 'undefined') because we may have a (legal) redeclaration and we
      // must not destroy the current value.
-      if (hole_init) {
+      if (function != NULL) {
+        __ Push(cp, r2, r1);
+        // Push initial value for function declaration.
+        VisitForStackValue(function);
+      } else if (binding_needs_init) {
        __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
        __ Push(cp, r2, r1, r0);
      } else {
@@ -861,122 +885,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
 }
 
 
-void FullCodeGenerator::VisitFunctionDeclaration(
-    FunctionDeclaration* declaration) {
-  VariableProxy* proxy = declaration->proxy();
-  Variable* variable = proxy->var();
-  switch (variable->location()) {
-    case Variable::UNALLOCATED: {
-      globals_->Add(variable->name());
-      Handle<SharedFunctionInfo> function =
-          Compiler::BuildFunctionInfo(declaration->fun(), script());
-      // Check for stack-overflow exception.
-      if (function.is_null()) return SetStackOverflow();
-      globals_->Add(function);
-      break;
-    }
-
-    case Variable::PARAMETER:
-    case Variable::LOCAL: {
-      Comment cmnt(masm_, "[ FunctionDeclaration");
-      VisitForAccumulatorValue(declaration->fun());
-      __ str(result_register(), StackOperand(variable));
-      break;
-    }
-
-    case Variable::CONTEXT: {
-      Comment cmnt(masm_, "[ FunctionDeclaration");
-      EmitDebugCheckDeclarationContext(variable);
-      VisitForAccumulatorValue(declaration->fun());
-      __ str(result_register(), ContextOperand(cp, variable->index()));
-      int offset = Context::SlotOffset(variable->index());
-      // We know that we have written a function, which is not a smi.
-      __ RecordWriteContextSlot(cp,
-                                offset,
-                                result_register(),
-                                r2,
-                                kLRHasBeenSaved,
-                                kDontSaveFPRegs,
-                                EMIT_REMEMBERED_SET,
-                                OMIT_SMI_CHECK);
-      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      break;
-    }
-
-    case Variable::LOOKUP: {
-      Comment cmnt(masm_, "[ FunctionDeclaration");
-      __ mov(r2, Operand(variable->name()));
-      __ mov(r1, Operand(Smi::FromInt(NONE)));
-      __ Push(cp, r2, r1);
-      // Push initial value for function declaration.
-      VisitForStackValue(declaration->fun());
-      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
-      break;
-    }
-  }
-}
-
-
-void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
-  VariableProxy* proxy = declaration->proxy();
-  Variable* variable = proxy->var();
-  Handle<JSModule> instance = declaration->module()->interface()->Instance();
-  ASSERT(!instance.is_null());
-
-  switch (variable->location()) {
-    case Variable::UNALLOCATED: {
-      Comment cmnt(masm_, "[ ModuleDeclaration");
-      globals_->Add(variable->name());
-      globals_->Add(instance);
-      Visit(declaration->module());
-      break;
-    }
-
-    case Variable::CONTEXT: {
-      Comment cmnt(masm_, "[ ModuleDeclaration");
-      EmitDebugCheckDeclarationContext(variable);
-      __ mov(r1, Operand(instance));
-      __ str(r1, ContextOperand(cp, variable->index()));
-      Visit(declaration->module());
-      break;
-    }
-
-    case Variable::PARAMETER:
-    case Variable::LOCAL:
-    case Variable::LOOKUP:
-      UNREACHABLE();
-  }
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
-  VariableProxy* proxy = declaration->proxy();
-  Variable* variable = proxy->var();
-  switch (variable->location()) {
-    case Variable::UNALLOCATED:
-      // TODO(rossberg)
-      break;
-
-    case Variable::CONTEXT: {
-      Comment cmnt(masm_, "[ ImportDeclaration");
-      EmitDebugCheckDeclarationContext(variable);
-      // TODO(rossberg)
-      break;
-    }
-
-    case Variable::PARAMETER:
-    case Variable::LOCAL:
-    case Variable::LOOKUP:
-      UNREACHABLE();
-  }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
-  // TODO(rossberg)
-}
-
-
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
@@ -2363,18 +2271,6 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
-
-  // Record call targets in unoptimized code, but not in the snapshot.
-  if (!Serializer::enabled()) {
-    flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
-    Handle<Object> uninitialized =
-        TypeFeedbackCells::UninitializedSentinel(isolate());
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
-    RecordTypeFeedbackCell(expr->id(), cell);
-    __ mov(r2, Operand(cell));
-  }
-
   CallFunctionStub stub(arg_count, flags);
   __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ CallStub(&stub);
@@ -3668,7 +3564,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
   __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
   __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
-  __ add(string_length, string_length, Operand(scratch1), SetCC);
+  __ add(string_length, string_length, Operand(scratch1));
   __ b(vs, &bailout);
   __ cmp(element, elements_end);
   __ b(lt, &loop);
@@ -3705,7 +3601,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   __ b(ne, &bailout);
   __ tst(scratch2, Operand(0x80000000));
   __ b(ne, &bailout);
-  __ add(string_length, string_length, Operand(scratch2), SetCC);
+  __ add(string_length, string_length, Operand(scratch2));
   __ b(vs, &bailout);
   __ SmiUntag(string_length);
 
@@ -4461,8 +4357,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
 
 void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
   Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_global_scope() ||
-      declaration_scope->is_module_scope()) {
+  if (declaration_scope->is_global_scope()) {
     // Contexts nested in the global context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
     // code. Pass a smi sentinel and let the runtime look up the empty

@@ -108,17 +108,22 @@ void LInstruction::PrintTo(StringStream* stream) {
 }
 
 
-void LInstruction::PrintDataTo(StringStream* stream) {
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
-  for (int i = 0; i < InputCount(); i++) {
+  for (int i = 0; i < inputs_.length(); i++) {
     if (i > 0) stream->Add(" ");
-    InputAt(i)->PrintTo(stream);
+    inputs_[i]->PrintTo(stream);
   }
 }
 
 
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
-  if (HasResult()) result()->PrintTo(stream);
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
+  for (int i = 0; i < results_.length(); i++) {
+    if (i > 0) stream->Add(" ");
+    results_[i]->PrintTo(stream);
+  }
 }
 
 
@@ -727,6 +732,22 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
 }
 
 
+LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
+    LInstruction* instr, int ast_id) {
+  ASSERT(instruction_pending_deoptimization_environment_ == NULL);
+  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+  instruction_pending_deoptimization_environment_ = instr;
+  pending_deoptimization_ast_id_ = ast_id;
+  return instr;
+}
+
+
+void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
+  instruction_pending_deoptimization_environment_ = NULL;
+  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+}
+
+
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
@@ -739,10 +760,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
   if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
-    ASSERT(instruction_pending_deoptimization_environment_ == NULL);
-    ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
-    instruction_pending_deoptimization_environment_ = instr;
-    pending_deoptimization_ast_id_ = sim->ast_id();
+    instr = SetInstructionPendingDeoptimizationEnvironment(
+        instr, sim->ast_id());
   }
 
   // If instruction does not have side-effects lazy deoptimization
@@ -760,6 +779,12 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
 }
 
 
+LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
+  instr->MarkAsSaveDoubles();
+  return instr;
+}
+
+
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
   instr->set_pointer_map(new(zone()) LPointerMap(position_));
@@ -1270,7 +1295,6 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
 LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
-  if (instr->HasNoUses()) return NULL;
   LOperand* value = UseRegisterAtStart(instr->value());
   return DefineAsRegister(new(zone()) LBitNotI(value));
 }
@@ -1295,75 +1319,6 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
 }
 
 
-bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
-  uint32_t divisor_abs = abs(divisor);
-  // Dividing by 0, 1, and powers of 2 is easy.
-  // Note that IsPowerOf2(0) returns true;
-  ASSERT(IsPowerOf2(0) == true);
-  if (IsPowerOf2(divisor_abs)) return true;
-
-  // We have magic numbers for a few specific divisors.
-  // Details and proofs can be found in:
-  // - Hacker's Delight, Henry S. Warren, Jr.
-  // - The PowerPC Compiler Writer’s Guide
-  // and probably many others.
-  //
-  // We handle
-  //   <divisor with magic numbers> * <power of 2>
-  // but not
-  //   <divisor with magic numbers> * <other divisor with magic numbers>
-  int32_t power_of_2_factor =
-      CompilerIntrinsics::CountTrailingZeros(divisor_abs);
-  DivMagicNumbers magic_numbers =
-      DivMagicNumberFor(divisor_abs >> power_of_2_factor);
-  if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
-
-  return false;
-}
-
-
-HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
-  // A value with an integer representation does not need to be transformed.
-  if (dividend->representation().IsInteger32()) {
-    return dividend;
-  // A change from an integer32 can be replaced by the integer32 value.
-  } else if (dividend->IsChange() &&
-             HChange::cast(dividend)->from().IsInteger32()) {
-    return HChange::cast(dividend)->value();
-  }
-  return NULL;
-}
-
-
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
-  // Only optimize when we have magic numbers for the divisor.
-  // The standard integer division routine is usually slower than transitionning
-  // to VFP.
-  if (divisor->IsConstant() &&
-      HConstant::cast(divisor)->HasInteger32Value()) {
-    HConstant* constant_val = HConstant::cast(divisor);
-    int32_t int32_val = constant_val->Integer32Value();
-    if (LChunkBuilder::HasMagicNumberForDivisor(int32_val)) {
-      return constant_val->CopyToRepresentation(Representation::Integer32());
-    }
-  }
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
-  HValue* right = instr->right();
-  LOperand* dividend = UseRegister(instr->left());
-  LOperand* divisor = UseRegisterOrConstant(right);
-  LOperand* remainder = TempRegister();
-  ASSERT(right->IsConstant() &&
-         HConstant::cast(right)->HasInteger32Value() &&
-         HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()));
-  return AssignEnvironment(DefineAsRegister(
-          new LMathFloorOfDiv(dividend, divisor, remainder)));
-}
-
-
 LInstruction* LChunkBuilder::DoMod(HMod* instr) {
   if (instr->representation().IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
@@ -1798,9 +1753,9 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
 }
 
 
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LInstruction* result = new(zone()) LCheckMaps(value);
+  LInstruction* result = new(zone()) LCheckMap(value);
   return AssignEnvironment(result);
 }
 
@@ -2287,12 +2242,9 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
   if (pending_deoptimization_ast_id_ == instr->ast_id()) {
     LInstruction* result = new(zone()) LLazyBailout;
     result = AssignEnvironment(result);
-    // Store the lazy deopt environment with the instruction if needed. Right
-    // now it is only used for LInstanceOfKnownGlobal.
-    instruction_pending_deoptimization_environment_->
-        SetDeferredLazyDeoptimizationEnvironment(result->environment());
-    instruction_pending_deoptimization_environment_ = NULL;
-    pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+    set_deoptimization_environment(result->environment());
+    ClearInstructionPendingDeoptimizationEnvironment();
     return result;
   }
 
@@ -2319,8 +2271,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
                                                undefined,
                                                instr->call_kind(),
                                                instr->is_construct());
-  if (instr->arguments_var() != NULL) {
-    inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
+  if (instr->arguments() != NULL) {
+    inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
   }
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
@@ -2329,21 +2281,10 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
 
 
 LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
-  LInstruction* pop = NULL;
-
-  HEnvironment* env = current_block_->last_environment();
-
-  if (instr->arguments_pushed()) {
-    int argument_count = env->arguments_environment()->parameter_count();
-    pop = new(zone()) LDrop(argument_count);
-    argument_count_ -= argument_count;
-  }
-
   HEnvironment* outer = current_block_->last_environment()->
       DiscardInlined(false);
   current_block_->UpdateEnvironment(outer);
-
-  return pop;
+  return NULL;
 }

@@ -72,7 +72,7 @@ class LCodeGen;
   V(CheckFunction) \
   V(CheckInstanceType) \
   V(CheckNonSmi) \
-  V(CheckMaps) \
+  V(CheckMap) \
   V(CheckPrototypeMaps) \
   V(CheckSmi) \
   V(ClampDToUint8) \
@@ -132,7 +132,6 @@ class LCodeGen;
   V(LoadNamedField) \
   V(LoadNamedFieldPolymorphic) \
   V(LoadNamedGeneric) \
-  V(MathFloorOfDiv) \
   V(ModI) \
   V(MulI) \
   V(NumberTagD) \
@@ -180,8 +179,7 @@ class LCodeGen;
   V(CheckMapValue) \
   V(LoadFieldByIndex) \
   V(DateField) \
-  V(WrapReceiver) \
-  V(Drop)
+  V(WrapReceiver)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -205,14 +203,15 @@ class LInstruction: public ZoneObject {
   LInstruction()
       : environment_(NULL),
        hydrogen_value_(NULL),
-        is_call_(false) { }
+        is_call_(false),
+        is_save_doubles_(false) { }
   virtual ~LInstruction() { }
 
   virtual void CompileToNative(LCodeGen* generator) = 0;
   virtual const char* Mnemonic() const = 0;
   virtual void PrintTo(StringStream* stream);
-  virtual void PrintDataTo(StringStream* stream);
-  virtual void PrintOutputOperandTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream) = 0;
+  virtual void PrintOutputOperandTo(StringStream* stream) = 0;
 
   enum Opcode {
     // Declare a unique enum value for each instruction.
@@ -247,12 +246,22 @@ class LInstruction: public ZoneObject {
   void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
   HValue* hydrogen_value() const { return hydrogen_value_; }
 
-  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
+  void set_deoptimization_environment(LEnvironment* env) {
+    deoptimization_environment_.set(env);
+  }
+  LEnvironment* deoptimization_environment() const {
+    return deoptimization_environment_.get();
+  }
+  bool HasDeoptimizationEnvironment() const {
+    return deoptimization_environment_.is_set();
+  }
 
   void MarkAsCall() { is_call_ = true; }
+  void MarkAsSaveDoubles() { is_save_doubles_ = true; }
 
   // Interface to the register allocator and iterators.
   bool IsMarkedAsCall() const { return is_call_; }
+  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
 
   virtual bool HasResult() const = 0;
   virtual LOperand* result() = 0;
@@ -273,7 +282,9 @@ class LInstruction: public ZoneObject {
   LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
   HValue* hydrogen_value_;
+  SetOncePointer<LEnvironment> deoptimization_environment_;
   bool is_call_;
+  bool is_save_doubles_;
 };
 
 
@@ -295,6 +306,9 @@ class LTemplateInstruction: public LInstruction {
   int TempCount() { return T; }
   LOperand* TempAt(int i) { return temps_[i]; }
 
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
  protected:
   EmbeddedContainer<LOperand*, R> results_;
   EmbeddedContainer<LOperand*, I> inputs_;
@@ -520,8 +534,9 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
 
 class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
  public:
+  LArgumentsElements() { }
+
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
-  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
 };
 
 
@@ -567,21 +582,6 @@ class LDivI: public LTemplateInstruction<1, 2, 0> {
 };
 
 
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
- public:
-  LMathFloorOfDiv(LOperand* left,
-                  LOperand* right,
-                  LOperand* temp = NULL) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
-  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
 class LMulI: public LTemplateInstruction<1, 2, 1> {
  public:
   LMulI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -834,15 +834,6 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
   DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
 
   Handle<JSFunction> function() const { return hydrogen()->function(); }
-  LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
-    return lazy_deopt_env_;
-  }
-  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
-    lazy_deopt_env_ = env;
-  }
-
- private:
-  LEnvironment* lazy_deopt_env_;
 };
 
 
@@ -1387,19 +1378,6 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
 };
 
 
-class LDrop: public LTemplateInstruction<0, 0, 0> {
- public:
-  explicit LDrop(int count) : count_(count) { }
-
-  int count() const { return count_; }
-
-  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
-  int count_;
-};
-
-
 class LThisFunction: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
@@ -1482,7 +1460,6 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
   virtual void PrintDataTo(StringStream* stream);
 
   int arity() const { return hydrogen()->argument_count() - 1; }
-  Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
 };
 
 
@@ -1762,8 +1739,6 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-
-  bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
 };
 
 
@@ -1914,14 +1889,14 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
 };
 
 
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
  public:
-  explicit LCheckMaps(LOperand* value) {
+  explicit LCheckMap(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
-  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
 };
 
 
@@ -2299,10 +2274,6 @@ class LChunkBuilder BASE_EMBEDDED {
   HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
 #undef DECLARE_DO
 
-  static bool HasMagicNumberForDivisor(int32_t divisor);
-  static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
-  static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
-
  private:
   enum Status {
     UNUSED,
@@ -2398,6 +2369,11 @@ class LChunkBuilder BASE_EMBEDDED {
                             LInstruction* instr,
                             HInstruction* hinstr,
                             CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+  LInstruction* MarkAsSaveDoubles(LInstruction* instr);
+
+  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
+      LInstruction* instr, int ast_id);
+  void ClearInstructionPendingDeoptimizationEnvironment();
 
   LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
                                   int* argument_index_accumulator);

@@ -1034,100 +1034,6 @@ void LCodeGen::DoModI(LModI* instr) {
 }
 
 
-void LCodeGen::EmitSignedIntegerDivisionByConstant(
-    Register result,
-    Register dividend,
-    int32_t divisor,
-    Register remainder,
-    Register scratch,
-    LEnvironment* environment) {
-  ASSERT(!AreAliased(dividend, scratch, ip));
-  ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
-
-  uint32_t divisor_abs = abs(divisor);
-
-  int32_t power_of_2_factor =
-      CompilerIntrinsics::CountTrailingZeros(divisor_abs);
-
-  switch (divisor_abs) {
-    case 0:
-      DeoptimizeIf(al, environment);
-      return;
-
-    case 1:
-      if (divisor > 0) {
-        __ Move(result, dividend);
-      } else {
-        __ rsb(result, dividend, Operand(0), SetCC);
-        DeoptimizeIf(vs, environment);
-      }
-      // Compute the remainder.
-      __ mov(remainder, Operand(0));
-      return;
-
-    default:
-      if (IsPowerOf2(divisor_abs)) {
-        // Branch and condition free code for integer division by a power
-        // of two.
-        int32_t power = WhichPowerOf2(divisor_abs);
-        if (power > 1) {
-          __ mov(scratch, Operand(dividend, ASR, power - 1));
-        }
-        __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
-        __ mov(result, Operand(scratch, ASR, power));
-        // Negate if necessary.
-        // We don't need to check for overflow because the case '-1' is
-        // handled separately.
-        if (divisor < 0) {
-          ASSERT(divisor != -1);
-          __ rsb(result, result, Operand(0));
-        }
-        // Compute the remainder.
-        if (divisor > 0) {
-          __ sub(remainder, dividend, Operand(result, LSL, power));
-        } else {
-          __ add(remainder, dividend, Operand(result, LSL, power));
-        }
-        return;
-      } else {
-        // Use magic numbers for a few specific divisors.
-        // Details and proofs can be found in:
-        // - Hacker's Delight, Henry S. Warren, Jr.
-        // - The PowerPC Compiler Writer’s Guide
-        // and probably many others.
-        //
-        // We handle
-        //   <divisor with magic numbers> * <power of 2>
-        // but not
-        //   <divisor with magic numbers> * <other divisor with magic numbers>
-        DivMagicNumbers magic_numbers =
-            DivMagicNumberFor(divisor_abs >> power_of_2_factor);
-        // Branch and condition free code for integer division by a power
-        // of two.
-        const int32_t M = magic_numbers.M;
-        const int32_t s = magic_numbers.s + power_of_2_factor;
-
-        __ mov(ip, Operand(M));
-        __ smull(ip, scratch, dividend, ip);
-        if (M < 0) {
-          __ add(scratch, scratch, Operand(dividend));
-        }
-        if (s > 0) {
-          __ mov(scratch, Operand(scratch, ASR, s));
-        }
-        __ add(result, scratch, Operand(dividend, LSR, 31));
-        if (divisor < 0) __ rsb(result, result, Operand(0));
-        // Compute the remainder.
-        __ mov(ip, Operand(divisor));
-        // This sequence could be replaced with 'mls' when
-        // it gets implemented.
-        __ mul(scratch, result, ip);
-        __ sub(remainder, dividend, scratch);
-      }
-  }
-}
-
-
 void LCodeGen::DoDivI(LDivI* instr) {
   class DeferredDivI: public LDeferredCode {
    public:
@@ -1209,34 +1115,6 @@ void LCodeGen::DoDivI(LDivI* instr) {
 }
 
 
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
-  const Register result = ToRegister(instr->result());
-  const Register left = ToRegister(instr->InputAt(0));
-  const Register remainder = ToRegister(instr->TempAt(0));
-  const Register scratch = scratch0();
-
-  // We only optimize this for division by constants, because the standard
-  // integer division routine is usually slower than transitionning to VFP.
-  // This could be optimized on processors with SDIV available.
-  ASSERT(instr->InputAt(1)->IsConstantOperand());
-  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
-  if (divisor < 0) {
-    __ cmp(left, Operand(0));
-    DeoptimizeIf(eq, instr->environment());
-  }
-  EmitSignedIntegerDivisionByConstant(result,
-                                      left,
-                                      divisor,
-                                      remainder,
-                                      scratch,
-                                      instr->environment());
-  // We operated a truncating division. Correct the result if necessary.
-  __ cmp(remainder, Operand(0));
-  __ teq(remainder, Operand(divisor), ne);
-  __ sub(result, result, Operand(1), LeaveCC, mi);
-}
-
-
 template<int T>
 void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
                                       Token::Value op) {
@@ -2389,7 +2267,8 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                   RelocInfo::CODE_TARGET,
                   instr,
                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LEnvironment* env = instr->deoptimization_environment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   // Put the result value into the result register slot and
   // restore all registers.
@@ -2885,20 +2764,16 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   Register scratch = scratch0();
   Register result = ToRegister(instr->result());
 
-  if (instr->hydrogen()->from_inlined()) {
-    __ sub(result, sp, Operand(2 * kPointerSize));
-  } else {
-    // Check if the calling frame is an arguments adaptor frame.
-    Label done, adapted;
-    __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-    __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
-    __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  // Check if the calling frame is an arguments adaptor frame.
+  Label done, adapted;
+  __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+  __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
 
-    // Result is the frame pointer for the frame if not adapted and for the real
-    // frame below the adaptor frame if adapted.
-    __ mov(result, fp, LeaveCC, ne);
-    __ mov(result, scratch, LeaveCC, eq);
-  }
+  // Result is the frame pointer for the frame if not adapted and for the real
+  // frame below the adaptor frame if adapted.
+  __ mov(result, fp, LeaveCC, ne);
+  __ mov(result, scratch, LeaveCC, eq);
 }
 
 
@@ -3007,7 +2882,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   __ b(ne, &loop);
 
   __ bind(&invoke);
-  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
@@ -3032,11 +2907,6 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
 }
 
 
-void LCodeGen::DoDrop(LDrop* instr) {
-  __ Drop(instr->count());
-}
-
-
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
   __ LoadHeapObject(result, instr->hydrogen()->closure());
@@ -3083,8 +2953,7 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int arity,
                                  LInstruction* instr,
-                                 CallKind call_kind,
-                                 R1State r1_state) {
+                                 CallKind call_kind) {
   bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
       function->shared()->formal_parameter_count() == arity;
 
@@ -3092,10 +2961,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   RecordPosition(pointers->position());
 
   if (can_invoke_directly) {
-    if (r1_state == R1_UNINITIALIZED) {
-      __ LoadHeapObject(r1, function);
-    }
-
+    __ LoadHeapObject(r1, function);
     // Change context if needed.
     bool change_context =
         (info()->closure()->context() != function->context()) ||
@@ -3134,8 +3000,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
-                    CALL_AS_METHOD,
-                    R1_UNINITIALIZED);
+                    CALL_AS_METHOD);
 }
 
 
@@ -3559,21 +3424,13 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   ASSERT(ToRegister(instr->function()).is(r1));
-  ASSERT(instr->HasPointerMap());
-
-  if (instr->known_function().is_null()) {
-    LPointerMap* pointers = instr->pointer_map();
-    RecordPosition(pointers->position());
-    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(instr->arity());
-    __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
-    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  } else {
-    CallKnownFunction(instr->known_function(),
-                      instr->arity(),
-                      instr,
-                      CALL_AS_METHOD,
-                      R1_CONTAINS_TARGET);
-  }
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+  ParameterCount count(instr->arity());
+  __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3628,11 +3485,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
-  CallKnownFunction(instr->target(),
-                    instr->arity(),
-                    instr,
-                    CALL_AS_FUNCTION,
-                    R1_UNINITIALIZED);
+  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
 
@@ -3762,6 +3615,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
   Register scratch = scratch0();
   bool key_is_constant = instr->key()->IsConstantOperand();
   int constant_key = 0;
+  Label not_nan;
 
   // Calculate the effective address of the slot in the array to store the
   // double value.
@@ -3784,15 +3638,13 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
                Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   }
 
-  if (instr->NeedsCanonicalization()) {
-    // Check for NaN. All NaNs must be canonicalized.
-    __ VFPCompareAndSetFlags(value, value);
-    // Only load canonical NaN if the comparison above set the overflow.
-    __ Vmov(value,
-            FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
-            vs);
-  }
+  // Check for NaN. All NaNs must be canonicalized.
+  __ VFPCompareAndSetFlags(value, value);
+
+  // Only load canonical NaN if the comparison above set the overflow.
+  __ Vmov(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double(), vs);
 
+  __ bind(&not_nan);
   __ vstr(value, scratch, 0);
 }
 
@@ -4486,22 +4338,14 @@ void LCodeGen::DoCheckMapCommon(Register reg,
 }
 
 
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+void LCodeGen::DoCheckMap(LCheckMap* instr) {
   Register scratch = scratch0();
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
 
-  Label success;
-  SmallMapList* map_set = instr->hydrogen()->map_set();
-  for (int i = 0; i < map_set->length() - 1; i++) {
-    Handle<Map> map = map_set->at(i);
-    __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP);
-    __ b(eq, &success);
-  }
-  Handle<Map> map = map_set->last();
-  DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
-  __ bind(&success);
+  Handle<Map> map = instr->hydrogen()->map();
+  DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(),
+                   instr->environment());
 }
 
 
@@ -4620,14 +4464,6 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
                         deferred->entry(),
                         TAG_OBJECT);
 
-  __ bind(deferred->exit());
-  if (FLAG_debug_code) {
-    Label is_in_new_space;
-    __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
-    __ Abort("Allocated object is not in new-space");
-    __ bind(&is_in_new_space);
-  }
-
   // Load the initial map.
   Register map = scratch;
   __ LoadHeapObject(map, constructor);
@@ -4646,14 +4482,14 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
       __ str(scratch, FieldMemOperand(result, property_offset));
     }
   }
 
+  __ bind(deferred->exit());
 }
 
 
 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
   Register result = ToRegister(instr->result());
   Handle<JSFunction> constructor = instr->hydrogen()->constructor();
   Handle<Map> initial_map(constructor->initial_map());
   int instance_size = initial_map->instance_size();
 
   // TODO(3095996): Get rid of this. For now, we need to make the
   // result register contain a valid pointer because it is already
@@ -4661,9 +4497,9 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
   __ mov(result, Operand(0));
 
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-  __ mov(r0, Operand(Smi::FromInt(instance_size)));
+  __ LoadHeapObject(r0, constructor);
   __ push(r0);
-  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+  CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
   __ StoreToSafepointRegisterSlot(r0, result);
 }
 
@@ -4797,10 +4633,9 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
       __ str(r2, FieldMemOperand(result, total_offset + 4));
     }
   } else if (elements->IsFixedArray()) {
-    Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
     for (int i = 0; i < elements_length; i++) {
       int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
-      Handle<Object> value(fast_elements->get(i));
+      Handle<Object> value = JSObject::GetElement(object, i);
      if (value->IsJSObject()) {
        Handle<JSObject> value_object = Handle<JSObject>::cast(value);
        __ add(r2, result, Operand(*offset));
@@ -4824,23 +4659,6 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
 
 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
   int size = instr->hydrogen()->total_size();
-  ElementsKind boilerplate_elements_kind =
-      instr->hydrogen()->boilerplate()->GetElementsKind();
-
-  // Deopt if the literal boilerplate ElementsKind is of a type different than
-  // the expected one. The check isn't necessary if the boilerplate has already
-  // been converted to FAST_ELEMENTS.
-  if (boilerplate_elements_kind != FAST_ELEMENTS) {
-    __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
-    // Load map into r2.
-    __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-    // Load the map's "bit field 2".
-    __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
-    // Retrieve elements_kind from bit field 2.
-    __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
-    __ cmp(r2, Operand(boilerplate_elements_kind));
-    DeoptimizeIf(ne, instr->environment());
-  }
 
   // Allocate all objects that are part of the literal in one big
   // allocation. This avoids multiple limit checks.
@@ -5136,7 +4954,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
   Register strict = scratch0();
   __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
   __ Push(object, key, strict);
-  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
@@ -5149,7 +4967,7 @@ void LCodeGen::DoIn(LIn* instr) {
   Register obj = ToRegister(instr->object());
   Register key = ToRegister(instr->key());
   __ Push(key, obj);
-  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);

@@ -215,18 +215,12 @@ class LCodeGen BASE_EMBEDDED {
                                int argc,
                                LInstruction* instr);
 
-  enum R1State {
-    R1_UNINITIALIZED,
-    R1_CONTAINS_TARGET
-  };
-
   // Generate a direct call to a known function. Expects the function
   // to be in r1.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
                          LInstruction* instr,
-                         CallKind call_kind,
-                         R1State r1_state);
+                         CallKind call_kind);
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
@@ -323,17 +317,6 @@ class LCodeGen BASE_EMBEDDED {
                     Register source,
                     int* offset);
 
-  // Emit optimized code for integer division.
-  // Inputs are signed.
-  // All registers are clobbered.
-  // If 'remainder' is no_reg, it is not computed.
-  void EmitSignedIntegerDivisionByConstant(Register result,
-                                           Register dividend,
-                                           int32_t divisor,
-                                           Register remainder,
-                                           Register scratch,
-                                           LEnvironment* environment);
-
   struct JumpTableEntry {
     explicit inline JumpTableEntry(Address entry)
         : label(),

@@ -3710,28 +3710,15 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
 }
 
 
 #ifdef DEBUG
-bool AreAliased(Register reg1,
-                Register reg2,
-                Register reg3,
-                Register reg4,
-                Register reg5,
-                Register reg6) {
-  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
-      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
-
-  RegList regs = 0;
-  if (reg1.is_valid()) regs |= reg1.bit();
-  if (reg2.is_valid()) regs |= reg2.bit();
-  if (reg3.is_valid()) regs |= reg3.bit();
-  if (reg4.is_valid()) regs |= reg4.bit();
-  if (reg5.is_valid()) regs |= reg5.bit();
-  if (reg6.is_valid()) regs |= reg6.bit();
-  int n_of_non_aliasing_regs = NumRegs(regs);
-
-  return n_of_valid_regs != n_of_non_aliasing_regs;
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+  if (r1.is(r2)) return true;
+  if (r1.is(r3)) return true;
+  if (r1.is(r4)) return true;
+  if (r2.is(r3)) return true;
+  if (r2.is(r4)) return true;
+  if (r3.is(r4)) return true;
+  return false;
 }
 #endif
 
 
 CodePatcher::CodePatcher(byte* address, int instructions)

@@ -85,14 +85,7 @@ enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
 enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
 
 
-#ifdef DEBUG
-bool AreAliased(Register reg1,
-                Register reg2,
-                Register reg3 = no_reg,
-                Register reg4 = no_reg,
-                Register reg5 = no_reg,
-                Register reg6 = no_reg);
-#endif
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
 
 
 // MacroAssembler implements a collection of frequently used macros.
@@ -1328,6 +1321,7 @@ class MacroAssembler: public Assembler {
 };
 
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
 // The code patcher is used to patch (typically) small parts of code e.g. for
 // debugging and other types of instrumentation. When using the code patcher
 // the exact number of bytes specified must be emitted. It is not legal to emit
@@ -1357,6 +1351,7 @@ class CodePatcher {
   int size_;  // Number of bytes of the expected patch size.
   MacroAssembler masm_;  // Macro assembler used to generate the code.
 };
+#endif  // ENABLE_DEBUGGER_SUPPORT
 
 
 // -----------------------------------------------------------------------------

@@ -452,12 +452,8 @@ void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c,
 void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
                                                      uint32_t mask,
                                                      Label* on_equal) {
-  if (c == 0) {
-    __ tst(current_character(), Operand(mask));
-  } else {
-    __ and_(r0, current_character(), Operand(mask));
-    __ cmp(r0, Operand(c));
-  }
+  __ and_(r0, current_character(), Operand(mask));
+  __ cmp(r0, Operand(c));
   BranchOrBacktrack(eq, on_equal);
 }
 
@@ -465,12 +461,8 @@ void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
 void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(unsigned c,
                                                         unsigned mask,
                                                         Label* on_not_equal) {
-  if (c == 0) {
-    __ tst(current_character(), Operand(mask));
-  } else {
-    __ and_(r0, current_character(), Operand(mask));
-    __ cmp(r0, Operand(c));
-  }
+  __ and_(r0, current_character(), Operand(mask));
+  __ cmp(r0, Operand(c));
   BranchOrBacktrack(ne, on_not_equal);
 }
 
@@ -488,44 +480,6 @@ void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
 }
 
 
-void RegExpMacroAssemblerARM::CheckCharacterInRange(
-    uc16 from,
-    uc16 to,
-    Label* on_in_range) {
-  __ sub(r0, current_character(), Operand(from));
-  __ cmp(r0, Operand(to - from));
-  BranchOrBacktrack(ls, on_in_range);  // Unsigned lower-or-same condition.
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterNotInRange(
-    uc16 from,
-    uc16 to,
-    Label* on_not_in_range) {
-  __ sub(r0, current_character(), Operand(from));
-  __ cmp(r0, Operand(to - from));
-  BranchOrBacktrack(hi, on_not_in_range);  // Unsigned higher condition.
-}
-
-
-void RegExpMacroAssemblerARM::CheckBitInTable(
-    Handle<ByteArray> table,
-    Label* on_bit_set) {
-  __ mov(r0, Operand(table));
-  if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
-    __ and_(r1, current_character(), Operand(kTableSize - 1));
-    __ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
-  } else {
-    __ add(r1,
-           current_character(),
-           Operand(ByteArray::kHeaderSize - kHeapObjectTag));
-  }
-  __ ldrb(r0, MemOperand(r0, r1));
-  __ cmp(r0, Operand(0));
-  BranchOrBacktrack(ne, on_bit_set);
-}
-
-
 bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
                                                          Label* on_no_match) {
   // Range checks (c in min..max) are generally implemented by an unsigned

@@ -79,14 +79,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
                                                uc16 minus,
                                                uc16 mask,
                                                Label* on_not_equal);
-  virtual void CheckCharacterInRange(uc16 from,
-                                     uc16 to,
-                                     Label* on_in_range);
-  virtual void CheckCharacterNotInRange(uc16 from,
-                                        uc16 to,
-                                        Label* on_not_in_range);
-  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
-
   // Checks whether the given offset from the current position is before
   // the end of the string.
   virtual void CheckPosition(int cp_offset, Label* on_outside_input);

@@ -582,8 +582,6 @@ static void PushInterceptorArguments(MacroAssembler* masm,
  __ push(holder);
  __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
  __ push(scratch);
  __ mov(scratch, Operand(ExternalReference::isolate_address()));
  __ push(scratch);
}


@@ -598,7 +596,7 @@ static void CompileCallLoadPropertyWithInterceptor(
  ExternalReference ref =
      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
                        masm->isolate());
  __ mov(r0, Operand(6));
  __ mov(r0, Operand(5));
  __ mov(r1, Operand(ref));

  CEntryStub stub(1);

@@ -606,9 +604,9 @@ static void CompileCallLoadPropertyWithInterceptor(
}


static const int kFastApiCallArguments = 4;
static const int kFastApiCallArguments = 3;

// Reserves space for the extra arguments to API function in the
// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.

@@ -634,8 +632,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
  //  -- sp[0]              : holder (set by CheckPrototypes)
  //  -- sp[4]              : callee JS function
  //  -- sp[8]              : call data
  //  -- sp[12]             : isolate
  //  -- sp[16]             : last JS argument
  //  -- sp[12]             : last JS argument
  //  -- ...
  //  -- sp[(argc + 3) * 4] : first JS argument
  //  -- sp[(argc + 4) * 4] : receiver

@@ -645,7 +642,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
  __ LoadHeapObject(r5, function);
  __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));

  // Pass the additional arguments.
  // Pass the additional arguments FastHandleApiCall expects.
  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
  Handle<Object> call_data(api_call_info->data());
  if (masm->isolate()->heap()->InNewSpace(*call_data)) {

@@ -654,15 +651,13 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
  } else {
    __ Move(r6, call_data);
  }
  __ mov(r7, Operand(ExternalReference::isolate_address()));
  // Store JS function, call data and isolate.
  __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
  // Store JS function and call data.
  __ stm(ib, sp, r5.bit() | r6.bit());

  // Prepare arguments.
  __ add(r2, sp, Operand(3 * kPointerSize));
  // r2 points to call data as expected by Arguments
  // (refer to layout above).
  __ add(r2, sp, Operand(2 * kPointerSize));

  // Allocate the v8::Arguments structure in the arguments' space since
  // it's not controlled by GC.
  const int kApiStackSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);

@@ -671,9 +666,9 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
  // r0 = v8::Arguments&
  // Arguments is after the return address.
  __ add(r0, sp, Operand(1 * kPointerSize));
  // v8::Arguments::implicit_args_
  // v8::Arguments::implicit_args = data
  __ str(r2, MemOperand(r0, 0 * kPointerSize));
  // v8::Arguments::values_
  // v8::Arguments::values = last argument
  __ add(ip, r2, Operand(argc * kPointerSize));
  __ str(ip, MemOperand(r0, 1 * kPointerSize));
  // v8::Arguments::length_ = argc

@@ -850,7 +845,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
      __ CallExternalReference(
          ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
                            masm->isolate()),
          6);
          5);
      // Restore the name_ register.
      __ pop(name_);
      // Leave the internal frame.

@@ -1209,9 +1204,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
  } else {
    __ Move(scratch3, Handle<Object>(callback->data()));
  }
  __ Push(reg, scratch3);
  __ mov(scratch3, Operand(ExternalReference::isolate_address()));
  __ Push(scratch3, name_reg);
  __ Push(reg, scratch3, name_reg);
  __ mov(r0, sp);  // r0 = Handle<String>

  const int kApiStackSpace = 1;

@@ -1223,7 +1216,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
  __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
  __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = AccessorInfo&

  const int kStackUnwindSpace = 5;
  const int kStackUnwindSpace = 4;
  Address getter_address = v8::ToCData<Address>(callback->getter());
  ApiFunction fun(getter_address);
  ExternalReference ref =
@@ -1351,19 +1344,20 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
      if (!receiver.is(holder_reg)) {
        ASSERT(scratch1.is(holder_reg));
        __ Push(receiver, holder_reg);
        __ ldr(scratch3,
               FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
        __ Push(scratch3, scratch2, name_reg);
      } else {
        __ push(receiver);
        __ push(holder_reg);
        __ ldr(scratch3,
               FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
        __ Push(holder_reg, scratch3, scratch2, name_reg);
      }
      __ ldr(scratch3,
             FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
      __ mov(scratch1, Operand(ExternalReference::isolate_address()));
      __ Push(scratch3, scratch1, scratch2, name_reg);

      ExternalReference ref =
          ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
                            masm()->isolate());
      __ TailCallExternalReference(ref, 6, 1);
      __ TailCallExternalReference(ref, 5, 1);
    }
  } else {  // !compile_followup_inline
    // Call the runtime system to load the interceptor.

@@ -1377,7 +1371,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
    ExternalReference ref =
        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
                          masm()->isolate());
    __ TailCallExternalReference(ref, 6, 1);
    __ TailCallExternalReference(ref, 5, 1);
  }
}

@@ -1745,7 +1739,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
  // We can't address the last element in one operation. Compute the more
  // expensive shift first, and use an offset later on.
  __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
  __ ldr(r0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
  __ cmp(r0, r6);
  __ b(eq, &call_builtin);


@@ -1753,7 +1747,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
  __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));

  // Fill with the hole.
  __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize));
  __ str(r6, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
  __ Drop(argc + 1);
  __ Ret();

@@ -3383,44 +3377,6 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
}


static void GenerateSmiKeyCheck(MacroAssembler* masm,
                                Register key,
                                Register scratch0,
                                Register scratch1,
                                DwVfpRegister double_scratch0,
                                Label* fail) {
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    Label key_ok;
    // Check for smi or a smi inside a heap number. We convert the heap
    // number and check if the conversion is exact and fits into the smi
    // range.
    __ JumpIfSmi(key, &key_ok);
    __ CheckMap(key,
                scratch0,
                Heap::kHeapNumberMapRootIndex,
                fail,
                DONT_DO_SMI_CHECK);
    __ sub(ip, key, Operand(kHeapObjectTag));
    __ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
    __ EmitVFPTruncate(kRoundToZero,
                       double_scratch0.low(),
                       double_scratch0,
                       scratch0,
                       scratch1,
                       kCheckForInexactConversion);
    __ b(ne, fail);
    __ vmov(scratch0, double_scratch0.low());
    __ TrySmiTag(scratch0, fail, scratch1);
    __ mov(key, scratch0);
    __ bind(&key_ok);
  } else {
    // Check that the key is a smi.
    __ JumpIfNotSmi(key, fail);
  }
}
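In scalar terms, the VFP3 path accepts a key that is either a smi already or a heap number whose value truncates without loss to an integer. A simplified sketch of the exactness test (ignoring the narrower 31-bit smi range that TrySmiTag enforces separately):

    #include <cmath>

    // True iff d holds an integral value that converts to int exactly.
    inline bool DoubleIsExactInt(double d, int* out) {
      double t = std::trunc(d);                     // kRoundToZero.
      if (t != d) return false;                     // Inexact: bail out.
      if (t < -2147483648.0 || t > 2147483647.0) return false;
      *out = static_cast<int>(t);
      return true;
    }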


void KeyedLoadStubCompiler::GenerateLoadExternalArray(
    MacroAssembler* masm,
    ElementsKind elements_kind) {

@@ -3437,8 +3393,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic);
  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &miss_force_generic);

  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // r3: elements array

@@ -3768,8 +3724,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic);
  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &miss_force_generic);

  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));


@@ -4094,8 +4050,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, r0, r4, r5, d1, &miss_force_generic);
  // Check that the key is a smi.
  __ JumpIfNotSmi(r0, &miss_force_generic);

  // Get the elements array.
  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));

@@ -4146,8 +4102,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
  // Check that the key is a smi.
  __ JumpIfNotSmi(key_reg, &miss_force_generic);

  // Get the elements array.
  __ ldr(elements_reg,

@@ -4222,8 +4178,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
  // Check that the key is a smi.
  __ JumpIfNotSmi(key_reg, &miss_force_generic);

  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
    __ JumpIfNotSmi(value_reg, &transition_elements_kind);

@@ -4389,9 +4345,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(

  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
  __ JumpIfNotSmi(key_reg, &miss_force_generic);

  __ ldr(elements_reg,
         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));

@@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:

@@ -465,19 +465,15 @@ function ArrayPush() {
}


// Returns an array containing the array elements of the object followed
// by the array elements of each argument in order. See ECMA-262,
// section 15.4.4.7.
function ArrayConcat(arg1) {  // length == 1
  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
    throw MakeTypeError("called_on_null_or_undefined",
                        ["Array.prototype.concat"]);
  }

  var array = ToObject(this);
  var arg_count = %_ArgumentsLength();
  var arrays = new InternalArray(1 + arg_count);
  arrays[0] = array;
  arrays[0] = this;
  for (var i = 0; i < arg_count; i++) {
    arrays[i + 1] = %_Arguments(i);
  }

@@ -1031,28 +1027,13 @@ function ArrayFilter(f, receiver) {
  var result = new $Array();
  var accumulator = new InternalArray();
  var accumulator_length = 0;
  if (%DebugCallbackSupportsStepping(f)) {
    for (var i = 0; i < length; i++) {
      if (i in array) {
        var element = array[i];
        // Prepare break slots for debugger step in.
        %DebugPrepareStepInIfStepping(f);
        if (%_CallFunction(receiver, element, i, array, f)) {
          accumulator[accumulator_length++] = element;
        }
  for (var i = 0; i < length; i++) {
    if (i in array) {
      var element = array[i];
      if (%_CallFunction(receiver, element, i, array, f)) {
        accumulator[accumulator_length++] = element;
      }
    }
  } else {
    // This is a duplicate of the previous loop sans debug stepping.
    for (var i = 0; i < length; i++) {
      if (i in array) {
        var element = array[i];
        if (%_CallFunction(receiver, element, i, array, f)) {
          accumulator[accumulator_length++] = element;
        }
      }
    }
    // End of duplicate.
  }
  %MoveArrayContents(accumulator, result);
  return result;

@@ -1078,24 +1059,12 @@ function ArrayForEach(f, receiver) {
  } else if (!IS_SPEC_OBJECT(receiver)) {
    receiver = ToObject(receiver);
  }
  if (%DebugCallbackSupportsStepping(f)) {
    for (var i = 0; i < length; i++) {
      if (i in array) {
        var element = array[i];
        // Prepare break slots for debugger step in.
        %DebugPrepareStepInIfStepping(f);
        %_CallFunction(receiver, element, i, array, f);
      }

  for (var i = 0; i < length; i++) {
    if (i in array) {
      var element = array[i];
      %_CallFunction(receiver, element, i, array, f);
    }
  } else {
    // This is a duplicate of the previous loop sans debug stepping.
    for (var i = 0; i < length; i++) {
      if (i in array) {
        var element = array[i];
        %_CallFunction(receiver, element, i, array, f);
      }
    }
    // End of duplicate.
  }
}

@@ -1122,24 +1091,11 @@ function ArraySome(f, receiver) {
    receiver = ToObject(receiver);
  }

  if (%DebugCallbackSupportsStepping(f)) {
    for (var i = 0; i < length; i++) {
      if (i in array) {
        var element = array[i];
        // Prepare break slots for debugger step in.
        %DebugPrepareStepInIfStepping(f);
        if (%_CallFunction(receiver, element, i, array, f)) return true;
      }
  for (var i = 0; i < length; i++) {
    if (i in array) {
      var element = array[i];
      if (%_CallFunction(receiver, element, i, array, f)) return true;
    }
  } else {
    // This is a duplicate of the previous loop sans debug stepping.
    for (var i = 0; i < length; i++) {
      if (i in array) {
        var element = array[i];
        if (%_CallFunction(receiver, element, i, array, f)) return true;
      }
    }
    // End of duplicate.
  }
  return false;
}

@@ -1165,24 +1121,11 @@ function ArrayEvery(f, receiver) {
    receiver = ToObject(receiver);
  }

  if (%DebugCallbackSupportsStepping(f)) {
    for (var i = 0; i < length; i++) {
      if (i in array) {
        var element = array[i];
        // Prepare break slots for debugger step in.
        %DebugPrepareStepInIfStepping(f);
        if (!%_CallFunction(receiver, element, i, array, f)) return false;
      }
  for (var i = 0; i < length; i++) {
    if (i in array) {
      var element = array[i];
      if (!%_CallFunction(receiver, element, i, array, f)) return false;
    }
  } else {
    // This is a duplicate of the previous loop sans debug stepping.
    for (var i = 0; i < length; i++) {
      if (i in array) {
        var element = array[i];
        if (!%_CallFunction(receiver, element, i, array, f)) return false;
      }
    }
    // End of duplicate.
  }
  return true;
}

@@ -1209,24 +1152,11 @@ function ArrayMap(f, receiver) {

  var result = new $Array();
  var accumulator = new InternalArray(length);
  if (%DebugCallbackSupportsStepping(f)) {
    for (var i = 0; i < length; i++) {
      if (i in array) {
        var element = array[i];
        // Prepare break slots for debugger step in.
        %DebugPrepareStepInIfStepping(f);
        accumulator[i] = %_CallFunction(receiver, element, i, array, f);
      }
  for (var i = 0; i < length; i++) {
    if (i in array) {
      var element = array[i];
      accumulator[i] = %_CallFunction(receiver, element, i, array, f);
    }
  } else {
    // This is a duplicate of the previous loop sans debug stepping.
    for (var i = 0; i < length; i++) {
      if (i in array) {
        var element = array[i];
        accumulator[i] = %_CallFunction(receiver, element, i, array, f);
      }
    }
    // End of duplicate.
  }
  %MoveArrayContents(accumulator, result);
  return result;

@@ -1381,27 +1311,11 @@ function ArrayReduce(callback, current) {
  }

  var receiver = %GetDefaultReceiver(callback);

  if (%DebugCallbackSupportsStepping(callback)) {
    for (; i < length; i++) {
      if (i in array) {
        var element = array[i];
        // Prepare break slots for debugger step in.
        %DebugPrepareStepInIfStepping(callback);
        current =
            %_CallFunction(receiver, current, element, i, array, callback);
      }
  for (; i < length; i++) {
    if (i in array) {
      var element = array[i];
      current = %_CallFunction(receiver, current, element, i, array, callback);
    }
  } else {
    // This is a duplicate of the previous loop sans debug stepping.
    for (; i < length; i++) {
      if (i in array) {
        var element = array[i];
        current =
            %_CallFunction(receiver, current, element, i, array, callback);
      }
    }
    // End of duplicate.
  }
  return current;
}

@@ -1434,27 +1348,11 @@ function ArrayReduceRight(callback, current) {
  }

  var receiver = %GetDefaultReceiver(callback);

  if (%DebugCallbackSupportsStepping(callback)) {
    for (; i >= 0; i--) {
      if (i in array) {
        var element = array[i];
        // Prepare break slots for debugger step in.
        %DebugPrepareStepInIfStepping(callback);
        current =
            %_CallFunction(receiver, current, element, i, array, callback);
      }
  for (; i >= 0; i--) {
    if (i in array) {
      var element = array[i];
      current = %_CallFunction(receiver, current, element, i, array, callback);
    }
  } else {
    // This is a duplicate of the previous loop sans debug stepping.
    for (; i >= 0; i--) {
      if (i in array) {
        var element = array[i];
        current =
            %_CallFunction(receiver, current, element, i, array, callback);
      }
    }
    // End of duplicate.
  }
  return current;
}

@@ -99,7 +99,21 @@ struct DoubleConstant BASE_EMBEDDED {
  double the_hole_nan;
};

static DoubleConstant double_constants;
struct InitializeDoubleConstants {
  static void Construct(DoubleConstant* double_constants) {
    double_constants->min_int = kMinInt;
    double_constants->one_half = 0.5;
    double_constants->minus_zero = -0.0;
    double_constants->uint8_max_value = 255;
    double_constants->zero = 0.0;
    double_constants->canonical_non_hole_nan = OS::nan_value();
    double_constants->the_hole_nan = BitCast<double>(kHoleNanInt64);
    double_constants->negative_infinity = -V8_INFINITY;
  }
};

static LazyInstance<DoubleConstant, InitializeDoubleConstants>::type
    double_constants = LAZY_INSTANCE_INITIALIZER;
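The LazyInstance version defers filling in the constants until the first Pointer() call, replacing the eager ExternalReference::SetUp() shown below. The same construct-on-first-use idea in plain C++ (a sketch, not V8's LazyInstance machinery; thread-safety of local statics depends on the compiler in pre-C++11 code):

    struct Constants { double one_half; double minus_zero; };

    // Built on the first call; later calls reuse the same instance.
    static const Constants& GetConstants() {
      static const Constants kConstants = {0.5, -0.0};
      return kConstants;
    }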

const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";


@@ -712,18 +726,6 @@ void RelocInfo::Verify() {
// -----------------------------------------------------------------------------
// Implementation of ExternalReference

void ExternalReference::SetUp() {
  double_constants.min_int = kMinInt;
  double_constants.one_half = 0.5;
  double_constants.minus_zero = -0.0;
  double_constants.uint8_max_value = 255;
  double_constants.zero = 0.0;
  double_constants.canonical_non_hole_nan = OS::nan_value();
  double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
  double_constants.negative_infinity = -V8_INFINITY;
}


ExternalReference::ExternalReference(Builtins::CFunctionId id, Isolate* isolate)
    : address_(Redirect(isolate, Builtins::c_function_address(id))) {}

@@ -956,47 +958,50 @@ ExternalReference ExternalReference::scheduled_exception_address(


ExternalReference ExternalReference::address_of_min_int() {
  return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
  return ExternalReference(reinterpret_cast<void*>(
      &double_constants.Pointer()->min_int));
}


ExternalReference ExternalReference::address_of_one_half() {
  return ExternalReference(reinterpret_cast<void*>(&double_constants.one_half));
  return ExternalReference(reinterpret_cast<void*>(
      &double_constants.Pointer()->one_half));
}


ExternalReference ExternalReference::address_of_minus_zero() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.minus_zero));
  return ExternalReference(reinterpret_cast<void*>(
      &double_constants.Pointer()->minus_zero));
}


ExternalReference ExternalReference::address_of_zero() {
  return ExternalReference(reinterpret_cast<void*>(&double_constants.zero));
  return ExternalReference(reinterpret_cast<void*>(
      &double_constants.Pointer()->zero));
}


ExternalReference ExternalReference::address_of_uint8_max_value() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.uint8_max_value));
  return ExternalReference(reinterpret_cast<void*>(
      &double_constants.Pointer()->uint8_max_value));
}


ExternalReference ExternalReference::address_of_negative_infinity() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.negative_infinity));
  return ExternalReference(reinterpret_cast<void*>(
      &double_constants.Pointer()->negative_infinity));
}


ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.canonical_non_hole_nan));
  return ExternalReference(reinterpret_cast<void*>(
      &double_constants.Pointer()->canonical_non_hole_nan));
}


ExternalReference ExternalReference::address_of_the_hole_nan() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.the_hole_nan));
  return ExternalReference(reinterpret_cast<void*>(
      &double_constants.Pointer()->the_hole_nan));
}

@@ -1153,20 +1158,6 @@ double power_double_int(double x, int y) {


double power_double_double(double x, double y) {
#ifdef __MINGW64_VERSION_MAJOR
  // MinGW64 has a custom implementation for pow. This handles certain
  // special cases that are different.
  if ((x == 0.0 || isinf(x)) && isfinite(y)) {
    double f;
    if (modf(y, &f) != 0.0) return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
  }

  if (x == 2.0) {
    int y_int = static_cast<int>(y);
    if (y == y_int) return ldexp(1.0, y_int);
  }
#endif

  // The checks for special cases can be dropped in ia32 because it has already
  // been done in generated code before bailing out here.
  if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value();
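The x == 2.0 special case works because ldexp constructs 2^y exactly by setting the exponent field when y is integral, avoiding any rounding a generic pow might introduce. In isolation:

    #include <cmath>

    // Exact power of two for integral exponents; pow fallback otherwise.
    double PowerOfTwo(double y) {
      int y_int = static_cast<int>(y);
      if (y == static_cast<double>(y_int)) return std::ldexp(1.0, y_int);
      return std::pow(2.0, y);
    }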


@@ -539,8 +539,6 @@ class ExternalReference BASE_EMBEDDED {
    DIRECT_GETTER_CALL
  };

  static void SetUp();

  typedef void* ExternalReferenceRedirector(void* original, Type type);

  ExternalReference(Builtins::CFunctionId id, Isolate* isolate);

@@ -962,14 +962,6 @@ RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives)
}


static int IncreaseBy(int previous, int increase) {
  if (RegExpTree::kInfinity - previous < increase) {
    return RegExpTree::kInfinity;
  } else {
    return previous + increase;
  }
}
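IncreaseBy is a saturating add: it checks the remaining headroom before adding, so the running match count clamps at kInfinity instead of overflowing. The guard in isolation, with an assumed limit constant standing in for kInfinity:

    // previous and increase are assumed non-negative.
    static const int kLimit = 0x7fffffff;

    inline int SaturatingAdd(int previous, int increase) {
      if (kLimit - previous < increase) return kLimit;  // Would overflow.
      return previous + increase;
    }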

RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
    : nodes_(nodes) {
  ASSERT(nodes->length() > 1);

@@ -977,10 +969,13 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
  max_match_ = 0;
  for (int i = 0; i < nodes->length(); i++) {
    RegExpTree* node = nodes->at(i);
    int node_min_match = node->min_match();
    min_match_ = IncreaseBy(min_match_, node_min_match);
    min_match_ += node->min_match();
    int node_max_match = node->max_match();
    max_match_ = IncreaseBy(max_match_, node_max_match);
    if (kInfinity - max_match_ < node_max_match) {
      max_match_ = kInfinity;
    } else {
      max_match_ += node->max_match();
    }
  }
}

@@ -998,78 +993,138 @@ CaseClause::CaseClause(Isolate* isolate,
}


#define REGULAR_NODE(NodeType) \
#define INCREASE_NODE_COUNT(NodeType) \
  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
    increase_node_count(); \
  }
#define DONT_OPTIMIZE_NODE(NodeType) \
  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
    increase_node_count(); \
    add_flag(kDontOptimize); \
    add_flag(kDontInline); \
    add_flag(kDontSelfOptimize); \
  }
#define DONT_INLINE_NODE(NodeType) \
  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
    increase_node_count(); \
    add_flag(kDontInline); \
  }
#define DONT_SELFOPTIMIZE_NODE(NodeType) \
  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
    increase_node_count(); \
    add_flag(kDontSelfOptimize); \
  }

REGULAR_NODE(VariableDeclaration)
REGULAR_NODE(FunctionDeclaration)
REGULAR_NODE(Block)
REGULAR_NODE(ExpressionStatement)
REGULAR_NODE(EmptyStatement)
REGULAR_NODE(IfStatement)
REGULAR_NODE(ContinueStatement)
REGULAR_NODE(BreakStatement)
REGULAR_NODE(ReturnStatement)
REGULAR_NODE(SwitchStatement)
REGULAR_NODE(Conditional)
REGULAR_NODE(Literal)
REGULAR_NODE(ObjectLiteral)
REGULAR_NODE(Assignment)
REGULAR_NODE(Throw)
REGULAR_NODE(Property)
REGULAR_NODE(UnaryOperation)
REGULAR_NODE(CountOperation)
REGULAR_NODE(BinaryOperation)
REGULAR_NODE(CompareOperation)
REGULAR_NODE(ThisFunction)
REGULAR_NODE(Call)
REGULAR_NODE(CallNew)
// In theory, for VariableProxy we'd have to add:
// if (node->var()->IsLookupSlot()) add_flag(kDontInline);
// But node->var() is usually not bound yet at VariableProxy creation time, and
// LOOKUP variables only result from constructs that cannot be inlined anyway.
REGULAR_NODE(VariableProxy)
INCREASE_NODE_COUNT(VariableDeclaration)
INCREASE_NODE_COUNT(FunctionDeclaration)
INCREASE_NODE_COUNT(ModuleDeclaration)
INCREASE_NODE_COUNT(ImportDeclaration)
INCREASE_NODE_COUNT(ExportDeclaration)
INCREASE_NODE_COUNT(ModuleLiteral)
INCREASE_NODE_COUNT(ModuleVariable)
INCREASE_NODE_COUNT(ModulePath)
INCREASE_NODE_COUNT(ModuleUrl)
INCREASE_NODE_COUNT(Block)
INCREASE_NODE_COUNT(ExpressionStatement)
INCREASE_NODE_COUNT(EmptyStatement)
INCREASE_NODE_COUNT(IfStatement)
INCREASE_NODE_COUNT(ContinueStatement)
INCREASE_NODE_COUNT(BreakStatement)
INCREASE_NODE_COUNT(ReturnStatement)
INCREASE_NODE_COUNT(Conditional)
INCREASE_NODE_COUNT(Literal)
INCREASE_NODE_COUNT(ObjectLiteral)
INCREASE_NODE_COUNT(Assignment)
INCREASE_NODE_COUNT(Throw)
INCREASE_NODE_COUNT(Property)
INCREASE_NODE_COUNT(UnaryOperation)
INCREASE_NODE_COUNT(CountOperation)
INCREASE_NODE_COUNT(BinaryOperation)
INCREASE_NODE_COUNT(CompareOperation)
INCREASE_NODE_COUNT(ThisFunction)
INCREASE_NODE_COUNT(Call)
INCREASE_NODE_COUNT(CallNew)

DONT_OPTIMIZE_NODE(ModuleDeclaration)
DONT_OPTIMIZE_NODE(ImportDeclaration)
DONT_OPTIMIZE_NODE(ExportDeclaration)
DONT_OPTIMIZE_NODE(ModuleLiteral)
DONT_OPTIMIZE_NODE(ModuleVariable)
DONT_OPTIMIZE_NODE(ModulePath)
DONT_OPTIMIZE_NODE(ModuleUrl)
DONT_OPTIMIZE_NODE(WithStatement)
DONT_OPTIMIZE_NODE(TryCatchStatement)
DONT_OPTIMIZE_NODE(TryFinallyStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement)
DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral)
#undef INCREASE_NODE_COUNT

DONT_INLINE_NODE(FunctionLiteral)
DONT_INLINE_NODE(RegExpLiteral)  // TODO(1322): Allow materialized literals.
DONT_INLINE_NODE(ArrayLiteral)  // TODO(1322): Allow materialized literals.

DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
DONT_SELFOPTIMIZE_NODE(ForInStatement)
void AstConstructionVisitor::VisitWithStatement(WithStatement* node) {
  increase_node_count();
  add_flag(kDontOptimize);
  add_flag(kDontInline);
}


void AstConstructionVisitor::VisitSwitchStatement(SwitchStatement* node) {
  increase_node_count();
  add_flag(kDontInline);
}


void AstConstructionVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
  increase_node_count();
  add_flag(kDontSelfOptimize);
}


void AstConstructionVisitor::VisitWhileStatement(WhileStatement* node) {
  increase_node_count();
  add_flag(kDontSelfOptimize);
}


void AstConstructionVisitor::VisitForStatement(ForStatement* node) {
  increase_node_count();
  add_flag(kDontSelfOptimize);
}


void AstConstructionVisitor::VisitForInStatement(ForInStatement* node) {
  increase_node_count();
  add_flag(kDontSelfOptimize);
}


void AstConstructionVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
  increase_node_count();
  add_flag(kDontOptimize);
  add_flag(kDontInline);
}


void AstConstructionVisitor::VisitTryFinallyStatement(
    TryFinallyStatement* node) {
  increase_node_count();
  add_flag(kDontOptimize);
  add_flag(kDontInline);
}


void AstConstructionVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
  increase_node_count();
  add_flag(kDontOptimize);
  add_flag(kDontInline);
}


void AstConstructionVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
  increase_node_count();
  add_flag(kDontInline);
}


void AstConstructionVisitor::VisitSharedFunctionInfoLiteral(
    SharedFunctionInfoLiteral* node) {
  increase_node_count();
  add_flag(kDontOptimize);
  add_flag(kDontInline);
}


void AstConstructionVisitor::VisitVariableProxy(VariableProxy* node) {
  increase_node_count();
  // In theory, we'd have to add:
  // if(node->var()->IsLookupSlot()) { add_flag(kDontInline); }
  // However, node->var() is usually not bound yet at VariableProxy creation
  // time, and LOOKUP variables only result from constructs that cannot
  // be inlined anyway.
}


void AstConstructionVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
  increase_node_count();
  add_flag(kDontInline);  // TODO(1322): Allow materialized literals.
}


void AstConstructionVisitor::VisitArrayLiteral(ArrayLiteral* node) {
  increase_node_count();
  add_flag(kDontInline);  // TODO(1322): Allow materialized literals.
}


void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
  increase_node_count();

@@ -1087,11 +1142,6 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
  }
}

#undef REGULAR_NODE
#undef DONT_OPTIMIZE_NODE
#undef DONT_INLINE_NODE
#undef DONT_SELFOPTIMIZE_NODE


Handle<String> Literal::ToString() {
  if (handle_->IsString()) return Handle<String>::cast(handle_);

@@ -270,7 +270,6 @@ class SmallMapList {

  void Reserve(int capacity) { list_.Reserve(capacity); }
  void Clear() { list_.Clear(); }
  void Sort() { list_.Sort(); }

  bool is_empty() const { return list_.is_empty(); }
  int length() const { return list_.length(); }

@@ -421,8 +420,8 @@ class Block: public BreakableStatement {
  ZoneList<Statement*>* statements() { return &statements_; }
  bool is_initializer_block() const { return is_initializer_block_; }

  Scope* scope() const { return scope_; }
  void set_scope(Scope* scope) { scope_ = scope; }
  Scope* block_scope() const { return block_scope_; }
  void set_block_scope(Scope* block_scope) { block_scope_ = block_scope; }

 protected:
  template<class> friend class AstNodeFactory;

@@ -434,13 +433,13 @@ class Block: public BreakableStatement {
      : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
        statements_(capacity),
        is_initializer_block_(is_initializer_block),
        scope_(NULL) {
        block_scope_(NULL) {
  }

 private:
  ZoneList<Statement*> statements_;
  bool is_initializer_block_;
  Scope* scope_;
  Scope* block_scope_;
};


@@ -608,7 +607,6 @@ class ModuleLiteral: public Module {
  DECLARE_NODE_TYPE(ModuleLiteral)

  Block* body() const { return body_; }
  Handle<Context> context() const { return context_; }

 protected:
  template<class> friend class AstNodeFactory;

@@ -620,7 +618,6 @@ class ModuleLiteral: public Module {

 private:
  Block* body_;
  Handle<Context> context_;
};

@@ -1011,7 +1011,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
    proto_map->set_prototype(global_context()->initial_object_prototype());
    Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
    proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
                                 heap->query_colon_symbol());
                                 heap->empty_string());
    proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
                                 heap->false_value());
    proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,

@@ -2159,7 +2159,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
      Handle<DescriptorArray> descs =
          Handle<DescriptorArray>(from->map()->instance_descriptors());
      for (int i = 0; i < descs->number_of_descriptors(); i++) {
        PropertyDetails details = descs->GetDetails(i);
        PropertyDetails details = PropertyDetails(descs->GetDetails(i));
        switch (details.type()) {
          case FIELD: {
            HandleScope inner;

@@ -1103,7 +1103,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(

  CustomArguments custom(isolate);
  v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
      isolate, data_obj, *function, raw_holder);
      data_obj, *function, raw_holder);

  v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
      custom.end(),

@@ -1143,6 +1143,68 @@ BUILTIN(HandleApiCallConstruct) {
}


#ifdef DEBUG

static void VerifyTypeCheck(Handle<JSObject> object,
                            Handle<JSFunction> function) {
  ASSERT(function->shared()->IsApiFunction());
  FunctionTemplateInfo* info = function->shared()->get_api_func_data();
  if (info->signature()->IsUndefined()) return;
  SignatureInfo* signature = SignatureInfo::cast(info->signature());
  Object* receiver_type = signature->receiver();
  if (receiver_type->IsUndefined()) return;
  FunctionTemplateInfo* type = FunctionTemplateInfo::cast(receiver_type);
  ASSERT(object->IsInstanceOf(type));
}

#endif


BUILTIN(FastHandleApiCall) {
  ASSERT(!CalledAsConstructor(isolate));
  Heap* heap = isolate->heap();
  const bool is_construct = false;

  // We expect four more arguments: callback, function, call data, and holder.
  const int args_length = args.length() - 4;
  ASSERT(args_length >= 0);

  Object* callback_obj = args[args_length];

  v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
      &args[args_length + 1],
      &args[0] - 1,
      args_length - 1,
      is_construct);

#ifdef DEBUG
  VerifyTypeCheck(Utils::OpenHandle(*new_args.Holder()),
                  Utils::OpenHandle(*new_args.Callee()));
#endif
  HandleScope scope(isolate);
  Object* result;
  v8::Handle<v8::Value> value;
  {
    // Leaving JavaScript.
    VMState state(isolate, EXTERNAL);
    ExternalCallbackScope call_scope(isolate,
                                     v8::ToCData<Address>(callback_obj));
    v8::InvocationCallback callback =
        v8::ToCData<v8::InvocationCallback>(callback_obj);

    value = callback(new_args);
  }
  if (value.IsEmpty()) {
    result = heap->undefined_value();
  } else {
    result = *reinterpret_cast<Object**>(*value);
  }

  RETURN_IF_SCHEDULED_EXCEPTION(isolate);
  return result;
}


// Helper function to handle calls to non-function objects created through the
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).

@@ -1181,7 +1243,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(

  CustomArguments custom(isolate);
  v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
      isolate, call_data->data(), constructor, obj);
      call_data->data(), constructor, obj);
  v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
      custom.end(),
      &args[0] - 1,

@@ -56,6 +56,7 @@ enum BuiltinExtraArguments {
  V(ArrayConcat, NO_EXTRA_ARGUMENTS) \
                                     \
  V(HandleApiCall, NEEDS_CALLED_FUNCTION) \
  V(FastHandleApiCall, NO_EXTRA_ARGUMENTS) \
  V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION) \
  V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \
  V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS) \

@@ -72,23 +72,24 @@ V(AND_CHECK_4_CHARS, 27, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
V(AND_CHECK_CHAR, 28, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
V(AND_CHECK_NOT_4_CHARS, 29, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
V(AND_CHECK_NOT_CHAR, 30, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 uc16 addr32 */ \
V(CHECK_CHAR_IN_RANGE, 32, 12) /* bc8 pad24 uc16 uc16 addr32 */ \
V(CHECK_CHAR_NOT_IN_RANGE, 33, 12) /* bc8 pad24 uc16 uc16 addr32 */ \
V(CHECK_BIT_IN_TABLE, 34, 24) /* bc8 pad24 addr32 bits128 */ \
V(CHECK_LT, 35, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_GT, 36, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_NOT_BACK_REF, 37, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_NOT_REGS_EQUAL, 39, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
V(CHECK_REGISTER_LT, 40, 12) /* bc8 reg_idx24 value32 addr32 */ \
V(CHECK_REGISTER_GE, 41, 12) /* bc8 reg_idx24 value32 addr32 */ \
V(CHECK_REGISTER_EQ_POS, 42, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_AT_START, 43, 8) /* bc8 pad24 addr32 */ \
V(CHECK_NOT_AT_START, 44, 8) /* bc8 pad24 addr32 */ \
V(CHECK_GREEDY, 45, 8) /* bc8 pad24 addr32 */ \
V(ADVANCE_CP_AND_GOTO, 46, 8) /* bc8 offset24 addr32 */ \
V(SET_CURRENT_POSITION_FROM_END, 47, 4) /* bc8 idx24 */
V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 addr32 */ \
V(CHECK_LT, 32, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_GT, 33, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_NOT_BACK_REF, 34, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_NOT_BACK_REF_NO_CASE, 35, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_NOT_REGS_EQUAL, 36, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
V(LOOKUP_MAP1, 37, 12) /* bc8 pad8 start16 bit_map_addr32 addr32 */ \
V(LOOKUP_MAP2, 38, 96) /* bc8 pad8 start16 half_nibble_map_addr32* */ \
V(LOOKUP_MAP8, 39, 96) /* bc8 pad8 start16 byte_map addr32* */ \
V(LOOKUP_HI_MAP8, 40, 96) /* bc8 start24 byte_map_addr32 addr32* */ \
V(CHECK_REGISTER_LT, 41, 12) /* bc8 reg_idx24 value32 addr32 */ \
V(CHECK_REGISTER_GE, 42, 12) /* bc8 reg_idx24 value32 addr32 */ \
V(CHECK_REGISTER_EQ_POS, 43, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_AT_START, 44, 8) /* bc8 pad24 addr32 */ \
V(CHECK_NOT_AT_START, 45, 8) /* bc8 pad24 addr32 */ \
V(CHECK_GREEDY, 46, 8) /* bc8 pad24 addr32 */ \
V(ADVANCE_CP_AND_GOTO, 47, 8) /* bc8 offset24 addr32 */ \
V(SET_CURRENT_POSITION_FROM_END, 48, 4) /* bc8 idx24 */

#define DECLARE_BYTECODES(name, code, length) \
  static const int BC_##name = code;

@@ -73,12 +73,21 @@ SmartArrayPointer<const char> CodeStub::GetName() {


void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
  code->set_major_key(MajorKey());

  Isolate* isolate = masm->isolate();
  SmartArrayPointer<const char> name = GetName();
  PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
  GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
  Counters* counters = isolate->counters();
  counters->total_stubs_code_size()->Increment(code->instruction_size());

#ifdef ENABLE_DISASSEMBLER
  if (FLAG_print_code_stubs) {
    code->Disassemble(*name);
    PrintF("\n");
  }
#endif
}


@@ -116,16 +125,8 @@ Handle<Code> CodeStub::GetCode() {
        GetICState());
    Handle<Code> new_object = factory->NewCode(
        desc, flags, masm.CodeObject(), NeedsImmovableCode());
    new_object->set_major_key(MajorKey());
    FinishCode(new_object);
    RecordCodeGeneration(*new_object, &masm);

#ifdef ENABLE_DISASSEMBLER
    if (FLAG_print_code_stubs) {
      new_object->Disassemble(*GetName());
      PrintF("\n");
    }
#endif
    FinishCode(new_object);

    if (UseSpecialCache()) {
      AddToSpecialCache(new_object);

@@ -40,9 +40,6 @@ class CompilerIntrinsics {
  // Returns number of zero bits following most significant 1 bit.
  // Undefined for zero value.
  INLINE(static int CountLeadingZeros(uint32_t value));

  // Returns the number of bits set.
  INLINE(static int CountSetBits(uint32_t value));
};

#ifdef __GNUC__

@@ -54,10 +51,6 @@ int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
  return __builtin_clz(value);
}

int CompilerIntrinsics::CountSetBits(uint32_t value) {
  return __builtin_popcount(value);
}

#elif defined(_MSC_VER)

#pragma intrinsic(_BitScanForward)

@@ -75,16 +68,6 @@ int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
  return 31 - static_cast<int>(result);
}

int CompilerIntrinsics::CountSetBits(uint32_t value) {
  // Manually count set bits.
  value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
  value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
  value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
  value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
  value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
  return value;
}
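The MSVC fallback above is the classic SWAR popcount: each line adds adjacent bit-fields pairwise, doubling the field width (1, 2, 4, 8, 16 bits) until a single 32-bit count remains. The same algorithm, standalone:

    #include <cstdint>

    inline int PopCount(uint32_t v) {
      v = ((v >> 1) & 0x55555555) + (v & 0x55555555);   // 16 two-bit sums.
      v = ((v >> 2) & 0x33333333) + (v & 0x33333333);   // 8 four-bit sums.
      v = ((v >> 4) & 0x0f0f0f0f) + (v & 0x0f0f0f0f);   // 4 byte sums.
      v = ((v >> 8) & 0x00ff00ff) + (v & 0x00ff00ff);   // 2 16-bit sums.
      v = ((v >> 16) & 0x0000ffff) + (v & 0x0000ffff);  // Final 32-bit sum.
      return static_cast<int>(v);
    }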

#else
#error Unsupported compiler
#endif

@@ -397,7 +397,7 @@ class Context: public FixedArray {
  GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSORS)
#undef GLOBAL_CONTEXT_FIELD_ACCESSORS

  // Lookup the slot called name, starting with the current context.
  // Lookup the the slot called name, starting with the current context.
  // There are three possibilities:
  //
  // 1) result->IsContext():

@@ -228,7 +228,9 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache,
  }

  ASSERT(number != 0);
  return ldexp(static_cast<double>(negative ? -number : number), exponent);
  // The double could be constructed faster from number (mantissa), exponent
  // and sign. Assuming it's a rare case more simple code is used.
  return static_cast<double>(negative ? -number : number) * pow(2.0, exponent);
}
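Both returns build sign * number * 2^exponent; ldexp scales in one step while the restored code multiplies by pow(2.0, exponent). The ldexp form in isolation:

    #include <cmath>
    #include <cstdint>

    // Assemble a double from sign, integer mantissa and binary exponent.
    inline double FromParts(bool negative, uint64_t number, int exponent) {
      double m = static_cast<double>(number);
      return std::ldexp(negative ? -m : m, exponent);
    }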


@@ -318,7 +318,6 @@ static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
const char kArrayBufferReferencePropName[] = "_is_array_buffer_";
const char kArrayBufferMarkerPropName[] = "_array_buffer_ref_";

static const int kExternalArrayAllocationHeaderSize = 2;

Handle<Value> Shell::CreateExternalArray(const Arguments& args,
                                         ExternalArrayType type,

@@ -427,26 +426,14 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
  }

  Persistent<Object> persistent_array = Persistent<Object>::New(array);
  persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
  persistent_array.MarkIndependent();
  if (data == NULL && length != 0) {
    // Make sure the total size fits into a (signed) int.
    static const int kMaxSize = 0x7fffffff;
    if (length > (kMaxSize - sizeof(size_t)) / element_size) {
      return ThrowException(String::New("Array exceeds maximum size (2G)"));
    }
    // Prepend the size of the allocated chunk to the data itself.
    int total_size = length * element_size +
        kExternalArrayAllocationHeaderSize * sizeof(size_t);
    data = malloc(total_size);
    data = calloc(length, element_size);
    if (data == NULL) {
      return ThrowException(String::New("Memory allocation failed."));
    }
    *reinterpret_cast<size_t*>(data) = total_size;
    data = reinterpret_cast<size_t*>(data) + kExternalArrayAllocationHeaderSize;
    memset(data, 0, length * element_size);
    V8::AdjustAmountOfExternalAllocatedMemory(total_size);
  }
  persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
  persistent_array.MarkIndependent();
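The removed branch prepends a small header recording the allocation size, so the weak callback further down can report exactly how much external memory was released. The header trick, simplified here to a one-word header (the shell itself reserves kExternalArrayAllocationHeaderSize words):

    #include <cstdlib>

    void* AllocWithSizeHeader(size_t payload_size) {
      size_t total = payload_size + sizeof(size_t);
      size_t* block = static_cast<size_t*>(malloc(total));
      if (block == NULL) return NULL;
      *block = total;    // Stash the full size in front of the payload.
      return block + 1;  // Hand out memory just past the header.
    }

    void FreeWithSizeHeader(void* payload) {
      if (payload == NULL) return;
      size_t* block = static_cast<size_t*>(payload) - 1;
      free(block);       // *block held the total recorded at allocation.
    }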

  array->SetIndexedPropertiesToExternalArrayData(
      reinterpret_cast<uint8_t*>(data) + offset, type,

@@ -465,9 +452,6 @@ void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
  Handle<Object> converted_object = object->ToObject();
  Local<Value> prop_value = converted_object->Get(prop_name);
  if (data != NULL && !prop_value->IsObject()) {
    data = reinterpret_cast<size_t*>(data) - kExternalArrayAllocationHeaderSize;
    V8::AdjustAmountOfExternalAllocatedMemory(
        -static_cast<int>(*reinterpret_cast<size_t*>(data)));
    free(data);
  }
  object.Dispose();

@@ -993,8 +977,8 @@ void Shell::OnExit() {
    printf("+--------------------------------------------+-------------+\n");
    delete [] counters;
  }
  delete counters_file_;
  delete counter_map_;
  if (counters_file_ != NULL)
    delete counters_file_;
}
#endif  // V8_SHARED

@@ -2174,7 +2174,7 @@ function DebugResponseDetails(response) {
  }

  var current_line = from_line + num;
  var spacer = maxdigits - (1 + Math.floor(log10(current_line)));
  spacer = maxdigits - (1 + Math.floor(log10(current_line)));
  if (current_line == Debug.State.currentSourceLine + 1) {
    for (var i = 0; i < maxdigits; i++) {
      result += '>';

@@ -323,41 +323,41 @@ bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
                                           const char* embedding_host) {
  static const int kBufferSize = 80;
  char buffer[kBufferSize];  // Sending buffer.
  bool ok;
  int len;
  int r;

  // Send the header.
  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
                     "Type: connect\r\n");
  r = conn->Send(buffer, len);
  if (r != len) return false;
  ok = conn->Send(buffer, len);
  if (!ok) return false;

  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
                     "V8-Version: %s\r\n", v8::V8::GetVersion());
  r = conn->Send(buffer, len);
  if (r != len) return false;
  ok = conn->Send(buffer, len);
  if (!ok) return false;

  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
                     "Protocol-Version: 1\r\n");
  r = conn->Send(buffer, len);
  if (r != len) return false;
  ok = conn->Send(buffer, len);
  if (!ok) return false;

  if (embedding_host != NULL) {
    len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
                       "Embedding-Host: %s\r\n", embedding_host);
    r = conn->Send(buffer, len);
    if (r != len) return false;
    ok = conn->Send(buffer, len);
    if (!ok) return false;
  }

  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
                     "%s: 0\r\n", kContentLength);
  r = conn->Send(buffer, len);
  if (r != len) return false;
  ok = conn->Send(buffer, len);
  if (!ok) return false;

  // Terminate header with empty line.
  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
  r = conn->Send(buffer, len);
  if (r != len) return false;
  ok = conn->Send(buffer, len);
  if (!ok) return false;

  // No body for connect message.

@@ -1957,7 +1957,7 @@ DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
  if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
    frame_index = request.arguments.frameNumber;
    if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
      throw new Error('Invalid frame number');
      return response.failed('Invalid frame number');
    }
    return this.exec_state_.frame(frame_index);
  } else {

@@ -1966,44 +1966,20 @@ DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
};


// Gets scope host object from request. It is either a function
// ('functionHandle' argument must be specified) or a stack frame
// ('frameNumber' may be specified and the current frame is taken by default).
DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ =
    function(request) {
  if (request.arguments && "functionHandle" in request.arguments) {
    if (!IS_NUMBER(request.arguments.functionHandle)) {
      throw new Error('Function handle must be a number');
    }
    var function_mirror = LookupMirror(request.arguments.functionHandle);
    if (!function_mirror) {
      throw new Error('Failed to find function object by handle');
    }
    if (!function_mirror.isFunction()) {
      throw new Error('Value of non-function type is found by handle');
    }
    return function_mirror;
  } else {
    // No frames no scopes.
    if (this.exec_state_.frameCount() == 0) {
      throw new Error('No scopes');
    }

    // Get the frame for which the scopes are requested.
    var frame = this.frameForScopeRequest_(request);
    return frame;
  }
}


DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
  var scope_holder = this.scopeHolderForScopeRequest_(request);
  // No frames no scopes.
  if (this.exec_state_.frameCount() == 0) {
    return response.failed('No scopes');
  }

  // Fill all scopes for this frame or function.
  var total_scopes = scope_holder.scopeCount();
  // Get the frame for which the scopes are requested.
  var frame = this.frameForScopeRequest_(request);

  // Fill all scopes for this frame.
  var total_scopes = frame.scopeCount();
  var scopes = [];
  for (var i = 0; i < total_scopes; i++) {
    scopes.push(scope_holder.scope(i));
    scopes.push(frame.scope(i));
  }
  response.body = {
    fromScope: 0,

@@ -2015,19 +1991,24 @@ DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {


DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
  // Get the frame or function for which the scope is requested.
  var scope_holder = this.scopeHolderForScopeRequest_(request);
  // No frames no scopes.
  if (this.exec_state_.frameCount() == 0) {
    return response.failed('No scopes');
  }

  // Get the frame for which the scope is requested.
  var frame = this.frameForScopeRequest_(request);

  // With no scope argument just return top scope.
  var scope_index = 0;
  if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
    scope_index = %ToNumber(request.arguments.number);
    if (scope_index < 0 || scope_holder.scopeCount() <= scope_index) {
    if (scope_index < 0 || frame.scopeCount() <= scope_index) {
      return response.failed('Invalid scope number');
    }
  }

  response.body = scope_holder.scope(scope_index);
  response.body = frame.scope(scope_index);
};

@@ -1857,6 +1857,13 @@ static void RedirectActivationsToRecompiledCodeOnThread(
      // break slots.
      debug_break_slot_count++;
    }
    if (frame_code->has_self_optimization_header() &&
        !new_code->has_self_optimization_header()) {
      delta -= FullCodeGenerator::self_optimization_header_size();
    } else {
      ASSERT(frame_code->has_self_optimization_header() ==
             new_code->has_self_optimization_header());
    }
    int debug_break_slot_bytes =
        debug_break_slot_count * Assembler::kDebugBreakSlotLength;
    if (FLAG_trace_deopt) {

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -245,8 +245,6 @@ class Debug {
   bool IsBreakOnException(ExceptionBreakType type);
   void PrepareStep(StepAction step_action, int step_count);
   void ClearStepping();
-  void ClearStepOut();
-  bool IsStepping() { return thread_local_.step_count_ > 0; }
   bool StepNextContinue(BreakLocationIterator* break_location_iterator,
                         JavaScriptFrame* frame);
   static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
@@ -466,6 +464,7 @@ class Debug {
   void ActivateStepIn(StackFrame* frame);
   void ClearStepIn();
   void ActivateStepOut(StackFrame* frame);
+  void ClearStepOut();
   void ClearStepNext();
   // Returns whether the compile succeeded.
   void RemoveDebugInfo(Handle<DebugInfo> debug_info);
 
@@ -130,6 +130,12 @@ class Double {
     return (d64 & kExponentMask) == kExponentMask;
   }
 
+  bool IsNan() const {
+    uint64_t d64 = AsUint64();
+    return ((d64 & kExponentMask) == kExponentMask) &&
+           ((d64 & kSignificandMask) != 0);
+  }
+
   bool IsInfinite() const {
     uint64_t d64 = AsUint64();
     return ((d64 & kExponentMask) == kExponentMask) &&
 
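The IsNan()/IsInfinite() predicates restored above are pure bit tests: an IEEE-754 double is NaN when its exponent bits are all ones and its significand is nonzero, and infinite when the exponent is all ones and the significand is zero. A minimal standalone sketch, assuming the same mask values as double.h; the memcpy-based AsUint64 helper is this sketch's stand-in for V8's bit cast:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    static const uint64_t kExponentMask    = 0x7FF0000000000000ULL;
    static const uint64_t kSignificandMask = 0x000FFFFFFFFFFFFFULL;

    static uint64_t AsUint64(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // type-pun safely via memcpy
      return bits;
    }

    static bool IsNan(double d) {
      uint64_t d64 = AsUint64(d);
      return ((d64 & kExponentMask) == kExponentMask) &&
             ((d64 & kSignificandMask) != 0);  // all-ones exponent, nonzero payload
    }

    static bool IsInfinite(double d) {
      uint64_t d64 = AsUint64(d);
      return ((d64 & kExponentMask) == kExponentMask) &&
             ((d64 & kSignificandMask) == 0);  // all-ones exponent, zero payload
    }

    int main() {
      assert(IsNan(std::numeric_limits<double>::quiet_NaN()));
      assert(IsInfinite(std::numeric_limits<double>::infinity()));
      assert(!IsNan(1.5) && !IsInfinite(1.5));
      return 0;
    }
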
@@ -1332,8 +1332,18 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
 
 
 void ElementsAccessor::InitializeOncePerProcess() {
+  static struct ConcreteElementsAccessors {
+#define ACCESSOR_STRUCT(Class, Kind, Store) Class* Kind##_handler;
+    ELEMENTS_LIST(ACCESSOR_STRUCT)
+#undef ACCESSOR_STRUCT
+  } element_accessors = {
+#define ACCESSOR_INIT(Class, Kind, Store) new Class(#Kind),
+    ELEMENTS_LIST(ACCESSOR_INIT)
+#undef ACCESSOR_INIT
+  };
+
   static ElementsAccessor* accessor_array[] = {
-#define ACCESSOR_ARRAY(Class, Kind, Store) new Class(#Kind),
+#define ACCESSOR_ARRAY(Class, Kind, Store) element_accessors.Kind##_handler,
     ELEMENTS_LIST(ACCESSOR_ARRAY)
 #undef ACCESSOR_ARRAY
   };
@@ -1345,14 +1355,6 @@ void ElementsAccessor::InitializeOncePerProcess() {
 }
 
 
-void ElementsAccessor::TearDown() {
-#define ACCESSOR_DELETE(Class, Kind, Store) delete elements_accessors_[Kind];
-  ELEMENTS_LIST(ACCESSOR_DELETE)
-#undef ACCESSOR_DELETE
-  elements_accessors_ = NULL;
-}
-
-
 template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
 MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
                                   ElementsKindTraits>::
 
@@ -131,7 +131,6 @@ class ElementsAccessor {
   static ElementsAccessor* ForArray(FixedArrayBase* array);
 
   static void InitializeOncePerProcess();
-  static void TearDown();
 
  protected:
   friend class NonStrictArgumentsElementsAccessor;
 
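Both versions of InitializeOncePerProcess() above lean on the ELEMENTS_LIST X-macro: a single list macro is expanded several times with different per-entry macros to generate parallel declarations and initializers that can never drift out of sync. A minimal sketch of the idiom under an invented COLOR_LIST (all names here are illustrative, not from V8):

    #include <cstdio>

    // Hypothetical list macro in the style of ELEMENTS_LIST; each entry is
    // V(Name, Value).
    #define COLOR_LIST(V) \
      V(Red, 0xFF0000)    \
      V(Green, 0x00FF00)  \
      V(Blue, 0x0000FF)

    // First expansion: declare one struct field per list entry.
    struct Palette {
    #define DECLARE_FIELD(Name, Value) int Name##_rgb;
      COLOR_LIST(DECLARE_FIELD)
    #undef DECLARE_FIELD
    };

    // Second expansion of the same list: a parallel initializer, guaranteed
    // to match the field order above.
    static Palette palette = {
    #define INIT_FIELD(Name, Value) Value,
      COLOR_LIST(INIT_FIELD)
    #undef INIT_FIELD
    };

    int main() {
      std::printf("green = 0x%06X\n", palette.Green_rgb);  // prints 0x00FF00
      return 0;
    }
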
@@ -133,8 +133,11 @@ v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(
 
 
 void ExternalizeStringExtension::Register() {
-  static ExternalizeStringExtension externalize_extension;
-  static v8::DeclareExtension declaration(&externalize_extension);
+  static ExternalizeStringExtension* externalize_extension = NULL;
+  if (externalize_extension == NULL)
+    externalize_extension = new ExternalizeStringExtension;
+  static v8::DeclareExtension externalize_extension_declaration(
+      externalize_extension);
 }
 
 } }  // namespace v8::internal
 
@@ -46,8 +46,9 @@ v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
 
 
 void GCExtension::Register() {
-  static GCExtension gc_extension;
-  static v8::DeclareExtension declaration(&gc_extension);
+  static GCExtension* gc_extension = NULL;
+  if (gc_extension == NULL) gc_extension = new GCExtension();
+  static v8::DeclareExtension gc_extension_declaration(gc_extension);
 }
 
 } }  // namespace v8::internal
 
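The restored Register() bodies above switch from function-local static objects to heap-allocated singletons that are deliberately never deleted: a local static object is destroyed during static destruction, while the leaked pointer stays valid for any code that still runs at process shutdown. A compilable sketch of the same shape, with stand-ins for v8::Extension and v8::DeclareExtension (pre-C++11, single-threaded registration is assumed, as in the original):

    #include <string>

    // Illustrative stand-ins for the V8 extension types.
    struct Extension {
      explicit Extension(const char* name) : name_(name) {}
      std::string name_;
    };

    struct DeclareExtension {
      // Imagine this links the extension into a global registration list.
      explicit DeclareExtension(Extension* e) : extension_(e) {}
      Extension* extension_;
    };

    void RegisterMyExtension() {
      // Heap-allocate on first call and never delete: the object outlives
      // every static destructor, unlike a function-local static object.
      static Extension* my_extension = NULL;
      if (my_extension == NULL) my_extension = new Extension("my/extension");
      static DeclareExtension my_extension_declaration(my_extension);
    }

    int main() {
      RegisterMyExtension();
      return 0;
    }
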
@@ -291,15 +291,6 @@ Handle<Context> Factory::NewGlobalContext() {
 }
 
 
-Handle<Context> Factory::NewModuleContext(Handle<Context> previous,
-                                          Handle<ScopeInfo> scope_info) {
-  CALL_HEAP_FUNCTION(
-      isolate(),
-      isolate()->heap()->AllocateModuleContext(*previous, *scope_info),
-      Context);
-}
-
-
 Handle<Context> Factory::NewFunctionContext(int length,
                                             Handle<JSFunction> function) {
   CALL_HEAP_FUNCTION(
@@ -333,9 +324,10 @@ Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
 }
 
 
-Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function,
-                                         Handle<Context> previous,
-                                         Handle<ScopeInfo> scope_info) {
+Handle<Context> Factory::NewBlockContext(
+    Handle<JSFunction> function,
+    Handle<Context> previous,
+    Handle<ScopeInfo> scope_info) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateBlockContext(*function,
@@ -936,13 +928,6 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
 }
 
 
-Handle<JSModule> Factory::NewJSModule() {
-  CALL_HEAP_FUNCTION(
-      isolate(),
-      isolate()->heap()->AllocateJSModule(), JSModule);
-}
-
-
 Handle<GlobalObject> Factory::NewGlobalObject(
     Handle<JSFunction> constructor) {
   CALL_HEAP_FUNCTION(isolate(),
 
@@ -162,12 +162,9 @@ class Factory {
   // Create a global (but otherwise uninitialized) context.
   Handle<Context> NewGlobalContext();
 
-  // Create a module context.
-  Handle<Context> NewModuleContext(Handle<Context> previous,
-                                   Handle<ScopeInfo> scope_info);
-
   // Create a function context.
-  Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function);
+  Handle<Context> NewFunctionContext(int length,
+                                     Handle<JSFunction> function);
 
   // Create a catch context.
   Handle<Context> NewCatchContext(Handle<JSFunction> function,
@@ -180,7 +177,7 @@ class Factory {
                                  Handle<Context> previous,
                                  Handle<JSObject> extension);
 
-  // Create a block context.
+  // Create a 'block' context.
   Handle<Context> NewBlockContext(Handle<JSFunction> function,
                                   Handle<Context> previous,
                                   Handle<ScopeInfo> scope_info);
@@ -265,9 +262,6 @@ class Factory {
   // runtime.
   Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
 
-  // JS modules are pretenured.
-  Handle<JSModule> NewJSModule();
-
   // JS arrays are pretenured when allocated by the parser.
   Handle<JSArray> NewJSArray(int capacity,
                              ElementsKind elements_kind = FAST_ELEMENTS,
 
@@ -132,8 +132,6 @@ public:
 
 // Flags for language modes and experimental language features.
 DEFINE_bool(use_strict, false, "enforce strict mode")
-DEFINE_bool(es52_globals, false,
-            "activate new semantics for global var declarations")
 
 DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
 DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
@@ -167,12 +165,7 @@ DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
 DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
 DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
 DEFINE_bool(use_inlining, true, "use function inlining")
-DEFINE_int(max_inlined_source_size, 600,
-           "maximum source size in bytes considered for a single inlining")
 DEFINE_int(max_inlined_nodes, 196,
            "maximum number of AST nodes considered for a single inlining")
-DEFINE_int(max_inlined_nodes_cumulative, 196,
-           "maximum cumulative number of AST nodes considered for inlining")
 DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
-DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
 DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
             true,
@@ -195,8 +188,6 @@ DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
 DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
 DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
 DEFINE_bool(use_osr, true, "use on-stack replacement")
-DEFINE_bool(array_bounds_checks_elimination, true,
-            "perform array bounds checks elimination")
 
 DEFINE_bool(trace_osr, false, "trace on-stack replacement")
 DEFINE_int(stress_runs, 0, "number of stress runs")
 
@@ -1359,28 +1359,34 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
 // -------------------------------------------------------------------------
 
 
 int NumRegs(RegList reglist) {
-  return CompilerIntrinsics::CountSetBits(reglist);
+  int n = 0;
+  while (reglist != 0) {
+    n++;
+    reglist &= reglist - 1;  // clear one bit
+  }
+  return n;
 }
 
 
 struct JSCallerSavedCodeData {
+  JSCallerSavedCodeData() {
+    int i = 0;
+    for (int r = 0; r < kNumRegs; r++)
+      if ((kJSCallerSaved & (1 << r)) != 0)
+        reg_code[i++] = r;
+
+    ASSERT(i == kNumJSCallerSaved);
+  }
   int reg_code[kNumJSCallerSaved];
 };
 
-JSCallerSavedCodeData caller_saved_code_data;
-
-void SetUpJSCallerSavedCodeData() {
-  int i = 0;
-  for (int r = 0; r < kNumRegs; r++)
-    if ((kJSCallerSaved & (1 << r)) != 0)
-      caller_saved_code_data.reg_code[i++] = r;
-
-  ASSERT(i == kNumJSCallerSaved);
-}
+static LazyInstance<JSCallerSavedCodeData>::type caller_saved_code_data =
+    LAZY_INSTANCE_INITIALIZER;
 
 int JSCallerSavedCode(int n) {
   ASSERT(0 <= n && n < kNumJSCallerSaved);
-  return caller_saved_code_data.reg_code[n];
+  return caller_saved_code_data.Get().reg_code[n];
 }
 
@@ -40,8 +40,6 @@ typedef uint32_t RegList;
 // Get the number of registers in a given register list.
 int NumRegs(RegList list);
 
-void SetUpJSCallerSavedCodeData();
-
 // Return the code of the n-th saved register available to JavaScript.
 int JSCallerSavedCode(int n);
 
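The restored NumRegs() counts set bits with the classic clear-lowest-set-bit loop — `reglist &= reglist - 1` removes exactly one bit per iteration — replacing the CompilerIntrinsics::CountSetBits() call. A self-contained version of that loop with a quick check:

    #include <cassert>
    #include <cstdint>

    typedef uint32_t RegList;

    // Same loop as in the frames.cc hunk above: each iteration clears the
    // lowest set bit, so the loop body runs once per register in the list.
    int NumRegs(RegList reglist) {
      int n = 0;
      while (reglist != 0) {
        n++;
        reglist &= reglist - 1;  // clear one bit
      }
      return n;
    }

    int main() {
      assert(NumRegs(0x00000000u) == 0);
      assert(NumRegs(0x00000001u) == 1);
      assert(NumRegs(0x8000000Fu) == 5);  // bit 31 plus bits 0..3
      return 0;
    }
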
@@ -316,6 +316,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   code->set_optimizable(info->IsOptimizable() &&
                         !info->function()->flags()->Contains(kDontOptimize) &&
                         info->function()->scope()->AllowsLazyRecompilation());
+  code->set_self_optimization_header(cgen.has_self_optimization_header_);
   cgen.PopulateDeoptimizationData(code);
   cgen.PopulateTypeFeedbackInfo(code);
   cgen.PopulateTypeFeedbackCells(code);
@@ -331,6 +332,9 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   code->set_stack_check_table_offset(table_offset);
   CodeGenerator::PrintCode(code, info);
   info->SetCode(code);  // May be an empty handle.
+  if (!code.is_null()) {
+    isolate->runtime_profiler()->NotifyCodeGenerated(code->instruction_size());
+  }
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (FLAG_gdbjit && !code.is_null()) {
     GDBJITLineInfo* lineinfo =
 
@@ -569,91 +573,88 @@ void FullCodeGenerator::DoTest(const TestContext* context) {
 
 void FullCodeGenerator::VisitDeclarations(
     ZoneList<Declaration*>* declarations) {
-  ZoneList<Handle<Object> >* saved_globals = globals_;
-  ZoneList<Handle<Object> > inner_globals(10);
-  globals_ = &inner_globals;
+  int save_global_count = global_count_;
+  global_count_ = 0;
 
   AstVisitor::VisitDeclarations(declarations);
-  if (!globals_->is_empty()) {
-    Handle<FixedArray> array =
-        isolate()->factory()->NewFixedArray(globals_->length(), TENURED);
-    for (int i = 0; i < globals_->length(); ++i)
-      array->set(i, *globals_->at(i));
-    DeclareGlobals(array);
+
+  // Batch declare global functions and variables.
+  if (global_count_ > 0) {
+    Handle<FixedArray> array =
+        isolate()->factory()->NewFixedArray(2 * global_count_, TENURED);
+    int length = declarations->length();
+    for (int j = 0, i = 0; i < length; i++) {
+      Declaration* decl = declarations->at(i);
+      Variable* var = decl->proxy()->var();
+
+      if (var->IsUnallocated()) {
+        array->set(j++, *(var->name()));
+        FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
+        if (fun_decl == NULL) {
+          if (var->binding_needs_init()) {
+            // In case this binding needs initialization use the hole.
+            array->set_the_hole(j++);
+          } else {
+            array->set_undefined(j++);
+          }
+        } else {
+          Handle<SharedFunctionInfo> function =
+              Compiler::BuildFunctionInfo(fun_decl->fun(), script());
+          // Check for stack-overflow exception.
+          if (function.is_null()) {
+            SetStackOverflow();
+            return;
+          }
+          array->set(j++, *function);
+        }
+      }
+    }
+    // Invoke the platform-dependent code generator to do the actual
+    // declaration the global functions and variables.
+    DeclareGlobals(array);
   }
 
-  globals_ = saved_globals;
+  global_count_ = save_global_count;
 }
 
 
 void FullCodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
   EmitDeclaration(decl->proxy(), decl->mode(), NULL);
 }
 
 
 void FullCodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
   EmitDeclaration(decl->proxy(), decl->mode(), decl->fun());
 }
 
 
 void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* decl) {
   EmitDeclaration(decl->proxy(), decl->mode(), NULL);
 }
 
 
 void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* decl) {
   EmitDeclaration(decl->proxy(), decl->mode(), NULL);
 }
 
 
 void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
   // TODO(rossberg)
 }
 
 
 void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
-  Handle<JSModule> instance = module->interface()->Instance();
-  ASSERT(!instance.is_null());
-
-  // Allocate a module context statically.
-  Block* block = module->body();
-  Scope* saved_scope = scope();
-  scope_ = block->scope();
-  Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
-
-  // Generate code for module creation and linking.
-  Comment cmnt(masm_, "[ ModuleLiteral");
-  SetStatementPosition(block);
-
-  if (scope_info->HasContext()) {
-    // Set up module context.
-    __ Push(scope_info);
-    __ Push(instance);
-    __ CallRuntime(Runtime::kPushModuleContext, 2);
-    StoreToFrameField(
-        StandardFrameConstants::kContextOffset, context_register());
-  }
-
-  {
-    Comment cmnt(masm_, "[ Declarations");
-    VisitDeclarations(scope_->declarations());
-  }
-
-  scope_ = saved_scope;
-  if (scope_info->HasContext()) {
-    // Pop module context.
-    LoadContextField(context_register(), Context::PREVIOUS_INDEX);
-    // Update local stack frame context field.
-    StoreToFrameField(
-        StandardFrameConstants::kContextOffset, context_register());
-  }
-
-  // Populate module instance object.
-  const PropertyAttributes attr =
-      static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM);
-  for (Interface::Iterator it = module->interface()->iterator();
-       !it.done(); it.Advance()) {
-    if (it.interface()->IsModule()) {
-      Handle<Object> value = it.interface()->Instance();
-      ASSERT(!value.is_null());
-      JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
-    } else {
-      // TODO(rossberg): set proper getters instead of undefined...
-      // instance->DefineAccessor(*it.name(), ACCESSOR_GETTER, *getter, attr);
-      Handle<Object> value(isolate()->heap()->undefined_value());
-      JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
-    }
-  }
-  USE(instance->PreventExtensions());
+  // TODO(rossberg)
 }
 
 
 void FullCodeGenerator::VisitModuleVariable(ModuleVariable* module) {
-  // Noting to do.
-  // The instance object is resolved statically through the module's interface.
+  // TODO(rossberg)
 }
 
 
 void FullCodeGenerator::VisitModulePath(ModulePath* module) {
-  // Noting to do.
-  // The instance object is resolved statically through the module's interface.
+  // TODO(rossberg)
 }
 
@@ -915,9 +916,9 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
 
   Scope* saved_scope = scope();
   // Push a block context when entering a block with block scoped variables.
-  if (stmt->scope() != NULL) {
+  if (stmt->block_scope() != NULL) {
     { Comment cmnt(masm_, "[ Extend block context");
-      scope_ = stmt->scope();
+      scope_ = stmt->block_scope();
       Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
       int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
       __ Push(scope_info);
@@ -944,7 +945,7 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 
   // Pop block context if necessary.
-  if (stmt->scope() != NULL) {
+  if (stmt->block_scope() != NULL) {
     LoadContextField(context_register(), Context::PREVIOUS_INDEX);
     // Update local stack frame context field.
     StoreToFrameField(StandardFrameConstants::kContextOffset,
 
@@ -83,17 +83,22 @@ class FullCodeGenerator: public AstVisitor {
         scope_(info->scope()),
         nesting_stack_(NULL),
         loop_depth_(0),
-        globals_(NULL),
+        global_count_(0),
         context_(NULL),
         bailout_entries_(info->HasDeoptimizationSupport()
                          ? info->function()->ast_node_count() : 0),
         stack_checks_(2),  // There's always at least one.
         type_feedback_cells_(info->HasDeoptimizationSupport()
                              ? info->function()->ast_node_count() : 0),
-        ic_total_count_(0) { }
+        ic_total_count_(0),
+        has_self_optimization_header_(false) { }
 
   static bool MakeCode(CompilationInfo* info);
 
+  // Returns the platform-specific size in bytes of the self-optimization
+  // header.
+  static int self_optimization_header_size();
+
   // Encode state and pc-offset as a BitField<type, start, size>.
   // Only use 30 bits because we encode the result as a smi.
   class StateField : public BitField<State, 0, 1> { };
@@ -202,7 +207,7 @@ class FullCodeGenerator: public AstVisitor {
     virtual ~NestedBlock() {}
 
     virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
-      if (statement()->AsBlock()->scope() != NULL) {
+      if (statement()->AsBlock()->block_scope() != NULL) {
         ++(*context_length);
       }
       return previous_;
@@ -413,9 +418,12 @@ class FullCodeGenerator: public AstVisitor {
                          Label* if_true,
                          Label* if_false);
 
-  // If enabled, emit debug code for checking that the current context is
-  // neither a with nor a catch context.
-  void EmitDebugCheckDeclarationContext(Variable* variable);
+  // Platform-specific code for a variable, constant, or function
+  // declaration. Functions have an initial value.
+  // Increments global_count_ for unallocated variables.
+  void EmitDeclaration(VariableProxy* proxy,
+                       VariableMode mode,
+                       FunctionLiteral* function);
 
   // Platform-specific code for checking the stack limit at the back edge of
   // a loop.
@@ -545,8 +553,12 @@ class FullCodeGenerator: public AstVisitor {
   Handle<Script> script() { return info_->script(); }
   bool is_eval() { return info_->is_eval(); }
   bool is_native() { return info_->is_native(); }
-  bool is_classic_mode() { return language_mode() == CLASSIC_MODE; }
-  LanguageMode language_mode() { return function()->language_mode(); }
+  bool is_classic_mode() {
+    return language_mode() == CLASSIC_MODE;
+  }
+  LanguageMode language_mode() {
+    return function()->language_mode();
+  }
   FunctionLiteral* function() { return info_->function(); }
   Scope* scope() { return scope_; }
 
@@ -778,12 +790,13 @@ class FullCodeGenerator: public AstVisitor {
   Label return_label_;
   NestedStatement* nesting_stack_;
   int loop_depth_;
-  ZoneList<Handle<Object> >* globals_;
+  int global_count_;
   const ExpressionContext* context_;
   ZoneList<BailoutEntry> bailout_entries_;
   ZoneList<BailoutEntry> stack_checks_;
   ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
   int ic_total_count_;
+  bool has_self_optimization_header_;
   Handle<FixedArray> handler_table_;
   Handle<JSGlobalPropertyCell> profiling_counter_;
 
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -729,9 +729,9 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
       Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
 
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    if (descs->IsProperty(i) && !descs->GetDetails(i).IsDontEnum()) {
+    if (descs->IsProperty(i) && !descs->IsDontEnum(i)) {
       storage->set(index, descs->GetKey(i));
-      PropertyDetails details = descs->GetDetails(i);
+      PropertyDetails details(descs->GetDetails(i));
       sort_array->set(index, Smi::FromInt(details.index()));
       if (!indices.is_null()) {
         if (details.type() != FIELD) {
 
@@ -63,9 +63,7 @@ class TemplateHashMapImpl {
   Entry* Lookup(void* key, uint32_t hash, bool insert);
 
   // Removes the entry with matching key.
-  // It returns the value of the deleted entry
-  // or null if there is no value for such key.
-  void* Remove(void* key, uint32_t hash);
+  void Remove(void* key, uint32_t hash);
 
   // Empties the hash map (occupancy() == 0).
   void Clear();
@@ -148,15 +146,14 @@ typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
 
 
 template<class P>
-void* TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
+void TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
   // Lookup the entry for the key to remove.
   Entry* p = Probe(key, hash);
   if (p->key == NULL) {
     // Key not found nothing to remove.
-    return NULL;
+    return;
   }
 
-  void* value = p->value;
   // To remove an entry we need to ensure that it does not create an empty
   // entry that will cause the search for another entry to stop too soon. If all
   // the entries between the entry to remove and the next empty slot have their
@@ -205,7 +202,6 @@ void* TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
   // Clear the entry which is allowed to en emptied.
   p->key = NULL;
   occupancy_--;
-  return value;
 }
 
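The Remove() comments above describe the classic open-addressing hazard: blanking a slot can cut the probe chain of entries that collided past it, so entries after the hole may need to be shifted back into it. A toy linear-probing table showing that repair step — this is an illustrative reconstruction of the technique the comments describe, not V8's actual code:

    #include <cassert>
    #include <cstddef>

    static const size_t kCapacity = 8;  // power of two, linear probing

    struct Entry { int key; int value; bool used; };
    static Entry table[kCapacity];

    static size_t Probe(int key) {
      size_t i = static_cast<size_t>(key) & (kCapacity - 1);
      while (table[i].used && table[i].key != key) i = (i + 1) & (kCapacity - 1);
      return i;
    }

    static void Insert(int key, int value) {
      size_t i = Probe(key);
      table[i].key = key; table[i].value = value; table[i].used = true;
    }

    static void Remove(int key) {
      size_t p = Probe(key);
      if (!table[p].used) return;  // key not found, nothing to remove
      // Walk the cluster after p; an entry whose home slot is not in the
      // cyclic range (p, q] would become unreachable, so move it into the hole.
      size_t q = p;
      for (;;) {
        q = (q + 1) & (kCapacity - 1);
        if (!table[q].used) break;
        size_t home = static_cast<size_t>(table[q].key) & (kCapacity - 1);
        bool between = (p < q) ? (p < home && home <= q)
                               : (p < home || home <= q);
        if (!between) { table[p] = table[q]; p = q; }
      }
      table[p].used = false;
    }

    int main() {
      Insert(1, 10); Insert(9, 90);  // 9 collides with 1 (both hash to slot 1)
      Remove(1);                     // must keep 9 reachable
      assert(table[Probe(9)].used && table[Probe(9)].value == 90);
      return 0;
    }
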
@@ -460,16 +460,15 @@ MaybeObject* Heap::PrepareForCompare(String* str) {
 }
 
 
-intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
-    intptr_t change_in_bytes) {
+int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
   ASSERT(HasBeenSetUp());
-  intptr_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
+  int amount = amount_of_external_allocated_memory_ + change_in_bytes;
   if (change_in_bytes >= 0) {
     // Avoid overflow.
     if (amount > amount_of_external_allocated_memory_) {
       amount_of_external_allocated_memory_ = amount;
     }
-    intptr_t amount_since_last_global_gc =
+    int amount_since_last_global_gc =
         amount_of_external_allocated_memory_ -
         amount_of_external_allocated_memory_at_last_global_gc_;
     if (amount_since_last_global_gc > external_allocation_limit_) {
 
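The rolled-back accounting keeps amount_of_external_allocated_memory_ in a plain int, which is why the "Avoid overflow" guard exists: once the registered total approaches 2GB, a 32-bit sum wraps, and the guard can only detect the wrap, not represent the true total. A small demonstration of the wrap that the later intptr_t variant avoids (the values are arbitrary; the narrowing conversion is implementation-defined, which is the point):

    #include <cstdio>
    #include <stdint.h>

    int main() {
      int32_t amount32 = 2000000000;  // ~2GB already registered
      int32_t change = 500000000;     // +500MB of external memory
      // Compute in 64 bits to avoid undefined behavior, then narrow to show
      // what a 32-bit counter would hold.
      int64_t amount64 = static_cast<int64_t>(amount32) + change;
      int32_t wrapped = static_cast<int32_t>(amount64);
      std::printf("64-bit total: %lld\n", static_cast<long long>(amount64));
      std::printf("32-bit wrap:  %d\n", wrapped);  // negative on typical targets
      return 0;
    }
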
@@ -33,6 +33,7 @@
 namespace v8 {
 namespace internal {
 
+
 HeapProfiler::HeapProfiler()
     : snapshots_(new HeapSnapshotsCollection()),
       next_snapshot_uid_(1) {
@@ -85,24 +86,6 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
 }
 
 
-void HeapProfiler::StartHeapObjectsTracking() {
-  ASSERT(Isolate::Current()->heap_profiler() != NULL);
-  Isolate::Current()->heap_profiler()->StartHeapObjectsTrackingImpl();
-}
-
-
-void HeapProfiler::StopHeapObjectsTracking() {
-  ASSERT(Isolate::Current()->heap_profiler() != NULL);
-  Isolate::Current()->heap_profiler()->StopHeapObjectsTrackingImpl();
-}
-
-
-void HeapProfiler::PushHeapObjectsStats(v8::OutputStream* stream) {
-  ASSERT(Isolate::Current()->heap_profiler() != NULL);
-  return Isolate::Current()->heap_profiler()->PushHeapObjectsStatsImpl(stream);
-}
-
-
 void HeapProfiler::DefineWrapperClass(
     uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback) {
   ASSERT(class_id != v8::HeapProfiler::kPersistentHandleNoClassId);
@@ -153,20 +136,6 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name,
   return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control);
 }
 
-void HeapProfiler::StartHeapObjectsTrackingImpl() {
-  snapshots_->StartHeapObjectsTracking();
-}
-
-
-void HeapProfiler::PushHeapObjectsStatsImpl(OutputStream* stream) {
-  snapshots_->PushHeapObjectsStats(stream);
-}
-
-
-void HeapProfiler::StopHeapObjectsTrackingImpl() {
-  snapshots_->StopHeapObjectsTracking();
-}
-
 
 int HeapProfiler::GetSnapshotsCount() {
   HeapProfiler* profiler = Isolate::Current()->heap_profiler();
@@ -189,15 +158,6 @@ HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
 }
 
 
-SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
-  if (!obj->IsHeapObject())
-    return v8::HeapProfiler::kUnknownObjectId;
-  HeapProfiler* profiler = Isolate::Current()->heap_profiler();
-  ASSERT(profiler != NULL);
-  return profiler->snapshots_->FindObjectId(HeapObject::cast(*obj)->address());
-}
-
-
 void HeapProfiler::DeleteAllSnapshots() {
   HeapProfiler* profiler = Isolate::Current()->heap_profiler();
   ASSERT(profiler != NULL);
 
@@ -44,6 +44,8 @@ class HeapSnapshotsCollection;
   }                                                                          \
   } while (false)
 
+// The HeapProfiler writes data to the log files, which can be postprocessed
+// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
 class HeapProfiler {
  public:
   static void SetUp();
@@ -55,14 +57,9 @@ class HeapProfiler {
   static HeapSnapshot* TakeSnapshot(String* name,
                                     int type,
                                     v8::ActivityControl* control);
-
-  static void StartHeapObjectsTracking();
-  static void StopHeapObjectsTracking();
-  static void PushHeapObjectsStats(OutputStream* stream);
   static int GetSnapshotsCount();
   static HeapSnapshot* GetSnapshot(int index);
   static HeapSnapshot* FindSnapshot(unsigned uid);
-  static SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
   static void DeleteAllSnapshots();
 
   void ObjectMoveEvent(Address from, Address to);
@@ -87,10 +84,6 @@ class HeapProfiler {
                                  v8::ActivityControl* control);
   void ResetSnapshots();
 
-  void StartHeapObjectsTrackingImpl();
-  void StopHeapObjectsTrackingImpl();
-  void PushHeapObjectsStatsImpl(OutputStream* stream);
-
   HeapSnapshotsCollection* snapshots_;
   unsigned next_snapshot_uid_;
   List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
 
@@ -42,7 +42,6 @@
 #include "natives.h"
 #include "objects-visiting.h"
 #include "objects-visiting-inl.h"
-#include "once.h"
 #include "runtime-profiler.h"
 #include "scopeinfo.h"
 #include "snapshot.h"
@@ -61,26 +60,33 @@
 namespace v8 {
 namespace internal {
 
+static LazyMutex gc_initializer_mutex = LAZY_MUTEX_INITIALIZER;
+
+
 Heap::Heap()
     : isolate_(NULL),
 // semispace_size_ should be a power of 2 and old_generation_size_ should be
 // a multiple of Page::kPageSize.
-#if defined(ANDROID)
-#define LUMP_OF_MEMORY (128 * KB)
-      code_range_size_(0),
-#elif defined(V8_TARGET_ARCH_X64)
+#if defined(V8_TARGET_ARCH_X64)
 #define LUMP_OF_MEMORY (2 * MB)
       code_range_size_(512*MB),
 #else
 #define LUMP_OF_MEMORY MB
       code_range_size_(0),
 #endif
+#if defined(ANDROID)
+      reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      initial_semispace_size_(Page::kPageSize),
+      max_old_generation_size_(192*MB),
+      max_executable_size_(max_old_generation_size_),
+#else
       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
       initial_semispace_size_(Page::kPageSize),
       max_old_generation_size_(700ul * LUMP_OF_MEMORY),
       max_executable_size_(256l * LUMP_OF_MEMORY),
+#endif
 
 // Variables set based on semispace_size_ and old_generation_size_ in
 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
 
@@ -238,17 +244,12 @@ int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                               const char** reason) {
   // Is global GC requested?
-  if (space != NEW_SPACE) {
+  if (space != NEW_SPACE || FLAG_gc_global) {
     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
     *reason = "GC in old space requested";
     return MARK_COMPACTOR;
   }
 
-  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
-    *reason = "GC in old space forced by flags";
-    return MARK_COMPACTOR;
-  }
-
   // Is enough data promoted to justify a global GC?
   if (OldGenerationPromotionLimitReached()) {
     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
 
@@ -1129,27 +1130,6 @@ void PromotionQueue::RelocateQueueHead() {
 }
 
 
-class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
- public:
-  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
-
-  virtual Object* RetainAs(Object* object) {
-    if (!heap_->InFromSpace(object)) {
-      return object;
-    }
-
-    MapWord map_word = HeapObject::cast(object)->map_word();
-    if (map_word.IsForwardingAddress()) {
-      return map_word.ToForwardingAddress();
-    }
-    return NULL;
-  }
-
- private:
-  Heap* heap_;
-};
-
-
 void Heap::Scavenge() {
 #ifdef DEBUG
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
@@ -1248,9 +1228,6 @@ void Heap::Scavenge() {
   }
   incremental_marking()->UpdateMarkingDequeAfterScavenge();
 
-  ScavengeWeakObjectRetainer weak_object_retainer(this);
-  ProcessWeakReferences(&weak_object_retainer);
-
   ASSERT(new_space_front == new_space_.top());
 
   // Set age mark.
 
@@ -1337,8 +1314,7 @@ void Heap::UpdateReferencesInExternalStringTable(
 
 static Object* ProcessFunctionWeakReferences(Heap* heap,
                                              Object* function,
-                                             WeakObjectRetainer* retainer,
-                                             bool record_slots) {
+                                             WeakObjectRetainer* retainer) {
   Object* undefined = heap->undefined_value();
   Object* head = undefined;
   JSFunction* tail = NULL;
@@ -1355,12 +1331,6 @@ static Object* ProcessFunctionWeakReferences(Heap* heap,
       // Subsequent elements in the list.
       ASSERT(tail != NULL);
       tail->set_next_function_link(retain);
-      if (record_slots) {
-        Object** next_function =
-            HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
-        heap->mark_compact_collector()->RecordSlot(
-            next_function, next_function, retain);
-      }
     }
     // Retained function is new tail.
     candidate_function = reinterpret_cast<JSFunction*>(retain);
@@ -1389,15 +1359,6 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
   Object* head = undefined;
   Context* tail = NULL;
   Object* candidate = global_contexts_list_;
-
-  // We don't record weak slots during marking or scavenges.
-  // Instead we do it once when we complete mark-compact cycle.
-  // Note that write barrier has no effect if we are already in the middle of
-  // compacting mark-sweep cycle and we have to record slots manually.
-  bool record_slots =
-      gc_state() == MARK_COMPACT &&
-      mark_compact_collector()->is_compacting();
-
   while (candidate != undefined) {
     // Check whether to keep the candidate in the list.
     Context* candidate_context = reinterpret_cast<Context*>(candidate);
@@ -1413,14 +1374,6 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
                                      Context::NEXT_CONTEXT_LINK,
                                      retain,
                                      UPDATE_WRITE_BARRIER);
-
-      if (record_slots) {
-        Object** next_context =
-            HeapObject::RawField(
-                tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
-        mark_compact_collector()->RecordSlot(
-            next_context, next_context, retain);
-      }
     }
     // Retained context is new tail.
     candidate_context = reinterpret_cast<Context*>(retain);
@@ -1433,19 +1386,11 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
       ProcessFunctionWeakReferences(
           this,
           candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
-          retainer,
-          record_slots);
+          retainer);
       candidate_context->set_unchecked(this,
                                        Context::OPTIMIZED_FUNCTIONS_LIST,
                                        function_list_head,
                                        UPDATE_WRITE_BARRIER);
-      if (record_slots) {
-        Object** optimized_functions =
-            HeapObject::RawField(
-                tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
-        mark_compact_collector()->RecordSlot(
-            optimized_functions, optimized_functions, function_list_head);
-      }
     }
 
     // Move to next element in the list.
 
@@ -1545,27 +1490,6 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
 }
 
 
-STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
-
-
-INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
-                                              HeapObject* object,
-                                              int size));
-
-static HeapObject* EnsureDoubleAligned(Heap* heap,
-                                       HeapObject* object,
-                                       int size) {
-  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
-    heap->CreateFillerObjectAt(object->address(), kPointerSize);
-    return HeapObject::FromAddress(object->address() + kPointerSize);
-  } else {
-    heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
-                               kPointerSize);
-    return object;
-  }
-}
-
-
 enum LoggingAndProfiling {
   LOGGING_AND_PROFILING_ENABLED,
   LOGGING_AND_PROFILING_DISABLED
 
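The EnsureDoubleAligned() helper removed above implements a simple trick: on a 32-bit heap every address is word (4-byte) aligned, so it is either 0 or 4 modulo 8; by allocating one extra word, the object can start one word later when the raw address is misaligned, and V8 fills the skipped word (or the trailing slack) with a filler object. A numeric sketch of that logic, simulating 32-bit addresses rather than touching a real heap:

    #include <cassert>
    #include <stdint.h>

    static const uint32_t kPointerSize = 4;        // 32-bit configuration
    static const uint32_t kDoubleAlignmentMask = 7;  // 8-byte alignment

    static uint32_t EnsureDoubleAligned(uint32_t address) {
      if ((address & kDoubleAlignmentMask) != 0) {
        return address + kPointerSize;  // skip the (filler) word
      }
      return address;  // already aligned; the slack word trails the object
    }

    int main() {
      assert(EnsureDoubleAligned(0x1000) == 0x1000);  // already 8-aligned
      assert(EnsureDoubleAligned(0x1004) == 0x1008);  // 4 mod 8: hop one word
      assert((EnsureDoubleAligned(0x1004) & kDoubleAlignmentMask) == 0);
      return 0;
    }
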
@@ -1689,10 +1613,7 @@ class ScavengingVisitor : public StaticVisitorBase {
     }
   }
 
-  template<ObjectContents object_contents,
-           SizeRestriction size_restriction,
-           int alignment>
+  template<ObjectContents object_contents, SizeRestriction size_restriction>
   static inline void EvacuateObject(Map* map,
                                     HeapObject** slot,
                                     HeapObject* object,
@@ -1701,26 +1622,19 @@ class ScavengingVisitor : public StaticVisitorBase {
            (object_size <= Page::kMaxNonCodeHeapObjectSize));
     SLOW_ASSERT(object->Size() == object_size);
 
-    int allocation_size = object_size;
-    if (alignment != kObjectAlignment) {
-      ASSERT(alignment == kDoubleAlignment);
-      allocation_size += kPointerSize;
-    }
-
     Heap* heap = map->GetHeap();
     if (heap->ShouldBePromoted(object->address(), object_size)) {
       MaybeObject* maybe_result;
 
       if ((size_restriction != SMALL) &&
-          (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
-        maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
+          (object_size > Page::kMaxNonCodeHeapObjectSize)) {
+        maybe_result = heap->lo_space()->AllocateRaw(object_size,
                                                      NOT_EXECUTABLE);
       } else {
         if (object_contents == DATA_OBJECT) {
-          maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
+          maybe_result = heap->old_data_space()->AllocateRaw(object_size);
         } else {
-          maybe_result =
-              heap->old_pointer_space()->AllocateRaw(allocation_size);
+          maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
         }
       }
@@ -1728,10 +1642,6 @@ class ScavengingVisitor : public StaticVisitorBase {
       if (maybe_result->ToObject(&result)) {
         HeapObject* target = HeapObject::cast(result);
 
-        if (alignment != kObjectAlignment) {
-          target = EnsureDoubleAligned(heap, target, allocation_size);
-        }
-
         // Order is important: slot might be inside of the target if target
         // was allocated over a dead object and slot comes from the store
         // buffer.
@@ -1739,27 +1649,18 @@ class ScavengingVisitor : public StaticVisitorBase {
         MigrateObject(heap, object, target, object_size);
 
         if (object_contents == POINTER_OBJECT) {
-          if (map->instance_type() == JS_FUNCTION_TYPE) {
-            heap->promotion_queue()->insert(
-                target, JSFunction::kNonWeakFieldsEndOffset);
-          } else {
-            heap->promotion_queue()->insert(target, object_size);
-          }
+          heap->promotion_queue()->insert(target, object_size);
         }
 
         heap->tracer()->increment_promoted_objects_size(object_size);
         return;
       }
     }
-    MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
+    MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
     Object* result = allocation->ToObjectUnchecked();
     HeapObject* target = HeapObject::cast(result);
 
-    if (alignment != kObjectAlignment) {
-      target = EnsureDoubleAligned(heap, target, allocation_size);
-    }
-
     // Order is important: slot might be inside of the target if target
     // was allocated over a dead object and slot comes from the store
     // buffer.
@@ -1795,7 +1696,7 @@ class ScavengingVisitor : public StaticVisitorBase {
                                         HeapObject** slot,
                                         HeapObject* object) {
     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
-    EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
+    EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
                                                  slot,
                                                  object,
                                                  object_size);
@@ -1807,11 +1708,10 @@ class ScavengingVisitor : public StaticVisitorBase {
                                               HeapObject* object) {
     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
     int object_size = FixedDoubleArray::SizeFor(length);
-    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
-        map,
-        slot,
-        object,
-        object_size);
+    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map,
+                                              slot,
+                                              object,
+                                              object_size);
   }
@@ -1819,8 +1719,7 @@ class ScavengingVisitor : public StaticVisitorBase {
                                       HeapObject** slot,
                                       HeapObject* object) {
     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
-    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
-        map, slot, object, object_size);
+    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
   }
@@ -1829,8 +1728,7 @@ class ScavengingVisitor : public StaticVisitorBase {
                                          HeapObject* object) {
     int object_size = SeqAsciiString::cast(object)->
         SeqAsciiStringSize(map->instance_type());
-    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
-        map, slot, object, object_size);
+    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
   }
@@ -1839,8 +1737,7 @@ class ScavengingVisitor : public StaticVisitorBase {
                                          HeapObject* object) {
     int object_size = SeqTwoByteString::cast(object)->
         SeqTwoByteStringSize(map->instance_type());
-    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
-        map, slot, object, object_size);
+    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
   }
@@ -1883,8 +1780,7 @@ class ScavengingVisitor : public StaticVisitorBase {
     }
 
     int object_size = ConsString::kSize;
-    EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
-        map, slot, object, object_size);
+    EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
   }
 
   template<ObjectContents object_contents>
@@ -1894,16 +1790,14 @@ class ScavengingVisitor : public StaticVisitorBase {
     static inline void VisitSpecialized(Map* map,
                                         HeapObject** slot,
                                         HeapObject* object) {
-      EvacuateObject<object_contents, SMALL, kObjectAlignment>(
-          map, slot, object, object_size);
+      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
    }
 
     static inline void Visit(Map* map,
                              HeapObject** slot,
                              HeapObject* object) {
       int object_size = map->instance_size();
-      EvacuateObject<object_contents, SMALL, kObjectAlignment>(
-          map, slot, object, object_size);
+      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
     }
   };
 
@@ -3939,16 +3833,6 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
 }
 
 
-MaybeObject* Heap::AllocateJSModule() {
-  // Allocate a fresh map. Modules do not have a prototype.
-  Map* map;
-  MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
-  if (!maybe_map->To(&map)) return maybe_map;
-  // Allocate the object based on the map.
-  return AllocateJSObjectFromMap(map, TENURED);
-}
-
-
 MaybeObject* Heap::AllocateJSArrayAndStorage(
     ElementsKind elements_kind,
     int length,
@@ -4085,7 +3969,7 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
   // Fill these accessors into the dictionary.
   DescriptorArray* descs = map->instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    PropertyDetails details = descs->GetDetails(i);
+    PropertyDetails details(descs->GetDetails(i));
     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
     PropertyDetails d =
         PropertyDetails(details.attributes(), CALLBACKS, details.index());
@@ -4778,11 +4662,6 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
   AllocationSpace space =
       (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   int size = FixedDoubleArray::SizeFor(length);
-
-#ifndef V8_HOST_ARCH_64_BIT
-  size += kPointerSize;
-#endif
-
   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
     // Too big for new space.
     space = LO_SPACE;
@@ -4795,12 +4674,7 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
   AllocationSpace retry_space =
       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
 
-  HeapObject* object;
-  { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
-    if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
-  }
-
-  return EnsureDoubleAligned(this, object, size);
+  return AllocateRaw(size, space, retry_space);
 }
@@ -4833,22 +4707,6 @@ MaybeObject* Heap::AllocateGlobalContext() {
 }
 
 
-MaybeObject* Heap::AllocateModuleContext(Context* previous,
-                                         ScopeInfo* scope_info) {
-  Object* result;
-  { MaybeObject* maybe_result =
-        AllocateFixedArrayWithHoles(scope_info->ContextLength(), TENURED);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_no_write_barrier(module_context_map());
-  context->set_previous(previous);
-  context->set_extension(scope_info);
-  context->set_global(previous->global());
-  return context;
-}
-
-
 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
   Object* result;
 
@@ -4991,10 +4849,8 @@ void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
 
 bool Heap::IdleNotification(int hint) {
   const int kMaxHint = 1000;
-  intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
-  // The size factor is in range [5..250]. The numbers here are chosen from
-  // experiments. If you changes them, make sure to test with
-  // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
+  intptr_t size_factor = Min(Max(hint, 30), kMaxHint) / 10;
+  // The size factor is in range [3..100].
   intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
 
   if (contexts_disposed_ > 0) {
@@ -5018,14 +4874,11 @@ bool Heap::IdleNotification(int hint) {
     // Take into account that we might have decided to delay full collection
     // because incremental marking is in progress.
     ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
-    // After context disposal there is likely a lot of garbage remaining, reset
-    // the idle notification counters in order to trigger more incremental GCs
-    // on subsequent idle notifications.
-    StartIdleRound();
     return false;
   }
 
-  if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
+  if (hint >= kMaxHint || !FLAG_incremental_marking ||
+      FLAG_expose_gc || Serializer::enabled()) {
     return IdleGlobalGC();
   }
@@ -5064,6 +4917,10 @@ bool Heap::IdleNotification(int hint) {
   }
 
   if (incremental_marking()->IsStopped()) {
+    if (!WorthStartingGCWhenIdle()) {
+      FinishIdleRound();
+      return true;
+    }
     incremental_marking()->Start();
   }
 
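The IdleNotification() hunk above changes how the idle hint is scaled. Both formulas clamp hint to at most kMaxHint = 1000 and then divide, so the pre-rollback code yields a size factor in [5..250] while the restored code yields [3..100] — smaller incremental GC steps per idle notification. The arithmetic, checked end to end (the function names are just labels for the two variants):

    #include <cassert>

    static int Min(int a, int b) { return a < b ? a : b; }
    static int Max(int a, int b) { return a > b ? a : b; }

    // Pre-rollback (3.10.x) formula: range [5..250].
    static int SizeFactor310(int hint) { return Min(Max(hint, 20), 1000) / 4; }
    // Restored (3.9.24.31) formula: range [3..100].
    static int SizeFactor39(int hint) { return Min(Max(hint, 30), 1000) / 10; }

    int main() {
      assert(SizeFactor310(0) == 5 && SizeFactor310(1000) == 250);
      assert(SizeFactor39(0) == 3 && SizeFactor39(1000) == 100);
      assert(SizeFactor310(100) == 25 && SizeFactor39(100) == 10);
      return 0;
    }
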
@@ -5701,11 +5558,6 @@ bool Heap::ConfigureHeap(int max_semispace_size,
                          intptr_t max_executable_size) {
   if (HasBeenSetUp()) return false;
 
-  if (FLAG_stress_compaction) {
-    // This will cause more frequent GCs when stressing.
-    max_semispace_size_ = Page::kPageSize;
-  }
-
   if (max_semispace_size > 0) {
     if (max_semispace_size < Page::kPageSize) {
       max_semispace_size = Page::kPageSize;
@@ -5830,7 +5682,7 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
 }
 
 
-intptr_t Heap::PromotedExternalMemorySize() {
+int Heap::PromotedExternalMemorySize() {
   if (amount_of_external_allocated_memory_
       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
   return amount_of_external_allocated_memory_
@@ -6003,15 +5855,6 @@ class HeapDebugUtils {
 
 #endif
 
-
-V8_DECLARE_ONCE(initialize_gc_once);
-
-static void InitializeGCOnce() {
-  InitializeScavengingVisitorsTables();
-  NewSpaceScavenger::Initialize();
-  MarkCompactCollector::Initialize();
-}
-
 bool Heap::SetUp(bool create_heap_objects) {
 #ifdef DEBUG
   allocation_timeout_ = FLAG_gc_interval;
 
@@ -6030,7 +5873,15 @@ bool Heap::SetUp(bool create_heap_objects) {
     if (!ConfigureHeapDefault()) return false;
   }
 
-  CallOnce(&initialize_gc_once, &InitializeGCOnce);
+  gc_initializer_mutex.Pointer()->Lock();
+  static bool initialized_gc = false;
+  if (!initialized_gc) {
+    initialized_gc = true;
+    InitializeScavengingVisitorsTables();
+    NewSpaceScavenger::Initialize();
+    MarkCompactCollector::Initialize();
+  }
+  gc_initializer_mutex.Pointer()->Unlock();
 
   MarkMapPointersAsEncoded(false);
 
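Heap::SetUp() above trades CallOnce() for the older idiom: a lazily initialized mutex around a static bool, so concurrent callers still perform the GC table setup exactly once. A sketch of the same pattern using a pthread mutex as a stand-in for V8's LazyMutex (InitializeGC and the counter are illustrative):

    #include <cassert>
    #include <pthread.h>

    static pthread_mutex_t gc_initializer_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int init_count = 0;

    static void InitializeGC() { init_count++; }

    static void SetUp() {
      pthread_mutex_lock(&gc_initializer_mutex);
      static bool initialized_gc = false;
      if (!initialized_gc) {
        initialized_gc = true;
        InitializeGC();  // runs exactly once, even with concurrent SetUp calls
      }
      pthread_mutex_unlock(&gc_initializer_mutex);
    }

    int main() {
      SetUp();
      SetUp();
      assert(init_count == 1);  // second call is a no-op
      return 0;
    }
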
@@ -6142,11 +5993,6 @@ void Heap::SetStackLimits() {
 
 
 void Heap::TearDown() {
-#ifdef DEBUG
-  if (FLAG_verify_heap) {
-    Verify();
-  }
-#endif
   if (FLAG_print_cumulative_gc_stat) {
     PrintF("\n\n");
     PrintF("gc_count=%d ", gc_count_);
 
@@ -243,8 +243,7 @@ namespace internal {
   V(compare_ic_symbol, ".compare_ic")                                   \
   V(infinity_symbol, "Infinity")                                        \
   V(minus_infinity_symbol, "-Infinity")                                 \
-  V(hidden_stack_trace_symbol, "v8::hidden_stack_trace")                \
-  V(query_colon_symbol, "(?:)")
+  V(hidden_stack_trace_symbol, "v8::hidden_stack_trace")
 
 // Forward declarations.
 class GCTracer;
@@ -530,8 +529,6 @@ class Heap {
   MUST_USE_RESULT MaybeObject* AllocateJSObject(
       JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
 
-  MUST_USE_RESULT MaybeObject* AllocateJSModule();
-
   // Allocate a JSArray with no elements
   MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray(
       ElementsKind elements_kind,
@@ -823,10 +820,6 @@ class Heap {
   // Allocate a global (but otherwise uninitialized) context.
   MUST_USE_RESULT MaybeObject* AllocateGlobalContext();
 
-  // Allocate a module context.
-  MUST_USE_RESULT MaybeObject* AllocateModuleContext(Context* previous,
-                                                     ScopeInfo* scope_info);
-
   // Allocate a function context.
   MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
                                                        JSFunction* function);
@@ -1333,8 +1326,7 @@ class Heap {
 
   // Adjusts the amount of registered external memory.
   // Returns the adjusted value.
-  inline intptr_t AdjustAmountOfExternalAllocatedMemory(
-      intptr_t change_in_bytes);
+  inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
 
   // Allocate uninitialized fixed array.
   MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length);
@@ -1419,12 +1411,6 @@ class Heap {
     kRootListLength
   };
 
-  STATIC_CHECK(kUndefinedValueRootIndex == Internals::kUndefinedValueRootIndex);
-  STATIC_CHECK(kNullValueRootIndex == Internals::kNullValueRootIndex);
-  STATIC_CHECK(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
-  STATIC_CHECK(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
-  STATIC_CHECK(kempty_symbolRootIndex == Internals::kEmptySymbolRootIndex);
-
   MUST_USE_RESULT MaybeObject* NumberToString(
       Object* number, bool check_number_string_cache = true);
   MUST_USE_RESULT MaybeObject* Uint32ToString(
@@ -1456,8 +1442,6 @@ class Heap {
   inline bool NextGCIsLikelyToBeFull() {
     if (FLAG_gc_global) return true;
 
-    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
-
     intptr_t total_promoted = PromotedTotalSize();
 
     intptr_t adjusted_promotion_limit =
@@ -1621,8 +1605,6 @@ class Heap {
   // more expedient to get at the isolate directly from within Heap methods.
   Isolate* isolate_;
 
-  Object* roots_[kRootListLength];
-
   intptr_t code_range_size_;
   int reserved_semispace_size_;
   int max_semispace_size_;
@@ -1664,7 +1646,7 @@ class Heap {
   int gc_post_processing_depth_;
 
   // Returns the amount of external memory registered since last global gc.
-  intptr_t PromotedExternalMemorySize();
+  int PromotedExternalMemorySize();
 
   int ms_count_;  // how many mark-sweep collections happened
   unsigned int gc_count_;  // how many gc happened
@@ -1729,15 +1711,17 @@ class Heap {
 
   // The amount of external memory registered through the API kept alive
   // by global handles
-  intptr_t amount_of_external_allocated_memory_;
+  int amount_of_external_allocated_memory_;
 
   // Caches the amount of external memory registered at the last global gc.
-  intptr_t amount_of_external_allocated_memory_at_last_global_gc_;
+  int amount_of_external_allocated_memory_at_last_global_gc_;
 
   // Indicates that an allocation has failed in the old generation since the
   // last GC.
   int old_gen_exhausted_;
 
+  Object* roots_[kRootListLength];
+
   Object* global_contexts_list_;
 
   StoreBufferRebuilder store_buffer_rebuilder_;
@@ -1990,6 +1974,13 @@ class Heap {
     return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold);
   }
 
+  bool WorthStartingGCWhenIdle() {
+    if (contexts_disposed_ > 0) {
+      return true;
+    }
+    return incremental_marking()->WorthActivating();
+  }
+
   // Estimates how many milliseconds a Mark-Sweep would take to complete.
   // In idle notification handler we assume that this function will return:
   // - a number less than 10 for small heaps, which are less than 8Mb.
 
@@ -416,7 +416,6 @@ void HValue::Kill() {
SetFlag(kIsDead);
for (int i = 0; i < OperandCount(); ++i) {
HValue* operand = OperandAt(i);
if (operand == NULL) continue;
HUseListNode* first = operand->use_list_;
if (first != NULL && first->value() == this && first->index() == i) {
operand->use_list_ = first->tail();

@@ -463,8 +462,7 @@ void HValue::PrintChangesTo(StringStream* stream) {
add_comma = true; \
stream->Add(#type); \
}
GVN_TRACKED_FLAG_LIST(PRINT_DO);
GVN_UNTRACKED_FLAG_LIST(PRINT_DO);
GVN_FLAG_LIST(PRINT_DO);
#undef PRINT_DO
}
stream->Add("]");

@@ -601,9 +599,6 @@ void HInstruction::InsertAfter(HInstruction* previous) {
SetBlock(block);
previous->next_ = this;
if (next != NULL) next->previous_ = this;
if (block->last() == previous) {
block->set_last(this);
}
}


@@ -613,7 +608,6 @@ void HInstruction::Verify() {
HBasicBlock* cur_block = block();
for (int i = 0; i < OperandCount(); ++i) {
HValue* other_operand = OperandAt(i);
if (other_operand == NULL) continue;
HBasicBlock* other_block = other_operand->block();
if (cur_block == other_block) {
if (!other_operand->IsPhi()) {

@@ -872,17 +866,6 @@ HValue* HBitwise::Canonicalize() {
}


HValue* HBitNot::Canonicalize() {
// Optimize ~~x, a common pattern used for ToInt32(x).
if (value()->IsBitNot()) {
HValue* result = HBitNot::cast(value())->value();
ASSERT(result->representation().IsInteger32());
return result;
}
return this;
}


HValue* HAdd::Canonicalize() {
if (!representation().IsInteger32()) return this;
if (CheckUsesForFlag(kTruncatingToInt32)) ClearFlag(kCanOverflow);

@@ -933,62 +916,6 @@ void HJSArrayLength::PrintDataTo(StringStream* stream) {
}


HValue* HUnaryMathOperation::Canonicalize() {
if (op() == kMathFloor) {
// If the input is integer32 then we replace the floor instruction
// with its input. This happens before the representation changes are
// introduced.
if (value()->representation().IsInteger32()) return value();

#ifdef V8_TARGET_ARCH_ARM
if (value()->IsDiv() && (value()->UseCount() == 1)) {
// TODO(2038): Implement this optimization for non ARM architectures.
HDiv* hdiv = HDiv::cast(value());
HValue* left = hdiv->left();
HValue* right = hdiv->right();
// Try to simplify left and right values of the division.
HValue* new_left =
LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(left);
HValue* new_right =
LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(right);

// Return if left or right are not optimizable.
if ((new_left == NULL) || (new_right == NULL)) return this;

// Insert the new values in the graph.
if (new_left->IsInstruction() &&
!HInstruction::cast(new_left)->IsLinked()) {
HInstruction::cast(new_left)->InsertBefore(this);
}
if (new_right->IsInstruction() &&
!HInstruction::cast(new_right)->IsLinked()) {
HInstruction::cast(new_right)->InsertBefore(this);
}
HMathFloorOfDiv* instr = new HMathFloorOfDiv(context(),
new_left,
new_right);
// Replace this HMathFloor instruction by the new HMathFloorOfDiv.
instr->InsertBefore(this);
ReplaceAllUsesWith(instr);
Kill();
// We know the division had no other uses than this HMathFloor. Delete it.
// Also delete the arguments of the division if they are not used any
// more.
hdiv->DeleteAndReplaceWith(NULL);
ASSERT(left->IsChange() || left->IsConstant());
ASSERT(right->IsChange() || right->IsConstant());
if (left->HasNoUses()) left->DeleteAndReplaceWith(NULL);
if (right->HasNoUses()) right->DeleteAndReplaceWith(NULL);

// Return NULL to remove this instruction from the graph.
return NULL;
}
#endif  // V8_TARGET_ARCH_ARM
}
return this;
}


HValue* HCheckInstanceType::Canonicalize() {
if (check_ == IS_STRING &&
!value()->type().IsUninitialized() &&

@@ -1038,13 +965,16 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
}


void HCheckMaps::PrintDataTo(StringStream* stream) {
void HCheckMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" [%p", *map_set()->first());
for (int i = 1; i < map_set()->length(); ++i) {
stream->Add(",%p", *map_set()->at(i));
stream->Add(" %p", *map());
if (mode() == REQUIRE_EXACT_MAP) {
stream->Add(" [EXACT]");
} else if (!has_element_transitions_) {
stream->Add(" [EXACT*]");
} else {
stream->Add(" [MATCH ELEMENTS]");
}
stream->Add("]");
}


@@ -1806,9 +1736,6 @@ void HStoreNamedField::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
value()->PrintNameTo(stream);
stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
if (NeedsWriteBarrier()) {
stream->Add(" (write-barrier)");
}
if (!transition().is_null()) {
stream->Add(" (transition map %p)", *transition());
}

@@ -1952,7 +1879,7 @@ HType HValue::CalculateInferredType() {
}


HType HCheckMaps::CalculateInferredType() {
HType HCheckMap::CalculateInferredType() {
return value()->type();
}


@@ -2162,17 +2089,6 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
}


bool HStoreKeyedFastDoubleElement::NeedsCanonicalization() {
// If value was loaded from unboxed double backing store or
// converted from an integer then we don't have to canonicalize it.
if (value()->IsLoadKeyedFastDoubleElement() ||
(value()->IsChange() && HChange::cast(value())->from().IsInteger32())) {
return false;
}
return true;
}


#define H_CONSTANT_INT32(val) \
new(zone) HConstant(FACTORY->NewNumberFromInt(val, TENURED), \
Representation::Integer32())

@@ -2341,13 +2257,6 @@ void HIn::PrintDataTo(StringStream* stream) {
}


void HBitwise::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(op_));
stream->Add(" ");
HBitwiseBinaryOperation::PrintDataTo(stream);
}


Representation HPhi::InferredRepresentation() {
bool double_occurred = false;
bool int32_occurred = false;
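The HBitNot::Canonicalize() body removed above folds ~~x, the common JavaScript idiom for ToInt32(x). A minimal C++ sketch of the same folding over a toy node type (hypothetical names, not the V8 classes):

#include <cstdio>

struct Node {
  enum Kind { kValue, kBitNot } kind;
  Node* operand;  // set only for kBitNot nodes
};

// Returns the node that should replace n: two stacked bitwise-nots cancel
// out, so ~~x collapses to x (the value is already truncated to int32).
Node* Canonicalize(Node* n) {
  if (n->kind == Node::kBitNot && n->operand->kind == Node::kBitNot) {
    return n->operand->operand;
  }
  return n;
}

int main() {
  Node x{Node::kValue, nullptr};
  Node inner{Node::kBitNot, &x};
  Node outer{Node::kBitNot, &inner};
  std::printf("%s\n", Canonicalize(&outer) == &x ? "~~x -> x" : "unchanged");
  return 0;
}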
@@ -85,7 +85,7 @@ class LChunkBuilder;
V(Change) \
V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
V(CheckMap) \
V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(CheckSmi) \

@@ -140,7 +140,6 @@ class LChunkBuilder;
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MathFloorOfDiv) \
V(Mod) \
V(Mul) \
V(ObjectLiteral) \

@@ -189,10 +188,7 @@ class LChunkBuilder;
V(DateField) \
V(WrapReceiver)

#define GVN_TRACKED_FLAG_LIST(V) \
V(NewSpacePromotion)

#define GVN_UNTRACKED_FLAG_LIST(V) \
#define GVN_FLAG_LIST(V) \
V(Calls) \
V(InobjectFields) \
V(BackingStoreFields) \

@@ -510,18 +506,14 @@ class HUseIterator BASE_EMBEDDED {

// There must be one corresponding kDepends flag for every kChanges flag and
// the order of the kChanges flags must be exactly the same as of the kDepends
// flags. All tracked flags should appear before untracked ones.
// flags.
enum GVNFlag {
// Declare global value numbering flags.
#define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
GVN_FLAG_LIST(DECLARE_FLAG)
#undef DECLARE_FLAG
kAfterLastFlag,
kLastFlag = kAfterLastFlag - 1,
#define COUNT_FLAG(type) + 1
kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG)
#undef COUNT_FLAG
kLastFlag = kAfterLastFlag - 1
};

typedef EnumSet<GVNFlag> GVNFlagSet;

@@ -538,10 +530,6 @@ class HValue: public ZoneObject {
// implement DataEquals(), which will be used to determine if other
// occurrences of the instruction are indeed the same.
kUseGVN,
// Track instructions that are dominating side effects. If an instruction
// sets this flag, it must implement SetSideEffectDominator() and should
// indicate which side effects to track by setting GVN flags.
kTrackSideEffectDominators,
kCanOverflow,
kBailoutOnMinusZero,
kCanBeDivByZero,

@@ -556,12 +544,6 @@ class HValue: public ZoneObject {

static const int kChangesToDependsFlagsLeftShift = 1;

static GVNFlag ChangesFlagFromInt(int x) {
return static_cast<GVNFlag>(x * 2);
}
static GVNFlag DependsOnFlagFromInt(int x) {
return static_cast<GVNFlag>(x * 2 + 1);
}
static GVNFlagSet ConvertChangesToDependsFlags(GVNFlagSet flags) {
return GVNFlagSet(flags.ToIntegral() << kChangesToDependsFlagsLeftShift);
}

@@ -744,13 +726,6 @@ class HValue: public ZoneObject {

virtual HType CalculateInferredType();

// This function must be overridden for instructions which have the
// kTrackSideEffectDominators flag set, to track instructions that are
// dominating side effects.
virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
UNREACHABLE();
}

#ifdef DEBUG
virtual void Verify() = 0;
#endif

@@ -781,8 +756,7 @@ class HValue: public ZoneObject {
GVNFlagSet result;
// Create changes mask.
#define ADD_FLAG(type) result.Add(kDependsOn##type);
GVN_TRACKED_FLAG_LIST(ADD_FLAG)
GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
GVN_FLAG_LIST(ADD_FLAG)
#undef ADD_FLAG
return result;
}

@@ -791,8 +765,7 @@ class HValue: public ZoneObject {
GVNFlagSet result;
// Create changes mask.
#define ADD_FLAG(type) result.Add(kChanges##type);
GVN_TRACKED_FLAG_LIST(ADD_FLAG)
GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
GVN_FLAG_LIST(ADD_FLAG)
#undef ADD_FLAG
return result;
}

@@ -808,7 +781,6 @@ class HValue: public ZoneObject {
// an executing program (i.e. are not safe to repeat, move or remove);
static GVNFlagSet AllObservableSideEffectsFlagSet() {
GVNFlagSet result = AllChangesFlagSet();
result.Remove(kChangesNewSpacePromotion);
result.Remove(kChangesElementsKind);
result.Remove(kChangesElementsPointer);
result.Remove(kChangesMaps);

@@ -1224,7 +1196,6 @@ class HChange: public HUnaryOperation {
SetFlag(kUseGVN);
if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined);
if (is_truncating) SetFlag(kTruncatingToInt32);
if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
}

virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);

@@ -1350,7 +1321,6 @@ class HStackCheck: public HTemplateInstruction<1> {

HStackCheck(HValue* context, Type type) : type_(type) {
SetOperandAt(0, context);
SetGVNFlag(kChangesNewSpacePromotion);
}

HValue* context() { return OperandAt(0); }

@@ -1384,15 +1354,13 @@ class HEnterInlined: public HTemplateInstruction<0> {
FunctionLiteral* function,
CallKind call_kind,
bool is_construct,
Variable* arguments_var,
ZoneList<HValue*>* arguments_values)
Variable* arguments)
: closure_(closure),
arguments_count_(arguments_count),
function_(function),
call_kind_(call_kind),
is_construct_(is_construct),
arguments_var_(arguments_var),
arguments_values_(arguments_values) {
arguments_(arguments) {
}

virtual void PrintDataTo(StringStream* stream);

@@ -1407,8 +1375,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
return Representation::None();
}

Variable* arguments_var() { return arguments_var_; }
ZoneList<HValue*>* arguments_values() { return arguments_values_; }
Variable* arguments() { return arguments_; }

DECLARE_CONCRETE_INSTRUCTION(EnterInlined)

@@ -1418,28 +1385,19 @@ class HEnterInlined: public HTemplateInstruction<0> {
FunctionLiteral* function_;
CallKind call_kind_;
bool is_construct_;
Variable* arguments_var_;
ZoneList<HValue*>* arguments_values_;
Variable* arguments_;
};


class HLeaveInlined: public HTemplateInstruction<0> {
public:
explicit HLeaveInlined(bool arguments_pushed)
: arguments_pushed_(arguments_pushed) { }
HLeaveInlined() {}

virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}

bool arguments_pushed() {
return arguments_pushed_;
}

DECLARE_CONCRETE_INSTRUCTION(LeaveInlined)

private:
bool arguments_pushed_;
};


@@ -1647,26 +1605,14 @@ class HInvokeFunction: public HBinaryCall {
: HBinaryCall(context, function, argument_count) {
}

HInvokeFunction(HValue* context,
HValue* function,
Handle<JSFunction> known_function,
int argument_count)
: HBinaryCall(context, function, argument_count),
known_function_(known_function) {
}

virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}

HValue* context() { return first(); }
HValue* function() { return second(); }
Handle<JSFunction> known_function() { return known_function_; }

DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)

private:
Handle<JSFunction> known_function_;
};


@@ -1919,8 +1865,6 @@ class HBitNot: public HUnaryOperation {
}
virtual HType CalculateInferredType();

virtual HValue* Canonicalize();

DECLARE_CONCRETE_INSTRUCTION(BitNot)

protected:

@@ -1943,7 +1887,6 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
case kMathAbs:
set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
SetGVNFlag(kChangesNewSpacePromotion);
break;
case kMathSqrt:
case kMathPowHalf:

@@ -1952,7 +1895,6 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
case kMathCos:
case kMathTan:
set_representation(Representation::Double());
SetGVNFlag(kChangesNewSpacePromotion);
break;
default:
UNREACHABLE();

@@ -1993,7 +1935,15 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
}
}

virtual HValue* Canonicalize();
virtual HValue* Canonicalize() {
// If the input is integer32 then we replace the floor instruction
// with its inputs. This happens before the representation changes are
// introduced.
if (op() == kMathFloor) {
if (value()->representation().IsInteger32()) return value();
}
return this;
}

BuiltinFunctionId op() const { return op_; }
const char* OpName() const;

@@ -2053,9 +2003,14 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
};


class HCheckMaps: public HTemplateInstruction<2> {
class HCheckMap: public HTemplateInstruction<2> {
public:
HCheckMaps(HValue* value, Handle<Map> map, HValue* typecheck = NULL) {
HCheckMap(HValue* value,
Handle<Map> map,
HValue* typecheck = NULL,
CompareMapMode mode = REQUIRE_EXACT_MAP)
: map_(map),
mode_(mode) {
SetOperandAt(0, value);
// If callers don't depend on a typecheck, they can pass in NULL. In that
// case we use a copy of the |value| argument as a dummy value.

@@ -2063,49 +2018,14 @@ class HCheckMaps: public HTemplateInstruction<2> {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
SetGVNFlag(kDependsOnElementsKind);
map_set()->Add(map);
}
HCheckMaps(HValue* value, SmallMapList* maps) {
SetOperandAt(0, value);
SetOperandAt(1, value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
SetGVNFlag(kDependsOnElementsKind);
for (int i = 0; i < maps->length(); i++) {
map_set()->Add(maps->at(i));
// If the map to check doesn't have the untransitioned elements, it must not
// be hoisted above TransitionElements instructions.
if (mode == REQUIRE_EXACT_MAP || !map->has_fast_smi_only_elements()) {
SetGVNFlag(kDependsOnElementsKind);
}
map_set()->Sort();
}

static HCheckMaps* NewWithTransitions(HValue* object, Handle<Map> map) {
HCheckMaps* check_map = new HCheckMaps(object, map);
SmallMapList* map_set = check_map->map_set();

// If the map to check has the untransitioned elements, it can be hoisted
// above TransitionElements instructions.
if (map->has_fast_smi_only_elements()) {
check_map->ClearGVNFlag(kDependsOnElementsKind);
}

Map* transitioned_fast_element_map =
map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL);
ASSERT(transitioned_fast_element_map == NULL ||
map->elements_kind() != FAST_ELEMENTS);
if (transitioned_fast_element_map != NULL) {
map_set->Add(Handle<Map>(transitioned_fast_element_map));
}
Map* transitioned_double_map =
map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL);
ASSERT(transitioned_double_map == NULL ||
map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
if (transitioned_double_map != NULL) {
map_set->Add(Handle<Map>(transitioned_double_map));
}
map_set->Sort();

return check_map;
has_element_transitions_ =
map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL) != NULL ||
map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL) != NULL;
}

virtual Representation RequiredInputRepresentation(int index) {

@@ -2115,23 +2035,25 @@ class HCheckMaps: public HTemplateInstruction<2> {
virtual HType CalculateInferredType();

HValue* value() { return OperandAt(0); }
SmallMapList* map_set() { return &map_set_; }
Handle<Map> map() const { return map_; }
CompareMapMode mode() const { return mode_; }

DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
DECLARE_CONCRETE_INSTRUCTION(CheckMap)

protected:
virtual bool DataEquals(HValue* other) {
HCheckMaps* b = HCheckMaps::cast(other);
// Relies on the fact that map_set has been sorted before.
if (map_set()->length() != b->map_set()->length()) return false;
for (int i = 0; i < map_set()->length(); i++) {
if (!map_set()->at(i).is_identical_to(b->map_set()->at(i))) return false;
}
return true;
HCheckMap* b = HCheckMap::cast(other);
// Two CheckMaps instructions are DataEqual if their maps are identical and
// they have the same mode. The mode comparison can be ignored if the map
// has no elements transitions.
return map_.is_identical_to(b->map()) &&
(b->mode() == mode() || !has_element_transitions_);
}

private:
SmallMapList map_set_;
bool has_element_transitions_;
Handle<Map> map_;
CompareMapMode mode_;
};


@@ -2627,7 +2549,7 @@ class HApplyArguments: public HTemplateInstruction<4> {

class HArgumentsElements: public HTemplateInstruction<0> {
public:
explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
HArgumentsElements() {
// The value produced by this instruction is a pointer into the stack
// that looks as if it was a smi because of alignment.
set_representation(Representation::Tagged());

@@ -2640,12 +2562,8 @@ class HArgumentsElements: public HTemplateInstruction<0> {
return Representation::None();
}

bool from_inlined() const { return from_inlined_; }

protected:
virtual bool DataEquals(HValue* other) { return true; }

bool from_inlined_;
};


@@ -2751,25 +2669,6 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
};


class HMathFloorOfDiv: public HBinaryOperation {
public:
HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
: HBinaryOperation(context, left, right) {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
}

virtual Representation RequiredInputRepresentation(int index) {
return Representation::Integer32();
}

DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)

protected:
virtual bool DataEquals(HValue* other) { return true; }
};


class HArithmeticBinaryOperation: public HBinaryOperation {
public:
HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)

@@ -3184,7 +3083,6 @@ class HPower: public HTemplateInstruction<2> {
SetOperandAt(1, right);
set_representation(Representation::Double());
SetFlag(kUseGVN);
SetGVNFlag(kChangesNewSpacePromotion);
}

HValue* left() { return OperandAt(0); }

@@ -3384,8 +3282,6 @@ class HBitwise: public HBitwiseBinaryOperation {
HValue* left,
HValue* right);

virtual void PrintDataTo(StringStream* stream);

DECLARE_CONCRETE_INSTRUCTION(Bitwise)

protected:

@@ -3633,12 +3529,6 @@ inline bool StoringValueNeedsWriteBarrier(HValue* value) {
}


inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
HValue* new_space_dominator) {
return !object->IsAllocateObject() || (object != new_space_dominator);
}


class HStoreGlobalCell: public HUnaryOperation {
public:
HStoreGlobalCell(HValue* value,

@@ -4105,12 +3995,9 @@ class HStoreNamedField: public HTemplateInstruction<2> {
int offset)
: name_(name),
is_in_object_(in_object),
offset_(offset),
new_space_dominator_(NULL) {
offset_(offset) {
SetOperandAt(0, obj);
SetOperandAt(1, val);
SetFlag(kTrackSideEffectDominators);
SetGVNFlag(kDependsOnNewSpacePromotion);
if (is_in_object_) {
SetGVNFlag(kChangesInobjectFields);
} else {

@@ -4123,10 +4010,6 @@ class HStoreNamedField: public HTemplateInstruction<2> {
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
ASSERT(side_effect == kChangesNewSpacePromotion);
new_space_dominator_ = dominator;
}
virtual void PrintDataTo(StringStream* stream);

HValue* object() { return OperandAt(0); }

@@ -4137,11 +4020,9 @@ class HStoreNamedField: public HTemplateInstruction<2> {
int offset() const { return offset_; }
Handle<Map> transition() const { return transition_; }
void set_transition(Handle<Map> map) { transition_ = map; }
HValue* new_space_dominator() const { return new_space_dominator_; }

bool NeedsWriteBarrier() {
return StoringValueNeedsWriteBarrier(value()) &&
ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
return StoringValueNeedsWriteBarrier(value());
}

private:

@@ -4149,7 +4030,6 @@ class HStoreNamedField: public HTemplateInstruction<2> {
bool is_in_object_;
int offset_;
Handle<Map> transition_;
HValue* new_space_dominator_;
};


@@ -4259,8 +4139,6 @@ class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
return StoringValueNeedsWriteBarrier(value());
}

bool NeedsCanonicalization();

virtual void PrintDataTo(StringStream* stream);

DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement)

@@ -4354,7 +4232,6 @@ class HTransitionElementsKind: public HTemplateInstruction<1> {
SetFlag(kUseGVN);
SetGVNFlag(kChangesElementsKind);
SetGVNFlag(kChangesElementsPointer);
SetGVNFlag(kChangesNewSpacePromotion);
set_representation(Representation::Tagged());
}

@@ -4416,7 +4293,6 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
SetGVNFlag(kChangesNewSpacePromotion);
}

virtual Representation RequiredInputRepresentation(int index) {

@@ -4448,7 +4324,6 @@ class HStringCharFromCode: public HTemplateInstruction<2> {
SetOperandAt(1, char_code);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kChangesNewSpacePromotion);
}

virtual Representation RequiredInputRepresentation(int index) {

@@ -4501,12 +4376,8 @@ class HAllocateObject: public HTemplateInstruction<1> {
: constructor_(constructor) {
SetOperandAt(0, context);
set_representation(Representation::Tagged());
SetGVNFlag(kChangesNewSpacePromotion);
}

// Maximum instance size for which allocations will be inlined.
static const int kMaxSize = 64 * kPointerSize;

HValue* context() { return OperandAt(0); }
Handle<JSFunction> constructor() { return constructor_; }

@@ -4550,7 +4421,6 @@ class HFastLiteral: public HMaterializedLiteral<1> {
boilerplate_(boilerplate),
total_size_(total_size) {
SetOperandAt(0, context);
SetGVNFlag(kChangesNewSpacePromotion);
}

// Maximum depth and total number of elements and properties for literal

@@ -4586,7 +4456,6 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
length_(length),
boilerplate_object_(boilerplate_object) {
SetOperandAt(0, context);
SetGVNFlag(kChangesNewSpacePromotion);
}

HValue* context() { return OperandAt(0); }

@@ -4627,7 +4496,6 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
fast_elements_(fast_elements),
has_function_(has_function) {
SetOperandAt(0, context);
SetGVNFlag(kChangesNewSpacePromotion);
}

HValue* context() { return OperandAt(0); }

@@ -4689,7 +4557,6 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
: shared_info_(shared), pretenure_(pretenure) {
SetOperandAt(0, context);
set_representation(Representation::Tagged());
SetGVNFlag(kChangesNewSpacePromotion);
}

HValue* context() { return OperandAt(0); }
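The kNumberOfTrackedSideEffects enumerator removed above uses an X-macro counting trick: each entry of the tracked-flag list expands to "+ 1", so the count is a compile-time constant. A standalone sketch of the mechanism with a stand-in list:

#include <cstdio>

#define TRACKED_FLAG_LIST(V) \
  V(NewSpacePromotion)

#define COUNT_FLAG(type) + 1
static const int kNumberOfTrackedSideEffects =
    0 TRACKED_FLAG_LIST(COUNT_FLAG);  // expands to 0 + 1
#undef COUNT_FLAG

int main() {
  std::printf("%d tracked side effect(s)\n", kNumberOfTrackedSideEffects);
  return 0;
}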
The diff for this file is not shown because of its large size.
@@ -42,7 +42,6 @@ namespace internal {

// Forward declarations.
class BitVector;
class FunctionState;
class HEnvironment;
class HGraph;
class HLoopInformation;

@@ -122,7 +121,7 @@ class HBasicBlock: public ZoneObject {

void Finish(HControlInstruction* last);
void FinishExit(HControlInstruction* instruction);
void Goto(HBasicBlock* block, FunctionState* state = NULL);
void Goto(HBasicBlock* block, bool drop_extra = false);

int PredecessorIndexOf(HBasicBlock* predecessor) const;
void AddSimulate(int ast_id) { AddInstruction(CreateSimulate(ast_id)); }

@@ -137,7 +136,7 @@ class HBasicBlock: public ZoneObject {
// instruction and updating the bailout environment.
void AddLeaveInlined(HValue* return_value,
HBasicBlock* target,
FunctionState* state = NULL);
bool drop_extra = false);

// If a target block is tagged as an inline function return, all
// predecessors should contain the inlined exit sequence:

@@ -241,7 +240,7 @@ class HLoopInformation: public ZoneObject {
HStackCheck* stack_check_;
};

class BoundsCheckTable;

class HGraph: public ZoneObject {
public:
explicit HGraph(CompilationInfo* info);

@@ -266,7 +265,6 @@ class HGraph: public ZoneObject {
void OrderBlocks();
void AssignDominators();
void ReplaceCheckedValues();
void EliminateRedundantBoundsChecks();
void PropagateDeoptimizingMark();

// Returns false if there are phi-uses of the arguments-object

@@ -359,7 +357,6 @@ class HGraph: public ZoneObject {
void InferTypes(ZoneList<HValue*>* worklist);
void InitializeInferredTypes(int from_inclusive, int to_inclusive);
void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
void EliminateRedundantBoundsChecks(HBasicBlock* bb, BoundsCheckTable* table);

Isolate* isolate_;
int next_block_id_;

@@ -718,16 +715,6 @@ class FunctionState {

FunctionState* outer() { return outer_; }

HEnterInlined* entry() { return entry_; }
void set_entry(HEnterInlined* entry) { entry_ = entry; }

HArgumentsElements* arguments_elements() { return arguments_elements_; }
void set_arguments_elements(HArgumentsElements* arguments_elements) {
arguments_elements_ = arguments_elements;
}

bool arguments_pushed() { return arguments_elements() != NULL; }

private:
HGraphBuilder* owner_;

@@ -754,12 +741,6 @@ class FunctionState {
// return blocks. NULL in all other cases.
TestContext* test_context_;

// When inlining HEnterInlined instruction corresponding to the function
// entry.
HEnterInlined* entry_;

HArgumentsElements* arguments_elements_;

FunctionState* outer_;
};


@@ -870,11 +851,15 @@ class HGraphBuilder: public AstVisitor {
static const int kMaxLoadPolymorphism = 4;
static const int kMaxStorePolymorphism = 4;

static const int kMaxInlinedNodes = 196;
static const int kMaxInlinedSize = 196;
static const int kMaxSourceSize = 600;

// Even in the 'unlimited' case we have to have some limit in order not to
// overflow the stack.
static const int kUnlimitedMaxInlinedSourceSize = 100000;
static const int kUnlimitedMaxInlinedNodes = 10000;
static const int kUnlimitedMaxInlinedNodesCumulative = 10000;
static const int kUnlimitedMaxInlinedNodes = 1000;
static const int kUnlimitedMaxInlinedSize = 1000;
static const int kUnlimitedMaxSourceSize = 600;

// Simple accessors.
void set_function_state(FunctionState* state) { function_state_ = state; }

@@ -911,6 +896,11 @@ class HGraphBuilder: public AstVisitor {
INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
#undef INLINE_FUNCTION_GENERATOR_DECLARATION

void HandleDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* function,
int* global_count);

void VisitDelete(UnaryOperation* expr);
void VisitVoid(UnaryOperation* expr);
void VisitTypeof(UnaryOperation* expr);

@@ -1004,7 +994,6 @@ class HGraphBuilder: public AstVisitor {
LookupResult* lookup,
bool is_store);

void EnsureArgumentsArePushedForAccess();
bool TryArgumentsAccess(Property* expr);

// Try to optimize fun.apply(receiver, arguments) pattern.

@@ -1040,10 +1029,6 @@ class HGraphBuilder: public AstVisitor {

void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
void HandlePolymorphicLoadNamedField(Property* expr,
HValue* object,
SmallMapList* types,
Handle<String> name);
void HandlePolymorphicStoreNamedField(Assignment* expr,
HValue* object,
HValue* value,

@@ -1160,7 +1145,6 @@ class HGraphBuilder: public AstVisitor {
HBasicBlock* current_block_;

int inlined_count_;
ZoneList<Handle<Object> > globals_;

Zone* zone_;

@@ -1235,30 +1219,6 @@ class HValueMap: public ZoneObject {
};


class HSideEffectMap BASE_EMBEDDED {
public:
HSideEffectMap();
explicit HSideEffectMap(HSideEffectMap* other);

void Kill(GVNFlagSet flags);

void Store(GVNFlagSet flags, HInstruction* instr);

bool IsEmpty() const { return count_ == 0; }

inline HInstruction* operator[](int i) const {
ASSERT(0 <= i);
ASSERT(i < kNumberOfTrackedSideEffects);
return data_[i];
}
inline HInstruction* at(int i) const { return operator[](i); }

private:
int count_;
HInstruction* data_[kNumberOfTrackedSideEffects];
};


class HStatistics: public Malloced {
public:
void Initialize(CompilationInfo* info);
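The kMaxInlined*/kUnlimitedMax* constants swapped above are inlining budgets. A sketch of how such limits typically gate an inlining decision (hypothetical helper with assumed semantics; the real builder tracks several budgets, this shows only the node-count check):

#include <cstdio>

const int kMaxInlinedSize = 196;            // normal per-candidate budget
const int kUnlimitedMaxInlinedSize = 1000;  // relaxed cap for stress modes

// Even with limits disabled a cap remains, so deeply nested inlining
// cannot overflow the compiler's own stack.
bool FitsInlineBudget(int ast_node_count, bool limits_disabled) {
  int budget = limits_disabled ? kUnlimitedMaxInlinedSize : kMaxInlinedSize;
  return ast_node_count <= budget;
}

int main() {
  std::printf("%d\n", FitsInlineBudget(300, false));  // 0: over normal budget
  std::printf("%d\n", FitsInlineBudget(300, true));   // 1: fits relaxed cap
  return 0;
}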
@@ -831,7 +831,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {

// Copy all arguments from the array to the stack.
Label entry, loop;
__ mov(ecx, Operand(ebp, kIndexOffset));
__ mov(eax, Operand(ebp, kIndexOffset));
__ jmp(&entry);
__ bind(&loop);
__ mov(edx, Operand(ebp, kArgumentsOffset));  // load arguments

@@ -848,17 +848,16 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(eax);

// Update the index on the stack and in register eax.
__ mov(ecx, Operand(ebp, kIndexOffset));
__ add(ecx, Immediate(1 << kSmiTagSize));
__ mov(Operand(ebp, kIndexOffset), ecx);
__ mov(eax, Operand(ebp, kIndexOffset));
__ add(eax, Immediate(1 << kSmiTagSize));
__ mov(Operand(ebp, kIndexOffset), eax);

__ bind(&entry);
__ cmp(ecx, Operand(ebp, kLimitOffset));
__ cmp(eax, Operand(ebp, kLimitOffset));
__ j(not_equal, &loop);

// Invoke the function.
Label call_proxy;
__ mov(eax, ecx);
ParameterCount actual(eax);
__ SmiUntag(eax);
__ mov(edi, Operand(ebp, kFunctionOffset));
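The loop above steps a smi-tagged index with add(eax, Immediate(1 << kSmiTagSize)). A plain-C++ sketch of ia32 smi tagging (one tag bit, tag value 0), showing why that immediate bumps the untagged index by exactly one:

#include <cstdio>

const int kSmiTagSize = 1;  // ia32: 31-bit payload, low bit is the tag (0)

int SmiTag(int value) { return value << kSmiTagSize; }
int SmiUntag(int smi) { return smi >> kSmiTagSize; }

int main() {
  int index = SmiTag(41);
  index += 1 << kSmiTagSize;             // the __ add(...) step in the loop
  std::printf("%d\n", SmiUntag(index));  // prints 42
  return 0;
}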
@@ -1681,11 +1681,6 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
}


// Input:
//     edx: left operand (tagged)
//     eax: right operand (tagged)
// Output:
//     eax: result (tagged)
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label call_runtime;
ASSERT(operands_type_ == BinaryOpIC::INT32);

@@ -1695,37 +1690,31 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD: {
case Token::DIV: {
Label not_floats;
Label not_int32;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
if (op_ == Token::MOD) {
GenerateRegisterArgsPush(masm);
__ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
} else {
switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
// Check result type if it is currently Int32.
if (result_type_ <= BinaryOpIC::INT32) {
__ cvttsd2si(ecx, Operand(xmm0));
__ cvtsi2sd(xmm2, ecx);
__ ucomisd(xmm0, xmm2);
__ j(not_zero, &not_int32);
__ j(carry, &not_int32);
}
GenerateHeapResultAllocation(masm, &call_runtime);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
// Check result type if it is currently Int32.
if (result_type_ <= BinaryOpIC::INT32) {
__ cvttsd2si(ecx, Operand(xmm0));
__ cvtsi2sd(xmm2, ecx);
__ ucomisd(xmm0, xmm2);
__ j(not_zero, &not_int32);
__ j(carry, &not_int32);
}
GenerateHeapResultAllocation(masm, &call_runtime);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
} else {  // SSE2 not available, use FPU.
FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
FloatingPointHelper::LoadFloatOperands(

@@ -1733,28 +1722,20 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
ecx,
FloatingPointHelper::ARGS_IN_REGISTERS);
FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
if (op_ == Token::MOD) {
// The operands are now on the FPU stack, but we don't need them.
__ fstp(0);
__ fstp(0);
GenerateRegisterArgsPush(masm);
__ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
} else {
switch (op_) {
case Token::ADD: __ faddp(1); break;
case Token::SUB: __ fsubp(1); break;
case Token::MUL: __ fmulp(1); break;
case Token::DIV: __ fdivp(1); break;
default: UNREACHABLE();
}
Label after_alloc_failure;
GenerateHeapResultAllocation(masm, &after_alloc_failure);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
__ fstp(0);  // Pop FPU stack before calling runtime.
__ jmp(&call_runtime);
switch (op_) {
case Token::ADD: __ faddp(1); break;
case Token::SUB: __ fsubp(1); break;
case Token::MUL: __ fmulp(1); break;
case Token::DIV: __ fdivp(1); break;
default: UNREACHABLE();
}
Label after_alloc_failure;
GenerateHeapResultAllocation(masm, &after_alloc_failure);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
__ ffree();
__ jmp(&call_runtime);
}

__ bind(&not_floats);

@@ -1763,6 +1744,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
break;
}

case Token::MOD: {
// For MOD we go directly to runtime in the non-smi case.
break;
}
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:

@@ -1773,6 +1758,11 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label not_floats;
Label not_int32;
Label non_smi_result;
/* {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
}*/
FloatingPointHelper::LoadUnknownsAsIntegers(masm,
use_sse3_,
&not_floats);

@@ -1843,8 +1833,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
default: UNREACHABLE(); break;
}

// If an allocation fails, or SHR hits a hard case, use the runtime system to
// get the correct result.
// If an allocation fails, or SHR or MOD hit a hard case,
// use the runtime system to get the correct result.
__ bind(&call_runtime);

switch (op_) {

@@ -1865,6 +1855,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
GenerateRegisterArgsPush(masm);
__ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
break;
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);

@@ -1965,7 +1957,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
__ fstp(0);  // Pop FPU stack before calling runtime.
__ ffree();
__ jmp(&call_runtime);
}

@@ -2169,8 +2161,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
__ fstp(0);  // Pop FPU stack before calling runtime.
__ jmp(&call_runtime);
__ ffree();
__ jmp(&call_runtime);
}
__ bind(&not_floats);
break;

@@ -5014,9 +5006,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ j(not_equal, &not_outermost_js, Label::kNear);
__ mov(Operand::StaticVariable(js_entry_sp), ebp);
__ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
__ jmp(&invoke, Label::kNear);
Label cont;
__ jmp(&cont, Label::kNear);
__ bind(&not_outermost_js);
__ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
__ bind(&cont);

// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.

@@ -6168,11 +6162,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ sub(ecx, edx);
__ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
Label not_original_string;
// Shorter than original string's length: an actual substring.
__ j(below, &not_original_string, Label::kNear);
// Longer than original string's length or negative: unsafe arguments.
__ j(above, &runtime);
// Return original string.
__ j(not_equal, &not_original_string, Label::kNear);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
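Several hunks above swap __ fstp(0), which stores and pops the x87 stack top, for __ ffree() on the allocation-failure path. A toy C++ model of the 8-register x87 stack, illustrating why the failure path must release its slot before bailing to the runtime (illustration only; the exact semantic difference between fstp and ffree is the detail the rollback restores):

#include <cstdio>

// Toy model of the 8-register x87 floating-point stack.
struct X87Stack {
  int depth = 0;
  bool load() {                       // fld: push a value
    if (depth == 8) return false;     // real hardware raises #IS instead
    ++depth;
    return true;
  }
  void store_pop() { if (depth > 0) --depth; }  // fstp: store and pop
};

int main() {
  X87Stack st;
  for (int bailout = 0; bailout < 9; ++bailout) {
    if (!st.load()) {
      std::printf("stack overflow at bailout %d\n", bailout);
      return 1;
    }
    // Omitting the cleanup on the failure path leaks one slot per bailout:
    // st.store_pop();
  }
  return 0;
}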
@@ -397,25 +397,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
// Allocate new FixedDoubleArray.
// edx: receiver
// edi: length of source FixedArray (smi-tagged)
__ lea(esi, Operand(edi,
times_4,
FixedDoubleArray::kHeaderSize + kPointerSize));
__ lea(esi, Operand(edi, times_4, FixedDoubleArray::kHeaderSize));
__ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);

Label aligned, aligned_done;
__ test(eax, Immediate(kDoubleAlignmentMask - kHeapObjectTag));
__ j(zero, &aligned, Label::kNear);
__ mov(FieldOperand(eax, 0),
Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
__ add(eax, Immediate(kPointerSize));
__ jmp(&aligned_done);

__ bind(&aligned);
__ mov(Operand(eax, esi, times_1, -kPointerSize-1),
Immediate(masm->isolate()->factory()->one_pointer_filler_map()));

__ bind(&aligned_done);

// eax: destination FixedDoubleArray
// edi: number of elements
// edx: receiver
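The hunk above removes the 3.10 double-alignment logic from GenerateSmiOnlyToDouble: a FixedDoubleArray payload wants 8-byte alignment, so the newer code over-allocated one word and planted a one-pointer filler on whichever side was misaligned. The underlying address math as a standalone sketch (ia32 constants assumed):

#include <cstdint>
#include <cstdio>

const uintptr_t kPointerSize = 4;          // ia32 word size
const uintptr_t kDoubleAlignmentMask = 7;  // doubles want 8-byte alignment

// Given an allocation one word larger than needed, return the 8-byte
// aligned payload start; *filler_at_front reports whether the skipped
// front word must be covered by a filler object for the GC.
uintptr_t AlignForDoubles(uintptr_t addr, bool* filler_at_front) {
  *filler_at_front = (addr & kDoubleAlignmentMask) != 0;
  return *filler_at_front ? addr + kPointerSize : addr;
}

int main() {
  bool filler;
  uintptr_t payload = AlignForDoubles(0x1004, &filler);
  std::printf("payload at %#lx, front filler: %s\n",
              static_cast<unsigned long>(payload), filler ? "yes" : "no");
  return 0;
}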
@@ -172,10 +172,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Register state for IC load call (from ic-ia32.cc).
// ----------- S t a t e -------------
//  -- eax    : receiver
//  -- ecx    : name
//  -- edx    : receiver
// -----------------------------------
Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), 0, false);
}


@@ -194,10 +194,10 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC load call (from ic-ia32.cc).
// ----------- S t a t e -------------
//  -- ecx    : key
//  -- edx    : receiver
//  -- eax    : key
// -----------------------------------
Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), 0, false);
}

@@ -239,13 +239,13 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
// ok:

if (FLAG_count_based_interrupts) {
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
ASSERT_EQ(*(call_target_address - 3), kJnsInstruction);
ASSERT_EQ(*(call_target_address - 2), kJnsOffset);
} else {
ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
ASSERT_EQ(*(call_target_address - 3), kJaeInstruction);
ASSERT_EQ(*(call_target_address - 2), kJaeOffset);
}
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
Assembler::set_target_address_at(call_target_address,

@@ -266,9 +266,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,

// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
ASSERT_EQ(*(call_target_address - 3), kNopByteOne);
ASSERT_EQ(*(call_target_address - 2), kNopByteTwo);
ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
if (FLAG_count_based_interrupts) {
*(call_target_address - 3) = kJnsInstruction;
*(call_target_address - 2) = kJnsOffset;
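For context on the asserts above: PatchStackCheckCodeAt overwrites the short conditional jump guarding the stack-check call with nops so the call is always taken, and RevertStackCheckCodeAt restores it. A standalone C++ sketch of that byte patching (0x79 really is jns rel8; the offset and nop bytes are assumed stand-ins for V8's constants):

#include <cassert>
#include <cstdint>
#include <cstdio>

const uint8_t kJnsInstruction = 0x79;  // jns rel8
const uint8_t kJnsOffset = 0x11;       // assumed jump displacement
const uint8_t kNopByteOne = 0x66;      // assumed two-byte nop encoding
const uint8_t kNopByteTwo = 0x90;

// Replace the conditional jump with nops so the following call is taken.
void Patch(uint8_t* call_target_address) {
  assert(call_target_address[-3] == kJnsInstruction);
  assert(call_target_address[-2] == kJnsOffset);
  call_target_address[-3] = kNopByteOne;
  call_target_address[-2] = kNopByteTwo;
}

// Put the conditional jump back.
void Revert(uint8_t* call_target_address) {
  assert(call_target_address[-3] == kNopByteOne);
  assert(call_target_address[-2] == kNopByteTwo);
  call_target_address[-3] = kJnsInstruction;
  call_target_address[-2] = kJnsOffset;
}

int main() {
  uint8_t code[] = {kJnsInstruction, kJnsOffset, 0xE8};  // jns +17; call ...
  uint8_t* call_target_address = code + 3;  // just past the call opcode
  Patch(call_target_address);
  Revert(call_target_address);
  std::printf("patched and reverted cleanly\n");
  return 0;
}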
@@ -101,6 +101,13 @@ class JumpPatchSite BASE_EMBEDDED {
};


// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
int FullCodeGenerator::self_optimization_header_size() {
UNREACHABLE();
return 13;
}


// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the

@@ -262,11 +269,11 @@ void FullCodeGenerator::Generate() {
// For named function expressions, declare the function name as a
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
function->proxy()->var()->mode() == CONST_HARMONY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
VariableProxy* proxy = scope()->function();
ASSERT(proxy->var()->mode() == CONST ||
proxy->var()->mode() == CONST_HARMONY);
ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
EmitDeclaration(proxy, proxy->var()->mode(), NULL);
}
VisitDeclarations(scope()->declarations());
}

@@ -756,51 +763,60 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
}


void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
__ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
__ cmp(ebx, isolate()->factory()->with_context_map());
__ Check(not_equal, "Declaration in with context.");
__ cmp(ebx, isolate()->factory()->catch_context_map());
__ Check(not_equal, "Declaration in catch context.");
}
}


void FullCodeGenerator::VisitVariableDeclaration(
VariableDeclaration* declaration) {
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* function) {
// If it was not possible to allocate the variable at compile time, we
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
bool binding_needs_init = (function == NULL) &&
(mode == CONST || mode == CONST_HARMONY || mode == LET);
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value());
++global_count_;
break;

case Variable::PARAMETER:
case Variable::LOCAL:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
if (function != NULL) {
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ mov(StackOperand(variable), result_register());
} else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ mov(StackOperand(variable),
Immediate(isolate()->factory()->the_hole_value()));
}
break;

case Variable::CONTEXT:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
// The variable in the decl always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
__ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
__ cmp(ebx, isolate()->factory()->with_context_map());
__ Check(not_equal, "Declaration in with context.");
__ cmp(ebx, isolate()->factory()->catch_context_map());
__ Check(not_equal, "Declaration in catch context.");
}
if (function != NULL) {
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ mov(ContextOperand(esi, variable->index()), result_register());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(esi,
Context::SlotOffset(variable->index()),
result_register(),
ecx,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
} else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ mov(ContextOperand(esi, variable->index()),
Immediate(isolate()->factory()->the_hole_value()));
// No write barrier since the hole value is in old space.

@@ -809,12 +825,14 @@ void FullCodeGenerator::VisitVariableDeclaration(
break;

case Variable::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
Comment cmnt(masm_, "[ Declaration");
__ push(esi);
__ push(Immediate(variable->name()));
// VariableDeclaration nodes are always introduced in one of four modes.
ASSERT(mode == VAR || mode == LET ||
mode == CONST || mode == CONST_HARMONY);
// Declaration nodes are always introduced in one of four modes.
ASSERT(mode == VAR ||
mode == CONST ||
mode == CONST_HARMONY ||
mode == LET);
PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
? READ_ONLY : NONE;
__ push(Immediate(Smi::FromInt(attr)));

@@ -822,7 +840,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (hole_init) {
if (function != NULL) {
VisitForStackValue(function);
} else if (binding_needs_init) {
__ push(Immediate(isolate()->factory()->the_hole_value()));
} else {
__ push(Immediate(Smi::FromInt(0)));  // Indicates no initial value.

@@ -834,118 +854,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
}


void FullCodeGenerator::VisitFunctionDeclaration(
FunctionDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
globals_->Add(variable->name());
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(declaration->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function);
break;
}

case Variable::PARAMETER:
case Variable::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
__ mov(StackOperand(variable), result_register());
break;
}

case Variable::CONTEXT: {
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
__ mov(ContextOperand(esi, variable->index()), result_register());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(esi,
Context::SlotOffset(variable->index()),
result_register(),
ecx,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
break;
}

case Variable::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ push(esi);
__ push(Immediate(variable->name()));
__ push(Immediate(Smi::FromInt(NONE)));
VisitForStackValue(declaration->fun());
__ CallRuntime(Runtime::kDeclareContextSlot, 4);
break;
}
}
}


void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
Handle<JSModule> instance = declaration->module()->interface()->Instance();
ASSERT(!instance.is_null());

switch (variable->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ ModuleDeclaration");
globals_->Add(variable->name());
globals_->Add(instance);
Visit(declaration->module());
break;
}

case Variable::CONTEXT: {
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ mov(ContextOperand(esi, variable->index()), Immediate(instance));
Visit(declaration->module());
break;
}

case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::LOOKUP:
UNREACHABLE();
}
}


void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED:
// TODO(rossberg)
break;

case Variable::CONTEXT: {
Comment cmnt(masm_, "[ ImportDeclaration");
EmitDebugCheckDeclarationContext(variable);
// TODO(rossberg)
break;
}

case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::LOOKUP:
UNREACHABLE();
}
}


void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
// TODO(rossberg)
}


void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(esi);  // The context is the first argument.

@@ -1286,7 +1194,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,

// All extension objects were empty and it is safe to use a global
// load IC call.
__ mov(edx, GlobalObjectOperand());
__ mov(eax, GlobalObjectOperand());
__ mov(ecx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)

@@ -1370,7 +1278,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object in eax.
__ mov(edx, GlobalObjectOperand());
__ mov(eax, GlobalObjectOperand());
__ mov(ecx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);

@@ -1764,9 +1672,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in edx.
VisitForStackValue(property->obj());
__ mov(edx, Operand(esp, 0));
// We need the receiver both on the stack and in the accumulator.
VisitForAccumulatorValue(property->obj());
__ push(result_register());
} else {
VisitForStackValue(property->obj());
}

@@ -1774,9 +1682,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case KEYED_PROPERTY: {
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
__ mov(edx, Operand(esp, kPointerSize));  // Object.
__ mov(ecx, Operand(esp, 0));  // Key.
VisitForAccumulatorValue(property->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());

@@ -2019,7 +1927,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(ecx, eax);
__ pop(edx);  // Receiver.
__ pop(edx);
__ pop(eax);  // Restore value.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()

@@ -2125,9 +2033,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,

void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
// eax : value
// esp[0] : receiver

Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);

@@ -2170,9 +2075,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {

void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// eax : value
// esp[0] : key
// esp[kPointerSize] : receiver

// If the assignment starts a block of assignments to the same object,
// change to slow case to avoid the quadratic behavior of repeatedly

@@ -2185,7 +2087,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(result_register());
}

__ pop(ecx);  // Key.
__ pop(ecx);
if (expr->ends_initialization_block()) {
__ mov(edx, Operand(esp, 0));  // Leave receiver on the stack for later.
} else {

@@ -2218,14 +2120,12 @@ void FullCodeGenerator::VisitProperty(Property* expr) {

if (key->IsPropertyName()) {
VisitForAccumulatorValue(expr->obj());
__ mov(edx, result_register());
EmitNamedPropertyLoad(expr);
context()->Plug(eax);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ pop(edx);  // Object.
__ mov(ecx, result_register());  // Key.
__ pop(edx);
|
||||
EmitKeyedPropertyLoad(expr);
|
||||
context()->Plug(eax);
|
||||
}
|
||||
|
@ -4024,16 +3924,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
|
|||
__ push(Immediate(Smi::FromInt(0)));
|
||||
}
|
||||
if (assign_type == NAMED_PROPERTY) {
|
||||
// Put the object both on the stack and in edx.
|
||||
// Put the object both on the stack and in the accumulator.
|
||||
VisitForAccumulatorValue(prop->obj());
|
||||
__ push(eax);
|
||||
__ mov(edx, eax);
|
||||
EmitNamedPropertyLoad(prop);
|
||||
} else {
|
||||
VisitForStackValue(prop->obj());
|
||||
VisitForStackValue(prop->key());
|
||||
__ mov(edx, Operand(esp, kPointerSize)); // Object.
|
||||
__ mov(ecx, Operand(esp, 0)); // Key.
|
||||
VisitForAccumulatorValue(prop->key());
|
||||
__ mov(edx, Operand(esp, 0));
|
||||
__ push(eax);
|
||||
EmitKeyedPropertyLoad(prop);
|
||||
}
|
||||
}
|
||||
|
@ -4180,7 +4079,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
|
|||
|
||||
if (proxy != NULL && proxy->var()->IsUnallocated()) {
|
||||
Comment cmnt(masm_, "Global variable");
|
||||
__ mov(edx, GlobalObjectOperand());
|
||||
__ mov(eax, GlobalObjectOperand());
|
||||
__ mov(ecx, Immediate(proxy->name()));
|
||||
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
|
||||
// Use a regular load, not a contextual load, to avoid a reference
|
||||
|
@ -4445,8 +4344,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
|
|||
|
||||
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
|
||||
Scope* declaration_scope = scope()->DeclarationScope();
|
||||
if (declaration_scope->is_global_scope() ||
|
||||
declaration_scope->is_module_scope()) {
|
||||
if (declaration_scope->is_global_scope()) {
|
||||
// Contexts nested in the global context have a canonical empty function
|
||||
// as their closure, not the anonymous closure containing the global
|
||||
// code. Pass a smi sentinel and let the runtime look up the empty
|
||||
|
|
|
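Note on the OMIT_SMI_CHECK flag in the FunctionDeclaration case above: the value just stored is a freshly materialized closure, which is always a heap object, so the write barrier's usual smi filter can be skipped. A minimal C++ sketch of that reasoning; the low-bit-zero smi encoding is the classic V8 convention, but the helper names here are invented for illustration:

#include <cassert>
#include <cstdint>

// Hypothetical illustration of V8-style pointer tagging: values whose low
// bit is 0 are small integers (smis) and never need a GC write barrier.
static bool IsSmi(uintptr_t tagged_value) { return (tagged_value & 1) == 0; }

void StoreWithWriteBarrier(uintptr_t* slot, uintptr_t value) {
  *slot = value;
  if (IsSmi(value)) return;  // smis are not heap pointers: no barrier needed
  // RememberSlot(slot);     // heap pointer: record for the GC (elided here)
}

void StoreKnownHeapObject(uintptr_t* slot, uintptr_t heap_object) {
  assert(!IsSmi(heap_object));  // caller guarantees a heap object...
  *slot = heap_object;
  // ...so the smi check can be omitted entirely (OMIT_SMI_CHECK).
  // RememberSlot(slot);
}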
@@ -218,13 +218,13 @@ static void GenerateDictionaryStore(MacroAssembler* masm,

void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- eax    : receiver
  //  -- ecx    : name
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label miss;

  StubCompiler::GenerateLoadArrayLength(masm, edx, eax, &miss);
  StubCompiler::GenerateLoadArrayLength(masm, eax, edx, &miss);
  __ bind(&miss);
  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}

@@ -233,13 +233,13 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
void LoadIC::GenerateStringLength(MacroAssembler* masm,
                                  bool support_wrappers) {
  // ----------- S t a t e -------------
  //  -- eax    : receiver
  //  -- ecx    : name
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label miss;

  StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
  StubCompiler::GenerateLoadStringLength(masm, eax, edx, ebx, &miss,
                                         support_wrappers);
  __ bind(&miss);
  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);

@@ -248,13 +248,13 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm,

void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- eax    : receiver
  //  -- ecx    : name
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label miss;

  StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
  StubCompiler::GenerateLoadFunctionPrototype(masm, eax, edx, ebx, &miss);
  __ bind(&miss);
  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}

@@ -443,7 +443,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,

void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- ecx    : key
  //  -- eax    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------

@@ -451,34 +451,39 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  Label probe_dictionary, check_number_dictionary;

  // Check that the key is a smi.
  __ JumpIfNotSmi(ecx, &check_string);
  __ JumpIfNotSmi(eax, &check_string);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(
      masm, edx, eax, Map::kHasIndexedInterceptor, &slow);
      masm, edx, ecx, Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(eax, &check_number_dictionary);
  __ CheckFastElements(ecx, &check_number_dictionary);

  GenerateFastArrayLoad(masm, edx, ecx, eax, eax, NULL, &slow);
  GenerateFastArrayLoad(masm,
                        edx,
                        eax,
                        ecx,
                        eax,
                        NULL,
                        &slow);
  Isolate* isolate = masm->isolate();
  Counters* counters = isolate->counters();
  __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
  __ ret(0);

  __ bind(&check_number_dictionary);
  __ mov(ebx, ecx);
  __ mov(ebx, eax);
  __ SmiUntag(ebx);
  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));

  // Check whether the elements is a number dictionary.
  // edx: receiver
  // ebx: untagged index
  // ecx: key
  // eax: elements
  __ CheckMap(eax,
  // eax: key
  // ecx: elements
  __ CheckMap(ecx,
              isolate->factory()->hash_table_map(),
              &slow,
              DONT_DO_SMI_CHECK);

@@ -486,7 +491,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // Push receiver on the stack to free up a register for the dictionary
  // probing.
  __ push(edx);
  __ LoadFromNumberDictionary(&slow_pop_receiver, eax, ecx, ebx, edx, edi, eax);
  __ LoadFromNumberDictionary(&slow_pop_receiver,
                              ecx,
                              eax,
                              ebx,
                              edx,
                              edi,
                              eax);
  // Pop receiver before returning.
  __ pop(edx);
  __ ret(0);

@@ -498,15 +509,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  __ bind(&slow);
  // Slow case: jump to runtime.
  // edx: receiver
  // ecx: key
  // eax: key
  __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_string);
  GenerateKeyStringCheck(masm, ecx, eax, ebx, &index_string, &slow);
  GenerateKeyStringCheck(masm, eax, ecx, ebx, &index_string, &slow);

  GenerateKeyedLoadReceiverCheck(
      masm, edx, eax, Map::kHasNamedInterceptor, &slow);
      masm, edx, ecx, Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the keyed lookup
  // cache. Otherwise probe the dictionary.

@@ -515,18 +526,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
         Immediate(isolate->factory()->hash_table_map()));
  __ j(equal, &probe_dictionary);

  // The receiver's map is still in eax, compute the keyed lookup cache hash
  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the string hash.
  if (FLAG_debug_code) {
    __ cmp(eax, FieldOperand(edx, HeapObject::kMapOffset));
    __ Check(equal, "Map is no longer in eax.");
  }
  __ mov(ebx, eax);  // Keep the map around for later.
  __ shr(eax, KeyedLookupCache::kMapHashShift);
  __ mov(edi, FieldOperand(ecx, String::kHashFieldOffset));
  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
  __ mov(ecx, ebx);
  __ shr(ecx, KeyedLookupCache::kMapHashShift);
  __ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
  __ shr(edi, String::kHashShift);
  __ xor_(eax, edi);
  __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
  __ xor_(ecx, edi);
  __ and_(ecx, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);

  // Load the key (consisting of map and symbol) from the cache and
  // check for match.

@@ -538,7 +546,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    __ mov(edi, eax);
    __ mov(edi, ecx);
    __ shl(edi, kPointerSizeLog2 + 1);
    if (i != 0) {
      __ add(edi, Immediate(kPointerSize * i * 2));

@@ -546,25 +554,25 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
    __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
    __ j(not_equal, &try_next_entry);
    __ add(edi, Immediate(kPointerSize));
    __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
    __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
    __ j(equal, &hit_on_nth_entry[i]);
    __ bind(&try_next_entry);
  }

  __ lea(edi, Operand(eax, 1));
  __ lea(edi, Operand(ecx, 1));
  __ shl(edi, kPointerSizeLog2 + 1);
  __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
  __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
  __ j(not_equal, &slow);
  __ add(edi, Immediate(kPointerSize));
  __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
  __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
  __ j(not_equal, &slow);

  // Get field offset.
  // edx : receiver
  // ebx : receiver's map
  // ecx : key
  // eax : lookup cache index
  // eax : key
  // ecx : lookup cache index
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());

@@ -572,12 +580,12 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    if (i != 0) {
      __ add(eax, Immediate(i));
      __ add(ecx, Immediate(i));
    }
    __ mov(edi,
           Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
    __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
    __ sub(edi, eax);
           Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
    __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
    __ sub(edi, ecx);
    __ j(above_equal, &property_array_property);
    if (i != 0) {
      __ jmp(&load_in_object_property);

@@ -586,9 +594,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {

  // Load in-object property.
  __ bind(&load_in_object_property);
  __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
  __ add(eax, edi);
  __ mov(eax, FieldOperand(edx, eax, times_pointer_size, 0));
  __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
  __ add(ecx, edi);
  __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
  __ ret(0);

@@ -604,16 +612,16 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // exists.
  __ bind(&probe_dictionary);

  __ mov(eax, FieldOperand(edx, JSObject::kMapOffset));
  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
  __ mov(ecx, FieldOperand(edx, JSObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, ecx, &slow);

  GenerateDictionaryLoad(masm, &slow, ebx, ecx, eax, edi, eax);
  GenerateDictionaryLoad(masm, &slow, ebx, eax, ecx, edi, eax);
  __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
  __ ret(0);

  __ bind(&index_string);
  __ IndexFromHash(ebx, ecx);
  __ IndexFromHash(ebx, eax);
  // Now jump to the place where smi keys are handled.
  __ jmp(&index_smi);
}

@@ -621,15 +629,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {

void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- ecx    : key (index)
  //  -- eax    : key (index)
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label miss;

  Register receiver = edx;
  Register index = ecx;
  Register scratch = ebx;
  Register index = eax;
  Register scratch = ecx;
  Register result = eax;

  StringCharAtGenerator char_at_generator(receiver,

@@ -653,7 +661,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {

void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- ecx    : key
  //  -- eax    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------

@@ -663,24 +671,24 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  __ JumpIfSmi(edx, &slow);

  // Check that the key is an array index, that is Uint32.
  __ test(ecx, Immediate(kSmiTagMask | kSmiSignMask));
  __ test(eax, Immediate(kSmiTagMask | kSmiSignMask));
  __ j(not_zero, &slow);

  // Get the map of the receiver.
  __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));

  // Check that it has indexed interceptor and access checks
  // are not enabled for this object.
  __ movzx_b(eax, FieldOperand(eax, Map::kBitFieldOffset));
  __ and_(eax, Immediate(kSlowCaseBitFieldMask));
  __ cmp(eax, Immediate(1 << Map::kHasIndexedInterceptor));
  __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
  __ and_(ecx, Immediate(kSlowCaseBitFieldMask));
  __ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor));
  __ j(not_zero, &slow);

  // Everything is fine, call runtime.
  __ pop(eax);
  __ pop(ecx);
  __ push(edx);  // receiver
  __ push(ecx);  // key
  __ push(eax);  // return address
  __ push(eax);  // key
  __ push(ecx);  // return address

  // Perform tail call to the entry.
  ExternalReference ref =

@@ -695,20 +703,20 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {

void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- ecx    : key
  //  -- eax    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label slow, notin;
  Factory* factory = masm->isolate()->factory();
  Operand mapped_location =
      GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
      GenerateMappedArgumentsLookup(masm, edx, eax, ebx, ecx, &notin, &slow);
  __ mov(eax, mapped_location);
  __ Ret();
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in ebx.
  Operand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
      GenerateUnmappedArgumentsLookup(masm, eax, ebx, ecx, &slow);
  __ cmp(unmapped_location, factory->the_hole_value());
  __ j(equal, &slow);
  __ mov(eax, unmapped_location);

@@ -1300,15 +1308,15 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {

void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- eax    : receiver
  //  -- ecx    : name
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
  Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
                                                  eax);
  Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, eax, ecx, ebx,
                                                  edx);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);

@@ -1317,17 +1325,17 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {

void LoadIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- eax    : receiver
  //  -- ecx    : name
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label miss;

  GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
  GenerateStringDictionaryReceiverCheck(masm, eax, edx, ebx, &miss);

  // eax: elements
  // edx: elements
  // Search the dictionary placing the result in eax.
  GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, eax);
  GenerateDictionaryLoad(masm, &miss, edx, ecx, edi, ebx, eax);
  __ ret(0);

  // Cache miss: Jump to runtime.

@@ -1338,15 +1346,15 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {

void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- eax    : receiver
  //  -- ecx    : name
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------

  __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);

  __ pop(ebx);
  __ push(edx);  // receiver
  __ push(eax);  // receiver
  __ push(ecx);  // name
  __ push(ebx);  // return address

@@ -1359,7 +1367,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {

void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
  // ----------- S t a t e -------------
  //  -- ecx    : key
  //  -- eax    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------

@@ -1368,7 +1376,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {

  __ pop(ebx);
  __ push(edx);  // receiver
  __ push(ecx);  // name
  __ push(eax);  // name
  __ push(ebx);  // return address

  // Perform tail call to the entry.

@@ -1382,14 +1390,14 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {

void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- ecx    : key
  //  -- eax    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------

  __ pop(ebx);
  __ push(edx);  // receiver
  __ push(ecx);  // name
  __ push(eax);  // name
  __ push(ebx);  // return address

  // Perform tail call to the entry.

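The probe sequence in KeyedLoadIC::GenerateGeneric above hashes 32 bits of the receiver's map pointer against the symbol's hash field to index the keyed lookup cache. Roughly, in scalar C++; the constants below are illustrative stand-ins, not V8's actual KeyedLookupCache values:

#include <cstdint>

// Illustrative stand-ins for the KeyedLookupCache shift/mask constants.
const int kMapHashShift = 5;       // assumption: drops low map-pointer bits
const int kStringHashShift = 2;    // assumption: mirrors String::kHashShift
const uint32_t kCapacityMask = 0x3f;  // power-of-two table size minus one
const uint32_t kHashMask = ~0u;       // assumption: keeps bucket alignment

uint32_t KeyedLookupCacheIndex(uintptr_t map_pointer, uint32_t hash_field) {
  uint32_t hash = static_cast<uint32_t>(map_pointer) >> kMapHashShift;
  hash ^= hash_field >> kStringHashShift;     // mix in the symbol's hash
  return hash & (kCapacityMask & kHashMask);  // wrap into the cache table
}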
@@ -2059,9 +2059,8 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  // Get the deoptimization index of the LLazyBailout-environment that
  // corresponds to this instruction.
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  ASSERT(instr->HasDeoptimizationEnvironment());
  LEnvironment* env = instr->deoptimization_environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());

  // Put the result value into the eax slot and restore all registers.

@@ -2115,7 +2114,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {

void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->global_object()).is(edx));
  ASSERT(ToRegister(instr->global_object()).is(eax));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ mov(ecx, instr->name());

@@ -2313,7 +2312,7 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {

void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->object()).is(eax));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ mov(ecx, instr->name());

@@ -2534,7 +2533,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->key()).is(ecx));
  ASSERT(ToRegister(instr->key()).is(eax));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

@@ -2544,29 +2543,25 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ lea(result, Operand(esp, -2 * kPointerSize));
  } else {
    // Check for arguments adapter frame.
    Label done, adapted;
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
    __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
    __ cmp(Operand(result),
           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
    __ j(equal, &adapted, Label::kNear);
  // Check for arguments adapter frame.
  Label done, adapted;
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
  __ cmp(Operand(result),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(equal, &adapted, Label::kNear);

    // No arguments adaptor frame.
    __ mov(result, Operand(ebp));
    __ jmp(&done, Label::kNear);
  // No arguments adaptor frame.
  __ mov(result, Operand(ebp));
  __ jmp(&done, Label::kNear);

    // Arguments adaptor frame present.
    __ bind(&adapted);
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  // Arguments adaptor frame present.
  __ bind(&adapted);
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

    // Result is the frame pointer for the frame if not adapted and for the real
    // frame below the adaptor frame if adapted.
    __ bind(&done);
  }
  // Result is the frame pointer for the frame if not adapted and for the real
  // frame below the adaptor frame if adapted.
  __ bind(&done);
}

@@ -2671,7 +2666,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {

  // Invoke the function.
  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(

@@ -2688,11 +2683,6 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ LoadHeapObject(result, instr->hydrogen()->closure());

@@ -2739,8 +2729,7 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind,
                                 EDIState edi_state) {
                                 CallKind call_kind) {
  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
      function->shared()->formal_parameter_count() == arity;

@@ -2748,9 +2737,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
  RecordPosition(pointers->position());

  if (can_invoke_directly) {
    if (edi_state == EDI_UNINITIALIZED) {
      __ LoadHeapObject(edi, function);
    }
    __ LoadHeapObject(edi, function);

    // Change context if needed.
    bool change_context =

@@ -2793,8 +2780,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  CallKnownFunction(instr->function(),
                    instr->arity(),
                    instr,
                    CALL_AS_METHOD,
                    EDI_UNINITIALIZED);
                    CALL_AS_METHOD);
}


@@ -3240,21 +3226,13 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->function()).is(edi));
  ASSERT(instr->HasPointerMap());

  if (instr->known_function().is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    RecordPosition(pointers->position());
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
  } else {
    CallKnownFunction(instr->known_function(),
                      instr->arity(),
                      instr,
                      CALL_AS_METHOD,
                      EDI_CONTAINS_TARGET);
  }
  ASSERT(instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator generator(
      this, pointers, Safepoint::kLazyDeopt);
  ParameterCount count(instr->arity());
  __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
}


@@ -3309,11 +3287,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {

void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));
  CallKnownFunction(instr->target(),
                    instr->arity(),
                    instr,
                    CALL_AS_FUNCTION,
                    EDI_UNINITIALIZED);
  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}


@@ -3486,18 +3460,15 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
void LCodeGen::DoStoreKeyedFastDoubleElement(
    LStoreKeyedFastDoubleElement* instr) {
  XMMRegister value = ToDoubleRegister(instr->value());
  Label have_value;

  if (instr->NeedsCanonicalization()) {
    Label have_value;
    __ ucomisd(value, value);
    __ j(parity_odd, &have_value);  // NaN.

  __ ucomisd(value, value);
  __ j(parity_odd, &have_value);  // NaN.

    ExternalReference canonical_nan_reference =
        ExternalReference::address_of_canonical_non_hole_nan();
    __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
    __ bind(&have_value);
  }
  ExternalReference canonical_nan_reference =
      ExternalReference::address_of_canonical_non_hole_nan();
  __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
  __ bind(&have_value);

  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,

@@ -4210,21 +4181,12 @@ void LCodeGen::DoCheckMapCommon(Register reg,
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoCheckMap(LCheckMap* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  Label success;
  SmallMapList* map_set = instr->hydrogen()->map_set();
  for (int i = 0; i < map_set->length() - 1; i++) {
    Handle<Map> map = map_set->at(i);
    __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
    __ j(equal, &success);
  }
  Handle<Map> map = map_set->last();
  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
  __ bind(&success);
  Handle<Map> map = instr->hydrogen()->map();
  DoCheckMapCommon(reg, map, instr->hydrogen()->mode(), instr->environment());
}


@@ -4335,14 +4297,6 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
                        deferred->entry(),
                        TAG_OBJECT);

  __ bind(deferred->exit());
  if (FLAG_debug_code) {
    Label is_in_new_space;
    __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
    __ Abort("Allocated object is not in new-space");
    __ bind(&is_in_new_space);
  }

  // Load the initial map.
  Register map = scratch;
  __ LoadHeapObject(scratch, constructor);

@@ -4377,14 +4331,14 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
      __ mov(FieldOperand(result, property_offset), scratch);
    }
  }

  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
  Register result = ToRegister(instr->result());
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
  Handle<Map> initial_map(constructor->initial_map());
  int instance_size = initial_map->instance_size();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already

@@ -4392,9 +4346,8 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
  __ Set(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ push(Immediate(Smi::FromInt(instance_size)));
  CallRuntimeFromDeferred(
      Runtime::kAllocateInNewSpace, 1, instr, instr->context());
  __ PushHeapObject(constructor);
  CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}

@@ -4462,13 +4415,6 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
    __ LoadHeapObject(ecx, object);
    __ cmp(source, ecx);
    __ Assert(equal, "Unexpected object literal boilerplate");
    __ mov(ecx, FieldOperand(source, HeapObject::kMapOffset));
    __ cmp(ecx, Handle<Map>(object->map()));
    __ Assert(equal, "Unexpected boilerplate map");
    __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
    __ and_(ecx, Map::kElementsKindMask);
    __ cmp(ecx, object->GetElementsKind() << Map::kElementsKindShift);
    __ Assert(equal, "Unexpected boilerplate elements kind");
  }

  // Only elements backing stores for non-COW arrays need to be copied.

@@ -4538,10 +4484,9 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
      __ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
    }
  } else if (elements->IsFixedArray()) {
    Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
    for (int i = 0; i < elements_length; i++) {
      int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
      Handle<Object> value(fast_elements->get(i));
      Handle<Object> value = JSObject::GetElement(object, i);
      if (value->IsJSObject()) {
        Handle<JSObject> value_object = Handle<JSObject>::cast(value);
        __ lea(ecx, Operand(result, *offset));

@@ -4565,23 +4510,6 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  int size = instr->hydrogen()->total_size();
  ElementsKind boilerplate_elements_kind =
      instr->hydrogen()->boilerplate()->GetElementsKind();

  // Deopt if the literal boilerplate ElementsKind is of a type different than
  // the expected one. The check isn't necessary if the boilerplate has already
  // been converted to FAST_ELEMENTS.
  if (boilerplate_elements_kind != FAST_ELEMENTS) {
    __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
    __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
    // Load the map's "bit field 2". We only need the first byte,
    // but the following masking takes care of that anyway.
    __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
    // Retrieve elements_kind from bit field 2.
    __ and_(ecx, Map::kElementsKindMask);
    __ cmp(ecx, boilerplate_elements_kind << Map::kElementsKindShift);
    DeoptimizeIf(not_equal, instr->environment());
  }

  // Allocate all objects that are part of the literal in one big
  // allocation. This avoids multiple limit checks.

@@ -4866,7 +4794,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  LOperand* key = instr->key();
  __ push(ToOperand(obj));
  EmitPushTaggedOperand(key);
  ASSERT(instr->HasPointerMap());
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  // Create safepoint generator that will also ensure enough space in the

@@ -4964,7 +4892,7 @@ void LCodeGen::DoIn(LIn* instr) {
  LOperand* key = instr->key();
  EmitPushTaggedOperand(key);
  EmitPushTaggedOperand(obj);
  ASSERT(instr->HasPointerMap());
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(

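The NeedsCanonicalization() path in DoStoreKeyedFastDoubleElement collapses every NaN to one canonical non-hole bit pattern before storing into a fast double array, so a computed NaN can never alias the array-hole sentinel. A scalar sketch of the same idea; the canonical pattern below is the common quiet-NaN encoding and is an assumption, since V8 defines its own constant:

#include <cmath>
#include <cstdint>
#include <cstring>

double CanonicalizeForDoubleArray(double value) {
  // ucomisd sets the parity flag only for unordered (NaN) operands;
  // ordinary values pass through untouched.
  if (!std::isnan(value)) return value;
  const uint64_t kCanonicalNonHoleNaN = 0x7ff8000000000000ULL;  // assumption
  double canonical;
  std::memcpy(&canonical, &kCanonicalNonHoleNaN, sizeof(canonical));
  return canonical;
}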
@@ -206,18 +206,12 @@ class LCodeGen BASE_EMBEDDED {
                               LInstruction* instr,
                               LOperand* context);

  enum EDIState {
    EDI_UNINITIALIZED,
    EDI_CONTAINS_TARGET
  };

  // Generate a direct call to a known function. Expects the function
  // to be in edi.
  void CallKnownFunction(Handle<JSFunction> function,
                         int arity,
                         LInstruction* instr,
                         CallKind call_kind,
                         EDIState edi_state);
                         CallKind call_kind);

  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

@@ -729,6 +729,22 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
}


LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
    LInstruction* instr, int ast_id) {
  ASSERT(instruction_pending_deoptimization_environment_ == NULL);
  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
  instruction_pending_deoptimization_environment_ = instr;
  pending_deoptimization_ast_id_ = ast_id;
  return instr;
}


void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
  instruction_pending_deoptimization_environment_ = NULL;
  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
}


LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                        HInstruction* hinstr,
                                        CanDeoptimize can_deoptimize) {

@@ -741,10 +757,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
  if (hinstr->HasObservableSideEffects()) {
    ASSERT(hinstr->next()->IsSimulate());
    HSimulate* sim = HSimulate::cast(hinstr->next());
    ASSERT(instruction_pending_deoptimization_environment_ == NULL);
    ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
    instruction_pending_deoptimization_environment_ = instr;
    pending_deoptimization_ast_id_ = sim->ast_id();
    instr = SetInstructionPendingDeoptimizationEnvironment(
        instr, sim->ast_id());
  }

  // If instruction does not have side-effects lazy deoptimization

@@ -762,6 +776,12 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
}


LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
  instr->MarkAsSaveDoubles();
  return instr;
}


LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
  ASSERT(!instr->HasPointerMap());
  instr->set_pointer_map(new(zone()) LPointerMap(position_));

@@ -1310,7 +1330,6 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
  ASSERT(instr->value()->representation().IsInteger32());
  ASSERT(instr->representation().IsInteger32());
  if (instr->HasNoUses()) return NULL;
  LOperand* input = UseRegisterAtStart(instr->value());
  LBitNotI* result = new(zone()) LBitNotI(input);
  return DefineSameAsFirst(result);

@@ -1335,12 +1354,6 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}


LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
  UNIMPLEMENTED();
  return NULL;
}


LInstruction* LChunkBuilder::DoMod(HMod* instr) {
  if (instr->representation().IsInteger32()) {
    ASSERT(instr->left()->representation().IsInteger32());

@@ -1787,9 +1800,9 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
}


LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
  LOperand* value = UseRegisterAtStart(instr->value());
  LCheckMaps* result = new(zone()) LCheckMaps(value);
  LCheckMap* result = new(zone()) LCheckMap(value);
  return AssignEnvironment(result);
}

@@ -1849,7 +1862,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {

LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
  LOperand* context = UseFixed(instr->context(), esi);
  LOperand* global_object = UseFixed(instr->global_object(), edx);
  LOperand* global_object = UseFixed(instr->global_object(), eax);
  LLoadGlobalGeneric* result =
      new(zone()) LLoadGlobalGeneric(context, global_object);
  return MarkAsCall(DefineFixed(result, eax), instr);

@@ -1909,7 +1922,7 @@ LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
  ASSERT(instr->representation().IsTagged());
  if (instr->need_generic()) {
    LOperand* context = UseFixed(instr->context(), esi);
    LOperand* obj = UseFixed(instr->object(), edx);
    LOperand* obj = UseFixed(instr->object(), eax);
    LLoadNamedFieldPolymorphic* result =
        new(zone()) LLoadNamedFieldPolymorphic(context, obj);
    return MarkAsCall(DefineFixed(result, eax), instr);

@@ -1925,7 +1938,7 @@ LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(

LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
  LOperand* context = UseFixed(instr->context(), esi);
  LOperand* object = UseFixed(instr->object(), edx);
  LOperand* object = UseFixed(instr->object(), eax);
  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
  return MarkAsCall(DefineFixed(result, eax), instr);
}

@@ -2004,7 +2017,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
  LOperand* context = UseFixed(instr->context(), esi);
  LOperand* object = UseFixed(instr->object(), edx);
  LOperand* key = UseFixed(instr->key(), ecx);
  LOperand* key = UseFixed(instr->key(), eax);

  LLoadKeyedGeneric* result =
      new(zone()) LLoadKeyedGeneric(context, object, key);

@@ -2335,12 +2348,9 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
    ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
    LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
    LInstruction* result = AssignEnvironment(lazy_bailout);
    // Store the lazy deopt environment with the instruction if needed. Right
    // now it is only used for LInstanceOfKnownGlobal.
    instruction_pending_deoptimization_environment_->
        SetDeferredLazyDeoptimizationEnvironment(result->environment());
    instruction_pending_deoptimization_environment_ = NULL;
    pending_deoptimization_ast_id_ = AstNode::kNoNumber;
        set_deoptimization_environment(result->environment());
    ClearInstructionPendingDeoptimizationEnvironment();
    return result;
  }

@@ -2370,8 +2380,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
                                               undefined,
                                               instr->call_kind(),
                                               instr->is_construct());
  if (instr->arguments_var() != NULL) {
    inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
  if (instr->arguments() != NULL) {
    inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
  }
  current_block_->UpdateEnvironment(inner);
  chunk_->AddInlinedClosure(instr->closure());

@@ -2380,20 +2390,10 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {


LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
  LInstruction* pop = NULL;

  HEnvironment* env = current_block_->last_environment();

  if (instr->arguments_pushed()) {
    int argument_count = env->arguments_environment()->parameter_count();
    pop = new(zone()) LDrop(argument_count);
    argument_count_ -= argument_count;
  }

  HEnvironment* outer = current_block_->last_environment()->
      DiscardInlined(false);
  current_block_->UpdateEnvironment(outer);
  return pop;
  return NULL;
}

@@ -65,7 +65,7 @@ class LCodeGen;
  V(CallStub) \
  V(CheckFunction) \
  V(CheckInstanceType) \
  V(CheckMaps) \
  V(CheckMap) \
  V(CheckNonSmi) \
  V(CheckPrototypeMaps) \
  V(CheckSmi) \

@@ -174,8 +174,7 @@ class LCodeGen;
  V(CheckMapValue) \
  V(LoadFieldByIndex) \
  V(DateField) \
  V(WrapReceiver) \
  V(Drop)
  V(WrapReceiver)


#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \

@@ -199,7 +198,8 @@ class LInstruction: public ZoneObject {
  LInstruction()
      : environment_(NULL),
        hydrogen_value_(NULL),
        is_call_(false) { }
        is_call_(false),
        is_save_doubles_(false) { }
  virtual ~LInstruction() { }

  virtual void CompileToNative(LCodeGen* generator) = 0;

@@ -242,12 +242,22 @@ class LInstruction: public ZoneObject {
  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
  HValue* hydrogen_value() const { return hydrogen_value_; }

  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
  void set_deoptimization_environment(LEnvironment* env) {
    deoptimization_environment_.set(env);
  }
  LEnvironment* deoptimization_environment() const {
    return deoptimization_environment_.get();
  }
  bool HasDeoptimizationEnvironment() const {
    return deoptimization_environment_.is_set();
  }

  void MarkAsCall() { is_call_ = true; }
  void MarkAsSaveDoubles() { is_save_doubles_ = true; }

  // Interface to the register allocator and iterators.
  bool IsMarkedAsCall() const { return is_call_; }
  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }

  virtual bool HasResult() const = 0;
  virtual LOperand* result() = 0;

@@ -268,7 +278,9 @@ class LInstruction: public ZoneObject {
  LEnvironment* environment_;
  SetOncePointer<LPointerMap> pointer_map_;
  HValue* hydrogen_value_;
  SetOncePointer<LEnvironment> deoptimization_environment_;
  bool is_call_;
  bool is_save_doubles_;
};

@@ -513,8 +525,9 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {

class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
 public:
  LArgumentsElements() { }

  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};

@@ -831,15 +844,6 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
  DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)

  Handle<JSFunction> function() const { return hydrogen()->function(); }
  LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
    return lazy_deopt_env_;
  }
  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
    lazy_deopt_env_ = env;
  }

 private:
  LEnvironment* lazy_deopt_env_;
};

@@ -1397,19 +1401,6 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};


class LDrop: public LTemplateInstruction<0, 0, 0> {
 public:
  explicit LDrop(int count) : count_(count) { }

  int count() const { return count_; }

  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")

 private:
  int count_;
};


class LThisFunction: public LTemplateInstruction<1, 0, 0> {
 public:
  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")

@@ -1498,7 +1489,6 @@ class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
  virtual void PrintDataTo(StringStream* stream);

  int arity() const { return hydrogen()->argument_count() - 1; }
  Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
};

@@ -1797,8 +1787,6 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
  LOperand* elements() { return inputs_[0]; }
  LOperand* key() { return inputs_[1]; }
  LOperand* value() { return inputs_[2]; }

  bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};

@@ -1961,14 +1949,14 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
};


class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
class LCheckMap: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LCheckMaps(LOperand* value) {
  explicit LCheckMap(LOperand* value) {
    inputs_[0] = value;
  }

  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
};

@@ -2483,6 +2471,11 @@ class LChunkBuilder BASE_EMBEDDED {
                              LInstruction* instr,
                              HInstruction* hinstr,
                              CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
  LInstruction* MarkAsSaveDoubles(LInstruction* instr);

  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
      LInstruction* instr, int ast_id);
  void ClearInstructionPendingDeoptimizationEnvironment();

  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
                                  int* argument_index_accumulator);

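The deoptimization_environment_ member restored above relies on V8's SetOncePointer template: a pointer slot that may be assigned at most once and then queried. A minimal stand-in model of that contract; the real template lives elsewhere in V8 and its exact assertion behavior is an assumption here:

#include <cassert>
#include <cstddef>

// Minimal model of a set-once pointer: assign at most once, then read-only.
template <typename T>
class SetOncePointer {
 public:
  SetOncePointer() : pointer_(NULL) {}
  bool is_set() const { return pointer_ != NULL; }
  T* get() const {
    assert(pointer_ != NULL);  // reading before set is a bug
    return pointer_;
  }
  void set(T* value) {
    assert(pointer_ == NULL && value != NULL);  // enforce set-once
    pointer_ = value;
  }

 private:
  T* pointer_;
};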
@@ -501,13 +501,9 @@ void RegExpMacroAssemblerIA32::CheckNotCharacter(uint32_t c,
void RegExpMacroAssemblerIA32::CheckCharacterAfterAnd(uint32_t c,
                                                      uint32_t mask,
                                                      Label* on_equal) {
  if (c == 0) {
    __ test(current_character(), Immediate(mask));
  } else {
    __ mov(eax, mask);
    __ and_(eax, current_character());
    __ cmp(eax, c);
  }
  __ mov(eax, current_character());
  __ and_(eax, mask);
  __ cmp(eax, c);
  BranchOrBacktrack(equal, on_equal);
}

@@ -515,13 +511,9 @@ void RegExpMacroAssemblerIA32::CheckCharacterAfterAnd(uint32_t c,
void RegExpMacroAssemblerIA32::CheckNotCharacterAfterAnd(uint32_t c,
                                                         uint32_t mask,
                                                         Label* on_not_equal) {
  if (c == 0) {
    __ test(current_character(), Immediate(mask));
  } else {
    __ mov(eax, mask);
    __ and_(eax, current_character());
    __ cmp(eax, c);
  }
  __ mov(eax, current_character());
  __ and_(eax, mask);
  __ cmp(eax, c);
  BranchOrBacktrack(not_equal, on_not_equal);
}

@@ -533,51 +525,12 @@ void RegExpMacroAssemblerIA32::CheckNotCharacterAfterMinusAnd(
    Label* on_not_equal) {
  ASSERT(minus < String::kMaxUtf16CodeUnit);
  __ lea(eax, Operand(current_character(), -minus));
  if (c == 0) {
    __ test(eax, Immediate(mask));
  } else {
    __ and_(eax, mask);
    __ cmp(eax, c);
  }
  __ and_(eax, mask);
  __ cmp(eax, c);
  BranchOrBacktrack(not_equal, on_not_equal);
}


void RegExpMacroAssemblerIA32::CheckCharacterInRange(
    uc16 from,
    uc16 to,
    Label* on_in_range) {
  __ lea(eax, Operand(current_character(), -from));
  __ cmp(eax, to - from);
  BranchOrBacktrack(below_equal, on_in_range);
}


void RegExpMacroAssemblerIA32::CheckCharacterNotInRange(
    uc16 from,
    uc16 to,
    Label* on_not_in_range) {
  __ lea(eax, Operand(current_character(), -from));
  __ cmp(eax, to - from);
  BranchOrBacktrack(above, on_not_in_range);
}


void RegExpMacroAssemblerIA32::CheckBitInTable(
    Handle<ByteArray> table,
    Label* on_bit_set) {
  __ mov(eax, Immediate(table));
  Register index = current_character();
  if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
    __ mov(ebx, kTableSize - 1);
    __ and_(ebx, current_character());
    index = ebx;
  }
  __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize), 0);
  BranchOrBacktrack(not_equal, on_bit_set);
}


bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
                                                          Label* on_no_match) {
  // Range checks (c in min..max) are generally implemented by an unsigned

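Both CheckCharacterAfterAnd variants above compile down to the same masked comparison of the current character; the rolled-back version simply drops the c == 0 shortcut that used a single test instruction. In scalar terms:

#include <cstdint>

// Scalar model of CheckCharacterAfterAnd / CheckNotCharacterAfterAnd:
// branch when (current & mask) compares (not-)equal to c.
bool MatchesAfterAnd(uint32_t current_character, uint32_t c, uint32_t mask) {
  return (current_character & mask) == c;  // and eax, mask; cmp eax, c
}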
@@ -78,14 +78,6 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
                                              uc16 minus,
                                              uc16 mask,
                                              Label* on_not_equal);
  virtual void CheckCharacterInRange(uc16 from,
                                     uc16 to,
                                     Label* on_in_range);
  virtual void CheckCharacterNotInRange(uc16 from,
                                        uc16 to,
                                        Label* on_not_in_range);
  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);

  // Checks whether the given offset from the current position is before
  // the end of the string.
  virtual void CheckPosition(int cp_offset, Label* on_outside_input);

@@ -406,7 +406,6 @@ static void PushInterceptorArguments(MacroAssembler* masm,
  __ push(receiver);
  __ push(holder);
  __ push(FieldOperand(scratch, InterceptorInfo::kDataOffset));
  __ push(Immediate(reinterpret_cast<int>(masm->isolate())));
}


@@ -420,12 +419,12 @@ static void CompileCallLoadPropertyWithInterceptor(
  __ CallExternalReference(
      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
                        masm->isolate()),
      6);
      5);
}


// Number of pointers to be reserved on stack for fast API call.
static const int kFastApiCallArguments = 4;
static const int kFastApiCallArguments = 3;


// Reserves space for the extra arguments to API function in the

@@ -473,11 +472,10 @@ static void GenerateFastApiCall(MacroAssembler* masm,
  //  -- esp[8]              : api function
  //                           (first fast api call extra argument)
  //  -- esp[12]             : api call data
  //  -- esp[16]             : isolate
  //  -- esp[20]             : last argument
  //  -- esp[16]             : last argument
  //  -- ...
  //  -- esp[(argc + 4) * 4] : first argument
  //  -- esp[(argc + 5) * 4] : receiver
  //  -- esp[(argc + 3) * 4] : first argument
  //  -- esp[(argc + 4) * 4] : receiver
  // -----------------------------------
  // Get the function and setup the context.
  Handle<JSFunction> function = optimization.constant_function();

@@ -495,11 +493,9 @@ static void GenerateFastApiCall(MacroAssembler* masm,
  } else {
    __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
  }
  __ mov(Operand(esp, 4 * kPointerSize),
         Immediate(reinterpret_cast<int>(masm->isolate())));

  // Prepare arguments.
  __ lea(eax, Operand(esp, 4 * kPointerSize));
  __ lea(eax, Operand(esp, 3 * kPointerSize));

  const int kApiArgc = 1;  // API function gets reference to the v8::Arguments.

@@ -683,7 +679,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
    __ CallExternalReference(
        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
                          masm->isolate()),
        6);
        5);

    // Restore the name_ register.
    __ pop(name_);

@@ -1038,7 +1034,6 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
  } else {
    __ push(Immediate(Handle<Object>(callback->data())));
  }
  __ push(Immediate(reinterpret_cast<int>(isolate())));

  // Save a pointer to where we pushed the arguments pointer.
  // This will be passed as the const AccessorInfo& to the C++ callback.

@@ -1049,9 +1044,9 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,

  __ push(scratch3);  // Restore return address.

  // 4 elements array for v8::Arguments::values_, handler for name and pointer
  // 3 elements array for v8::Arguments::values_, handler for name and pointer
  // to the values (it considered as smi in GC).
  const int kStackSpace = 6;
  const int kStackSpace = 5;
  const int kApiArgc = 2;

  __ PrepareCallApiFunction(kApiArgc);

@@ -1218,7 +1213,6 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
      __ push(holder_reg);
      __ mov(holder_reg, Immediate(callback));
      __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
      __ push(Immediate(reinterpret_cast<int>(isolate())));
      __ push(holder_reg);
      __ push(name_reg);
      __ push(scratch2);  // restore return address

@@ -1226,7 +1220,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
      ExternalReference ref =
          ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
                            masm()->isolate());
      __ TailCallExternalReference(ref, 6, 1);
      __ TailCallExternalReference(ref, 5, 1);
    }
  } else {  // !compile_followup_inline
    // Call the runtime system to load the interceptor.

@@ -1242,7 +1236,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
    ExternalReference ref =
        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
                          isolate());
    __ TailCallExternalReference(ref, 6, 1);
    __ TailCallExternalReference(ref, 5, 1);
  }
}


@@ -2180,7 +2174,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
                  name, depth, &miss);

  // Move the return address on top of the stack.
  __ mov(eax, Operand(esp, 4 * kPointerSize));
  __ mov(eax, Operand(esp, 3 * kPointerSize));
  __ mov(Operand(esp, 0 * kPointerSize), eax);

  // esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains

@@ -2709,27 +2703,27 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
                                                      Handle<JSObject> object,
                                                      Handle<JSObject> last) {
  // ----------- S t a t e -------------
|
||||
// -- eax : receiver
|
||||
// -- ecx : name
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
// Check that the receiver isn't a smi.
|
||||
__ JumpIfSmi(edx, &miss);
|
||||
__ JumpIfSmi(eax, &miss);
|
||||
|
||||
ASSERT(last->IsGlobalObject() || last->HasFastProperties());
|
||||
|
||||
// Check the maps of the full prototype chain. Also check that
|
||||
// global property cells up to (but not including) the last object
|
||||
// in the prototype chain are empty.
|
||||
CheckPrototypes(object, edx, last, ebx, eax, edi, name, &miss);
|
||||
CheckPrototypes(object, eax, last, ebx, edx, edi, name, &miss);
|
||||
|
||||
// If the last object in the prototype chain is a global object,
|
||||
// check that the global property cell is empty.
|
||||
if (last->IsGlobalObject()) {
|
||||
GenerateCheckPropertyCell(
|
||||
masm(), Handle<GlobalObject>::cast(last), name, eax, &miss);
|
||||
masm(), Handle<GlobalObject>::cast(last), name, edx, &miss);
|
||||
}
|
||||
|
||||
// Return undefined if maps of the full prototype chain are still the
|
||||
|
@ -2750,13 +2744,13 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
|
|||
int index,
|
||||
Handle<String> name) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- eax : receiver
|
||||
// -- ecx : name
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
GenerateLoadField(object, holder, edx, ebx, eax, edi, index, name, &miss);
|
||||
GenerateLoadField(object, holder, eax, ebx, edx, edi, index, name, &miss);
|
||||
__ bind(&miss);
|
||||
GenerateLoadMiss(masm(), Code::LOAD_IC);
|
||||
|
||||
|
@ -2771,13 +2765,13 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
|
|||
Handle<JSObject> holder,
|
||||
Handle<AccessorInfo> callback) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- eax : receiver
|
||||
// -- ecx : name
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
GenerateLoadCallback(object, holder, edx, ecx, ebx, eax, edi, callback,
|
||||
GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi, callback,
|
||||
name, &miss);
|
||||
__ bind(&miss);
|
||||
GenerateLoadMiss(masm(), Code::LOAD_IC);
|
||||
|
@ -2792,13 +2786,13 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
|
|||
Handle<JSFunction> value,
|
||||
Handle<String> name) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- eax : receiver
|
||||
// -- ecx : name
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
GenerateLoadConstant(object, holder, edx, ebx, eax, edi, value, name, &miss);
|
||||
GenerateLoadConstant(object, holder, eax, ebx, edx, edi, value, name, &miss);
|
||||
__ bind(&miss);
|
||||
GenerateLoadMiss(masm(), Code::LOAD_IC);
|
||||
|
||||
|
@ -2811,8 +2805,8 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
|
|||
Handle<JSObject> holder,
|
||||
Handle<String> name) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- eax : receiver
|
||||
// -- ecx : name
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
@ -2822,7 +2816,7 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
|
|||
|
||||
// TODO(368): Compile in the whole chain: all the interceptors in
|
||||
// prototypes and ultimate answer.
|
||||
GenerateLoadInterceptor(receiver, holder, &lookup, edx, ecx, eax, ebx, edi,
|
||||
GenerateLoadInterceptor(receiver, holder, &lookup, eax, ecx, edx, ebx, edi,
|
||||
name, &miss);
|
||||
|
||||
__ bind(&miss);
|
||||
|
@ -2840,15 +2834,15 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
|
|||
Handle<String> name,
|
||||
bool is_dont_delete) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- eax : receiver
|
||||
// -- ecx : name
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
// Check that the maps haven't changed.
|
||||
__ JumpIfSmi(edx, &miss);
|
||||
CheckPrototypes(object, edx, holder, ebx, eax, edi, name, &miss);
|
||||
__ JumpIfSmi(eax, &miss);
|
||||
CheckPrototypes(object, eax, holder, ebx, edx, edi, name, &miss);
|
||||
|
||||
// Get the value from the cell.
|
||||
if (Serializer::enabled()) {
|
||||
|
@ -2886,7 +2880,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
|
|||
Handle<JSObject> holder,
|
||||
int index) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- ecx : key
|
||||
// -- eax : key
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
|
@ -2896,10 +2890,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
|
|||
__ IncrementCounter(counters->keyed_load_field(), 1);
|
||||
|
||||
// Check that the name has not changed.
|
||||
__ cmp(ecx, Immediate(name));
|
||||
__ cmp(eax, Immediate(name));
|
||||
__ j(not_equal, &miss);
|
||||
|
||||
GenerateLoadField(receiver, holder, edx, ebx, eax, edi, index, name, &miss);
|
||||
GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
|
||||
|
||||
__ bind(&miss);
|
||||
__ DecrementCounter(counters->keyed_load_field(), 1);
|
||||
|
@ -2916,7 +2910,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
|
|||
Handle<JSObject> holder,
|
||||
Handle<AccessorInfo> callback) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- ecx : key
|
||||
// -- eax : key
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
|
@ -2926,10 +2920,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
|
|||
__ IncrementCounter(counters->keyed_load_callback(), 1);
|
||||
|
||||
// Check that the name has not changed.
|
||||
__ cmp(ecx, Immediate(name));
|
||||
__ cmp(eax, Immediate(name));
|
||||
__ j(not_equal, &miss);
|
||||
|
||||
GenerateLoadCallback(receiver, holder, edx, ecx, ebx, eax, edi, callback,
|
||||
GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi, callback,
|
||||
name, &miss);
|
||||
|
||||
__ bind(&miss);
|
||||
|
@ -2947,7 +2941,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
|
|||
Handle<JSObject> holder,
|
||||
Handle<JSFunction> value) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- ecx : key
|
||||
// -- eax : key
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
|
@ -2957,11 +2951,11 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
|
|||
__ IncrementCounter(counters->keyed_load_constant_function(), 1);
|
||||
|
||||
// Check that the name has not changed.
|
||||
__ cmp(ecx, Immediate(name));
|
||||
__ cmp(eax, Immediate(name));
|
||||
__ j(not_equal, &miss);
|
||||
|
||||
GenerateLoadConstant(
|
||||
receiver, holder, edx, ebx, eax, edi, value, name, &miss);
|
||||
receiver, holder, edx, ebx, ecx, edi, value, name, &miss);
|
||||
__ bind(&miss);
|
||||
__ DecrementCounter(counters->keyed_load_constant_function(), 1);
|
||||
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
|
||||
|
@ -2976,7 +2970,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
|
|||
Handle<JSObject> holder,
|
||||
Handle<String> name) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- ecx : key
|
||||
// -- eax : key
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
|
@ -2986,12 +2980,12 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
|
|||
__ IncrementCounter(counters->keyed_load_interceptor(), 1);
|
||||
|
||||
// Check that the name has not changed.
|
||||
__ cmp(ecx, Immediate(name));
|
||||
__ cmp(eax, Immediate(name));
|
||||
__ j(not_equal, &miss);
|
||||
|
||||
LookupResult lookup(isolate());
|
||||
LookupPostInterceptor(holder, name, &lookup);
|
||||
GenerateLoadInterceptor(receiver, holder, &lookup, edx, ecx, eax, ebx, edi,
|
||||
GenerateLoadInterceptor(receiver, holder, &lookup, edx, eax, ecx, ebx, edi,
|
||||
name, &miss);
|
||||
__ bind(&miss);
|
||||
__ DecrementCounter(counters->keyed_load_interceptor(), 1);
|
||||
|
@ -3005,7 +2999,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
|
|||
Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
|
||||
Handle<String> name) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- ecx : key
|
||||
// -- eax : key
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
|
@ -3015,10 +3009,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
|
|||
__ IncrementCounter(counters->keyed_load_array_length(), 1);
|
||||
|
||||
// Check that the name has not changed.
|
||||
__ cmp(ecx, Immediate(name));
|
||||
__ cmp(eax, Immediate(name));
|
||||
__ j(not_equal, &miss);
|
||||
|
||||
GenerateLoadArrayLength(masm(), edx, eax, &miss);
|
||||
GenerateLoadArrayLength(masm(), edx, ecx, &miss);
|
||||
__ bind(&miss);
|
||||
__ DecrementCounter(counters->keyed_load_array_length(), 1);
|
||||
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
|
||||
|
@ -3031,7 +3025,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
|
|||
Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
|
||||
Handle<String> name) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- ecx : key
|
||||
// -- eax : key
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
|
@ -3041,10 +3035,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
|
|||
__ IncrementCounter(counters->keyed_load_string_length(), 1);
|
||||
|
||||
// Check that the name has not changed.
|
||||
__ cmp(ecx, Immediate(name));
|
||||
__ cmp(eax, Immediate(name));
|
||||
__ j(not_equal, &miss);
|
||||
|
||||
GenerateLoadStringLength(masm(), edx, eax, ebx, &miss, true);
|
||||
GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
|
||||
__ bind(&miss);
|
||||
__ DecrementCounter(counters->keyed_load_string_length(), 1);
|
||||
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
|
||||
|
@ -3057,7 +3051,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
|
|||
Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
|
||||
Handle<String> name) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- ecx : key
|
||||
// -- eax : key
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
|
@ -3067,10 +3061,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
|
|||
__ IncrementCounter(counters->keyed_load_function_prototype(), 1);
|
||||
|
||||
// Check that the name has not changed.
|
||||
__ cmp(ecx, Immediate(name));
|
||||
__ cmp(eax, Immediate(name));
|
||||
__ j(not_equal, &miss);
|
||||
|
||||
GenerateLoadFunctionPrototype(masm(), edx, eax, ebx, &miss);
|
||||
GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
|
||||
__ bind(&miss);
|
||||
__ DecrementCounter(counters->keyed_load_function_prototype(), 1);
|
||||
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
|
||||
|
@ -3083,7 +3077,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
|
|||
Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
|
||||
Handle<Map> receiver_map) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- ecx : key
|
||||
// -- eax : key
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
|
@ -3104,7 +3098,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
|
|||
MapHandleList* receiver_maps,
|
||||
CodeHandleList* handler_ics) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- ecx : key
|
||||
// -- eax : key
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
|
@ -3268,7 +3262,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
|
|||
void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
|
||||
MacroAssembler* masm) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- ecx : key
|
||||
// -- eax : key
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
// -----------------------------------
|
||||
|
@ -3276,15 +3270,21 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
|
|||
|
||||
// This stub is meant to be tail-jumped to, the receiver must already
|
||||
// have been verified by the caller to not be a smi.
|
||||
__ JumpIfNotSmi(ecx, &miss_force_generic);
|
||||
__ mov(ebx, ecx);
|
||||
__ JumpIfNotSmi(eax, &miss_force_generic);
|
||||
__ mov(ebx, eax);
|
||||
__ SmiUntag(ebx);
|
||||
__ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
|
||||
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
|
||||
|
||||
// Push receiver on the stack to free up a register for the dictionary
|
||||
// probing.
|
||||
__ push(edx);
|
||||
__ LoadFromNumberDictionary(&slow, eax, ecx, ebx, edx, edi, eax);
|
||||
__ LoadFromNumberDictionary(&slow,
|
||||
ecx,
|
||||
eax,
|
||||
ebx,
|
||||
edx,
|
||||
edi,
|
||||
eax);
|
||||
// Pop receiver before returning.
|
||||
__ pop(edx);
|
||||
__ ret(0);
|
||||
|
@ -3293,6 +3293,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
|
|||
__ pop(edx);
|
||||
|
||||
// ----------- S t a t e -------------
|
||||
// -- eax : value
|
||||
// -- ecx : key
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
|
@ -3304,6 +3305,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
|
|||
|
||||
__ bind(&miss_force_generic);
|
||||
// ----------- S t a t e -------------
|
||||
// -- eax : value
|
||||
// -- ecx : key
|
||||
// -- edx : receiver
|
||||
// -- esp[0] : return address
|
||||
|
@ -3315,44 +3317,11 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
|
|||
}
|
||||
|
||||
|
||||
static void GenerateSmiKeyCheck(MacroAssembler* masm,
|
||||
Register key,
|
||||
Register scratch,
|
||||
XMMRegister xmm_scratch0,
|
||||
XMMRegister xmm_scratch1,
|
||||
Label* fail) {
|
||||
// Check that key is a smi and if SSE2 is available a heap number
|
||||
// containing a smi and branch if the check fails.
|
||||
if (CpuFeatures::IsSupported(SSE2)) {
|
||||
CpuFeatures::Scope use_sse2(SSE2);
|
||||
Label key_ok;
|
||||
__ JumpIfSmi(key, &key_ok);
|
||||
__ cmp(FieldOperand(key, HeapObject::kMapOffset),
|
||||
Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
|
||||
__ j(not_equal, fail);
|
||||
__ movdbl(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
|
||||
__ cvttsd2si(scratch, Operand(xmm_scratch0));
|
||||
__ cvtsi2sd(xmm_scratch1, scratch);
|
||||
__ ucomisd(xmm_scratch1, xmm_scratch0);
|
||||
__ j(not_equal, fail);
|
||||
__ j(parity_even, fail); // NaN.
|
||||
// Check if the key fits in the smi range.
|
||||
__ cmp(scratch, 0xc0000000);
|
||||
__ j(sign, fail);
|
||||
__ SmiTag(scratch);
|
||||
__ mov(key, scratch);
|
||||
__ bind(&key_ok);
|
||||
} else {
|
||||
__ JumpIfNotSmi(key, fail);
|
||||
}
|
||||
}
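
// A standalone sketch (assumed helper name, not part of this diff) of the
// key check emitted above: the double must survive the truncate/convert
// round trip, must not be NaN, and must fit the 31-bit smi range that the
// "cmp(scratch, 0xc0000000); j(sign, fail)" pair tests. The checks are
// reordered here so that every conversion stays within defined C++ behavior.
#include <cmath>
#include <cstdint>

static bool KeyToSmi(double key, int32_t* smi_out) {
  if (std::isnan(key)) return false;                        // j(parity_even, fail)
  if (key < -1073741824.0 || key > 1073741823.0) return false;  // smi range
  int32_t truncated = static_cast<int32_t>(key);            // cvttsd2si
  if (static_cast<double>(truncated) != key) return false;  // ucomisd round trip
  *smi_out = truncated * 2;                                 // SmiTag: shift in the tag bit
  return true;
}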


void KeyedLoadStubCompiler::GenerateLoadExternalArray(
    MacroAssembler* masm,
    ElementsKind elements_kind) {
  // ----------- S t a t e -------------
  //  -- ecx    : key
  //  -- eax    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------

@@ -3361,41 +3330,41 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
  // Check that the key is a smi.
  __ JumpIfNotSmi(eax, &miss_force_generic);

  // Check that the index is in range.
  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
  __ cmp(eax, FieldOperand(ebx, ExternalArray::kLengthOffset));
  // Unsigned comparison catches both negative and too-large values.
  __ j(above_equal, &miss_force_generic);
  __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
  // ebx: base pointer of external storage
  switch (elements_kind) {
    case EXTERNAL_BYTE_ELEMENTS:
      __ SmiUntag(ecx);  // Untag the index.
      __ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
      __ SmiUntag(eax);  // Untag the index.
      __ movsx_b(eax, Operand(ebx, eax, times_1, 0));
      break;
    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
    case EXTERNAL_PIXEL_ELEMENTS:
      __ SmiUntag(ecx);  // Untag the index.
      __ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
      __ SmiUntag(eax);  // Untag the index.
      __ movzx_b(eax, Operand(ebx, eax, times_1, 0));
      break;
    case EXTERNAL_SHORT_ELEMENTS:
      __ movsx_w(eax, Operand(ebx, ecx, times_1, 0));
      __ movsx_w(eax, Operand(ebx, eax, times_1, 0));
      break;
    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
      __ movzx_w(eax, Operand(ebx, ecx, times_1, 0));
      __ movzx_w(eax, Operand(ebx, eax, times_1, 0));
      break;
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
    case EXTERNAL_INT_ELEMENTS:
      __ mov(eax, Operand(ebx, ecx, times_2, 0));
      __ mov(ecx, Operand(ebx, eax, times_2, 0));
      break;
    case EXTERNAL_FLOAT_ELEMENTS:
      __ fld_s(Operand(ebx, ecx, times_2, 0));
      __ fld_s(Operand(ebx, eax, times_2, 0));
      break;
    case EXTERNAL_DOUBLE_ELEMENTS:
      __ fld_d(Operand(ebx, ecx, times_4, 0));
      __ fld_d(Operand(ebx, eax, times_4, 0));
      break;
    default:
      UNREACHABLE();

@@ -3403,7 +3372,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
  }

  // For integer array types:
  // eax: value
  // ecx: value
  // For floating-point array type:
  // FP(0): value

@@ -3414,17 +3383,18 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
    // it to a HeapNumber.
    Label box_int;
    if (elements_kind == EXTERNAL_INT_ELEMENTS) {
      __ cmp(eax, 0xc0000000);
      __ cmp(ecx, 0xC0000000);
      __ j(sign, &box_int);
    } else {
      ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
      // The test is different for unsigned int values. Since we need
      // the value to be in the range of a positive smi, we can't
      // handle either of the top two bits being set in the value.
      __ test(eax, Immediate(0xc0000000));
      __ test(ecx, Immediate(0xC0000000));
      __ j(not_zero, &box_int);
    }

    __ mov(eax, ecx);
    __ SmiTag(eax);
    __ ret(0);

@@ -3433,31 +3403,33 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
    // Allocate a HeapNumber for the int and perform int-to-double
    // conversion.
    if (elements_kind == EXTERNAL_INT_ELEMENTS) {
      __ push(eax);
      __ push(ecx);
      __ fild_s(Operand(esp, 0));
      __ pop(eax);
      __ pop(ecx);
    } else {
      ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
      // Need to zero-extend the value.
      // There's no fild variant for unsigned values, so zero-extend
      // to a 64-bit int manually.
      __ push(Immediate(0));
      __ push(eax);
      __ push(ecx);
      __ fild_d(Operand(esp, 0));
      __ pop(eax);
      __ pop(eax);
      __ pop(ecx);
      __ pop(ecx);
    }
    // FP(0): value
    __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
    __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
    // Set the value.
    __ mov(eax, ecx);
    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
    __ ret(0);
  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
             elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    // For the floating-point array type, we need to always allocate a
    // HeapNumber.
    __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
    __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
    // Set the value.
    __ mov(eax, ecx);
    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
    __ ret(0);
  } else {

@@ -3477,7 +3449,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
  __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);

  // ----------- S t a t e -------------
  //  -- ecx    : key
  //  -- eax    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------

@@ -3486,7 +3458,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
  __ jmp(ic, RelocInfo::CODE_TARGET);

  // ----------- S t a t e -------------
  //  -- ecx    : key
  //  -- eax    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------

@@ -3503,8 +3475,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
    MacroAssembler* masm,
    ElementsKind elements_kind) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ecx    : key
  //  -- eax    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------

@@ -3513,8 +3484,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
  // Check that the key is a smi.
  __ JumpIfNotSmi(ecx, &miss_force_generic);

  // Check that the index is in range.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));

@@ -3609,39 +3580,12 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
  // (code-stubs-ia32.cc) is roughly what is needed here though the
  // conversion failure case does not need to be handled.
  if (CpuFeatures::IsSupported(SSE2)) {
    if ((elements_kind == EXTERNAL_INT_ELEMENTS ||
         elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) &&
        CpuFeatures::IsSupported(SSE3)) {
      CpuFeatures::Scope scope(SSE3);
      // fisttp stores values as signed integers. To represent the
      // entire range of int and unsigned int arrays, store as a
      // 64-bit int and discard the high 32 bits.
      __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
      __ sub(esp, Immediate(2 * kPointerSize));
      __ fisttp_d(Operand(esp, 0));

      // If conversion failed (NaN, infinity, or a number outside
      // signed int64 range), the result is 0x8000000000000000, and
      // we must handle this case in the runtime.
      Label ok;
      __ cmp(Operand(esp, kPointerSize), Immediate(0x80000000u));
      __ j(not_equal, &ok);
      __ cmp(Operand(esp, 0), Immediate(0));
      __ j(not_equal, &ok);
      __ add(esp, Immediate(2 * kPointerSize));  // Restore the stack.
      __ jmp(&slow);

      __ bind(&ok);
      __ pop(ebx);
      __ add(esp, Immediate(kPointerSize));
      __ mov(Operand(edi, ecx, times_2, 0), ebx);
    } else {
    if (elements_kind != EXTERNAL_INT_ELEMENTS &&
        elements_kind != EXTERNAL_UNSIGNED_INT_ELEMENTS) {
      ASSERT(CpuFeatures::IsSupported(SSE2));
      CpuFeatures::Scope scope(SSE2);
      __ cvttsd2si(ebx, FieldOperand(eax, HeapNumber::kValueOffset));
      __ cmp(ebx, 0x80000000u);
      __ j(equal, &slow);
      // ebx: untagged integer value
      // ecx: untagged integer value
      switch (elements_kind) {
        case EXTERNAL_PIXEL_ELEMENTS:
          __ ClampUint8(ebx);

@@ -3655,14 +3599,41 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
        case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
          __ mov_w(Operand(edi, ecx, times_1, 0), ebx);
          break;
        case EXTERNAL_INT_ELEMENTS:
        case EXTERNAL_UNSIGNED_INT_ELEMENTS:
          __ mov(Operand(edi, ecx, times_2, 0), ebx);
          break;
        default:
          UNREACHABLE();
          break;
      }
    } else {
      if (CpuFeatures::IsSupported(SSE3)) {
        CpuFeatures::Scope scope(SSE3);
        // fisttp stores values as signed integers. To represent the
        // entire range of int and unsigned int arrays, store as a
        // 64-bit int and discard the high 32 bits.
        // If the value is NaN or +/-infinity, the result is 0x80000000,
        // which is automatically zero when taken mod 2^n, n < 32.
        __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
        __ sub(esp, Immediate(2 * kPointerSize));
        __ fisttp_d(Operand(esp, 0));
        __ pop(ebx);
        __ add(esp, Immediate(kPointerSize));
      } else {
        ASSERT(CpuFeatures::IsSupported(SSE2));
        CpuFeatures::Scope scope(SSE2);
        // We can easily implement the correct rounding behavior for the
        // range [0, 2^31-1]. For the time being, to keep this code simple,
        // make the slow runtime call for values outside this range.
        // Note: we could do better for signed int arrays.
        __ movd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
        // We will need the key if we have to make the slow runtime call.
        __ push(ebx);
        __ LoadPowerOf2(xmm1, ebx, 31);
        __ pop(ebx);
        __ ucomisd(xmm1, xmm0);
        __ j(above_equal, &slow);
        __ cvttsd2si(ebx, Operand(xmm0));
      }
      // ebx: untagged integer value
      __ mov(Operand(edi, ecx, times_2, 0), ebx);
    }
    __ ret(0);  // Return original value.
  }
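
// A portable sketch (hypothetical helper, not code from this diff) of the
// truncation the int/unsigned-int store paths above rely on: ToInt32 and
// ToUint32 agree mod 2^32, so "fisttp_d, then keep only the low 32 bits"
// serves both array types. NaN and infinity produce the x87 indefinite
// value, whose low 32 bits are zero; this sketch returns zero for them too.
#include <cmath>
#include <cstdint>

static uint32_t TruncateForIntArray(double value) {
  if (!std::isfinite(value)) return 0;     // NaN/Inf end up storing zero.
  double t = std::trunc(value);            // Round toward zero, like fisttp.
  double m = std::fmod(t, 4294967296.0);   // Keep the low 32 bits (mod 2^32).
  if (m < 0) m += 4294967296.0;
  return static_cast<uint32_t>(m);
}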

@@ -3700,7 +3671,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(

void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- ecx    : key
  //  -- eax    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------

@@ -3709,19 +3680,19 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
  // Check that the key is a smi.
  __ JumpIfNotSmi(eax, &miss_force_generic);

  // Get the elements array.
  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
  __ AssertFastElements(eax);
  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
  __ AssertFastElements(ecx);

  // Check that the key is within bounds.
  __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
  __ j(above_equal, &miss_force_generic);

  // Load the result and make sure it's not the hole.
  __ mov(ebx, Operand(eax, ecx, times_2,
  __ mov(ebx, Operand(ecx, eax, times_2,
                      FixedArray::kHeaderSize - kHeapObjectTag));
  __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
  __ j(equal, &miss_force_generic);

@@ -3738,7 +3709,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
    MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- ecx    : key
  //  -- eax    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------

@@ -3747,38 +3718,39 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
  // Check that the key is a smi.
  __ JumpIfNotSmi(eax, &miss_force_generic);

  // Get the elements array.
  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
  __ AssertFastElements(eax);
  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
  __ AssertFastElements(ecx);

  // Check that the key is within bounds.
  __ cmp(ecx, FieldOperand(eax, FixedDoubleArray::kLengthOffset));
  __ cmp(eax, FieldOperand(ecx, FixedDoubleArray::kLengthOffset));
  __ j(above_equal, &miss_force_generic);

  // Check for the hole
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmp(FieldOperand(eax, ecx, times_4, offset), Immediate(kHoleNanUpper32));
  __ cmp(FieldOperand(ecx, eax, times_4, offset), Immediate(kHoleNanUpper32));
  __ j(equal, &miss_force_generic);

  // Always allocate a heap number for the result.
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope use_sse2(SSE2);
    __ movdbl(xmm0, FieldOperand(eax, ecx, times_4,
    __ movdbl(xmm0, FieldOperand(ecx, eax, times_4,
                                 FixedDoubleArray::kHeaderSize));
  } else {
    __ fld_d(FieldOperand(eax, ecx, times_4, FixedDoubleArray::kHeaderSize));
    __ fld_d(FieldOperand(ecx, eax, times_4, FixedDoubleArray::kHeaderSize));
  }
  __ AllocateHeapNumber(eax, ebx, edi, &slow_allocate_heapnumber);
  __ AllocateHeapNumber(ecx, ebx, edi, &slow_allocate_heapnumber);
  // Set the value.
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope use_sse2(SSE2);
    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
    __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
  } else {
    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
    __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
  }
  __ mov(eax, ecx);
  __ ret(0);

  __ bind(&slow_allocate_heapnumber);

@@ -3815,8 +3787,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
  // Check that the key is a smi.
  __ JumpIfNotSmi(ecx, &miss_force_generic);

  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
    __ JumpIfNotSmi(eax, &transition_elements_kind);

@@ -3970,8 +3942,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
  // Check that the key is a smi.
  __ JumpIfNotSmi(ecx, &miss_force_generic);

  // Get the elements array.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));

@@ -4032,7 +4004,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(

  int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
  __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);

  // Restore the key, which is known to be the array length.
  __ mov(ecx, Immediate(0));

@@ -1053,33 +1053,18 @@ Handle<Code> KeyedLoadIC::ComputePolymorphicStub(
}


static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
  // This helper implements a few common fast cases for converting
  // non-smi keys of keyed loads/stores to a smi or a string.
  if (key->IsHeapNumber()) {
    double value = Handle<HeapNumber>::cast(key)->value();
    if (isnan(value)) {
      key = isolate->factory()->nan_symbol();
    } else {
      int int_value = FastD2I(value);
      if (value == int_value && Smi::IsValid(int_value)) {
        key = Handle<Smi>(Smi::FromInt(int_value));
      }
    }
  } else if (key->IsUndefined()) {
    key = isolate->factory()->undefined_symbol();
  }
  return key;
}
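
// A standalone sketch of the fast cases in TryConvertKey (illustrative types;
// the real code works on V8 handles and symbols): NaN becomes the string key
// "NaN", and a heap number holding an exact smi-ranged integer becomes an
// integer key, so that o[2.0] and o[2] take the same path.
#include <cmath>
#include <cstdint>
#include <string>
#include <variant>

using Key = std::variant<int32_t, double, std::string>;

static Key NormalizeKey(double value) {
  if (std::isnan(value)) return std::string("NaN");       // nan_symbol analogue
  if (value >= -1073741824.0 && value <= 1073741823.0) {  // Smi::IsValid analogue
    int32_t int_value = static_cast<int32_t>(value);      // FastD2I analogue
    if (static_cast<double>(int_value) == value) return int_value;
  }
  return value;  // Left for the generic number-key path.
}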

MaybeObject* KeyedLoadIC::Load(State state,
                               Handle<Object> object,
                               Handle<Object> key,
                               bool force_generic_stub) {
  // Check for values that can be converted into a symbol directly or
  // is representable as a smi.
  key = TryConvertKey(key, isolate());
  // Check for values that can be converted into a symbol.
  // TODO(1295): Remove this code.
  if (key->IsHeapNumber() &&
      isnan(Handle<HeapNumber>::cast(key)->value())) {
    key = isolate()->factory()->nan_symbol();
  } else if (key->IsUndefined()) {
    key = isolate()->factory()->undefined_symbol();
  }

  if (key->IsSymbol()) {
    Handle<String> name = Handle<String>::cast(key);

@@ -1776,10 +1761,6 @@ MaybeObject* KeyedStoreIC::Store(State state,
                                 Handle<Object> key,
                                 Handle<Object> value,
                                 bool force_generic) {
  // Check for values that can be converted into a symbol directly or
  // is representable as a smi.
  key = TryConvertKey(key, isolate());

  if (key->IsSymbol()) {
    Handle<String> name = Handle<String>::cast(key);

@@ -830,19 +830,6 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());

        VisitGlobalContext(ctx, &marking_visitor);
      } else if (map->instance_type() == JS_FUNCTION_TYPE) {
        marking_visitor.VisitPointers(
            HeapObject::RawField(obj, JSFunction::kPropertiesOffset),
            HeapObject::RawField(obj, JSFunction::kCodeEntryOffset));

        marking_visitor.VisitCodeEntry(
            obj->address() + JSFunction::kCodeEntryOffset);

        marking_visitor.VisitPointers(
            HeapObject::RawField(obj,
                                 JSFunction::kCodeEntryOffset + kPointerSize),
            HeapObject::RawField(obj,
                                 JSFunction::kNonWeakFieldsEndOffset));
      } else {
        obj->IterateBody(map->instance_type(), size, &marking_visitor);
      }

@@ -79,7 +79,7 @@ void Interface::DoAdd(
    PrintF("%*sthis = ", Nesting::current(), "");
    this->Print(Nesting::current());
    PrintF("%*s%s : ", Nesting::current(), "",
           (*static_cast<String**>(name))->ToAsciiArray());
           (*reinterpret_cast<String**>(name))->ToAsciiArray());
    interface->Print(Nesting::current());
  }
#endif

@@ -97,7 +97,7 @@ void Interface::DoAdd(
#ifdef DEBUG
    Nesting nested;
#endif
    static_cast<Interface*>(p->value)->Unify(interface, ok);
    reinterpret_cast<Interface*>(p->value)->Unify(interface, ok);
  }

#ifdef DEBUG

@@ -180,15 +180,6 @@ void Interface::DoUnify(Interface* that, bool* ok) {
    return;
  }

  // Merge instance.
  if (!that->instance_.is_null()) {
    if (!this->instance_.is_null() && *this->instance_ != *that->instance_) {
      *ok = false;
      return;
    }
    this->instance_ = that->instance_;
  }

  // Merge interfaces.
  this->flags_ |= that->flags_;
  that->forward_ = this;

@@ -86,12 +86,6 @@ class Interface : public ZoneObject {
    if (*ok) Chase()->flags_ |= MODULE;
  }

  // Set associated instance object.
  void MakeSingleton(Handle<JSModule> instance, bool* ok) {
    *ok = IsModule() && Chase()->instance_.is_null();
    if (*ok) Chase()->instance_ = instance;
  }

  // Do not allow any further refinements, directly or through unification.
  void Freeze(bool* ok) {
    *ok = IsValue() || IsModule();

@@ -101,6 +95,9 @@ class Interface : public ZoneObject {
  // ---------------------------------------------------------------------------
  // Accessors.

  // Look up an exported name. Returns NULL if not (yet) defined.
  Interface* Lookup(Handle<String> name);

  // Check whether this is still a fully undetermined type.
  bool IsUnknown() { return Chase()->flags_ == NONE; }

@@ -113,42 +110,6 @@ class Interface : public ZoneObject {
  // Check whether this is closed (i.e. fully determined).
  bool IsFrozen() { return Chase()->flags_ & FROZEN; }

  Handle<JSModule> Instance() { return Chase()->instance_; }

  // Look up an exported name. Returns NULL if not (yet) defined.
  Interface* Lookup(Handle<String> name);

  // ---------------------------------------------------------------------------
  // Iterators.

  // Use like:
  //   for (auto it = interface->iterator(); !it.done(); it.Advance()) {
  //     ... it.name() ... it.interface() ...
  //   }
  class Iterator {
   public:
    bool done() const { return entry_ == NULL; }
    Handle<String> name() const {
      ASSERT(!done());
      return Handle<String>(*static_cast<String**>(entry_->key));
    }
    Interface* interface() const {
      ASSERT(!done());
      return static_cast<Interface*>(entry_->value);
    }
    void Advance() { entry_ = exports_->Next(entry_); }

   private:
    friend class Interface;
    explicit Iterator(const ZoneHashMap* exports)
        : exports_(exports), entry_(exports ? exports->Start() : NULL) {}

    const ZoneHashMap* exports_;
    ZoneHashMap::Entry* entry_;
  };

  Iterator iterator() const { return Iterator(this->exports_); }

  // ---------------------------------------------------------------------------
  // Debugging.
#ifdef DEBUG

@@ -168,7 +129,6 @@ class Interface : public ZoneObject {
  int flags_;
  Interface* forward_;  // Unification link
  ZoneHashMap* exports_;  // Module exports and their types (allocated lazily)
  Handle<JSModule> instance_;

  explicit Interface(int flags)
      : flags_(flags),

@@ -33,9 +33,8 @@
#include "utils.h"
#include "ast.h"
#include "bytecodes-irregexp.h"
#include "interpreter-irregexp.h"
#include "jsregexp.h"
#include "regexp-macro-assembler.h"
#include "interpreter-irregexp.h"

namespace v8 {
namespace internal {

@@ -450,37 +449,6 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
        }
        break;
      }
      BYTECODE(CHECK_CHAR_IN_RANGE) {
        uint32_t from = Load16Aligned(pc + 4);
        uint32_t to = Load16Aligned(pc + 6);
        if (from <= current_char && current_char <= to) {
          pc = code_base + Load32Aligned(pc + 8);
        } else {
          pc += BC_CHECK_CHAR_IN_RANGE_LENGTH;
        }
        break;
      }
      BYTECODE(CHECK_CHAR_NOT_IN_RANGE) {
        uint32_t from = Load16Aligned(pc + 4);
        uint32_t to = Load16Aligned(pc + 6);
        if (from > current_char || current_char > to) {
          pc = code_base + Load32Aligned(pc + 8);
        } else {
          pc += BC_CHECK_CHAR_NOT_IN_RANGE_LENGTH;
        }
        break;
      }
      BYTECODE(CHECK_BIT_IN_TABLE) {
        int mask = RegExpMacroAssembler::kTableMask;
        byte b = pc[8 + ((current_char & mask) >> kBitsPerByteLog2)];
        int bit = (current_char & (kBitsPerByte - 1));
        if ((b & (1 << bit)) != 0) {
          pc = code_base + Load32Aligned(pc + 4);
        } else {
          pc += BC_CHECK_BIT_IN_TABLE_LENGTH;
        }
        break;
      }
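      // The CHECK_BIT_IN_TABLE test above, as a self-contained sketch
      // (assumed layout: a 16-byte table holding one bit per character
      // folded mod 128, matching kTableMask == 127; not part of this diff):
      //
      //   static bool CharInBitTable(const uint8_t table[16], uint32_t c) {
      //     c &= 127;                               // kTableMask fold
      //     uint8_t entry = table[c >> 3];          // Byte holding the bit.
      //     return (entry & (1u << (c & 7))) != 0;  // Test the character's bit.
      //   }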
      BYTECODE(CHECK_LT) {
        uint32_t limit = (insn >> BYTECODE_SHIFT);
        if (current_char < limit) {

@@ -520,6 +488,59 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
          pc += BC_CHECK_REGISTER_EQ_POS_LENGTH;
        }
        break;
      BYTECODE(LOOKUP_MAP1) {
        // Look up character in a bitmap. If we find a 0, then jump to the
        // location at pc + 8. Otherwise fall through!
        int index = current_char - (insn >> BYTECODE_SHIFT);
        byte map = code_base[Load32Aligned(pc + 4) + (index >> 3)];
        map = ((map >> (index & 7)) & 1);
        if (map == 0) {
          pc = code_base + Load32Aligned(pc + 8);
        } else {
          pc += BC_LOOKUP_MAP1_LENGTH;
        }
        break;
      }
      BYTECODE(LOOKUP_MAP2) {
        // Look up character in a half-nibble map. If we find 00, then jump to
        // the location at pc + 8. If we find 01 then jump to location at
        // pc + 11, etc.
        int index = (current_char - (insn >> BYTECODE_SHIFT)) << 1;
        byte map = code_base[Load32Aligned(pc + 3) + (index >> 3)];
        map = ((map >> (index & 7)) & 3);
        if (map < 2) {
          if (map == 0) {
            pc = code_base + Load32Aligned(pc + 8);
          } else {
            pc = code_base + Load32Aligned(pc + 12);
          }
        } else {
          if (map == 2) {
            pc = code_base + Load32Aligned(pc + 16);
          } else {
            pc = code_base + Load32Aligned(pc + 20);
          }
        }
        break;
      }
      BYTECODE(LOOKUP_MAP8) {
        // Look up character in a byte map. Use the byte as an index into a
        // table that follows this instruction immediately.
        int index = current_char - (insn >> BYTECODE_SHIFT);
        byte map = code_base[Load32Aligned(pc + 4) + index];
        const byte* new_pc = code_base + Load32Aligned(pc + 8) + (map << 2);
        pc = code_base + Load32Aligned(new_pc);
        break;
      }
      BYTECODE(LOOKUP_HI_MAP8) {
        // Look up high byte of this character in a byte map. Use the byte as
        // an index into a table that follows this instruction immediately.
        int index = (current_char >> 8) - (insn >> BYTECODE_SHIFT);
        byte map = code_base[Load32Aligned(pc + 4) + index];
        const byte* new_pc = code_base + Load32Aligned(pc + 8) + (map << 2);
        pc = code_base + Load32Aligned(new_pc);
        break;
      }
      BYTECODE(CHECK_NOT_REGS_EQUAL)
        if (registers[insn >> BYTECODE_SHIFT] ==
            registers[Load32Aligned(pc + 4)]) {

@@ -1430,7 +1430,6 @@ void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {

Isolate::Isolate()
    : state_(UNINITIALIZED),
      embedder_data_(NULL),
      entry_stack_(NULL),
      stack_trace_nesting_level_(0),
      incomplete_message_(NULL),

@@ -1473,6 +1472,7 @@ Isolate::Isolate()
      string_tracker_(NULL),
      regexp_stack_(NULL),
      date_cache_(NULL),
      embedder_data_(NULL),
      context_exit_happened_(false) {
  TRACE_ISOLATE(constructor);

@@ -1857,13 +1857,6 @@ bool Isolate::Init(Deserializer* des) {
    LOG(this, LogCompiledFunctions());
  }

  CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, state_)),
           Internals::kIsolateStateOffset);
  CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)),
           Internals::kIsolateEmbedderDataOffset);
  CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
           Internals::kIsolateRootsOffset);

  state_ = INITIALIZED;
  time_millis_at_init_ = OS::TimeCurrentMillis();
  return true;

@@ -422,7 +422,7 @@ class Isolate {
  enum AddressId {
#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
    FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
#undef DECLARE_ENUM
#undef C
    kIsolateAddressCount
  };

@@ -1038,18 +1038,6 @@ class Isolate {
  friend struct GlobalState;
  friend struct InitializeGlobalState;

  enum State {
    UNINITIALIZED,    // Some components may not have been allocated.
    INITIALIZED       // All components are fully initialized.
  };

  // These fields are accessed through the API, offsets must be kept in sync
  // with v8::internal::Internals (in include/v8.h) constants. This is also
  // verified in Isolate::Init() using runtime checks.
  State state_;  // Will be padded to kApiPointerSize.
  void* embedder_data_;
  Heap heap_;

  // The per-process lock should be acquired before the ThreadDataTable is
  // modified.
  class ThreadDataTable {

@@ -1107,6 +1095,14 @@ class Isolate {
  static void SetIsolateThreadLocals(Isolate* isolate,
                                     PerIsolateThreadData* data);

  enum State {
    UNINITIALIZED,    // Some components may not have been allocated.
    INITIALIZED       // All components are fully initialized.
  };

  State state_;
  EntryStackItem* entry_stack_;

  // Allocate and insert PerIsolateThreadData into the ThreadDataTable
  // (regardless of whether such data already exists).
  PerIsolateThreadData* AllocatePerIsolateThreadData(ThreadId thread_id);

@@ -1150,13 +1146,13 @@ class Isolate {
  // the Error object.
  bool IsErrorObject(Handle<Object> obj);

  EntryStackItem* entry_stack_;
  int stack_trace_nesting_level_;
  StringStream* incomplete_message_;
  // The preallocated memory thread singleton.
  PreallocatedMemoryThread* preallocated_memory_thread_;
  Address isolate_addresses_[kIsolateAddressCount + 1];  // NOLINT
  NoAllocationStringAllocator* preallocated_message_space_;

  Bootstrapper* bootstrapper_;
  RuntimeProfiler* runtime_profiler_;
  CompilationCache* compilation_cache_;

@@ -1165,6 +1161,7 @@ class Isolate {
  Mutex* break_access_;
  Atomic32 debugger_initialized_;
  Mutex* debugger_access_;
  Heap heap_;
  Logger* logger_;
  StackGuard stack_guard_;
  StatsTable* stats_table_;

@@ -1205,8 +1202,11 @@ class Isolate {
  unibrow::Mapping<unibrow::Ecma262Canonicalize>
      regexp_macro_assembler_canonicalize_;
  RegExpStack* regexp_stack_;

  DateCache* date_cache_;

  unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
  void* embedder_data_;

  // The garbage collector should be a little more aggressive when it knows
  // that a context was recently exited.

(Diff for one file not shown because of its size.)

@@ -40,7 +40,6 @@ class RegExpCompiler;
class RegExpMacroAssembler;
class RegExpNode;
class RegExpTree;
class BoyerMooreLookahead;

class RegExpImpl {
 public:

@@ -191,10 +190,8 @@ class RegExpImpl {
  static String* last_ascii_string_;
  static String* two_byte_cached_string_;

  static bool CompileIrregexp(
      Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
  static inline bool EnsureCompiledIrregexp(
      Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
  static bool CompileIrregexp(Handle<JSRegExp> re, bool is_ascii);
  static inline bool EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii);


  // Set the subject cache. The previous string buffer is not deleted, so the

@@ -225,8 +222,48 @@ enum ElementInSetsRelation {
};


// Represents code units in the range from from_ to to_, both ends are
// inclusive.
// Represents the relation of two sets.
// Sets can be either disjoint, partially or fully overlapping, or equal.
class SetRelation BASE_EMBEDDED {
 public:
  // Relation is represented by a bit saying whether there are elements in
  // one set that is not in the other, and a bit saying that there are elements
  // that are in both sets.

  // Location of an element. Corresponds to the internal areas of
  // a Venn diagram.
  enum {
    kInFirst = 1 << kInsideFirst,
    kInSecond = 1 << kInsideSecond,
    kInBoth = 1 << kInsideBoth
  };
  SetRelation() : bits_(0) {}
  ~SetRelation() {}
  // Add the existence of objects in a particular
  void SetElementsInFirstSet() { bits_ |= kInFirst; }
  void SetElementsInSecondSet() { bits_ |= kInSecond; }
  void SetElementsInBothSets() { bits_ |= kInBoth; }
  // Check the currently known relation of the sets (common functions only,
  // for other combinations, use value() to get the bits and check them
  // manually).
  // Sets are completely disjoint.
  bool Disjoint() { return (bits_ & kInBoth) == 0; }
  // Sets are equal.
  bool Equals() { return (bits_ & (kInFirst | kInSecond)) == 0; }
  // First set contains second.
  bool Contains() { return (bits_ & kInSecond) == 0; }
  // Second set contains first.
  bool ContainedIn() { return (bits_ & kInFirst) == 0; }
  bool NonTrivialIntersection() {
    return (bits_ == (kInFirst | kInSecond | kInBoth));
  }
  int value() { return bits_; }

 private:
  int bits_;
};
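
// A usage sketch of the Venn-diagram bits above (standalone; the bit values
// here are illustrative stand-ins for the ElementInSetsRelation constants,
// and std::set is used in place of the compiler's range lists):
#include <set>

enum { kInFirstBit = 1, kInSecondBit = 2, kInBothBit = 4 };

template <typename T>
int ClassifySets(const std::set<T>& a, const std::set<T>& b) {
  int bits = 0;
  for (const T& x : a) bits |= b.count(x) ? kInBothBit : kInFirstBit;
  for (const T& x : b) {
    if (!a.count(x)) bits |= kInSecondBit;
  }
  return bits;  // (bits & kInBothBit) == 0 matches Disjoint();
                // (bits & (kInFirstBit | kInSecondBit)) == 0 matches Equals().
}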


class CharacterRange {
 public:
  CharacterRange() : from_(0), to_(0) { }

@@ -234,7 +271,7 @@ class CharacterRange {
  CharacterRange(void* null) { ASSERT_EQ(NULL, null); }  //NOLINT
  CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) { }
  static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges);
  static Vector<const int> GetWordBounds();
  static Vector<const uc16> GetWordBounds();
  static inline CharacterRange Singleton(uc16 value) {
    return CharacterRange(value, value);
  }

@@ -255,7 +292,7 @@ class CharacterRange {
  bool IsSingleton() { return (from_ == to_); }
  void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_ascii);
  static void Split(ZoneList<CharacterRange>* base,
                    Vector<const int> overlay,
                    Vector<const uc16> overlay,
                    ZoneList<CharacterRange>** included,
                    ZoneList<CharacterRange>** excluded);
  // Whether a range list is in canonical form: Ranges ordered by from value,

@@ -266,6 +303,28 @@ class CharacterRange {
  // adjacent ranges are merged. The resulting list may be shorter than the
  // original, but cannot be longer.
  static void Canonicalize(ZoneList<CharacterRange>* ranges);
  // Check how the set of characters defined by a CharacterRange list relates
  // to the set of word characters. List must be in canonical form.
  static SetRelation WordCharacterRelation(ZoneList<CharacterRange>* ranges);
  // Takes two character range lists (representing character sets) in canonical
  // form and merges them.
  // The characters that are only covered by the first set are added to
  // first_set_only_out. the characters that are only in the second set are
  // added to second_set_only_out, and the characters that are in both are
  // added to both_sets_out.
  // The pointers to first_set_only_out, second_set_only_out and both_sets_out
  // should be to empty lists, but they need not be distinct, and may be NULL.
  // If NULL, the characters are dropped, and if two arguments are the same
  // pointer, the result is the union of the two sets that would be created
  // if the pointers had been distinct.
  // This way, the Merge function can compute all the usual set operations:
  // union (all three out-sets are equal), intersection (only both_sets_out is
  // non-NULL), and set difference (only first_set is non-NULL).
  static void Merge(ZoneList<CharacterRange>* first_set,
                    ZoneList<CharacterRange>* second_set,
                    ZoneList<CharacterRange>* first_set_only_out,
                    ZoneList<CharacterRange>* second_set_only_out,
                    ZoneList<CharacterRange>* both_sets_out);
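
  // Hypothetical call sites illustrating the out-parameter conventions just
  // described (list allocation elided; not code from this diff):
  //
  //   // Intersection: only both_sets_out is non-NULL.
  //   CharacterRange::Merge(a, b, NULL, NULL, intersection);
  //
  //   // Difference (first minus second): only first_set_only_out is non-NULL.
  //   CharacterRange::Merge(a, b, difference, NULL, NULL);
  //
  //   // Union: pass the same list for all three out-parameters.
  //   CharacterRange::Merge(a, b, union_out, union_out, union_out);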
|
||||
// Negate the contents of a character range in canonical form.
|
||||
static void Negate(ZoneList<CharacterRange>* src,
|
||||
ZoneList<CharacterRange>* dst);
|
||||
|
@@ -416,8 +475,7 @@ struct NodeInfo {
        follows_newline_interest(false),
        follows_start_interest(false),
        at_end(false),
        visited(false),
        replacement_calculated(false) { }
        visited(false) { }

  // Returns true if the interests and assumptions of this node
  // match the given one.

@@ -467,7 +525,25 @@ struct NodeInfo {

  bool at_end: 1;
  bool visited: 1;
  bool replacement_calculated: 1;
};


class SiblingList {
 public:
  SiblingList() : list_(NULL) { }
  int length() {
    return list_ == NULL ? 0 : list_->length();
  }
  void Ensure(RegExpNode* parent) {
    if (list_ == NULL) {
      list_ = new ZoneList<RegExpNode*>(2);
      list_->Add(parent);
    }
  }
  void Add(RegExpNode* node) { list_->Add(node); }
  RegExpNode* Get(int index) { return list_->at(index); }
 private:
  ZoneList<RegExpNode*>* list_;
};

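One subtlety in the restored class above: Add() dereferences list_ without a NULL check, so callers are expected to call Ensure() first, which both allocates the list lazily and seeds it with the parent node. A hedged usage sketch (the node pointers are illustrative):

SiblingList siblings;                  // length() == 0, nothing allocated yet
siblings.Ensure(parent);               // allocates the ZoneList, adds parent
siblings.Add(clone);                   // safe only after Ensure()
RegExpNode* first = siblings.Get(0);   // == parent
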
@@ -523,14 +599,9 @@ class QuickCheckDetails {
};


extern int kUninitializedRegExpNodePlaceHolder;


class RegExpNode: public ZoneObject {
 public:
  RegExpNode() : replacement_(NULL), trace_count_(0) {
    bm_info_[0] = bm_info_[1] = NULL;
  }
  RegExpNode() : first_character_set_(NULL), trace_count_(0) { }
  virtual ~RegExpNode();
  virtual void Accept(NodeVisitor* visitor) = 0;
  // Generates a goto to this node or actually generates the code at this point.

@@ -564,50 +635,6 @@ class RegExpNode: public ZoneObject {
                            bool not_at_start) = 0;
  static const int kNodeIsTooComplexForGreedyLoops = -1;
  virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
  // Only returns the successor for a text node of length 1 that matches any
  // character and that has no guards on it.
  virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
      RegExpCompiler* compiler) {
    return NULL;
  }

  // Collects information on the possible code units (mod 128) that can match if
  // we look forward. This is used for a Boyer-Moore-like string searching
  // implementation. TODO(erikcorry): This should share more code with
  // EatsAtLeast, GetQuickCheckDetails. The budget argument is used to limit
  // the number of nodes we are willing to look at in order to create this data.
  static const int kFillInBMBudget = 200;
  virtual void FillInBMInfo(int offset,
                            int recursion_depth,
                            int budget,
                            BoyerMooreLookahead* bm,
                            bool not_at_start) {
    UNREACHABLE();
  }

  // If we know that the input is ASCII then there are some nodes that can
  // never match. This method returns a node that can be substituted for
  // itself, or NULL if the node can never match.
  virtual RegExpNode* FilterASCII(int depth) { return this; }
  // Helper for FilterASCII.
  RegExpNode* replacement() {
    ASSERT(info()->replacement_calculated);
    return replacement_;
  }
  RegExpNode* set_replacement(RegExpNode* replacement) {
    info()->replacement_calculated = true;
    replacement_ = replacement;
    return replacement;  // For convenience.
  }

  // We want to avoid recalculating the lookahead info, so we store it on the
  // node. Only info that is for this node is stored. We can tell that the
  // info is for this node when offset == 0, so the information is calculated
  // relative to this node.
  void SaveBMInfo(BoyerMooreLookahead* bm, bool not_at_start, int offset) {
    if (offset == 0) set_bm_info(not_at_start, bm);
  }

  Label* label() { return &label_; }
  // If non-generic code is generated for a node (i.e. the node is not at the
  // start of the trace) then it cannot be reused. This variable sets a limit

@@ -618,31 +645,72 @@ class RegExpNode: public ZoneObject {

  NodeInfo* info() { return &info_; }

  BoyerMooreLookahead* bm_info(bool not_at_start) {
    return bm_info_[not_at_start ? 1 : 0];
  void AddSibling(RegExpNode* node) { siblings_.Add(node); }

  // Static version of EnsureSibling that expresses the fact that the
  // result has the same type as the input.
  template <class C>
  static C* EnsureSibling(C* node, NodeInfo* info, bool* cloned) {
    return static_cast<C*>(node->EnsureSibling(info, cloned));
  }

  SiblingList* siblings() { return &siblings_; }
  void set_siblings(SiblingList* other) { siblings_ = *other; }

  // Return the set of possible next characters recognized by the regexp
  // (or a safe subset, potentially the set of all characters).
  ZoneList<CharacterRange>* FirstCharacterSet();

  // Compute (if possible within the budget of traversed nodes) the
  // possible first characters of the input matched by this node and
  // its continuation. Returns the remaining budget after the computation.
  // If the budget is spent, the result is negative, and the cached
  // first_character_set_ value isn't set.
  virtual int ComputeFirstCharacterSet(int budget);

  // Get and set the cached first character set value.
  ZoneList<CharacterRange>* first_character_set() {
    return first_character_set_;
  }
  void set_first_character_set(ZoneList<CharacterRange>* character_set) {
    first_character_set_ = character_set;
  }

 protected:
  enum LimitResult { DONE, CONTINUE };
  RegExpNode* replacement_;
  static const int kComputeFirstCharacterSetFail = -1;

  LimitResult LimitVersions(RegExpCompiler* compiler, Trace* trace);

  void set_bm_info(bool not_at_start, BoyerMooreLookahead* bm) {
    bm_info_[not_at_start ? 1 : 0] = bm;
  }
  // Returns a sibling of this node whose interests and assumptions
  // match the ones in the given node info. If no sibling exists NULL
  // is returned.
  RegExpNode* TryGetSibling(NodeInfo* info);

  // Returns a sibling of this node whose interests match the ones in
  // the given node info. The info must not contain any assertions.
  // If no node exists a new one will be created by cloning the current
  // node. The result will always be an instance of the same concrete
  // class as this node.
  RegExpNode* EnsureSibling(NodeInfo* info, bool* cloned);

  // Returns a clone of this node initialized using the copy constructor
  // of its concrete class. Note that the node may have to be pre-
  // processed before it is in a usable state.
  virtual RegExpNode* Clone() = 0;

 private:
  static const int kFirstCharBudget = 10;
  Label label_;
  NodeInfo info_;
  SiblingList siblings_;
  ZoneList<CharacterRange>* first_character_set_;
  // This variable keeps track of how many times code has been generated for
  // this node (in different traces). We don't keep track of where the
  // generated code is located unless the code is generated at the start of
  // a trace, in which case it is generic and can be reused by flushing the
  // deferred operations in the current trace and generating a goto.
  int trace_count_;
  BoyerMooreLookahead* bm_info_[2];
};

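The budget convention documented for ComputeFirstCharacterSet() above (pass a budget in, get the remainder back, a negative result meaning "gave up, nothing cached") can be shown with a small standalone sketch; the node structure and names below are illustrative, not V8's:

#include <cstdio>

struct Node {
  Node* next;   // continuation; NULL at the end of the chain
  bool cached;  // stands in for first_character_set_ being filled in
};

static const int kFail = -1;  // plays the role of kComputeFirstCharacterSetFail

int ComputeFirstCharacterSet(Node* node, int budget) {
  if (--budget < 0) return kFail;  // this node exhausts the budget
  if (node->next != NULL) {
    budget = ComputeFirstCharacterSet(node->next, budget);
    if (budget < 0) return kFail;  // propagate failure; cache nothing
  }
  node->cached = true;  // cache only when the whole walk succeeded
  return budget;        // remaining budget for the caller
}

int main() {
  Node c = { NULL, false };
  Node b = { &c, false };
  Node a = { &b, false };
  printf("%d\n", ComputeFirstCharacterSet(&a, 10));          // 7: 3 nodes seen
  printf("%d\n", ComputeFirstCharacterSet(&a, 2) == kFail);  // 1: budget spent
  return 0;
}
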
@@ -663,8 +731,8 @@ class Interval {
    return (from_ <= value) && (value <= to_);
  }
  bool is_empty() { return from_ == kNone; }
  int from() const { return from_; }
  int to() const { return to_; }
  int from() { return from_; }
  int to() { return to_; }
  static Interval Empty() { return Interval(); }
  static const int kNone = -1;
 private:

@@ -679,20 +747,6 @@ class SeqRegExpNode: public RegExpNode {
      : on_success_(on_success) { }
  RegExpNode* on_success() { return on_success_; }
  void set_on_success(RegExpNode* node) { on_success_ = node; }
  virtual RegExpNode* FilterASCII(int depth);
  virtual void FillInBMInfo(int offset,
                            int recursion_depth,
                            int budget,
                            BoyerMooreLookahead* bm,
                            bool not_at_start) {
    on_success_->FillInBMInfo(
        offset, recursion_depth + 1, budget - 1, bm, not_at_start);
    if (offset == 0) set_bm_info(not_at_start, bm);
  }

 protected:
  RegExpNode* FilterSuccessor(int depth);

 private:
  RegExpNode* on_success_;
};

@@ -739,14 +793,11 @@ class ActionNode: public SeqRegExpNode {
    return on_success()->GetQuickCheckDetails(
        details, compiler, filled_in, not_at_start);
  }
  virtual void FillInBMInfo(int offset,
                            int recursion_depth,
                            int budget,
                            BoyerMooreLookahead* bm,
                            bool not_at_start);
  Type type() { return type_; }
  // TODO(erikcorry): We should allow some action nodes in greedy loops.
  virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
  virtual ActionNode* Clone() { return new ActionNode(*this); }
  virtual int ComputeFirstCharacterSet(int budget);

 private:
  union {

@@ -809,15 +860,13 @@ class TextNode: public SeqRegExpNode {
  ZoneList<TextElement>* elements() { return elms_; }
  void MakeCaseIndependent(bool is_ascii);
  virtual int GreedyLoopTextLength();
  virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
      RegExpCompiler* compiler);
  virtual void FillInBMInfo(int offset,
                            int recursion_depth,
                            int budget,
                            BoyerMooreLookahead* bm,
                            bool not_at_start);
  virtual TextNode* Clone() {
    TextNode* result = new TextNode(*this);
    result->CalculateOffsets();
    return result;
  }
  void CalculateOffsets();
  virtual RegExpNode* FilterASCII(int depth);
  virtual int ComputeFirstCharacterSet(int budget);

 private:
  enum TextEmitPassType {

@@ -848,7 +897,12 @@ class AssertionNode: public SeqRegExpNode {
    AT_START,
    AT_BOUNDARY,
    AT_NON_BOUNDARY,
    AFTER_NEWLINE
    AFTER_NEWLINE,
    // Types not directly expressible in regexp syntax.
    // Used for modifying a boundary node if its following character is
    // known to be word and/or non-word.
    AFTER_NONWORD_CHARACTER,
    AFTER_WORD_CHARACTER
  };
  static AssertionNode* AtEnd(RegExpNode* on_success) {
    return new AssertionNode(AT_END, on_success);

@@ -874,20 +928,12 @@ class AssertionNode: public SeqRegExpNode {
                                    RegExpCompiler* compiler,
                                    int filled_in,
                                    bool not_at_start);
  virtual void FillInBMInfo(int offset,
                            int recursion_depth,
                            int budget,
                            BoyerMooreLookahead* bm,
                            bool not_at_start);
  virtual int ComputeFirstCharacterSet(int budget);
  virtual AssertionNode* Clone() { return new AssertionNode(*this); }
  AssertionNodeType type() { return type_; }
  void set_type(AssertionNodeType type) { type_ = type; }

 private:
  void EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace);
  enum IfPrevious { kIsNonWord, kIsWord };
  void BacktrackIfPrevious(RegExpCompiler* compiler,
                           Trace* trace,
                           IfPrevious backtrack_if_previous);
  AssertionNode(AssertionNodeType t, RegExpNode* on_success)
      : SeqRegExpNode(on_success), type_(t) { }
  AssertionNodeType type_;

@@ -915,11 +961,8 @@ class BackReferenceNode: public SeqRegExpNode {
                                    bool not_at_start) {
    return;
  }
  virtual void FillInBMInfo(int offset,
                            int recursion_depth,
                            int budget,
                            BoyerMooreLookahead* bm,
                            bool not_at_start);
  virtual BackReferenceNode* Clone() { return new BackReferenceNode(*this); }
  virtual int ComputeFirstCharacterSet(int budget);

 private:
  int start_reg_;

@@ -943,15 +986,7 @@ class EndNode: public RegExpNode {
    // Returning 0 from EatsAtLeast should ensure we never get here.
    UNREACHABLE();
  }
  virtual void FillInBMInfo(int offset,
                            int recursion_depth,
                            int budget,
                            BoyerMooreLookahead* bm,
                            bool not_at_start) {
    // Returning 0 from EatsAtLeast should ensure we never get here.
    UNREACHABLE();
  }

  virtual EndNode* Clone() { return new EndNode(*this); }
 private:
  Action action_;
};

@@ -1036,18 +1071,13 @@ class ChoiceNode: public RegExpNode {
                                    RegExpCompiler* compiler,
                                    int characters_filled_in,
                                    bool not_at_start);
  virtual void FillInBMInfo(int offset,
                            int recursion_depth,
                            int budget,
                            BoyerMooreLookahead* bm,
                            bool not_at_start);
  virtual ChoiceNode* Clone() { return new ChoiceNode(*this); }

  bool being_calculated() { return being_calculated_; }
  bool not_at_start() { return not_at_start_; }
  void set_not_at_start() { not_at_start_ = true; }
  void set_being_calculated(bool b) { being_calculated_ = b; }
  virtual bool try_to_emit_quick_check_for_alternative(int i) { return true; }
  virtual RegExpNode* FilterASCII(int depth);

 protected:
  int GreedyLoopTextLengthForAlternative(GuardedAlternative* alternative);

@@ -1059,7 +1089,7 @@ class ChoiceNode: public RegExpNode {
  void GenerateGuard(RegExpMacroAssembler* macro_assembler,
                     Guard* guard,
                     Trace* trace);
  int CalculatePreloadCharacters(RegExpCompiler* compiler, int eats_at_least);
  int CalculatePreloadCharacters(RegExpCompiler* compiler, bool not_at_start);
  void EmitOutOfLineContinuation(RegExpCompiler* compiler,
                                 Trace* trace,
                                 GuardedAlternative alternative,

@@ -1089,22 +1119,13 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
                                    RegExpCompiler* compiler,
                                    int characters_filled_in,
                                    bool not_at_start);
  virtual void FillInBMInfo(int offset,
                            int recursion_depth,
                            int budget,
                            BoyerMooreLookahead* bm,
                            bool not_at_start) {
    alternatives_->at(1).node()->FillInBMInfo(
        offset, recursion_depth + 1, budget - 1, bm, not_at_start);
    if (offset == 0) set_bm_info(not_at_start, bm);
  }
  // For a negative lookahead we don't emit the quick check for the
  // alternative that is expected to fail. This is because quick check code
  // starts by loading enough characters for the alternative that takes fewest
  // characters, but on a negative lookahead the negative branch did not take
  // part in that calculation (EatsAtLeast) so the assumptions don't hold.
  virtual bool try_to_emit_quick_check_for_alternative(int i) { return i != 0; }
  virtual RegExpNode* FilterASCII(int depth);
  virtual int ComputeFirstCharacterSet(int budget);
};


@@ -1125,16 +1146,12 @@ class LoopChoiceNode: public ChoiceNode {
                                    RegExpCompiler* compiler,
                                    int characters_filled_in,
                                    bool not_at_start);
  virtual void FillInBMInfo(int offset,
                            int recursion_depth,
                            int budget,
                            BoyerMooreLookahead* bm,
                            bool not_at_start);
  virtual int ComputeFirstCharacterSet(int budget);
  virtual LoopChoiceNode* Clone() { return new LoopChoiceNode(*this); }
  RegExpNode* loop_node() { return loop_node_; }
  RegExpNode* continue_node() { return continue_node_; }
  bool body_can_be_zero_length() { return body_can_be_zero_length_; }
  virtual void Accept(NodeVisitor* visitor);
  virtual RegExpNode* FilterASCII(int depth);

 private:
  // AddAlternative is made private for loop nodes because alternatives

@@ -1150,146 +1167,6 @@ class LoopChoiceNode: public ChoiceNode {
};

// Improve the speed at which we scan for an initial point where a non-anchored
// regexp can match by using a Boyer-Moore-like table. This is done by
// identifying non-greedy non-capturing loops in the nodes that eat any
// character one at a time. For example in the middle of the regexp
// /foo[\s\S]*?bar/ we find such a loop. There is also such a loop implicitly
// inserted at the start of any non-anchored regexp.
//
// When we have found such a loop we look ahead in the nodes to find the set of
// characters that can come at given distances. For example for the regexp
// /.?foo/ we know that there are at least 3 characters ahead of us, and the
// sets of characters that can occur are [any, [f, o], [o]]. We find a range in
// the lookahead info where the set of characters is reasonably constrained. In
// our example this is from index 1 to 2 (0 is not constrained). We can now
// look 3 characters ahead and if we don't find one of [f, o] (the union of
// [f, o] and [o]) then we can skip forwards by the range size (in this case 2).
//
// For Unicode input strings we do the same, but modulo 128.
//
// We also look at the first string fed to the regexp and use that to get a hint
// of the character frequencies in the inputs. This affects the assessment of
// whether the set of characters is 'reasonably constrained'.
//
// We also have another lookahead mechanism (called quick check in the code),
// which uses a wide load of multiple characters followed by a mask and compare
// to determine whether a match is possible at this point.
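The /.?foo/ example in the comment can be made concrete. Below is a standalone toy of just the skipping step; the real implementation emits machine code via RegExpMacroAssembler, while this sketch hard-codes the [f, o] union for the constrained range at offsets 1..2:

#include <cstdio>
#include <cstring>

// Returns the first candidate start position the full matcher would need to
// inspect, advancing by the constrained-range size wherever the lookahead
// character rules a position out.
int FirstCandidate(const char* subject) {
  int len = static_cast<int>(strlen(subject));
  int pos = 0;
  while (pos + 3 <= len) {
    char c = subject[pos + 2];              // rightmost constrained position
    if (c == 'f' || c == 'o') return pos;   // cannot rule this point out
    pos += 2;                               // outside {f, o}: skip range size
  }
  return -1;
}

int main() {
  // The 'x' run is skipped two characters at a time; the coarse filter stops
  // at position 4, and the full matcher then finds the real match at 5.
  printf("%d\n", FirstCandidate("xxxxxxfoo"));  // prints 4
  return 0;
}
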
enum ContainedInLattice {
  kNotYet = 0,
  kLatticeIn = 1,
  kLatticeOut = 2,
  kLatticeUnknown = 3  // Can also mean both in and out.
};


inline ContainedInLattice Combine(ContainedInLattice a, ContainedInLattice b) {
  return static_cast<ContainedInLattice>(a | b);
}
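Since the lattice join above is a bitwise OR of the enum values, its whole truth table follows directly; spelling it out:

// Combine(kNotYet, x)              == x                (kNotYet is the identity)
// Combine(kLatticeIn, kLatticeIn)  == kLatticeIn
// Combine(kLatticeIn, kLatticeOut) == kLatticeUnknown  (1 | 2 == 3)
// Combine(kLatticeUnknown, x)      == kLatticeUnknown  (absorbing element)

Once a character class has been observed both inside and outside a set, it stays kLatticeUnknown.
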


ContainedInLattice AddRange(ContainedInLattice a,
                            const int* ranges,
                            int ranges_size,
                            Interval new_range);


class BoyerMoorePositionInfo : public ZoneObject {
 public:
  BoyerMoorePositionInfo()
      : map_(new ZoneList<bool>(kMapSize)),
        map_count_(0),
        w_(kNotYet),
        s_(kNotYet),
        d_(kNotYet),
        surrogate_(kNotYet) {
    for (int i = 0; i < kMapSize; i++) {
      map_->Add(false);
    }
  }

  bool& at(int i) { return map_->at(i); }

  static const int kMapSize = 128;
  static const int kMask = kMapSize - 1;

  int map_count() const { return map_count_; }

  void Set(int character);
  void SetInterval(const Interval& interval);
  void SetAll();
  bool is_non_word() { return w_ == kLatticeOut; }
  bool is_word() { return w_ == kLatticeIn; }

 private:
  ZoneList<bool>* map_;
  int map_count_;  // Number of set bits in the map.
  ContainedInLattice w_;  // The \w character class.
  ContainedInLattice s_;  // The \s character class.
  ContainedInLattice d_;  // The \d character class.
  ContainedInLattice surrogate_;  // Surrogate UTF-16 code units.
};


class BoyerMooreLookahead : public ZoneObject {
 public:
  BoyerMooreLookahead(int length, RegExpCompiler* compiler);

  int length() { return length_; }
  int max_char() { return max_char_; }
  RegExpCompiler* compiler() { return compiler_; }

  int Count(int map_number) {
    return bitmaps_->at(map_number)->map_count();
  }

  BoyerMoorePositionInfo* at(int i) { return bitmaps_->at(i); }

  void Set(int map_number, int character) {
    if (character > max_char_) return;
    BoyerMoorePositionInfo* info = bitmaps_->at(map_number);
    info->Set(character);
  }

  void SetInterval(int map_number, const Interval& interval) {
    if (interval.from() > max_char_) return;
    BoyerMoorePositionInfo* info = bitmaps_->at(map_number);
    if (interval.to() > max_char_) {
      info->SetInterval(Interval(interval.from(), max_char_));
    } else {
      info->SetInterval(interval);
    }
  }

  void SetAll(int map_number) {
    bitmaps_->at(map_number)->SetAll();
  }

  void SetRest(int from_map) {
    for (int i = from_map; i < length_; i++) SetAll(i);
  }
  bool EmitSkipInstructions(RegExpMacroAssembler* masm);

 private:
  // This is the value obtained by EatsAtLeast. If we do not have at least this
  // many characters left in the sample string then the match is bound to fail.
  // Therefore it is OK to read a character this far ahead of the current match
  // point.
  int length_;
  RegExpCompiler* compiler_;
  // 0x7f for ASCII, 0xffff for UTF-16.
  int max_char_;
  ZoneList<BoyerMoorePositionInfo*>* bitmaps_;

  int GetSkipTable(int min_lookahead,
                   int max_lookahead,
                   Handle<ByteArray> boolean_skip_table);
  bool FindWorthwhileInterval(int* from, int* to);
  int FindBestInterval(
      int max_number_of_chars, int old_biggest_points, int* from, int* to);
};

// There are many ways to generate code for a node. This class encapsulates
// the current way we should be generating. In other words it encapsulates
// the current state of the code generator. The effect of this is that we

@@ -1581,7 +1458,6 @@ class RegExpEngine: public AllStatic {
                             bool ignore_case,
                             bool multiline,
                             Handle<String> pattern,
                             Handle<String> sample_subject,
                             bool is_ascii);

  static void DotPrint(const char* label, RegExpNode* node, bool ignore_case);

@@ -65,12 +65,8 @@
// static LazyInstance<MyClass, MyCreateTrait>::type my_instance =
//   LAZY_INSTANCE_INITIALIZER;
//
// WARNINGS:
// - This implementation of LazyInstance is NOT THREAD-SAFE by default. See
//   ThreadSafeInitOnceTrait declared below for that.
// - Lazy initialization comes with a cost. Make sure that you don't use it on
//   critical path. Consider adding your initialization code to a function
//   which is explicitly called once.
// WARNING: This implementation of LazyInstance is NOT thread-safe by default.
// See ThreadSafeInitOnceTrait declared below for that.
//
// Notes for advanced users:
// LazyInstance can actually be used in two different ways:

@@ -250,7 +246,7 @@ struct LazyInstance {


template <typename T,
          typename CreateTrait = DefaultCreateTrait<T>,
          typename CreateTrait = DefaultConstructTrait<T>,
          typename InitOnceTrait = SingleThreadInitOnceTrait,
          typename DestroyTrait = LeakyInstanceTrait<T> >
struct LazyDynamicInstance {
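A hedged usage sketch tying the warning to the documented initializer pattern. It assumes the trait parameter order matches the LazyDynamicInstance declaration above and that Pointer() is part of the instance type's interface, as the header's examples suggest:

struct Counter { int value; };

// Assumed trait order: T, CreateTrait, InitOnceTrait (hypothetical example).
static LazyInstance<Counter,
                    DefaultConstructTrait<Counter>,
                    ThreadSafeInitOnceTrait>::type counter_instance =
    LAZY_INSTANCE_INITIALIZER;

int NextId() {
  // With ThreadSafeInitOnceTrait the Counter is constructed exactly once,
  // even if several threads race to be first; the increment itself is
  // still unsynchronized.
  return counter_instance.Pointer()->value++;
}
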
@@ -207,19 +207,20 @@ void List<T, P>::Initialize(int capacity) {
}


template <typename T, typename P>
int SortedListBSearch(const List<T>& list, P cmp) {
template <typename T>
int SortedListBSearch(
    const List<T>& list, T elem, int (*cmp)(const T* x, const T* y)) {
  int low = 0;
  int high = list.length() - 1;
  while (low <= high) {
    int mid = (low + high) / 2;
    T mid_elem = list[mid];

    if (cmp(&mid_elem) > 0) {
    if (cmp(&mid_elem, &elem) > 0) {
      high = mid - 1;
      continue;
    }
    if (cmp(&mid_elem) < 0) {
    if (cmp(&mid_elem, &elem) < 0) {
      low = mid + 1;
      continue;
    }

@@ -230,21 +231,9 @@ int SortedListBSearch(const List<T>& list, P cmp) {
}


template<typename T>
class ElementCmp {
 public:
  explicit ElementCmp(T e) : elem_(e) {}
  int operator()(const T* other) {
    return PointerValueCompare(other, &elem_);
  }
 private:
  T elem_;
};


template <typename T>
int SortedListBSearch(const List<T>& list, T elem) {
  return SortedListBSearch<T, ElementCmp<T> >(list, ElementCmp<T>(elem));
  return SortedListBSearch<T>(list, elem, PointerValueCompare<T>);
}


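A usage sketch for the reverted three-argument form above. It assumes V8's list.h is included, that the list is already sorted ascending, and that the comparator follows the contract documented in list.h (positive when the first argument is greater, negative when it is less):

static int IntCompare(const int* x, const int* y) {
  return (*x < *y) ? -1 : ((*x > *y) ? 1 : 0);
}

void Demo(const List<int>& sorted) {
  int key = 42;
  int index = SortedListBSearch(sorted, key, IntCompare);
  // index is the position of 42 in the list, or -1 if it is absent.
  (void) index;
}
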
@@ -173,11 +173,9 @@ typedef List<Handle<Code> > CodeHandleList;

// Perform binary search for an element in an already sorted
// list. Returns the index of the element or -1 if it was not found.
// |cmp| is a predicate that takes a pointer to an element of the List
// and returns +1 if it is greater, -1 if it is less than the element
// being searched.
template <typename T, class P>
int SortedListBSearch(const List<T>& list, P cmp);
template <typename T>
int SortedListBSearch(
    const List<T>& list, T elem, int (*cmp)(const T* x, const T* y));
template <typename T>
int SortedListBSearch(const List<T>& list, T elem);


@@ -958,7 +958,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
      }
    }

    if (instr->IsMarkedAsCall()) {
    if (instr->IsMarkedAsCall() || instr->IsMarkedAsSaveDoubles()) {
      for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
        if (output == NULL || !output->IsDoubleRegister() ||
            output->index() != i) {
@@ -95,37 +95,31 @@ void LOperand::PrintTo(StringStream* stream) {
}

#define DEFINE_OPERAND_CACHE(name, type)            \
  L##name* L##name::cache = NULL;                   \
                                                    \
  void L##name::SetUpCache() {                      \
  name* name::cache = NULL;                         \
  void name::SetUpCache() {                         \
    if (cache) return;                              \
    cache = new L##name[kNumCachedOperands];        \
    cache = new name[kNumCachedOperands];           \
    for (int i = 0; i < kNumCachedOperands; i++) {  \
      cache[i].ConvertTo(type, i);                  \
    }                                               \
  }                                                 \
                                                    \
  void L##name::TearDownCache() {                   \
    delete[] cache;                                 \
  }

LITHIUM_OPERAND_LIST(DEFINE_OPERAND_CACHE)
DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
DEFINE_OPERAND_CACHE(LStackSlot, STACK_SLOT)
DEFINE_OPERAND_CACHE(LDoubleStackSlot, DOUBLE_STACK_SLOT)
DEFINE_OPERAND_CACHE(LRegister, REGISTER)
DEFINE_OPERAND_CACHE(LDoubleRegister, DOUBLE_REGISTER)

#undef DEFINE_OPERAND_CACHE

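For reference, the rolled-back macro is plain textual substitution; DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND) expands to roughly:

LConstantOperand* LConstantOperand::cache = NULL;
void LConstantOperand::SetUpCache() {
  if (cache) return;
  cache = new LConstantOperand[kNumCachedOperands];
  for (int i = 0; i < kNumCachedOperands; i++) {
    cache[i].ConvertTo(CONSTANT_OPERAND, i);
  }
}

so the first kNumCachedOperands operands of each kind are preallocated once and shared, instead of being allocated per use.
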
void LOperand::SetUpCaches() {
#define LITHIUM_OPERAND_SETUP(name, type) L##name::SetUpCache();
  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_SETUP)
#undef LITHIUM_OPERAND_SETUP
  LConstantOperand::SetUpCache();
  LStackSlot::SetUpCache();
  LDoubleStackSlot::SetUpCache();
  LRegister::SetUpCache();
  LDoubleRegister::SetUpCache();
}


void LOperand::TearDownCaches() {
#define LITHIUM_OPERAND_TEARDOWN(name, type) L##name::TearDownCache();
  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_TEARDOWN)
#undef LITHIUM_OPERAND_TEARDOWN
}


bool LParallelMove::IsRedundant() const {
  for (int i = 0; i < move_operands_.length(); ++i) {
    if (!move_operands_[i].IsRedundant()) return false;

@@ -35,14 +35,6 @@
namespace v8 {
namespace internal {

#define LITHIUM_OPERAND_LIST(V)         \
  V(ConstantOperand, CONSTANT_OPERAND)  \
  V(StackSlot, STACK_SLOT)              \
  V(DoubleStackSlot, DOUBLE_STACK_SLOT) \
  V(Register, REGISTER)                 \
  V(DoubleRegister, DOUBLE_REGISTER)


class LOperand: public ZoneObject {
 public:
  enum Kind {

@@ -60,13 +52,14 @@ class LOperand: public ZoneObject {

  Kind kind() const { return KindField::decode(value_); }
  int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
#define LITHIUM_OPERAND_PREDICATE(name, type) \
  bool Is##name() const { return kind() == type; }
  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_PREDICATE)
  LITHIUM_OPERAND_PREDICATE(Argument, ARGUMENT)
  LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED)
  LITHIUM_OPERAND_PREDICATE(Ignored, INVALID)
#undef LITHIUM_OPERAND_PREDICATE
  bool IsConstantOperand() const { return kind() == CONSTANT_OPERAND; }
  bool IsStackSlot() const { return kind() == STACK_SLOT; }
  bool IsDoubleStackSlot() const { return kind() == DOUBLE_STACK_SLOT; }
  bool IsRegister() const { return kind() == REGISTER; }
  bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
  bool IsArgument() const { return kind() == ARGUMENT; }
  bool IsUnallocated() const { return kind() == UNALLOCATED; }
  bool IsIgnored() const { return kind() == INVALID; }
  bool Equals(LOperand* other) const { return value_ == other->value_; }

  void PrintTo(StringStream* stream);

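The deleted block above is the classic X-macro pattern: LITHIUM_OPERAND_LIST applies a caller-supplied macro V to every (name, kind) pair, so the generated predicates are identical to the hand-written ones that replace them. For example, with the removed definitions,

  LITHIUM_OPERAND_PREDICATE(StackSlot, STACK_SLOT)

expanded to exactly

  bool IsStackSlot() const { return kind() == STACK_SLOT; }

The rollback trades that indirection for the explicit list, at the cost of the reminder comment in the next hunk about keeping SetUpCaches() in sync by hand.
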
|
@ -76,9 +69,9 @@ class LOperand: public ZoneObject {
|
|||
ASSERT(this->index() == index);
|
||||
}
|
||||
|
||||
// Calls SetUpCache()/TearDownCache() for each subclass.
|
||||
// Calls SetUpCache() for each subclass. Don't forget to update this method
|
||||
// if you add a new LOperand subclass.
|
||||
static void SetUpCaches();
|
||||
static void TearDownCaches();
|
||||
|
||||
protected:
|
||||
static const int kKindFieldWidth = 3;
|
||||
|
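A worked example of the kind/index packing used by the kind() and index() accessors shown earlier, given kKindFieldWidth == 3 as declared above: the low three bits hold the Kind tag and the remaining bits hold the index.

// For a hypothetical REGISTER operand referring to register 5:
//   value_  == (5 << kKindFieldWidth) | REGISTER
//   kind()  == REGISTER          (decoded from the low 3 bits)
//   index() == value_ >> 3 == 5
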
@@ -272,7 +265,6 @@ class LConstantOperand: public LOperand {
  }

  static void SetUpCache();
  static void TearDownCache();

 private:
  static const int kNumCachedOperands = 128;

@@ -308,7 +300,6 @@ class LStackSlot: public LOperand {
  }

  static void SetUpCache();
  static void TearDownCache();

 private:
  static const int kNumCachedOperands = 128;

@@ -333,7 +324,6 @@ class LDoubleStackSlot: public LOperand {
  }

  static void SetUpCache();
  static void TearDownCache();

 private:
  static const int kNumCachedOperands = 128;

@@ -358,7 +348,6 @@ class LRegister: public LOperand {
  }

  static void SetUpCache();
  static void TearDownCache();

 private:
  static const int kNumCachedOperands = 16;

@@ -383,7 +372,6 @@ class LDoubleRegister: public LOperand {
  }

  static void SetUpCache();
  static void TearDownCache();

 private:
  static const int kNumCachedOperands = 16;

Some files were not shown because too many files changed in this diff.