This commit is contained in:
Ben Noordhuis 2014-11-14 00:52:27 +01:00
Родитель 3b3d89bad2
Коммит 5d1b6d3e0f
888 изменённых файлов: 192839 добавлений и 31972 удалений

4
deps/v8/.gitignore поставляемый
Просмотреть файл

@ -61,6 +61,9 @@ shell_g
/test/test262/data
/test/test262/data.old
/test/test262/tc39-test262-*
/test/test262-es6/data
/test/test262-es6/data.old
/test/test262-es6/tc39-test262-*
/testing/gmock
/testing/gtest
/third_party/icu
@ -80,5 +83,6 @@ GRTAGS
GSYMS
GPATH
gtags.files
turbo*.cfg
turbo*.dot
turbo*.json

5
deps/v8/AUTHORS поставляемый
Просмотреть файл

@ -14,6 +14,9 @@ NVIDIA Corporation
BlackBerry Limited
Opera Software ASA
Intel Corporation
MIPS Technologies, Inc.
Imagination Technologies, LLC
Loongson Technology Corporation Limited
Akinori MUSHA <knu@FreeBSD.org>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
@ -24,6 +27,7 @@ Andreas Anyuru <andreas.anyuru@gmail.com>
Baptiste Afsa <baptiste.afsa@arm.com>
Bert Belder <bertbelder@gmail.com>
Burcu Dogan <burcujdogan@gmail.com>
Caitlin Potter <caitpotter88@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
Chunyang Dai <chunyang.dai@intel.com>
Daniel Andersson <kodandersson@gmail.com>
@ -35,6 +39,7 @@ Fedor Indutny <fedor@indutny.com>
Filipe David Manana <fdmanana@gmail.com>
Haitao Feng <haitao.feng@intel.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Isiah Meadows <impinball@gmail.com>
Jacob Bramley <jacob.bramley@arm.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>

65
deps/v8/BUILD.gn поставляемый
Просмотреть файл

@ -182,7 +182,7 @@ action("js2c") {
"src/array.js",
"src/string.js",
"src/uri.js",
"third_party/fdlibm/fdlibm.js",
"src/third_party/fdlibm/fdlibm.js",
"src/math.js",
"src/apinatives.js",
"src/date.js",
@ -243,7 +243,9 @@ action("js2c_experimental") {
"src/generator.js",
"src/harmony-string.js",
"src/harmony-array.js",
"src/harmony-typedarray.js",
"src/harmony-classes.js",
"src/harmony-tostring.js"
]
outputs = [
@ -432,6 +434,8 @@ source_set("v8_base") {
"src/assembler.h",
"src/assert-scope.h",
"src/assert-scope.cc",
"src/ast-numbering.cc",
"src/ast-numbering.h",
"src/ast-value-factory.cc",
"src/ast-value-factory.h",
"src/ast.cc",
@ -446,6 +450,8 @@ source_set("v8_base") {
"src/bignum-dtoa.h",
"src/bignum.cc",
"src/bignum.h",
"src/bit-vector.cc",
"src/bit-vector.h",
"src/bootstrapper.cc",
"src/bootstrapper.h",
"src/builtins.cc",
@ -453,6 +459,7 @@ source_set("v8_base") {
"src/bytecodes-irregexp.h",
"src/cached-powers.cc",
"src/cached-powers.h",
"src/char-predicates.cc",
"src/char-predicates-inl.h",
"src/char-predicates.h",
"src/checks.cc",
@ -469,10 +476,14 @@ source_set("v8_base") {
"src/codegen.h",
"src/compilation-cache.cc",
"src/compilation-cache.h",
"src/compilation-statistics.cc",
"src/compilation-statistics.h",
"src/compiler/access-builder.cc",
"src/compiler/access-builder.h",
"src/compiler/ast-graph-builder.cc",
"src/compiler/ast-graph-builder.h",
"src/compiler/ast-loop-assignment-analyzer.cc",
"src/compiler/ast-loop-assignment-analyzer.h",
"src/compiler/basic-block-instrumentor.cc",
"src/compiler/basic-block-instrumentor.h",
"src/compiler/change-lowering.cc",
@ -485,6 +496,9 @@ source_set("v8_base") {
"src/compiler/common-operator.h",
"src/compiler/control-builders.cc",
"src/compiler/control-builders.h",
"src/compiler/control-reducer.cc",
"src/compiler/control-reducer.h",
"src/compiler/diamond.h",
"src/compiler/frame.h",
"src/compiler/gap-resolver.cc",
"src/compiler/gap-resolver.h",
@ -520,6 +534,9 @@ source_set("v8_base") {
"src/compiler/js-graph.h",
"src/compiler/js-inlining.cc",
"src/compiler/js-inlining.h",
"src/compiler/js-intrinsic-builder.cc",
"src/compiler/js-intrinsic-builder.h",
"src/compiler/js-operator.cc",
"src/compiler/js-operator.h",
"src/compiler/js-typed-lowering.cc",
"src/compiler/js-typed-lowering.h",
@ -549,15 +566,21 @@ source_set("v8_base") {
"src/compiler/phi-reducer.h",
"src/compiler/pipeline.cc",
"src/compiler/pipeline.h",
"src/compiler/pipeline-statistics.cc",
"src/compiler/pipeline-statistics.h",
"src/compiler/raw-machine-assembler.cc",
"src/compiler/raw-machine-assembler.h",
"src/compiler/register-allocator.cc",
"src/compiler/register-allocator.h",
"src/compiler/register-configuration.cc",
"src/compiler/register-configuration.h",
"src/compiler/representation-change.h",
"src/compiler/schedule.cc",
"src/compiler/schedule.h",
"src/compiler/scheduler.cc",
"src/compiler/scheduler.h",
"src/compiler/select-lowering.cc",
"src/compiler/select-lowering.h",
"src/compiler/simplified-lowering.cc",
"src/compiler/simplified-lowering.h",
"src/compiler/simplified-operator-reducer.cc",
@ -572,6 +595,8 @@ source_set("v8_base") {
"src/compiler/value-numbering-reducer.h",
"src/compiler/verifier.cc",
"src/compiler/verifier.h",
"src/compiler/zone-pool.cc",
"src/compiler/zone-pool.h",
"src/compiler.cc",
"src/compiler.h",
"src/contexts.cc",
@ -584,8 +609,6 @@ source_set("v8_base") {
"src/cpu-profiler-inl.h",
"src/cpu-profiler.cc",
"src/cpu-profiler.h",
"src/data-flow.cc",
"src/data-flow.h",
"src/date.cc",
"src/date.h",
"src/dateparser-inl.h",
@ -624,7 +647,6 @@ source_set("v8_base") {
"src/factory.h",
"src/fast-dtoa.cc",
"src/fast-dtoa.h",
"src/feedback-slots.h",
"src/field-index.h",
"src/field-index-inl.h",
"src/fixed-dtoa.cc",
@ -674,8 +696,6 @@ source_set("v8_base") {
"src/heap/store-buffer-inl.h",
"src/heap/store-buffer.cc",
"src/heap/store-buffer.h",
"src/heap/sweeper-thread.h",
"src/heap/sweeper-thread.cc",
"src/hydrogen-alias-analysis.h",
"src/hydrogen-bce.cc",
"src/hydrogen-bce.h",
@ -824,14 +844,29 @@ source_set("v8_base") {
"src/rewriter.h",
"src/runtime-profiler.cc",
"src/runtime-profiler.h",
"src/runtime/runtime-api.cc",
"src/runtime/runtime-array.cc",
"src/runtime/runtime-classes.cc",
"src/runtime/runtime-collections.cc",
"src/runtime/runtime-compiler.cc",
"src/runtime/runtime-date.cc",
"src/runtime/runtime-debug.cc",
"src/runtime/runtime-function.cc",
"src/runtime/runtime-generator.cc",
"src/runtime/runtime-i18n.cc",
"src/runtime/runtime-internal.cc",
"src/runtime/runtime-json.cc",
"src/runtime/runtime-literals.cc",
"src/runtime/runtime-liveedit.cc",
"src/runtime/runtime-maths.cc",
"src/runtime/runtime-numbers.cc",
"src/runtime/runtime-object.cc",
"src/runtime/runtime-observe.cc",
"src/runtime/runtime-proxy.cc",
"src/runtime/runtime-regexp.cc",
"src/runtime/runtime-scopes.cc",
"src/runtime/runtime-strings.cc",
"src/runtime/runtime-symbol.cc",
"src/runtime/runtime-test.cc",
"src/runtime/runtime-typedarray.cc",
"src/runtime/runtime-uri.cc",
@ -884,8 +919,9 @@ source_set("v8_base") {
"src/unicode-inl.h",
"src/unicode.cc",
"src/unicode.h",
"src/unicode-decoder.cc",
"src/unicode-decoder.h",
"src/unique.h",
"src/uri.h",
"src/utils-inl.h",
"src/utils.cc",
"src/utils.h",
@ -903,8 +939,8 @@ source_set("v8_base") {
"src/zone-inl.h",
"src/zone.cc",
"src/zone.h",
"third_party/fdlibm/fdlibm.cc",
"third_party/fdlibm/fdlibm.h",
"src/third_party/fdlibm/fdlibm.cc",
"src/third_party/fdlibm/fdlibm.h",
]
if (v8_target_arch == "x86") {
@ -939,6 +975,8 @@ source_set("v8_base") {
"src/compiler/ia32/instruction-codes-ia32.h",
"src/compiler/ia32/instruction-selector-ia32.cc",
"src/compiler/ia32/linkage-ia32.cc",
"src/ic/ia32/access-compiler-ia32.cc",
"src/ic/ia32/handler-compiler-ia32.cc",
"src/ic/ia32/ic-ia32.cc",
"src/ic/ia32/ic-compiler-ia32.cc",
"src/ic/ia32/stub-cache-ia32.cc",
@ -1107,6 +1145,10 @@ source_set("v8_base") {
"src/mips/regexp-macro-assembler-mips.cc",
"src/mips/regexp-macro-assembler-mips.h",
"src/mips/simulator-mips.cc",
"src/compiler/mips/code-generator-mips.cc",
"src/compiler/mips/instruction-codes-mips.h",
"src/compiler/mips/instruction-selector-mips.cc",
"src/compiler/mips/linkage-mips.cc",
"src/ic/mips/access-compiler-mips.cc",
"src/ic/mips/handler-compiler-mips.cc",
"src/ic/mips/ic-mips.cc",
@ -1216,6 +1258,8 @@ source_set("v8_libbase") {
"src/base/division-by-constant.cc",
"src/base/division-by-constant.h",
"src/base/flags.h",
"src/base/functional.cc",
"src/base/functional.h",
"src/base/lazy-instance.h",
"src/base/logging.cc",
"src/base/logging.h",
@ -1287,8 +1331,6 @@ source_set("v8_libbase") {
sources += [
"src/base/platform/platform-win32.cc",
"src/base/win32-headers.h",
"src/base/win32-math.cc",
"src/base/win32-math.h",
]
defines += [ "_CRT_RAND_S" ] # for rand_s()
@ -1344,6 +1386,7 @@ if (current_toolchain == host_toolchain) {
":v8_base",
":v8_libplatform",
":v8_nosnapshot",
"//build/config/sanitizers:deps",
]
if (v8_compress_startup_data == "bz2") {

284
deps/v8/ChangeLog поставляемый
Просмотреть файл

@ -1,3 +1,287 @@
2014-11-05: Version 3.30.33
`1..isPrototypeOf.call(null)` should return false, not throw TypeError
(issue 3483).
Refactor ObjectGetOwnPropertyKeys to accept bitmask rather than boolean
(issue 3549).
Add debug mirror support for ES6 Map/Set iterators (Chromium issue
427868).
Performance and stability improvements on all platforms.
2014-11-04: Version 3.30.30
Performance and stability improvements on all platforms.
2014-11-02: Version 3.30.27
Performance and stability improvements on all platforms.
2014-11-02: Version 3.30.26
Performance and stability improvements on all platforms.
2014-11-01: Version 3.30.25
Performance and stability improvements on all platforms.
2014-11-01: Version 3.30.24
Ensure we don't try to inline raw access to indexed interceptor
receivers (Chromium issue 419220).
Performance and stability improvements on all platforms.
2014-10-31: Version 3.30.23
Introduce v8::Exception::GetMessage to find location of an error object
(Chromium issue 427954).
Performance and stability improvements on all platforms.
2014-10-30: Version 3.30.22
MIPS: Classes: Add super support in methods and accessors (issue 3330).
Classes: Add super support in methods and accessors (issue 3330).
Performance and stability improvements on all platforms.
2014-10-29: Version 3.30.21
MIPS: Classes: Add basic support for properties (issue 3330).
Classes: Add more tests for prototype edge cases (Chromium issue 3655).
Classes: Add test for method prototype (issue 3330).
Get stack trace for uncaught exceptions/promise rejections from the
simple stack when available.
Classes: Add basic support for properties (issue 3330).
Allow duplicate property names in classes (issue 3570).
Windows: use SystemTimeToTzSpecificLocalTime instead of localtime_s
(Chromium issue 417640).
Performance and stability improvements on all platforms.
2014-10-28: Version 3.30.20
Performance and stability improvements on all platforms.
2014-10-27: Version 3.30.19
Check string literals with escapes in PreParserTraits::GetSymbol()
(issue 3606).
only define ARRAYSIZE_UNSAFE for NaCl builds (Chromium issue 405225).
Performance and stability improvements on all platforms.
2014-10-24: Version 3.30.18
Narrow cases where Sparse/Smart versions of Array methods are used
(issues 2615, 3612, 3621).
Shrink new space in idle notification (Chromium issue 424423).
Performance and stability improvements on all platforms.
2014-10-23: Version 3.30.17
ARM64: Fix stack manipulation (Chromium issue 425585).
Speed up creation of Objects whose prototype has dictionary elements
(Chromium issue 422754).
Enable libstdc++ debug mode in debug builds (issue 3638).
Performance and stability improvements on all platforms.
2014-10-22: Version 3.30.16
Remove v8stdint.h, it doesn't serve a purpose anymore.
Performance and stability improvements on all platforms.
2014-10-21: Version 3.30.15
Avoid the Marsaglia effect in 3D (Chromium issue 423311).
Performance and stability improvements on all platforms.
2014-10-20: Version 3.30.14
Performance and stability improvements on all platforms.
2014-10-17: Version 3.30.13
Don't expose Array.prototype.values as it breaks webcompat (Chromium
issue 409858).
Fix break location calculation (Chromium issue 419663).
Enable libstdc++ debug mode in debug builds (issue 3638).
Performance and stability improvements on all platforms.
2014-10-17: Version 3.30.12
Implement .forEach() on typed arrays (issue 3578).
Introduce v8::Exception::GetStackTrace API method.
Remove SmartMove, bringing Array methods further into spec compliance
(issue 2615).
Convert argument toObject() in Object.getOwnPropertyNames/Descriptors
(issue 3443).
Performance and stability improvements on all platforms.
2014-10-15: Version 3.30.11
Array.prototype.{every, filter, find, findIndex, forEach, map, some}:
Use fresh primitive wrapper for calls (issue 3536).
Correctly expand literal buffer for surrogate pairs (Chromium issue
423212).
Performance and stability improvements on all platforms.
2014-10-15: Version 3.30.10
Squeeze the layout of various AST node types (Chromium issue 417697).
Performance and stability improvements on all platforms.
2014-10-14: Version 3.30.9
Performance and stability improvements on all platforms.
2014-10-13: Version 3.30.8
AST nodes have at most one bailout/typefeedback ID now, saving lots of
memory (Chromium issue 417697).
Allow identifier code points from supplementary multilingual planes
(issue 3617).
Performance and stability improvements on all platforms.
2014-10-10: Version 3.30.7
Fix computation of UTC time from local time at DST change points (issue
3116, Chromium issues 415424, 417640).
Convert `obj` ToObject in Object.keys() (issue 3587).
Performance and stability improvements on all platforms.
2014-10-09: Version 3.30.6
Update unicode to 7.0.0 (issue 2892).
Classes: Add support for toString (issue 3330).
Don't enable WPO on Win64 and require Server 2003 / x64 for win64
(Chromium issue 421363).
Performance and stability improvements on all platforms.
2014-10-08: Version 3.30.5
Performance and stability improvements on all platforms.
2014-10-08: Version 3.30.4
This uses a runtime function to set up the constructor and its
prototype (issue 3330).
Remove PersistentBase::ClearAndLeak.
Squeeze the layout of variable proxy nodes (Chromium issue 417697).
Add MonotonicallyIncreasingTime to V8 Platform (Chromium issue 417668).
Fix representation of HLoadRoot (Chromium issue 419036).
Performance and stability improvements on all platforms.
2014-10-03: Version 3.30.3
Removed the Isolate* field from literal nodes (Chromium issue 417697).
Squeeze the layout of expression nodes a bit (Chromium issue 417697).
Merged FeedbackSlotInterface into AstNode, removing the need for a 2nd
vtable (Chromium issue 417697).
Extend CPU profiler with mapping ticks to source lines.
Remove support for parallel sweeping.
Introduce v8::Object::GetIsolate().
Performance and stability improvements on all platforms.
2014-10-02: Version 3.30.2
Fix Hydrogen's BuildStore() (Chromium issue 417508).
Move unit tests to test/unittests (issue 3489).
Changes to ALLOW_UNUSED to match upcoming changes to the Chromium trunk:
* Eliminate usage of ALLOW_UNUSED to define COMPILE_ASSERT and just use
static_assert() in all cases now that all platforms build with C++11. *
Convert remaining uses of ALLOW_UNUSED to ALLOW_UNUSED_TYPE to match how
Chromium will be splitting this functionality. (In Chromium we'll have
both ALLOW_UNUSED_TYPE and ALLOW_UNUSED_LOCAL, which have different
syntax to enable us to use these with MSVC.) (Chromium issue 81439).
Performance and stability improvements on all platforms.
2014-10-01: Version 3.30.1
Introduce PromiseRejectCallback (issue 3093).
ES6: Implement object literal property shorthand (issue 3584).
Performance and stability improvements on all platforms.
2014-09-30: Version 3.29.93
Add a getter for the address and size of the code range to the public API

22
deps/v8/Makefile поставляемый
Просмотреть файл

@ -140,10 +140,15 @@ endif
# asan=/path/to/clang++
ifneq ($(strip $(asan)),)
GYPFLAGS += -Dasan=1
export CC=$(dir $(asan))clang
export CXX=$(asan)
export CXX_host=$(asan)
export LINK=$(asan)
export ASAN_SYMBOLIZER_PATH="$(dir $(asan))llvm-symbolizer"
export ASAN_SYMBOLIZER_PATH=$(dir $(asan))llvm-symbolizer
TESTFLAGS += --asan
ifeq ($(lsan), on)
GYPFLAGS += -Dlsan=1
endif
endif
# arm specific flags.
@ -230,8 +235,8 @@ NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
GYPFILES = build/all.gyp build/features.gypi build/standalone.gypi \
build/toolchain.gypi samples/samples.gyp src/compiler/compiler.gyp \
src/d8.gyp test/cctest/cctest.gyp tools/gyp/v8.gyp
build/toolchain.gypi samples/samples.gyp src/d8.gyp \
test/cctest/cctest.gyp test/unittests/unittests.gyp tools/gyp/v8.gyp
# If vtunejit=on, the v8vtune.gyp will be appended.
ifeq ($(vtunejit), on)
@ -252,7 +257,7 @@ NACL_CHECKS = $(addsuffix .check,$(NACL_BUILDS))
ENVFILE = $(OUTDIR)/environment
.PHONY: all check clean builddeps dependencies $(ENVFILE).new native \
qc quickcheck $(QUICKCHECKS) \
qc quickcheck $(QUICKCHECKS) turbocheck \
$(addsuffix .quickcheck,$(MODES)) $(addsuffix .quickcheck,$(ARCHES)) \
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
@ -381,6 +386,15 @@ quickcheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES))
--arch-and-mode=$(FASTTESTMODES) $(TESTFLAGS) --quickcheck
qc: quickcheck
turbocheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES))
tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(SUPERFASTTESTMODES) $(TESTFLAGS) \
--quickcheck --variants=turbofan --download-data mozilla webkit
tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(FASTTESTMODES) $(TESTFLAGS) \
--quickcheck --variants=turbofan
tc: turbocheck
# Clean targets. You can clean each architecture individually, or everything.
$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)):
rm -f $(OUTDIR)/Makefile.$(basename $@)*

51
deps/v8/Makefile.nacl поставляемый
Просмотреть файл

@ -36,41 +36,29 @@ NACL_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(NACL_ARCHES)))
HOST_OS = $(shell uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')
ifeq ($(HOST_OS), linux)
TOOLCHAIN_DIR = linux_x86_glibc
else
ifeq ($(HOST_OS), mac)
TOOLCHAIN_DIR = mac_x86_glibc
else
$(error Host platform "${HOST_OS}" is not supported)
endif
endif
TOOLCHAIN_PATH = $(realpath ${NACL_SDK_ROOT}/toolchain)
NACL_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
ifeq ($(ARCH), nacl_ia32)
GYPENV = nacl_target_arch=nacl_ia32 v8_target_arch=arm v8_host_arch=ia32
TOOLCHAIN_ARCH = x86-4.4
NACL_CC = "$(NACL_TOOLCHAIN)/bin/i686-nacl-gcc"
NACL_CXX = "$(NACL_TOOLCHAIN)/bin/i686-nacl-g++"
NACL_LINK = "$(NACL_TOOLCHAIN)/bin/i686-nacl-g++"
else
ifeq ($(ARCH), nacl_x64)
GYPENV = nacl_target_arch=nacl_x64 v8_target_arch=arm v8_host_arch=ia32
TOOLCHAIN_ARCH = x86-4.4
NACL_CC = "$(NACL_TOOLCHAIN)/bin/x86_64-nacl-gcc"
NACL_CXX = "$(NACL_TOOLCHAIN)/bin/x86_64-nacl-g++"
NACL_LINK = "$(NACL_TOOLCHAIN)/bin/x86_64-nacl-g++"
else
$(error Target architecture "${ARCH}" is not supported)
endif
endif
NACL_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/linux_pnacl
ifeq ($(wildcard $(NACL_TOOLCHAIN)),)
$(error Cannot find Native Client toolchain in "${NACL_TOOLCHAIN}")
endif
ifeq ($(ARCH), nacl_ia32)
GYPENV = nacl_target_arch=nacl_ia32 v8_target_arch=arm v8_host_arch=ia32
NACL_CC = "$(NACL_TOOLCHAIN)/bin/pnacl-clang"
NACL_CXX = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++"
NACL_LINK = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++ --pnacl-allow-native -arch x86-32"
else
ifeq ($(ARCH), nacl_x64)
GYPENV = nacl_target_arch=nacl_x64 v8_target_arch=arm v8_host_arch=ia32
NACL_CC = "$(NACL_TOOLCHAIN)/bin/pnacl-clang"
NACL_CXX = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++"
NACL_LINK = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++ --pnacl-allow-native -arch x86-64"
else
$(error Target architecture "${ARCH}" is not supported)
endif
endif
# For mksnapshot host generation.
GYPENV += host_os=${HOST_OS}
@ -85,7 +73,11 @@ NACL_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(NACL_BUILDS))
# For some reason the $$(basename $$@) expansion didn't work here...
$(NACL_BUILDS): $(NACL_MAKEFILES)
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
CC=${NACL_CC} \
CXX=${NACL_CXX} \
AR="$(NACL_TOOLCHAIN)/bin/pnacl-ar" \
RANLIB="$(NACL_TOOLCHAIN)/bin/pnacl-ranlib" \
LD="$(NACL_TOOLCHAIN)/bin/pnacl-ld" \
LINK=${NACL_LINK} \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
python -c "print raw_input().capitalize()") \
@ -97,6 +89,7 @@ $(NACL_MAKEFILES):
GYP_DEFINES="${GYPENV}" \
CC=${NACL_CC} \
CXX=${NACL_CXX} \
LINK=${NACL_LINK} \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \

1
deps/v8/OWNERS поставляемый
Просмотреть файл

@ -16,7 +16,6 @@ rossberg@chromium.org
svenpanne@chromium.org
titzer@chromium.org
ulan@chromium.org
vegorov@chromium.org
verwaest@chromium.org
vogelheim@chromium.org
yangguo@chromium.org

26
deps/v8/README.md поставляемый Normal file
Просмотреть файл

@ -0,0 +1,26 @@
V8 JavaScript Engine
=============
V8 is Google's open source JavaScript engine.
V8 implements ECMAScript as specified in ECMA-262.
V8 is written in C++ and is used in Google Chrome, the open source
browser from Google.
V8 can run standalone, or can be embedded into any C++ application.
V8 Project page: https://code.google.com/p/v8/
Getting the Code
=============
V8 Git repository: https://chromium.googlesource.com/v8/v8.git
GitHub mirror: https://github.com/v8/v8-git-mirror
For fetching all branches, add the following into your remote
configuration in `.git/config`:
fetch = +refs/branch-heads/*:refs/remotes/branch-heads/*
fetch = +refs/tags/*:refs/tags/*

8
deps/v8/build/all.gyp поставляемый
Просмотреть файл

@ -9,18 +9,14 @@
'type': 'none',
'dependencies': [
'../samples/samples.gyp:*',
'../src/base/base.gyp:base-unittests',
'../src/compiler/compiler.gyp:compiler-unittests',
'../src/d8.gyp:d8',
'../src/heap/heap.gyp:heap-unittests',
'../src/libplatform/libplatform.gyp:libplatform-unittests',
'../test/cctest/cctest.gyp:*',
'../test/unittests/unittests.gyp:*',
],
'conditions': [
['component!="shared_library"', {
'dependencies': [
'../tools/lexer-shell.gyp:lexer-shell',
'../tools/lexer-shell.gyp:parser-shell',
'../tools/parser-shell.gyp:parser-shell',
],
}],
]

23
deps/v8/build/standalone.gypi поставляемый
Просмотреть файл

@ -136,6 +136,14 @@
'configurations': {
'DebugBaseCommon': {
'cflags': [ '-g', '-O0' ],
'conditions': [
['(v8_target_arch=="ia32" or v8_target_arch=="x87") and \
OS=="linux"', {
'defines': [
'_GLIBCXX_DEBUG'
],
}],
],
},
'Optdebug': {
'inherit_from': [ 'DebugBaseCommon', 'DebugBase2' ],
@ -313,9 +321,15 @@
},
'VCLibrarianTool': {
'AdditionalOptions': ['/ignore:4221'],
'conditions': [
['v8_target_arch=="x64"', {
'TargetMachine': '17', # x64
}, {
'TargetMachine': '1', # ia32
}],
],
},
'VCLinkerTool': {
'MinimumRequiredVersion': '5.01', # XP.
'AdditionalDependencies': [
'ws2_32.lib',
],
@ -340,6 +354,13 @@
'advapi32.lib',
],
}],
['v8_target_arch=="x64"', {
'MinimumRequiredVersion': '5.02', # Server 2003.
'TargetMachine': '17', # x64
}, {
'MinimumRequiredVersion': '5.01', # XP.
'TargetMachine': '1', # ia32
}],
],
},
},

33
deps/v8/build/toolchain.gypi поставляемый
Просмотреть файл

@ -302,7 +302,7 @@
'cflags': ['-mfp32'],
}],
['mips_arch_variant=="r6"', {
'cflags!': ['-mfp32'],
'cflags!': ['-mfp32', '-mfpxx'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [
'-mips32r6',
@ -312,14 +312,17 @@
}],
['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
'ldflags': ['-mips32r2'],
}],
['mips_arch_variant=="r1"', {
'cflags!': ['-mfp64'],
'cflags!': ['-mfp64', '-mfpxx'],
'cflags': ['-mips32', '-Wa,-mips32'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="rx"', {
'cflags!': ['-mfp64'],
'cflags': ['-mips32', '-Wa,-mips32'],
'cflags!': ['-mfp64', '-mfp32'],
'cflags': ['-mips32', '-Wa,-mips32', '-mfpxx'],
'ldflags': ['-mips32'],
}],
],
}],
@ -400,7 +403,7 @@
'cflags': ['-mfp32'],
}],
['mips_arch_variant=="r6"', {
'cflags!': ['-mfp32'],
'cflags!': ['-mfp32', '-mfpxx'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [
'-mips32r6',
@ -410,17 +413,20 @@
}],
['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
'ldflags': ['-mips32r2'],
}],
['mips_arch_variant=="r1"', {
'cflags!': ['-mfp64'],
'cflags!': ['-mfp64', '-mfpxx'],
'cflags': ['-mips32', '-Wa,-mips32'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="rx"', {
'cflags!': ['-mfp64'],
'cflags': ['-mips32', '-Wa,-mips32'],
'cflags!': ['-mfp64', '-mfp32'],
'cflags': ['-mips32', '-Wa,-mips32', '-mfpxx'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="loongson"', {
'cflags!': ['-mfp64'],
'cflags!': ['-mfp64', '-mfp32', '-mfpxx'],
'cflags': ['-mips3', '-Wa,-mips3'],
}],
],
@ -702,7 +708,6 @@
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
OS=="qnx"', {
'cflags!': [
'-O0',
'-O3',
'-O2',
'-O1',
@ -791,10 +796,6 @@
}, {
'RuntimeLibrary': '1', #/MTd
}],
['v8_target_arch=="x64"', {
# TODO(2207): remove this option once the bug is fixed.
'WholeProgramOptimization': 'true',
}],
],
},
'VCLinkerTool': {
@ -964,10 +965,6 @@
}, {
'RuntimeLibrary': '0', #/MT
}],
['v8_target_arch=="x64"', {
# TODO(2207): remove this option once the bug is fixed.
'WholeProgramOptimization': 'true',
}],
],
},
'VCLinkerTool': {

9
deps/v8/include/v8-platform.h поставляемый
Просмотреть файл

@ -55,6 +55,15 @@ class Platform {
* scheduling. The definition of "foreground" is opaque to V8.
*/
virtual void CallOnForegroundThread(Isolate* isolate, Task* task) = 0;
/**
* Monotonically increasing time in seconds from an arbitrary fixed point in
* the past. This function is expected to return at least
* millisecond-precision values. For this reason,
* it is recommended that the fixed point be no further in the past than
* the epoch.
**/
virtual double MonotonicallyIncreasingTime() = 0;
};
} // namespace v8

20
deps/v8/include/v8-profiler.h поставляемый
Просмотреть файл

@ -22,6 +22,14 @@ typedef uint32_t SnapshotObjectId;
*/
class V8_EXPORT CpuProfileNode {
public:
struct LineTick {
/** The 1-based number of the source line where the function originates. */
int line;
/** The count of samples associated with the source line. */
unsigned int hit_count;
};
/** Returns function name (empty string for anonymous functions.) */
Handle<String> GetFunctionName() const;
@ -43,6 +51,18 @@ class V8_EXPORT CpuProfileNode {
*/
int GetColumnNumber() const;
/**
* Returns the number of the function's source lines that collect the samples.
*/
unsigned int GetHitLineCount() const;
/** Returns the set of source lines that collect the samples.
* The caller allocates buffer and responsible for releasing it.
* True if all available entries are copied, otherwise false.
* The function copies nothing if buffer is not large enough.
*/
bool GetLineTicks(LineTick* entries, unsigned int length) const;
/** Returns bailout reason for the function
* if the optimization was disabled for it.
*/

628
deps/v8/include/v8.h поставляемый
Просмотреть файл

@ -15,7 +15,11 @@
#ifndef V8_H_
#define V8_H_
#include "v8stdint.h"
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include "v8config.h"
// We reserve the V8_* prefix for macros defined in V8 public API and
// assume there are no name conflicts with the embedder's code.
@ -85,6 +89,7 @@ class ObjectOperationDescriptor;
class ObjectTemplate;
class Platform;
class Primitive;
class Promise;
class RawOperationDescriptor;
class Script;
class Signature;
@ -511,6 +516,18 @@ template <class T> class PersistentBase {
P* parameter,
typename WeakCallbackData<S, P>::Callback callback);
// Phantom persistents work like weak persistents, except that the pointer to
// the object being collected is not available in the finalization callback.
// This enables the garbage collector to collect the object and any objects
// it references transitively in one GC cycle.
template <typename P>
V8_INLINE void SetPhantom(P* parameter,
typename WeakCallbackData<T, P>::Callback callback);
template <typename S, typename P>
V8_INLINE void SetPhantom(P* parameter,
typename WeakCallbackData<S, P>::Callback callback);
template<typename P>
V8_INLINE P* ClearWeak();
@ -696,9 +713,6 @@ template <class T, class M> class Persistent : public PersistentBase<T> {
return Persistent<S>::Cast(*this);
}
// This will be removed.
V8_INLINE T* ClearAndLeak();
private:
friend class Isolate;
friend class Utils;
@ -1415,6 +1429,27 @@ class V8_EXPORT StackFrame {
};
// A StateTag represents a possible state of the VM.
enum StateTag { JS, GC, COMPILER, OTHER, EXTERNAL, IDLE };
// A RegisterState represents the current state of registers used
// by the sampling profiler API.
struct RegisterState {
RegisterState() : pc(NULL), sp(NULL), fp(NULL) {}
void* pc; // Instruction pointer.
void* sp; // Stack pointer.
void* fp; // Frame pointer.
};
// The output structure filled up by GetStackSample API function.
struct SampleInfo {
size_t frames_count;
StateTag vm_state;
};
/**
* A JSON Parser.
*/
@ -1590,6 +1625,18 @@ class V8_EXPORT Value : public Data {
*/
bool IsSet() const;
/**
* Returns true if this value is a Map Iterator.
* This is an experimental feature.
*/
bool IsMapIterator() const;
/**
* Returns true if this value is a Set Iterator.
* This is an experimental feature.
*/
bool IsSetIterator() const;
/**
* Returns true if this value is a WeakMap.
* This is an experimental feature.
@ -1680,14 +1727,24 @@ class V8_EXPORT Value : public Data {
*/
bool IsDataView() const;
Local<Boolean> ToBoolean() const;
Local<Number> ToNumber() const;
Local<String> ToString() const;
Local<String> ToDetailString() const;
Local<Object> ToObject() const;
Local<Integer> ToInteger() const;
Local<Uint32> ToUint32() const;
Local<Int32> ToInt32() const;
Local<Boolean> ToBoolean(Isolate* isolate) const;
Local<Number> ToNumber(Isolate* isolate) const;
Local<String> ToString(Isolate* isolate) const;
Local<String> ToDetailString(Isolate* isolate) const;
Local<Object> ToObject(Isolate* isolate) const;
Local<Integer> ToInteger(Isolate* isolate) const;
Local<Uint32> ToUint32(Isolate* isolate) const;
Local<Int32> ToInt32(Isolate* isolate) const;
// TODO(dcarney): deprecate all these.
inline Local<Boolean> ToBoolean() const;
inline Local<Number> ToNumber() const;
inline Local<String> ToString() const;
inline Local<String> ToDetailString() const;
inline Local<Object> ToObject() const;
inline Local<Integer> ToInteger() const;
inline Local<Uint32> ToUint32() const;
inline Local<Int32> ToInt32() const;
/**
* Attempts to convert a string to an array index.
@ -1754,7 +1811,6 @@ class V8_EXPORT String : public Name {
enum Encoding {
UNKNOWN_ENCODING = 0x1,
TWO_BYTE_ENCODING = 0x0,
ASCII_ENCODING = 0x4, // TODO(yangguo): deprecate this.
ONE_BYTE_ENCODING = 0x4
};
/**
@ -1810,7 +1866,6 @@ class V8_EXPORT String : public Name {
NO_OPTIONS = 0,
HINT_MANY_WRITES_EXPECTED = 1,
NO_NULL_TERMINATION = 2,
PRESERVE_ASCII_NULL = 4, // TODO(yangguo): deprecate this.
PRESERVE_ONE_BYTE_NULL = 4,
// Used by WriteUtf8 to replace orphan surrogate code units with the
// unicode replacement character. Needs to be set to guarantee valid UTF-8
@ -1849,9 +1904,6 @@ class V8_EXPORT String : public Name {
*/
bool IsExternalOneByte() const;
// TODO(yangguo): deprecate this.
bool IsExternalAscii() const { return IsExternalOneByte(); }
class V8_EXPORT ExternalStringResourceBase { // NOLINT
public:
virtual ~ExternalStringResourceBase() {}
@ -1930,8 +1982,6 @@ class V8_EXPORT String : public Name {
ExternalOneByteStringResource() {}
};
typedef ExternalOneByteStringResource ExternalAsciiStringResource;
/**
* If the string is an external string, return the ExternalStringResourceBase
* regardless of the encoding, otherwise return NULL. The encoding of the
@ -1952,11 +2002,6 @@ class V8_EXPORT String : public Name {
*/
const ExternalOneByteStringResource* GetExternalOneByteStringResource() const;
// TODO(yangguo): deprecate this.
const ExternalAsciiStringResource* GetExternalAsciiStringResource() const {
return GetExternalOneByteStringResource();
}
V8_INLINE static String* Cast(v8::Value* obj);
enum NewStringType {
@ -2119,6 +2164,7 @@ class V8_EXPORT Symbol : public Name {
// Well-known symbols
static Local<Symbol> GetIterator(Isolate* isolate);
static Local<Symbol> GetUnscopables(Isolate* isolate);
static Local<Symbol> GetToStringTag(Isolate* isolate);
V8_INLINE static Symbol* Cast(v8::Value* obj);
@ -2495,15 +2541,6 @@ class V8_EXPORT Object : public Value {
Local<Value> GetHiddenValue(Handle<String> key);
bool DeleteHiddenValue(Handle<String> key);
/**
* Returns true if this is an instance of an api function (one
* created from a function created from a function template) and has
* been modified since it was created. Note that this method is
* conservative and may return true for objects that haven't actually
* been modified.
*/
bool IsDirty();
/**
* Clone this object with a fast but shallow copy. Values will point
* to the same values as the original object.
@ -2564,6 +2601,11 @@ class V8_EXPORT Object : public Value {
*/
Local<Value> CallAsConstructor(int argc, Handle<Value> argv[]);
/**
* Return the isolate to which the Object belongs to.
*/
Isolate* GetIsolate();
static Local<Object> New(Isolate* isolate);
V8_INLINE static Object* Cast(Value* obj);
@ -2830,6 +2872,12 @@ class V8_EXPORT Promise : public Object {
Local<Promise> Catch(Handle<Function> handler);
Local<Promise> Then(Handle<Function> handler);
/**
* Returns true if the promise has at least one derived promise, and
* therefore resolve/reject handlers (including default handler).
*/
bool HasHandler();
V8_INLINE static Promise* Cast(Value* obj);
private:
@ -2931,11 +2979,16 @@ class V8_EXPORT ArrayBuffer : public Object {
*/
bool IsExternal() const;
/**
* Returns true if this ArrayBuffer may be neutered.
*/
bool IsNeuterable() const;
/**
* Neuters this ArrayBuffer and all its views (typed arrays).
* Neutering sets the byte length of the buffer and all typed arrays to zero,
* preventing JavaScript from ever accessing underlying backing store.
* ArrayBuffer should have been externalized.
* ArrayBuffer should have been externalized and must be neuterable.
*/
void Neuter();
@ -4134,6 +4187,11 @@ class V8_EXPORT Exception {
static Local<Value> SyntaxError(Handle<String> message);
static Local<Value> TypeError(Handle<String> message);
static Local<Value> Error(Handle<String> message);
static Local<Message> GetMessage(Handle<Value> exception);
// DEPRECATED. Use GetMessage()->GetStackTrace()
static Local<StackTrace> GetStackTrace(Handle<Value> exception);
};
@ -4175,6 +4233,37 @@ typedef void (*MemoryAllocationCallback)(ObjectSpace space,
// --- Leave Script Callback ---
typedef void (*CallCompletedCallback)();
// --- Promise Reject Callback ---
enum PromiseRejectEvent {
kPromiseRejectWithNoHandler = 0,
kPromiseHandlerAddedAfterReject = 1
};
class PromiseRejectMessage {
public:
PromiseRejectMessage(Handle<Promise> promise, PromiseRejectEvent event,
Handle<Value> value, Handle<StackTrace> stack_trace)
: promise_(promise),
event_(event),
value_(value),
stack_trace_(stack_trace) {}
V8_INLINE Handle<Promise> GetPromise() const { return promise_; }
V8_INLINE PromiseRejectEvent GetEvent() const { return event_; }
V8_INLINE Handle<Value> GetValue() const { return value_; }
// DEPRECATED. Use v8::Exception::GetMessage(GetValue())->GetStackTrace()
V8_INLINE Handle<StackTrace> GetStackTrace() const { return stack_trace_; }
private:
Handle<Promise> promise_;
PromiseRejectEvent event_;
Handle<Value> value_;
Handle<StackTrace> stack_trace_;
};
typedef void (*PromiseRejectCallback)(PromiseRejectMessage message);
// --- Microtask Callback ---
typedef void (*MicrotaskCallback)(void* data);
@ -4345,6 +4434,27 @@ enum JitCodeEventOptions {
typedef void (*JitCodeEventHandler)(const JitCodeEvent* event);
/**
* Interface for iterating through all external resources in the heap.
*/
class V8_EXPORT ExternalResourceVisitor { // NOLINT
public:
virtual ~ExternalResourceVisitor() {}
virtual void VisitExternalString(Handle<String> string) {}
};
/**
* Interface for iterating through all the persistent handles in the heap.
*/
class V8_EXPORT PersistentHandleVisitor { // NOLINT
public:
virtual ~PersistentHandleVisitor() {}
virtual void VisitPersistentHandle(Persistent<Value>* value,
uint16_t class_id) {}
};
/**
* Isolate represents an isolated instance of the V8 engine. V8 isolates have
* completely separate states. Objects from one isolate must not be used in
@ -4485,6 +4595,7 @@ class V8_EXPORT Isolate {
*/
enum UseCounterFeature {
kUseAsm = 0,
kBreakIterator = 1,
kUseCounterFeatureCount // This enum value must be last.
};
@ -4559,6 +4670,21 @@ class V8_EXPORT Isolate {
*/
void GetHeapStatistics(HeapStatistics* heap_statistics);
/**
* Get a call stack sample from the isolate.
* \param state Execution state.
* \param frames Caller allocated buffer to store stack frames.
* \param frames_limit Maximum number of frames to capture. The buffer must
* be large enough to hold the number of frames.
* \param sample_info The sample info is filled up by the function
* provides number of actual captured stack frames and
* the current VM state.
* \note GetStackSample should only be called when the JS thread is paused or
* interrupted. Otherwise the behavior is undefined.
*/
void GetStackSample(const RegisterState& state, void** frames,
size_t frames_limit, SampleInfo* sample_info);
/**
* Adjusts the amount of registered external memory. Used to give V8 an
* indication of the amount of externally allocated memory that is kept alive
@ -4688,6 +4814,42 @@ class V8_EXPORT Isolate {
*/
void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
/**
* Forcefully terminate the current thread of JavaScript execution
* in the given isolate.
*
* This method can be used by any thread even if that thread has not
* acquired the V8 lock with a Locker object.
*/
void TerminateExecution();
/**
* Is V8 terminating JavaScript execution.
*
* Returns true if JavaScript execution is currently terminating
* because of a call to TerminateExecution. In that case there are
* still JavaScript frames on the stack and the termination
* exception is still active.
*/
bool IsExecutionTerminating();
/**
* Resume execution capability in the given isolate, whose execution
* was previously forcefully terminated using TerminateExecution().
*
* When execution is forcefully terminated using TerminateExecution(),
* the isolate can not resume execution until all JavaScript frames
* have propagated the uncatchable exception which is generated. This
* method allows the program embedding the engine to handle the
* termination event and resume execution capability, even if
* JavaScript frames remain on the stack.
*
* This method can be used by any thread even if that thread has not
* acquired the V8 lock with a Locker object.
*/
void CancelTerminateExecution();
/**
* Request V8 to interrupt long running JavaScript code and invoke
* the given |callback| passing the given |data| to it. After |callback|
@ -4735,6 +4897,13 @@ class V8_EXPORT Isolate {
*/
void RemoveCallCompletedCallback(CallCompletedCallback callback);
/**
* Set callback to notify about promise reject with no handler, or
* revocation of such a previous notification once the handler is added.
*/
void SetPromiseRejectCallback(PromiseRejectCallback callback);
/**
* Experimental: Runs the Microtask Work Queue until empty
* Any exceptions thrown by microtask callbacks are swallowed.
@ -4853,12 +5022,93 @@ class V8_EXPORT Isolate {
* On Win64, embedders are advised to install function table callbacks for
* these ranges, as default SEH won't be able to unwind through jitted code.
*
* The first page of the code range is reserved for the embedder and is
* committed, writable, and executable.
*
* Might be empty on other platforms.
*
* https://code.google.com/p/v8/issues/detail?id=3598
*/
void GetCodeRange(void** start, size_t* length_in_bytes);
/** Set the callback to invoke in case of fatal errors. */
void SetFatalErrorHandler(FatalErrorCallback that);
/**
* Set the callback to invoke to check if code generation from
* strings should be allowed.
*/
void SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback callback);
/**
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
*/
bool IsDead();
/**
* Adds a message listener.
*
* The same message listener can be added more than once and in that
* case it will be called more than once for each message.
*
* If data is specified, it will be passed to the callback when it is called.
* Otherwise, the exception object will be passed to the callback instead.
*/
bool AddMessageListener(MessageCallback that,
Handle<Value> data = Handle<Value>());
/**
* Remove all message listeners from the specified callback function.
*/
void RemoveMessageListeners(MessageCallback that);
/** Callback function for reporting failed access checks.*/
void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
/**
* Tells V8 to capture current stack trace when uncaught exception occurs
* and report it to the message listeners. The option is off by default.
*/
void SetCaptureStackTraceForUncaughtExceptions(
bool capture, int frame_limit = 10,
StackTrace::StackTraceOptions options = StackTrace::kOverview);
/**
* Enables the host application to provide a mechanism to be notified
* and perform custom logging when V8 Allocates Executable Memory.
*/
void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
ObjectSpace space, AllocationAction action);
/**
* Removes callback that was installed by AddMemoryAllocationCallback.
*/
void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
/**
* Iterates through all external resources referenced from current isolate
* heap. GC is not invoked prior to iterating, therefore there is no
* guarantee that visited objects are still alive.
*/
void VisitExternalResources(ExternalResourceVisitor* visitor);
/**
* Iterates through all the persistent handles in the current isolate's heap
* that have class_ids.
*/
void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor);
/**
* Iterates through all the persistent handles in the current isolate's heap
* that have class_ids and are candidates to be marked as partially dependent
* handles. This will visit handles to young objects created since the last
* garbage collection but is free to visit an arbitrary superset of these
* objects.
*/
void VisitHandlesForPartialDependence(PersistentHandleVisitor* visitor);
private:
template<class K, class V, class Traits> friend class PersistentValueMap;
@ -4937,40 +5187,21 @@ typedef uintptr_t (*ReturnAddressLocationResolver)(
uintptr_t return_addr_location);
/**
* Interface for iterating through all external resources in the heap.
*/
class V8_EXPORT ExternalResourceVisitor { // NOLINT
public:
virtual ~ExternalResourceVisitor() {}
virtual void VisitExternalString(Handle<String> string) {}
};
/**
* Interface for iterating through all the persistent handles in the heap.
*/
class V8_EXPORT PersistentHandleVisitor { // NOLINT
public:
virtual ~PersistentHandleVisitor() {}
virtual void VisitPersistentHandle(Persistent<Value>* value,
uint16_t class_id) {}
};
/**
* Container class for static utility functions.
*/
class V8_EXPORT V8 {
public:
/** Set the callback to invoke in case of fatal errors. */
static void SetFatalErrorHandler(FatalErrorCallback that);
// TODO(dcarney): deprecate this.
V8_INLINE static void SetFatalErrorHandler(FatalErrorCallback that);
/**
* Set the callback to invoke to check if code generation from
* strings should be allowed.
*/
static void SetAllowCodeGenerationFromStringsCallback(
// TODO(dcarney): deprecate this.
V8_INLINE static void SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback that);
/**
@ -4982,10 +5213,11 @@ class V8_EXPORT V8 {
static void SetArrayBufferAllocator(ArrayBuffer::Allocator* allocator);
/**
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
*/
static bool IsDead();
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
*/
// TODO(dcarney): deprecate this.
V8_INLINE static bool IsDead();
/**
* The following 4 functions are to be used when V8 is built with
@ -5038,21 +5270,23 @@ class V8_EXPORT V8 {
* If data is specified, it will be passed to the callback when it is called.
* Otherwise, the exception object will be passed to the callback instead.
*/
static bool AddMessageListener(MessageCallback that,
Handle<Value> data = Handle<Value>());
// TODO(dcarney): deprecate this.
V8_INLINE static bool AddMessageListener(
MessageCallback that, Handle<Value> data = Handle<Value>());
/**
* Remove all message listeners from the specified callback function.
*/
static void RemoveMessageListeners(MessageCallback that);
// TODO(dcarney): deprecate this.
V8_INLINE static void RemoveMessageListeners(MessageCallback that);
/**
* Tells V8 to capture current stack trace when uncaught exception occurs
* and report it to the message listeners. The option is off by default.
*/
static void SetCaptureStackTraceForUncaughtExceptions(
bool capture,
int frame_limit = 10,
// TODO(dcarney): deprecate this.
V8_INLINE static void SetCaptureStackTraceForUncaughtExceptions(
bool capture, int frame_limit = 10,
StackTrace::StackTraceOptions options = StackTrace::kOverview);
/**
@ -5071,7 +5305,9 @@ class V8_EXPORT V8 {
static const char* GetVersion();
/** Callback function for reporting failed access checks.*/
static void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
// TODO(dcarney): deprecate this.
V8_INLINE static void SetFailedAccessCheckCallbackFunction(
FailedAccessCheckCallback);
/**
* Enables the host application to receive a notification before a
@ -5083,6 +5319,7 @@ class V8_EXPORT V8 {
* register the same callback function two times with different
* GCType filters.
*/
// TODO(dcarney): deprecate this.
static void AddGCPrologueCallback(
GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll);
@ -5090,7 +5327,8 @@ class V8_EXPORT V8 {
* This function removes callback which was installed by
* AddGCPrologueCallback function.
*/
static void RemoveGCPrologueCallback(GCPrologueCallback callback);
// TODO(dcarney): deprecate this.
V8_INLINE static void RemoveGCPrologueCallback(GCPrologueCallback callback);
/**
* Enables the host application to receive a notification after a
@ -5102,6 +5340,7 @@ class V8_EXPORT V8 {
* register the same callback function two times with different
* GCType filters.
*/
// TODO(dcarney): deprecate this.
static void AddGCEpilogueCallback(
GCEpilogueCallback callback, GCType gc_type_filter = kGCTypeAll);
@ -5109,20 +5348,24 @@ class V8_EXPORT V8 {
* This function removes callback which was installed by
* AddGCEpilogueCallback function.
*/
static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
// TODO(dcarney): deprecate this.
V8_INLINE static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
/**
* Enables the host application to provide a mechanism to be notified
* and perform custom logging when V8 Allocates Executable Memory.
*/
static void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
ObjectSpace space,
AllocationAction action);
// TODO(dcarney): deprecate this.
V8_INLINE static void AddMemoryAllocationCallback(
MemoryAllocationCallback callback, ObjectSpace space,
AllocationAction action);
/**
* Removes callback that was installed by AddMemoryAllocationCallback.
*/
static void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
// TODO(dcarney): deprecate this.
V8_INLINE static void RemoveMemoryAllocationCallback(
MemoryAllocationCallback callback);
/**
* Initializes V8. This function needs to be called before the first Isolate
@ -5152,7 +5395,8 @@ class V8_EXPORT V8 {
*
* \param isolate The isolate in which to terminate the current JS execution.
*/
static void TerminateExecution(Isolate* isolate);
// TODO(dcarney): deprecate this.
V8_INLINE static void TerminateExecution(Isolate* isolate);
/**
* Is V8 terminating JavaScript execution.
@ -5164,7 +5408,8 @@ class V8_EXPORT V8 {
*
* \param isolate The isolate in which to check.
*/
static bool IsExecutionTerminating(Isolate* isolate = NULL);
// TODO(dcarney): deprecate this.
V8_INLINE static bool IsExecutionTerminating(Isolate* isolate = NULL);
/**
* Resume execution capability in the given isolate, whose execution
@ -5182,7 +5427,8 @@ class V8_EXPORT V8 {
*
* \param isolate The isolate in which to resume execution capability.
*/
static void CancelTerminateExecution(Isolate* isolate);
// TODO(dcarney): deprecate this.
V8_INLINE static void CancelTerminateExecution(Isolate* isolate);
/**
* Releases any resources used by v8 and stops any utility threads
@ -5200,13 +5446,25 @@ class V8_EXPORT V8 {
* heap. GC is not invoked prior to iterating, therefore there is no
* guarantee that visited objects are still alive.
*/
static void VisitExternalResources(ExternalResourceVisitor* visitor);
// TODO(dcarney): deprecate this.
V8_INLINE static void VisitExternalResources(
ExternalResourceVisitor* visitor);
/**
* Iterates through all the persistent handles in the current isolate's heap
* that have class_ids.
*/
static void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor);
// TODO(dcarney): deprecate this.
V8_INLINE static void VisitHandlesWithClassIds(
PersistentHandleVisitor* visitor);
/**
* Iterates through all the persistent handles in isolate's heap that have
* class_ids.
*/
// TODO(dcarney): deprecate this.
V8_INLINE static void VisitHandlesWithClassIds(
Isolate* isolate, PersistentHandleVisitor* visitor);
/**
* Iterates through all the persistent handles in the current isolate's heap
@ -5215,7 +5473,8 @@ class V8_EXPORT V8 {
* garbage collection but is free to visit an arbitrary superset of these
* objects.
*/
static void VisitHandlesForPartialDependence(
// TODO(dcarney): deprecate this.
V8_INLINE static void VisitHandlesForPartialDependence(
Isolate* isolate, PersistentHandleVisitor* visitor);
/**
@ -5242,14 +5501,15 @@ class V8_EXPORT V8 {
private:
V8();
enum WeakHandleType { PhantomHandle, NonphantomHandle };
static internal::Object** GlobalizeReference(internal::Isolate* isolate,
internal::Object** handle);
static internal::Object** CopyPersistent(internal::Object** handle);
static void DisposeGlobal(internal::Object** global_handle);
typedef WeakCallbackData<Value, void>::Callback WeakCallback;
static void MakeWeak(internal::Object** global_handle,
void* data,
WeakCallback weak_callback);
static void MakeWeak(internal::Object** global_handle, void* data,
WeakCallback weak_callback, WeakHandleType phantom);
static void* ClearWeak(internal::Object** global_handle);
static void Eternalize(Isolate* isolate,
Value* handle,
@ -5275,8 +5535,16 @@ class V8_EXPORT TryCatch {
* all TryCatch blocks should be stack allocated because the memory
* location itself is compared against JavaScript try/catch blocks.
*/
// TODO(dcarney): deprecate.
TryCatch();
/**
* Creates a new try/catch block and registers it with v8. Note that
* all TryCatch blocks should be stack allocated because the memory
* location itself is compared against JavaScript try/catch blocks.
*/
TryCatch(Isolate* isolate);
/**
* Unregisters and deletes this try/catch block.
*/
@ -5716,8 +5984,6 @@ class V8_EXPORT Locker {
bool top_level_;
internal::Isolate* isolate_;
static bool active_;
// Disallow copying and assigning.
Locker(const Locker&);
void operator=(const Locker&);
@ -5826,7 +6092,7 @@ class Internals {
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
static const int kContextHeaderSize = 2 * kApiPointerSize;
static const int kContextEmbedderDataIndex = 95;
static const int kContextEmbedderDataIndex = 76;
static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4;
static const int kExternalTwoByteRepresentationTag = 0x02;
@ -5844,7 +6110,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
static const int kEmptyStringRootIndex = 164;
static const int kEmptyStringRootIndex = 154;
// The external allocation limit should be below 256 MB on all architectures
// to avoid that resource-constrained embedders run low on memory.
@ -5859,7 +6125,7 @@ class Internals {
static const int kNodeIsIndependentShift = 4;
static const int kNodeIsPartiallyDependentShift = 5;
static const int kJSObjectType = 0xbc;
static const int kJSObjectType = 0xbd;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x88;
@ -6114,9 +6380,8 @@ void PersistentBase<T>::SetWeak(
typename WeakCallbackData<S, P>::Callback callback) {
TYPE_CHECK(S, T);
typedef typename WeakCallbackData<Value, void>::Callback Callback;
V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_),
parameter,
reinterpret_cast<Callback>(callback));
V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_), parameter,
reinterpret_cast<Callback>(callback), V8::NonphantomHandle);
}
@ -6130,7 +6395,26 @@ void PersistentBase<T>::SetWeak(
template <class T>
template<typename P>
template <typename S, typename P>
void PersistentBase<T>::SetPhantom(
P* parameter, typename WeakCallbackData<S, P>::Callback callback) {
TYPE_CHECK(S, T);
typedef typename WeakCallbackData<Value, void>::Callback Callback;
V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_), parameter,
reinterpret_cast<Callback>(callback), V8::PhantomHandle);
}
template <class T>
template <typename P>
void PersistentBase<T>::SetPhantom(
P* parameter, typename WeakCallbackData<T, P>::Callback callback) {
SetPhantom<T, P>(parameter, callback);
}
template <class T>
template <typename P>
P* PersistentBase<T>::ClearWeak() {
return reinterpret_cast<P*>(
V8::ClearWeak(reinterpret_cast<internal::Object**>(this->val_)));
@ -6157,15 +6441,6 @@ void PersistentBase<T>::MarkPartiallyDependent() {
}
template <class T, class M>
T* Persistent<T, M>::ClearAndLeak() {
T* old;
old = this->val_;
this->val_ = NULL;
return old;
}
template <class T>
void PersistentBase<T>::SetWrapperClassId(uint16_t class_id) {
typedef internal::Internals I;
@ -6571,6 +6846,44 @@ template <class T> Value* Value::Cast(T* value) {
}
Local<Boolean> Value::ToBoolean() const {
return ToBoolean(Isolate::GetCurrent());
}
Local<Number> Value::ToNumber() const {
return ToNumber(Isolate::GetCurrent());
}
Local<String> Value::ToString() const {
return ToString(Isolate::GetCurrent());
}
Local<String> Value::ToDetailString() const {
return ToDetailString(Isolate::GetCurrent());
}
Local<Object> Value::ToObject() const {
return ToObject(Isolate::GetCurrent());
}
Local<Integer> Value::ToInteger() const {
return ToInteger(Isolate::GetCurrent());
}
Local<Uint32> Value::ToUint32() const {
return ToUint32(Isolate::GetCurrent());
}
Local<Int32> Value::ToInt32() const { return ToInt32(Isolate::GetCurrent()); }
Name* Name::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
@ -6961,6 +7274,119 @@ void* Context::GetAlignedPointerFromEmbedderData(int index) {
}
void V8::SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback callback) {
Isolate* isolate = Isolate::GetCurrent();
isolate->SetAllowCodeGenerationFromStringsCallback(callback);
}
bool V8::IsDead() {
Isolate* isolate = Isolate::GetCurrent();
return isolate->IsDead();
}
bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
Isolate* isolate = Isolate::GetCurrent();
return isolate->AddMessageListener(that, data);
}
void V8::RemoveMessageListeners(MessageCallback that) {
Isolate* isolate = Isolate::GetCurrent();
isolate->RemoveMessageListeners(that);
}
void V8::SetFailedAccessCheckCallbackFunction(
FailedAccessCheckCallback callback) {
Isolate* isolate = Isolate::GetCurrent();
isolate->SetFailedAccessCheckCallbackFunction(callback);
}
void V8::SetCaptureStackTraceForUncaughtExceptions(
bool capture, int frame_limit, StackTrace::StackTraceOptions options) {
Isolate* isolate = Isolate::GetCurrent();
isolate->SetCaptureStackTraceForUncaughtExceptions(capture, frame_limit,
options);
}
void V8::SetFatalErrorHandler(FatalErrorCallback callback) {
Isolate* isolate = Isolate::GetCurrent();
isolate->SetFatalErrorHandler(callback);
}
void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
Isolate* isolate = Isolate::GetCurrent();
isolate->RemoveGCPrologueCallback(
reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback));
}
void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
Isolate* isolate = Isolate::GetCurrent();
isolate->RemoveGCEpilogueCallback(
reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback));
}
void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
ObjectSpace space,
AllocationAction action) {
Isolate* isolate = Isolate::GetCurrent();
isolate->AddMemoryAllocationCallback(callback, space, action);
}
void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
Isolate* isolate = Isolate::GetCurrent();
isolate->RemoveMemoryAllocationCallback(callback);
}
void V8::TerminateExecution(Isolate* isolate) { isolate->TerminateExecution(); }
bool V8::IsExecutionTerminating(Isolate* isolate) {
if (isolate == NULL) {
isolate = Isolate::GetCurrent();
}
return isolate->IsExecutionTerminating();
}
void V8::CancelTerminateExecution(Isolate* isolate) {
isolate->CancelTerminateExecution();
}
void V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
Isolate* isolate = Isolate::GetCurrent();
isolate->VisitExternalResources(visitor);
}
void V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
Isolate* isolate = Isolate::GetCurrent();
isolate->VisitHandlesWithClassIds(visitor);
}
void V8::VisitHandlesWithClassIds(Isolate* isolate,
PersistentHandleVisitor* visitor) {
isolate->VisitHandlesWithClassIds(visitor);
}
void V8::VisitHandlesForPartialDependence(Isolate* isolate,
PersistentHandleVisitor* visitor) {
isolate->VisitHandlesForPartialDependence(visitor);
}
/**
* \example shell.cc
* A simple shell that takes a list of expressions on the

16
deps/v8/include/v8config.h поставляемый
Просмотреть файл

@ -120,7 +120,6 @@
// V8_LIBC_BIONIC - Bionic libc
// V8_LIBC_BSD - BSD libc derivate
// V8_LIBC_GLIBC - GNU C library
// V8_LIBC_UCLIBC - uClibc
//
// Note that testing for libc must be done using #if not #ifdef. For example,
// to test for the GNU C library, use:
@ -133,8 +132,6 @@
#elif defined(__BIONIC__)
# define V8_LIBC_BIONIC 1
# define V8_LIBC_BSD 1
#elif defined(__UCLIBC__)
# define V8_LIBC_UCLIBC 1
#elif defined(__GLIBC__) || defined(__GNU_LIBRARY__)
# define V8_LIBC_GLIBC 1
#else
@ -178,6 +175,7 @@
// V8_HAS_BUILTIN_CLZ - __builtin_clz() supported
// V8_HAS_BUILTIN_CTZ - __builtin_ctz() supported
// V8_HAS_BUILTIN_EXPECT - __builtin_expect() supported
// V8_HAS_BUILTIN_FRAME_ADDRESS - __builtin_frame_address() supported
// V8_HAS_BUILTIN_POPCOUNT - __builtin_popcount() supported
// V8_HAS_BUILTIN_SADD_OVERFLOW - __builtin_sadd_overflow() supported
// V8_HAS_BUILTIN_SSUB_OVERFLOW - __builtin_ssub_overflow() supported
@ -186,7 +184,6 @@
// V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported
// V8_HAS___FINAL - __final supported in non-C++11 mode
// V8_HAS___FORCEINLINE - __forceinline supported
// V8_HAS_SEALED - MSVC style sealed marker supported
//
// Note that testing for compilers and/or features must be done using #if
// not #ifdef. For example, to test for Intel C++ Compiler, use:
@ -214,6 +211,7 @@
# define V8_HAS_BUILTIN_CLZ (__has_builtin(__builtin_clz))
# define V8_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz))
# define V8_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect))
# define V8_HAS_BUILTIN_FRAME_ADDRESS (__has_builtin(__builtin_frame_address))
# define V8_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
# define V8_HAS_BUILTIN_SADD_OVERFLOW (__has_builtin(__builtin_sadd_overflow))
# define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow))
@ -251,6 +249,7 @@
# define V8_HAS_BUILTIN_CLZ (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_BUILTIN_CTZ (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_BUILTIN_EXPECT (V8_GNUC_PREREQ(2, 96, 0))
# define V8_HAS_BUILTIN_FRAME_ADDRESS (V8_GNUC_PREREQ(2, 96, 0))
# define V8_HAS_BUILTIN_POPCOUNT (V8_GNUC_PREREQ(3, 4, 0))
// g++ requires -std=c++0x or -std=gnu++0x to support C++11 functionality
@ -277,14 +276,11 @@
# define V8_HAS___ALIGNOF 1
// Override control was added with Visual Studio 2005, but
// Visual Studio 2010 and earlier spell "final" as "sealed".
# define V8_HAS_CXX11_FINAL (_MSC_VER >= 1700)
# define V8_HAS_CXX11_OVERRIDE (_MSC_VER >= 1400)
# define V8_HAS_SEALED (_MSC_VER >= 1400)
# define V8_HAS_CXX11_FINAL 1
# define V8_HAS_CXX11_OVERRIDE 1
# define V8_HAS_DECLSPEC_ALIGN 1
# define V8_HAS_DECLSPEC_DEPRECATED (_MSC_VER >= 1300)
# define V8_HAS_DECLSPEC_DEPRECATED 1
# define V8_HAS_DECLSPEC_NOINLINE 1
# define V8_HAS___FORCEINLINE 1

33
deps/v8/include/v8stdint.h поставляемый
Просмотреть файл

@ -1,33 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Load definitions of standard types.
#ifndef V8STDINT_H_
#define V8STDINT_H_
#include <stddef.h>
#include <stdio.h>
#include "v8config.h"
#if V8_OS_WIN && !V8_CC_MINGW
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef short int16_t; // NOLINT
typedef unsigned short uint16_t; // NOLINT
typedef int int32_t;
typedef unsigned int uint32_t;
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
// intptr_t and friends are defined in crtdefs.h through stdio.h.
#else
#include <stdint.h> // NOLINT
#endif
#endif // V8STDINT_H_

3
deps/v8/src/DEPS поставляемый
Просмотреть файл

@ -3,8 +3,7 @@ include_rules = [
"-src/compiler",
"+src/compiler/pipeline.h",
"-src/libplatform",
"-include/libplatform",
"+testing",
"-include/libplatform"
]
specific_include_rules = {

39
deps/v8/src/accessors.cc поставляемый
Просмотреть файл

@ -56,17 +56,6 @@ Handle<ExecutableAccessorInfo> Accessors::CloneAccessor(
}
template <class C>
static C* FindInstanceOf(Isolate* isolate, Object* obj) {
for (PrototypeIterator iter(isolate, obj,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
if (Is<C>(iter.GetCurrent())) return C::cast(iter.GetCurrent());
}
return NULL;
}
static V8_INLINE bool CheckForName(Handle<Name> name,
Handle<String> property_name,
int offset,
@ -183,7 +172,10 @@ void Accessors::ArgumentsIteratorSetter(
LookupIterator it(object, Utils::OpenHandle(*name));
CHECK_EQ(LookupIterator::ACCESSOR, it.state());
DCHECK(it.HolderIsReceiverOrHiddenPrototype());
Object::SetDataProperty(&it, value);
if (Object::SetDataProperty(&it, value).is_null()) {
isolate->OptionalRescheduleException(false);
}
}
@ -258,7 +250,7 @@ void Accessors::ArrayLengthSetter(
if (uint32_v->Number() == number_v->Number()) {
maybe = JSArray::SetElementsLength(array_handle, uint32_v);
maybe.Check();
if (maybe.is_null()) isolate->OptionalRescheduleException(false);
return;
}
@ -892,9 +884,8 @@ static Handle<Object> GetFunctionPrototype(Isolate* isolate,
}
static Handle<Object> SetFunctionPrototype(Isolate* isolate,
Handle<JSFunction> function,
Handle<Object> value) {
MUST_USE_RESULT static MaybeHandle<Object> SetFunctionPrototype(
Isolate* isolate, Handle<JSFunction> function, Handle<Object> value) {
Handle<Object> old_value;
bool is_observed = function->map()->is_observed();
if (is_observed) {
@ -908,21 +899,17 @@ static Handle<Object> SetFunctionPrototype(Isolate* isolate,
DCHECK(function->prototype() == *value);
if (is_observed && !old_value->SameValue(*value)) {
JSObject::EnqueueChangeRecord(
MaybeHandle<Object> result = JSObject::EnqueueChangeRecord(
function, "update", isolate->factory()->prototype_string(), old_value);
if (result.is_null()) return MaybeHandle<Object>();
}
return function;
}
Handle<Object> Accessors::FunctionGetPrototype(Handle<JSFunction> function) {
return GetFunctionPrototype(function->GetIsolate(), function);
}
Handle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
Handle<Object> prototype) {
MaybeHandle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
Handle<Object> prototype) {
DCHECK(function->should_have_prototype());
Isolate* isolate = function->GetIsolate();
return SetFunctionPrototype(isolate, function, prototype);
@ -953,7 +940,9 @@ void Accessors::FunctionPrototypeSetter(
}
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
SetFunctionPrototype(isolate, object, value);
if (SetFunctionPrototype(isolate, object, value).is_null()) {
isolate->OptionalRescheduleException(false);
}
}

5
deps/v8/src/accessors.h поставляемый
Просмотреть файл

@ -66,9 +66,8 @@ class Accessors : public AllStatic {
};
// Accessor functions called directly from the runtime system.
static Handle<Object> FunctionSetPrototype(Handle<JSFunction> object,
Handle<Object> value);
static Handle<Object> FunctionGetPrototype(Handle<JSFunction> object);
MUST_USE_RESULT static MaybeHandle<Object> FunctionSetPrototype(
Handle<JSFunction> object, Handle<Object> value);
static Handle<Object> FunctionGetArguments(Handle<JSFunction> object);
// Accessor infos.

2
deps/v8/src/allocation.cc поставляемый
Просмотреть файл

@ -85,7 +85,7 @@ char* StrNDup(const char* str, int n) {
void* AlignedAlloc(size_t size, size_t alignment) {
DCHECK_LE(V8_ALIGNOF(void*), alignment);
DCHECK(base::bits::IsPowerOfTwo32(alignment));
DCHECK(base::bits::IsPowerOfTwo64(alignment));
void* ptr;
#if V8_OS_WIN
ptr = _aligned_malloc(size, alignment);

795
deps/v8/src/api.cc поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

6
deps/v8/src/api.h поставляемый
Просмотреть файл

@ -54,7 +54,8 @@ class NeanderArray {
return obj_.value();
}
void add(v8::internal::Handle<v8::internal::Object> value);
void add(internal::Isolate* isolate,
v8::internal::Handle<v8::internal::Object> value);
int length();
@ -232,6 +233,8 @@ class Utils {
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Promise> PromiseToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<StackTrace> StackTraceToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<StackFrame> StackFrameToLocal(
@ -355,6 +358,7 @@ MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise)
MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace)
MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)

132
deps/v8/src/arm/assembler-arm.cc поставляемый
Просмотреть файл

@ -109,6 +109,9 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.architecture() >= 7) {
if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
if (FLAG_enable_armv8 && cpu.architecture() >= 8) {
supported_ |= 1u << ARMv8;
}
if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
// Use movw/movt for QUALCOMM ARMv7 cores.
if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) {
@ -472,7 +475,6 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
last_bound_pos_ = 0;
constant_pool_available_ = !FLAG_enable_ool_constant_pool;
ClearRecordedAstId();
}
@ -1056,7 +1058,8 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
static bool use_mov_immediate_load(const Operand& x,
const Assembler* assembler) {
if (assembler != NULL && !assembler->is_constant_pool_available()) {
if (FLAG_enable_ool_constant_pool && assembler != NULL &&
!assembler->is_ool_constant_pool_available()) {
return true;
} else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
(assembler == NULL || !assembler->predictable_code_size())) {
@ -1137,7 +1140,7 @@ void Assembler::move_32_bit_immediate(Register rd,
mov(rd, target, LeaveCC, cond);
}
} else {
DCHECK(is_constant_pool_available());
DCHECK(!FLAG_enable_ool_constant_pool || is_ool_constant_pool_available());
ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
if (section == ConstantPoolArray::EXTENDED_SECTION) {
DCHECK(FLAG_enable_ool_constant_pool);
@ -1571,11 +1574,27 @@ void Assembler::udiv(Register dst, Register src1, Register src2,
}
void Assembler::mul(Register dst, Register src1, Register src2,
SBit s, Condition cond) {
void Assembler::mul(Register dst, Register src1, Register src2, SBit s,
Condition cond) {
DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
// dst goes in bits 16-19 for this instruction!
emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
emit(cond | s | dst.code() * B16 | src2.code() * B8 | B7 | B4 | src1.code());
}
void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
Condition cond) {
DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 |
srcA.code() * B12 | src2.code() * B8 | B4 | src1.code());
}
void Assembler::smmul(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xf * B12 |
src2.code() * B8 | B4 | src1.code());
}
@ -2492,7 +2511,7 @@ void Assembler::vmov(const DwVfpRegister dst,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
} else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
} else if (FLAG_enable_vldr_imm && is_ool_constant_pool_available()) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// generated data which also happens to be executable, a Very Bad
@ -2526,27 +2545,20 @@ void Assembler::vmov(const DwVfpRegister dst,
uint32_t lo, hi;
DoubleAsTwoUInt32(imm, &lo, &hi);
if (scratch.is(no_reg)) {
if (dst.code() < 16) {
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
// Move the low part of the double into the lower of the corresponsing S
// registers of D register dst.
mov(ip, Operand(lo));
vmov(loc.low(), ip);
// Move the high part of the double into the higher of the
// corresponsing S registers of D register dst.
mov(ip, Operand(hi));
vmov(loc.high(), ip);
if (lo == hi) {
// Move the low and high parts of the double to a D register in one
// instruction.
mov(ip, Operand(lo));
vmov(dst, ip, ip);
} else if (scratch.is(no_reg)) {
mov(ip, Operand(lo));
vmov(dst, VmovIndexLo, ip);
if ((lo & 0xffff) == (hi & 0xffff)) {
movt(ip, hi >> 16);
} else {
// D16-D31 does not have S registers, so move the low and high parts
// directly to the D register using vmov.32.
// Note: This may be slower, so we only do this when we have to.
mov(ip, Operand(lo));
vmov(dst, VmovIndexLo, ip);
mov(ip, Operand(hi));
vmov(dst, VmovIndexHi, ip);
}
vmov(dst, VmovIndexHi, ip);
} else {
// Move the low and high parts of the double to a D register in one
// instruction.
@ -3075,6 +3087,76 @@ void Assembler::vsqrt(const DwVfpRegister dst,
}
void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
DCHECK(CpuFeatures::IsSupported(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
0x5 * B9 | B8 | B6 | m * B5 | vm);
}
void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
DCHECK(CpuFeatures::IsSupported(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
}
void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
DCHECK(CpuFeatures::IsSupported(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
}
void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
DCHECK(CpuFeatures::IsSupported(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
}
void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond) {
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
DCHECK(CpuFeatures::IsSupported(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
0x5 * B9 | B8 | B7 | B6 | m * B5 | vm);
}
// Support for NEON.
void Assembler::vld1(NeonSize size,

26
deps/v8/src/arm/assembler-arm.h поставляемый
Просмотреть файл

@ -975,6 +975,11 @@ class Assembler : public AssemblerBase {
void mul(Register dst, Register src1, Register src2,
SBit s = LeaveCC, Condition cond = al);
void smmla(Register dst, Register src1, Register src2, Register srcA,
Condition cond = al);
void smmul(Register dst, Register src1, Register src2, Condition cond = al);
void smlal(Register dstL, Register dstH, Register src1, Register src2,
SBit s = LeaveCC, Condition cond = al);
@ -1274,6 +1279,14 @@ class Assembler : public AssemblerBase {
const DwVfpRegister src,
const Condition cond = al);
// ARMv8 rounding instructions.
void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
void vrintn(const DwVfpRegister dst, const DwVfpRegister src);
void vrintm(const DwVfpRegister dst, const DwVfpRegister src);
void vrintp(const DwVfpRegister dst, const DwVfpRegister src);
void vrintz(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond = al);
// Support for NEON.
// All these APIs support D0 to D31 and Q0 to Q15.
@ -1488,8 +1501,6 @@ class Assembler : public AssemblerBase {
// Generate the constant pool for the generated code.
void PopulateConstantPool(ConstantPoolArray* constant_pool);
bool is_constant_pool_available() const { return constant_pool_available_; }
bool use_extended_constant_pool() const {
return constant_pool_builder_.current_section() ==
ConstantPoolArray::EXTENDED_SECTION;
@ -1549,10 +1560,6 @@ class Assembler : public AssemblerBase {
(pc_offset() < no_const_pool_before_);
}
void set_constant_pool_available(bool available) {
constant_pool_available_ = available;
}
private:
int next_buffer_check_; // pc offset of next buffer check
@ -1615,10 +1622,6 @@ class Assembler : public AssemblerBase {
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
// Indicates whether the constant pool can be accessed, which is only possible
// if the pp register points to the current code object's constant pool.
bool constant_pool_available_;
// Code emission
inline void CheckBuffer();
void GrowBuffer();
@ -1654,9 +1657,6 @@ class Assembler : public AssemblerBase {
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
friend class FrameAndConstantPoolScope;
friend class ConstantPoolUnavailableScope;
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
friend class EnsureSpace;

89
deps/v8/src/arm/code-stubs-arm.cc поставляемый
Просмотреть файл

@ -1441,7 +1441,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ b(ne, &slow);
// Null is not instance of anything.
__ cmp(scratch, Operand(isolate()->factory()->null_value()));
__ cmp(object, Operand(isolate()->factory()->null_value()));
__ b(ne, &object_not_null);
if (ReturnTrueFalseObject()) {
__ Move(r0, factory->false_value());
@ -1503,6 +1503,34 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
// Return address is in lr.
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
Register scratch = r3;
Register result = r0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
STRING_INDEX_IS_ARRAY_INDEX,
RECEIVER_IS_STRING);
char_at_generator.GenerateFast(masm);
__ Ret();
StubRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
@ -2376,13 +2404,13 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
__ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
__ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
__ b(eq, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
__ jmp(&done);
@ -2698,9 +2726,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&extra_checks_or_miss);
Label miss;
__ CompareRoot(r4, Heap::kMegamorphicSymbolRootIndex);
__ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
__ b(eq, &slow_start);
__ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
__ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
__ b(eq, &miss);
if (!FLAG_trace_ic) {
@ -2710,8 +2738,19 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
__ b(ne, &miss);
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
// We have to update statistics for runtime profiling.
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
__ ldr(r4, FieldMemOperand(r2, with_types_offset));
__ sub(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, with_types_offset));
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
__ ldr(r4, FieldMemOperand(r2, generic_offset));
__ add(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, generic_offset));
__ jmp(&slow_start);
}
@ -2759,14 +2798,16 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi trigger the non-string case.
__ JumpIfSmi(object_, receiver_not_string_);
if (check_mode_ == RECEIVER_IS_UNKNOWN) {
__ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
__ tst(result_, Operand(kIsNotStringMask));
__ b(ne, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
__ tst(result_, Operand(kIsNotStringMask));
__ b(ne, receiver_not_string_);
}
// If the index is non-smi trigger the non-smi case.
__ JumpIfNotSmi(index_, &index_not_smi_);
@ -3137,8 +3178,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r2: length
// r3: from index (untagged)
__ SmiTag(r3, r3);
StringCharAtGenerator generator(
r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
StringCharAtGenerator generator(r0, r3, r2, r0, &runtime, &runtime, &runtime,
STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
generator.GenerateFast(masm);
__ Drop(3);
__ Ret();
@ -3146,6 +3187,24 @@ void SubStringStub::Generate(MacroAssembler* masm) {
}
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in r0.
Label check_heap_number, call_builtin;
__ JumpIfNotSmi(r0, &check_heap_number);
__ Ret();
__ bind(&check_heap_number);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ CompareRoot(r1, Heap::kHeapNumberMapRootIndex);
__ b(ne, &call_builtin);
__ Ret();
__ bind(&call_builtin);
__ push(r0);
__ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {

16
deps/v8/src/arm/codegen-arm.cc поставляемый
Просмотреть файл

@ -604,8 +604,22 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ add(src_elements, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
__ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
__ add(array, array, Operand(kHeapObjectTag));
__ add(dst_end, dst_elements, Operand(length, LSL, 1));
// Allocating heap numbers in the loop below can fail and cause a jump to
// gc_required. We can't leave a partly initialized FixedArray behind,
// so pessimistically fill it with holes now.
Label initialization_loop, initialization_loop_entry;
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ b(&initialization_loop_entry);
__ bind(&initialization_loop);
__ str(scratch, MemOperand(dst_elements, kPointerSize, PostIndex));
__ bind(&initialization_loop_entry);
__ cmp(dst_elements, dst_end);
__ b(lt, &initialization_loop);
__ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
__ add(array, array, Operand(kHeapObjectTag));
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// Using offsetted addresses in src_elements to fully take advantage of
// post-indexing.

52
deps/v8/src/arm/constants-arm.h поставляемый
Просмотреть файл

@ -161,26 +161,26 @@ enum MiscInstructionsBits74 {
// Instruction encoding bits and masks.
enum {
H = 1 << 5, // Halfword (or byte).
S6 = 1 << 6, // Signed (or unsigned).
L = 1 << 20, // Load (or store).
S = 1 << 20, // Set condition code (or leave unchanged).
W = 1 << 21, // Writeback base register (or leave unchanged).
A = 1 << 21, // Accumulate in multiply instruction (or not).
B = 1 << 22, // Unsigned byte (or word).
N = 1 << 22, // Long (or short).
U = 1 << 23, // Positive (or negative) offset/index.
P = 1 << 24, // Offset/pre-indexed addressing (or post-indexed addressing).
I = 1 << 25, // Immediate shifter operand (or not).
B4 = 1 << 4,
B5 = 1 << 5,
B6 = 1 << 6,
B7 = 1 << 7,
B8 = 1 << 8,
B9 = 1 << 9,
H = 1 << 5, // Halfword (or byte).
S6 = 1 << 6, // Signed (or unsigned).
L = 1 << 20, // Load (or store).
S = 1 << 20, // Set condition code (or leave unchanged).
W = 1 << 21, // Writeback base register (or leave unchanged).
A = 1 << 21, // Accumulate in multiply instruction (or not).
B = 1 << 22, // Unsigned byte (or word).
N = 1 << 22, // Long (or short).
U = 1 << 23, // Positive (or negative) offset/index.
P = 1 << 24, // Offset/pre-indexed addressing (or post-indexed addressing).
I = 1 << 25, // Immediate shifter operand (or not).
B4 = 1 << 4,
B5 = 1 << 5,
B6 = 1 << 6,
B7 = 1 << 7,
B8 = 1 << 8,
B9 = 1 << 9,
B12 = 1 << 12,
B16 = 1 << 16,
B17 = 1 << 17,
B18 = 1 << 18,
B19 = 1 << 19,
B20 = 1 << 20,
@ -194,16 +194,16 @@ enum {
B28 = 1 << 28,
// Instruction bit masks.
kCondMask = 15 << 28,
kALUMask = 0x6f << 21,
kRdMask = 15 << 12, // In str instruction.
kCondMask = 15 << 28,
kALUMask = 0x6f << 21,
kRdMask = 15 << 12, // In str instruction.
kCoprocessorMask = 15 << 8,
kOpCodeMask = 15 << 21, // In data-processing instructions.
kImm24Mask = (1 << 24) - 1,
kImm16Mask = (1 << 16) - 1,
kImm8Mask = (1 << 8) - 1,
kOff12Mask = (1 << 12) - 1,
kOff8Mask = (1 << 8) - 1
kImm24Mask = (1 << 24) - 1,
kImm16Mask = (1 << 16) - 1,
kImm8Mask = (1 << 8) - 1,
kOff12Mask = (1 << 12) - 1,
kOff8Mask = (1 << 8) - 1
};

6
deps/v8/src/arm/debug-arm.cc поставляемый
Просмотреть файл

@ -178,7 +178,11 @@ void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0);
RegList regs = receiver.bit() | name.bit();
if (FLAG_vector_ics) {
regs |= VectorLoadICTrampolineDescriptor::SlotRegister().bit();
}
Generate_DebugBreakCallHelper(masm, regs, 0);
}

65
deps/v8/src/arm/disasm-arm.cc поставляемый
Просмотреть файл

@ -148,7 +148,7 @@ void Decoder::Print(const char* str) {
// These condition names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
static const char* cond_names[kNumberOfConditions] = {
static const char* const cond_names[kNumberOfConditions] = {
"eq", "ne", "cs" , "cc" , "mi" , "pl" , "vs" , "vc" ,
"hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
};
@ -1096,6 +1096,17 @@ void Decoder::DecodeType3(Instruction* instr) {
break;
}
case db_x: {
if (instr->Bits(22, 20) == 0x5) {
if (instr->Bits(7, 4) == 0x1) {
if (instr->Bits(15, 12) == 0xF) {
Format(instr, "smmul'cond 'rn, 'rm, 'rs");
} else {
// SMMLA (in V8 notation matching ARM ISA format)
Format(instr, "smmla'cond 'rn, 'rm, 'rs, 'rd");
}
break;
}
}
if (FLAG_enable_sudiv) {
if (instr->Bits(5, 4) == 0x1) {
if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
@ -1266,6 +1277,14 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
} else {
Unknown(instr); // Not used by V8.
}
} else if (((instr->Opc2Value() == 0x6)) && instr->Opc3Value() == 0x3) {
bool dp_operation = (instr->SzValue() == 1);
// vrintz - round towards zero (truncate)
if (dp_operation) {
Format(instr, "vrintz'cond.f64.f64 'Dd, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
} else {
Unknown(instr); // Not used by V8.
}
@ -1616,6 +1635,50 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
Unknown(instr);
}
break;
case 0x1D:
if (instr->Opc1Value() == 0x7 && instr->Bits(19, 18) == 0x2 &&
instr->Bits(11, 9) == 0x5 && instr->Bits(7, 6) == 0x1 &&
instr->Bit(4) == 0x0) {
// VRINTA, VRINTN, VRINTP, VRINTM (floating-point)
bool dp_operation = (instr->SzValue() == 1);
int rounding_mode = instr->Bits(17, 16);
switch (rounding_mode) {
case 0x0:
if (dp_operation) {
Format(instr, "vrinta.f64.f64 'Dd, 'Dm");
} else {
Unknown(instr);
}
break;
case 0x1:
if (dp_operation) {
Format(instr, "vrintn.f64.f64 'Dd, 'Dm");
} else {
Unknown(instr);
}
break;
case 0x2:
if (dp_operation) {
Format(instr, "vrintp.f64.f64 'Dd, 'Dm");
} else {
Unknown(instr);
}
break;
case 0x3:
if (dp_operation) {
Format(instr, "vrintm.f64.f64 'Dd, 'Dm");
} else {
Unknown(instr);
}
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
break;
}
} else {
Unknown(instr);
}
break;
default:
Unknown(instr);
break;

467
deps/v8/src/arm/full-codegen-arm.cc поставляемый
Просмотреть файл

@ -1102,7 +1102,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
int slot = stmt->ForInFeedbackSlot();
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@ -1131,6 +1131,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(r0);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(r0);
// Check for proxies.
@ -1155,6 +1156,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ push(r0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@ -1194,7 +1196,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Move(r1, FeedbackVector());
__ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot)));
int vector_index = FeedbackVector()->GetIndex(slot);
__ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(vector_index)));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
@ -1364,7 +1367,13 @@ void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
__ Move(LoadDescriptor::NameRegister(), home_object_symbol);
CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->HomeObjectFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
}
__ cmp(r0, Operand(isolate()->factory()->undefined_value()));
Label done;
@ -1426,7 +1435,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
}
ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
@ -1515,7 +1524,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
}
CallLoadIC(CONTEXTUAL);
context()->Plug(r0);
@ -1691,6 +1700,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in r0.
@ -1719,6 +1729,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
@ -1751,7 +1763,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(r0);
VisitForStackValue(value);
if (property->emit_store()) {
__ CallRuntime(Runtime::kSetPrototype, 2);
__ CallRuntime(Runtime::kInternalSetPrototype, 2);
} else {
__ Drop(2);
}
@ -1883,22 +1895,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
Comment cmnt(masm_, "[ Assignment");
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind {
VARIABLE,
NAMED_PROPERTY,
KEYED_PROPERTY,
NAMED_SUPER_PROPERTY
};
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
if (property != NULL) {
assign_type = (property->key()->IsPropertyName())
? (property->IsSuperAccess() ? NAMED_SUPER_PROPERTY
: NAMED_PROPERTY)
: KEYED_PROPERTY;
}
LhsKind assign_type = GetAssignType(property);
// Evaluate LHS expression.
switch (assign_type) {
@ -1925,6 +1923,21 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ Push(result_register());
}
break;
case KEYED_SUPER_PROPERTY:
VisitForStackValue(property->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(property->obj()->AsSuperReference());
__ Push(result_register());
VisitForAccumulatorValue(property->key());
__ Push(result_register());
if (expr->is_compound()) {
const Register scratch = r1;
__ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
__ Push(scratch);
__ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
__ Push(scratch);
__ Push(result_register());
}
break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
VisitForStackValue(property->obj());
@ -1956,6 +1969,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedSuperPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
@ -2003,7 +2020,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedPropertyAssignment(expr);
break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyAssignment(expr);
EmitNamedSuperPropertyStore(property);
context()->Plug(r0);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyStore(property);
context()->Plug(r0);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
@ -2134,7 +2156,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ ldr(load_name, MemOperand(sp, 2 * kPointerSize));
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->KeyedLoadFeedbackSlot())));
Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
}
Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallIC(ic, TypeFeedbackId::None());
@ -2154,7 +2176,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->DoneFeedbackSlot())));
Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
}
CallLoadIC(NOT_CONTEXTUAL); // r0=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
@ -2167,7 +2189,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->ValueFeedbackSlot())));
Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
}
CallLoadIC(NOT_CONTEXTUAL); // r0=result.value
context()->DropAndPlug(2, r0); // drop iter and g
@ -2308,23 +2330,26 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label gc_required;
Label allocated;
Handle<Map> map(isolate()->native_context()->iterator_result_map());
const int instance_size = 5 * kPointerSize;
DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
instance_size);
__ Allocate(map->instance_size(), r0, r2, r3, &gc_required, TAG_OBJECT);
__ Allocate(instance_size, r0, r2, r3, &gc_required, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
__ Push(Smi::FromInt(instance_size));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
__ ldr(context_register(),
MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&allocated);
__ mov(r1, Operand(map));
__ ldr(r1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
__ ldr(r1, ContextOperand(r1, Context::ITERATOR_RESULT_MAP_INDEX));
__ pop(r2);
__ mov(r3, Operand(isolate()->factory()->ToBoolean(done)));
__ mov(r4, Operand(isolate()->factory()->empty_fixed_array()));
DCHECK_EQ(map->instance_size(), 5 * kPointerSize);
__ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
@ -2348,7 +2373,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
@ -2373,7 +2398,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
CallIC(ic);
} else {
CallIC(ic, prop->PropertyFeedbackId());
@ -2381,6 +2406,14 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetSourcePosition(prop->position());
__ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
}
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode,
@ -2475,6 +2508,62 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Constructor is in r0.
DCHECK(lit != NULL);
__ push(r0);
// No access check is needed here since the constructor is created by the
// class literal.
Register scratch = r1;
__ ldr(scratch,
FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
__ push(scratch);
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
DCHECK(key != NULL);
if (property->is_static()) {
__ ldr(scratch, MemOperand(sp, kPointerSize)); // constructor
} else {
__ ldr(scratch, MemOperand(sp, 0)); // prototype
}
__ push(scratch);
VisitForStackValue(key);
VisitForStackValue(value);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::PROTOTYPE:
__ CallRuntime(Runtime::kDefineClassMethod, 3);
break;
case ObjectLiteral::Property::GETTER:
__ CallRuntime(Runtime::kDefineClassGetter, 3);
break;
case ObjectLiteral::Property::SETTER:
__ CallRuntime(Runtime::kDefineClassSetter, 3);
break;
default:
UNREACHABLE();
}
}
// prototype
__ CallRuntime(Runtime::kToFastProperties, 1);
// constructor
__ CallRuntime(Runtime::kToFastProperties, 1);
}
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
@ -2490,16 +2579,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitAssignment(Expression* expr) {
DCHECK(expr->IsValidReferenceExpression());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
if (prop != NULL) {
assign_type = (prop->key()->IsPropertyName())
? NAMED_PROPERTY
: KEYED_PROPERTY;
}
LhsKind assign_type = GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
@ -2518,6 +2599,42 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
CallStoreIC();
break;
}
case NAMED_SUPER_PROPERTY: {
__ Push(r0);
VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(prop->obj()->AsSuperReference());
// stack: value, this; r0: home_object
Register scratch = r2;
Register scratch2 = r3;
__ mov(scratch, result_register()); // home_object
__ ldr(r0, MemOperand(sp, kPointerSize)); // value
__ ldr(scratch2, MemOperand(sp, 0)); // this
__ str(scratch2, MemOperand(sp, kPointerSize)); // this
__ str(scratch, MemOperand(sp, 0)); // home_object
// stack: this, home_object; r0: value
EmitNamedSuperPropertyStore(prop);
break;
}
case KEYED_SUPER_PROPERTY: {
__ Push(r0);
VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(prop->obj()->AsSuperReference());
__ Push(result_register());
VisitForAccumulatorValue(prop->key());
Register scratch = r2;
Register scratch2 = r3;
__ ldr(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
// stack: value, this, home_object; r0: key, r3: value
__ ldr(scratch, MemOperand(sp, kPointerSize)); // this
__ str(scratch, MemOperand(sp, 2 * kPointerSize));
__ ldr(scratch, MemOperand(sp, 0)); // home_object
__ str(scratch, MemOperand(sp, kPointerSize));
__ str(r0, MemOperand(sp, 0));
__ Move(r0, scratch2);
// stack: this, home_object, key; r0: value.
EmitKeyedSuperPropertyStore(prop);
break;
}
case KEYED_PROPERTY: {
__ push(r0); // Preserve value.
VisitForStackValue(prop->obj());
@ -2634,21 +2751,32 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
void FullCodeGenerator::EmitNamedSuperPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
// Assignment to named property of super.
// r0 : value
// stack : receiver ('this'), home_object
Property* prop = expr->target()->AsProperty();
DCHECK(prop != NULL);
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
__ Push(r0);
__ Push(key->value());
__ Push(r0);
__ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
: Runtime::kStoreToSuper_Sloppy),
4);
context()->Plug(r0);
}
void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// Assignment to named property of super.
// r0 : value
// stack : receiver ('this'), home_object, key
DCHECK(prop != NULL);
__ Push(r0);
__ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
: Runtime::kStoreKeyedToSuper_Sloppy),
4);
}
@ -2686,11 +2814,19 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(r0);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ Move(LoadDescriptor::NameRegister(), r0);
__ pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
if (!expr->IsSuperAccess()) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ Move(LoadDescriptor::NameRegister(), r0);
__ pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(expr->obj()->AsSuperReference());
__ Push(result_register());
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
context()->Plug(r0);
}
}
@ -2802,6 +2938,43 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
}
void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
SetSourcePosition(prop->position());
// Load the function from the receiver.
const Register scratch = r1;
SuperReference* super_ref = prop->obj()->AsSuperReference();
EmitLoadHomeObject(super_ref);
__ Push(r0);
VisitForAccumulatorValue(super_ref->this_var());
__ Push(r0);
__ Push(r0);
__ ldr(scratch, MemOperand(sp, kPointerSize * 2));
__ Push(scratch);
VisitForStackValue(prop->key());
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
__ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
// Replace home_object with target function.
__ str(r0, MemOperand(sp, kPointerSize));
// Stack here:
// - target function
// - this (receiver)
EmitCall(expr, CallICState::METHOD);
}
void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@ -2816,7 +2989,7 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
SetSourcePosition(expr->position());
Handle<Code> ic = CallIC::initialize_stub(
isolate(), arg_count, call_type);
__ mov(r3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
__ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
@ -2830,13 +3003,16 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// r4: copy of the first argument or undefined if it doesn't exist.
// r5: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ ldr(r4, MemOperand(sp, arg_count * kPointerSize));
__ ldr(r5, MemOperand(sp, arg_count * kPointerSize));
} else {
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
}
// r4: the receiver of the enclosing function.
__ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// r3: the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
__ ldr(r3, MemOperand(fp, receiver_offset * kPointerSize));
@ -2848,8 +3024,17 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
__ Push(r5);
__ Push(r4, r3, r2, r1);
__ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
__ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
}
void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
DCHECK(super_ref != NULL);
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(r0);
__ CallRuntime(Runtime::kGetPrototype, 1);
}
@ -2892,6 +3077,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// r1 (receiver). Touch up the stack with the right values.
__ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ str(r1, MemOperand(sp, arg_count * kPointerSize));
PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
}
// Record source position for debugger.
@ -2925,6 +3112,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ Push(context_register(), r2);
__ CallRuntime(Runtime::kLoadLookupSlot, 2);
__ Push(r0, r1); // Function, receiver.
PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
// If fast case code has been generated, emit code to push the
// function and receiver and have the slow path jump around this
@ -2948,9 +3136,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
bool is_named_call = property->key()->IsPropertyName();
// super.x() is handled in EmitCallWithLoadIC.
if (property->IsSuperAccess() && is_named_call) {
EmitSuperCallWithLoadIC(expr);
if (property->IsSuperAccess()) {
if (is_named_call) {
EmitSuperCallWithLoadIC(expr);
} else {
EmitKeyedSuperCallWithLoadIC(expr);
}
} else {
{
PreservePositionScope scope(masm()->positions_recorder());
@ -2962,6 +3153,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitKeyedCallWithLoadIC(expr, property->key());
}
}
} else if (call_type == Call::SUPER_CALL) {
SuperReference* super_ref = callee->AsSuperReference();
EmitLoadSuperConstructor(super_ref);
__ Push(result_register());
VisitForStackValue(super_ref->this_var());
EmitCall(expr, CallICState::METHOD);
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@ -2990,7 +3187,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
VisitForStackValue(expr->expression());
if (expr->expression()->IsSuperReference()) {
EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
__ Push(result_register());
} else {
VisitForStackValue(expr->expression());
}
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@ -3010,12 +3212,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
if (FLAG_pretenuring_call_new) {
EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
DCHECK(expr->AllocationSiteFeedbackSlot() ==
expr->CallNewFeedbackSlot() + 1);
DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
expr->CallNewFeedbackSlot().ToInt() + 1);
}
__ Move(r2, FeedbackVector());
__ mov(r3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
__ mov(r3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
@ -3326,6 +3528,32 @@ void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
}
void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
&if_false, &fall_through);
__ JumpIfSmi(r0, if_false);
Register map = r1;
Register type_reg = r2;
__ ldr(map, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ sub(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
__ cmp(type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ls, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
@ -4216,7 +4444,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->CallRuntimeFeedbackSlot())));
Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
@ -4372,22 +4600,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->expression()->AsProperty();
// In case of a property we use the uninitialized expression context
// of the key to detect a named property.
if (prop != NULL) {
assign_type =
(prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
if (prop->IsSuperAccess()) {
// throw exception.
VisitSuperReference(prop->obj()->AsSuperReference());
return;
}
}
LhsKind assign_type = GetAssignType(prop);
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
@ -4400,18 +4614,55 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(ip, Operand(Smi::FromInt(0)));
__ push(ip);
}
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the register.
VisitForStackValue(prop->obj());
__ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
__ ldr(LoadDescriptor::ReceiverRegister(),
MemOperand(sp, 1 * kPointerSize));
__ ldr(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
EmitKeyedPropertyLoad(prop);
switch (assign_type) {
case NAMED_PROPERTY: {
// Put the object both on the stack and in the register.
VisitForStackValue(prop->obj());
__ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(prop);
break;
}
case NAMED_SUPER_PROPERTY: {
VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(prop->obj()->AsSuperReference());
__ Push(result_register());
const Register scratch = r1;
__ ldr(scratch, MemOperand(sp, kPointerSize));
__ Push(scratch);
__ Push(result_register());
EmitNamedSuperPropertyLoad(prop);
break;
}
case KEYED_SUPER_PROPERTY: {
VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(prop->obj()->AsSuperReference());
__ Push(result_register());
VisitForAccumulatorValue(prop->key());
__ Push(result_register());
const Register scratch = r1;
__ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
__ Push(scratch);
__ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
__ Push(scratch);
__ Push(result_register());
EmitKeyedSuperPropertyLoad(prop);
break;
}
case KEYED_PROPERTY: {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
__ ldr(LoadDescriptor::ReceiverRegister(),
MemOperand(sp, 1 * kPointerSize));
__ ldr(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
EmitKeyedPropertyLoad(prop);
break;
}
case VARIABLE:
UNREACHABLE();
}
}
@ -4445,9 +4696,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ str(r0, MemOperand(sp, kPointerSize));
break;
case NAMED_SUPER_PROPERTY:
__ str(r0, MemOperand(sp, 2 * kPointerSize));
break;
case KEYED_PROPERTY:
__ str(r0, MemOperand(sp, 2 * kPointerSize));
break;
case KEYED_SUPER_PROPERTY:
__ str(r0, MemOperand(sp, 3 * kPointerSize));
break;
}
}
}
@ -4475,9 +4732,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ str(r0, MemOperand(sp, kPointerSize));
break;
case NAMED_SUPER_PROPERTY:
__ str(r0, MemOperand(sp, 2 * kPointerSize));
break;
case KEYED_PROPERTY:
__ str(r0, MemOperand(sp, 2 * kPointerSize));
break;
case KEYED_SUPER_PROPERTY:
__ str(r0, MemOperand(sp, 3 * kPointerSize));
break;
}
}
}
@ -4533,6 +4796,28 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
case NAMED_SUPER_PROPERTY: {
EmitNamedSuperPropertyStore(prop);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
}
} else {
context()->Plug(r0);
}
break;
}
case KEYED_SUPER_PROPERTY: {
EmitKeyedSuperPropertyStore(prop);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
}
} else {
context()->Plug(r0);
}
break;
}
case KEYED_PROPERTY: {
__ Pop(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
@ -4563,7 +4848,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
}
// Use a regular load, not a contextual load, to avoid a reference
// error.

12
deps/v8/src/arm/interface-descriptors-arm.cc поставляемый
Просмотреть файл

@ -29,6 +29,9 @@ const Register StoreDescriptor::NameRegister() { return r2; }
const Register StoreDescriptor::ValueRegister() { return r0; }
const Register StoreTransitionDescriptor::MapRegister() { return r3; }
const Register ElementTransitionAndStoreDescriptor::MapRegister() { return r3; }
@ -149,6 +152,15 @@ void TransitionElementsKindDescriptor::Initialize(
}
void AllocateHeapNumberDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
// register state
// cp -- context
Register registers[] = {cp};
data->Initialize(arraysize(registers), registers, nullptr);
}
void ArrayConstructorConstantArgCountDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
// register state

12
deps/v8/src/arm/lithium-arm.cc поставляемый
Просмотреть файл

@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <sstream>
#include "src/v8.h"
#include "src/arm/lithium-codegen-arm.h"
@ -316,9 +318,9 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
OStringStream os;
std::ostringstream os;
os << hydrogen()->access() << " <- ";
stream->Add(os.c_str());
stream->Add(os.str().c_str());
value()->PrintTo(stream);
}
@ -697,11 +699,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Shift operations can only deoptimize if we do a logical shift
// by 0 and the result cannot be truncated to int32.
if (op == Token::SHR && constant_value == 0) {
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
}
LInstruction* result =

18
deps/v8/src/arm/lithium-codegen-arm.cc поставляемый
Просмотреть файл

@ -2994,13 +2994,14 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector = ToRegister(instr->temp_vector());
DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
__ Move(vector, instr->hydrogen()->feedback_vector());
Register vector_register = ToRegister(instr->temp_vector());
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
// No need to allocate this register.
DCHECK(VectorLoadICDescriptor::SlotRegister().is(r0));
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(instr->hydrogen()->slot())));
int index = vector->GetIndex(instr->hydrogen()->slot());
__ mov(VectorLoadICDescriptor::SlotRegister(), Operand(Smi::FromInt(index)));
}
@ -3015,7 +3016,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@ -3137,7 +3138,8 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@ -3428,7 +3430,7 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}

27
deps/v8/src/arm/macro-assembler-arm.cc поставляемый
Просмотреть файл

@ -967,7 +967,7 @@ void MacroAssembler::StubPrologue() {
add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
if (FLAG_enable_ool_constant_pool) {
LoadConstantPoolPointerRegister();
set_constant_pool_available(true);
set_ool_constant_pool_available(true);
}
}
@ -992,16 +992,16 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
}
if (FLAG_enable_ool_constant_pool) {
LoadConstantPoolPointerRegister();
set_constant_pool_available(true);
set_ool_constant_pool_available(true);
}
}
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool) {
bool load_constant_pool_pointer_reg) {
// r0-r3: preserved
PushFixedFrame();
if (FLAG_enable_ool_constant_pool && load_constant_pool) {
if (FLAG_enable_ool_constant_pool && load_constant_pool_pointer_reg) {
LoadConstantPoolPointerRegister();
}
mov(ip, Operand(Smi::FromInt(type)));
@ -4071,21 +4071,22 @@ void MacroAssembler::TruncatingDiv(Register result,
DCHECK(!dividend.is(ip));
DCHECK(!result.is(ip));
base::MagicNumbersForDivision<uint32_t> mag =
base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
mov(ip, Operand(mag.multiplier));
smull(ip, result, dividend, ip);
bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
bool neg = (mag.multiplier & (1U << 31)) != 0;
if (divisor > 0 && neg) {
add(result, result, Operand(dividend));
}
if (divisor < 0 && !neg && mag.multiplier > 0) {
sub(result, result, Operand(dividend));
smmla(result, dividend, ip, dividend);
} else {
smmul(result, dividend, ip);
if (divisor < 0 && !neg && mag.multiplier > 0) {
sub(result, result, Operand(dividend));
}
}
if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift));
add(result, result, Operand(dividend, LSR, 31));
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

68
deps/v8/src/arm/macro-assembler-arm.h поставляемый
Просмотреть файл

@ -1401,7 +1401,8 @@ class MacroAssembler: public Assembler {
}
// Activation support.
void EnterFrame(StackFrame::Type type, bool load_constant_pool = false);
void EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg = false);
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type);
@ -1530,71 +1531,6 @@ class CodePatcher {
};
class FrameAndConstantPoolScope {
public:
FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type)
: masm_(masm),
type_(type),
old_has_frame_(masm->has_frame()),
old_constant_pool_available_(masm->is_constant_pool_available()) {
// We only want to enable constant pool access for non-manual frame scopes
// to ensure the constant pool pointer is valid throughout the scope.
DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
masm->set_has_frame(true);
masm->set_constant_pool_available(true);
masm->EnterFrame(type, !old_constant_pool_available_);
}
~FrameAndConstantPoolScope() {
masm_->LeaveFrame(type_);
masm_->set_has_frame(old_has_frame_);
masm_->set_constant_pool_available(old_constant_pool_available_);
}
// Normally we generate the leave-frame code when this object goes
// out of scope. Sometimes we may need to generate the code somewhere else
// in addition. Calling this will achieve that, but the object stays in
// scope, the MacroAssembler is still marked as being in a frame scope, and
// the code will be generated again when it goes out of scope.
void GenerateLeaveFrame() {
DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
masm_->LeaveFrame(type_);
}
private:
MacroAssembler* masm_;
StackFrame::Type type_;
bool old_has_frame_;
bool old_constant_pool_available_;
DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope);
};
// Class for scoping the the unavailability of constant pool access.
class ConstantPoolUnavailableScope {
public:
explicit ConstantPoolUnavailableScope(MacroAssembler* masm)
: masm_(masm),
old_constant_pool_available_(masm->is_constant_pool_available()) {
if (FLAG_enable_ool_constant_pool) {
masm_->set_constant_pool_available(false);
}
}
~ConstantPoolUnavailableScope() {
if (FLAG_enable_ool_constant_pool) {
masm_->set_constant_pool_available(old_constant_pool_available_);
}
}
private:
MacroAssembler* masm_;
int old_constant_pool_available_;
DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope);
};
// -----------------------------------------------------------------------------
// Static helper functions.

80
deps/v8/src/arm/simulator-arm.cc поставляемый
Просмотреть файл

@ -2710,6 +2710,27 @@ void Simulator::DecodeType3(Instruction* instr) {
break;
}
case db_x: {
if (instr->Bits(22, 20) == 0x5) {
if (instr->Bits(7, 4) == 0x1) {
int rm = instr->RmValue();
int32_t rm_val = get_register(rm);
int rs = instr->RsValue();
int32_t rs_val = get_register(rs);
if (instr->Bits(15, 12) == 0xF) {
// SMMUL (in V8 notation matching ARM ISA format)
// Format(instr, "smmul'cond 'rn, 'rm, 'rs");
rn_val = base::bits::SignedMulHigh32(rm_val, rs_val);
} else {
// SMMLA (in V8 notation matching ARM ISA format)
// Format(instr, "smmla'cond 'rn, 'rm, 'rs, 'rd");
int rd = instr->RdValue();
int32_t rd_val = get_register(rd);
rn_val = base::bits::SignedMulHighAndAdd32(rm_val, rs_val, rd_val);
}
set_register(rn, rn_val);
return;
}
}
if (FLAG_enable_sudiv) {
if (instr->Bits(5, 4) == 0x1) {
if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
@ -2720,15 +2741,12 @@ void Simulator::DecodeType3(Instruction* instr) {
int rs = instr->RsValue();
int32_t rs_val = get_register(rs);
int32_t ret_val = 0;
DCHECK(rs_val != 0);
// udiv
if (instr->Bit(21) == 0x1) {
ret_val = static_cast<int32_t>(static_cast<uint32_t>(rm_val) /
static_cast<uint32_t>(rs_val));
} else if ((rm_val == kMinInt) && (rs_val == -1)) {
ret_val = kMinInt;
ret_val = bit_cast<int32_t>(base::bits::UnsignedDiv32(
bit_cast<uint32_t>(rm_val), bit_cast<uint32_t>(rs_val)));
} else {
ret_val = rm_val / rs_val;
ret_val = base::bits::SignedDiv32(rm_val, rs_val);
}
set_register(rn, ret_val);
return;
@ -2939,6 +2957,12 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
} else {
UNREACHABLE(); // Not used by v8.
}
} else if (((instr->Opc2Value() == 0x6)) && (instr->Opc3Value() == 0x3)) {
// vrintz - truncate
double dm_value = get_double_from_d_register(vm);
double dd_value = trunc(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
UNREACHABLE(); // Not used by V8.
}
@ -3589,6 +3613,50 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
UNIMPLEMENTED();
}
break;
case 0x1D:
if (instr->Opc1Value() == 0x7 && instr->Opc3Value() == 0x1 &&
instr->Bits(11, 9) == 0x5 && instr->Bits(19, 18) == 0x2 &&
instr->Bit(8) == 0x1) {
int vm = instr->VFPMRegValue(kDoublePrecision);
int vd = instr->VFPDRegValue(kDoublePrecision);
double dm_value = get_double_from_d_register(vm);
double dd_value = 0.0;
int rounding_mode = instr->Bits(17, 16);
switch (rounding_mode) {
case 0x0: // vrinta - round with ties to away from zero
dd_value = round(dm_value);
break;
case 0x1: { // vrintn - round with ties to even
dd_value = std::floor(dm_value);
double error = dm_value - dd_value;
// Take care of correctly handling the range [-0.5, -0.0], which
// must yield -0.0.
if ((-0.5 <= dm_value) && (dm_value < 0.0)) {
dd_value = -0.0;
// If the error is greater than 0.5, or is equal to 0.5 and the
// integer result is odd, round up.
} else if ((error > 0.5) ||
((error == 0.5) && (fmod(dd_value, 2) != 0))) {
dd_value++;
}
break;
}
case 0x2: // vrintp - ceil
dd_value = std::ceil(dm_value);
break;
case 0x3: // vrintm - floor
dd_value = std::floor(dm_value);
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
break;
}
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
UNIMPLEMENTED();
}
break;
default:
UNIMPLEMENTED();
break;

6
deps/v8/src/arm64/assembler-arm64.cc поставляемый
Просмотреть файл

@ -1936,6 +1936,12 @@ void Assembler::frintn(const FPRegister& fd,
}
void Assembler::frintp(const FPRegister& fd, const FPRegister& fn) {
DCHECK(fd.SizeInBits() == fn.SizeInBits());
FPDataProcessing1Source(fd, fn, FRINTP);
}
void Assembler::frintz(const FPRegister& fd,
const FPRegister& fn) {
DCHECK(fd.SizeInBits() == fn.SizeInBits());

3
deps/v8/src/arm64/assembler-arm64.h поставляемый
Просмотреть файл

@ -1663,6 +1663,9 @@ class Assembler : public AssemblerBase {
// FP round to integer (nearest with ties to even).
void frintn(const FPRegister& fd, const FPRegister& fn);
// FP round to integer (towards plus infinity).
void frintp(const FPRegister& fd, const FPRegister& fn);
// FP round to integer (towards zero.)
void frintz(const FPRegister& fd, const FPRegister& fn);

2
deps/v8/src/arm64/builtins-arm64.cc поставляемый
Просмотреть файл

@ -156,7 +156,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ Cbz(argc, &no_arguments);
// First args = sp[(argc - 1) * 8].
__ Sub(argc, argc, 1);
__ Claim(argc, kXRegSize);
__ Drop(argc, kXRegSize);
// jssp now point to args[0], load and drop args[0] + receiver.
Register arg = argc;
__ Ldr(arg, MemOperand(jssp, 2 * kPointerSize, PostIndex));

87
deps/v8/src/arm64/code-stubs-arm64.cc поставляемый
Просмотреть файл

@ -1422,6 +1422,34 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
// Fast path for keyed loads of the form string[index] on ARM64.
// Receiver and index arrive in the standard LoadDescriptor registers; the
// single-character string result is returned in x0. Any case the fast path
// cannot handle (non-string receiver, non-number index, index out of range)
// falls through to the generic KEYED_LOAD_IC miss builtin.
void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
// Return address is in lr.
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
Register result = x0;
Register scratch = x3;
// scratch must not alias the incoming calling-convention registers.
DCHECK(!scratch.is(receiver) && !scratch.is(index));
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
STRING_INDEX_IS_ARRAY_INDEX,
RECEIVER_IS_STRING);
// Emit the inline fast path; on success the character is in result (x0).
char_at_generator.GenerateFast(masm);
__ Ret();
// Slow cases that still succeed go through the runtime-call helper.
StubRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper);
// Everything else tail-calls the generic keyed-load miss handler.
__ Bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}
void InstanceofStub::Generate(MacroAssembler* masm) {
// Stack on entry:
// jssp[0]: function.
@ -1569,7 +1597,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Mov(result, res_false);
// Null is not instance of anything.
__ Cmp(object_type, Operand(isolate()->factory()->null_value()));
__ Cmp(object, Operand(isolate()->factory()->null_value()));
__ B(ne, &object_not_null);
__ Ret();
@ -2683,13 +2711,13 @@ static void GenerateRecordCallTarget(MacroAssembler* masm,
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
__ JumpIfRoot(scratch1, Heap::kUninitializedSymbolRootIndex, &initialize);
__ JumpIfRoot(scratch1, Heap::kuninitialized_symbolRootIndex, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ Bind(&megamorphic);
__ Add(scratch1, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ LoadRoot(scratch2, Heap::kMegamorphicSymbolRootIndex);
__ LoadRoot(scratch2, Heap::kmegamorphic_symbolRootIndex);
__ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
__ B(&done);
@ -3038,8 +3066,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&extra_checks_or_miss);
Label miss;
__ JumpIfRoot(x4, Heap::kMegamorphicSymbolRootIndex, &slow_start);
__ JumpIfRoot(x4, Heap::kUninitializedSymbolRootIndex, &miss);
__ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &slow_start);
__ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
if (!FLAG_trace_ic) {
// We are going megamorphic. If the feedback is a JSFunction, it is fine
@ -3048,8 +3076,19 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
__ Add(x4, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ LoadRoot(x5, Heap::kMegamorphicSymbolRootIndex);
__ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
__ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
// We have to update statistics for runtime profiling.
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
__ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
__ Subs(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
__ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
__ Adds(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
__ B(&slow_start);
}
@ -3097,14 +3136,16 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi trigger the non-string case.
__ JumpIfSmi(object_, receiver_not_string_);
if (check_mode_ == RECEIVER_IS_UNKNOWN) {
__ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
__ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// Fetch the instance type of the receiver into result register.
__ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
__ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
// If the receiver is not a string trigger the non-string case.
__ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
}
// If the index is non-smi trigger the non-smi case.
__ JumpIfNotSmi(index_, &index_not_smi_);
@ -3782,9 +3823,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// x12: input_type
// x15: from (untagged)
__ SmiTag(from);
StringCharAtGenerator generator(
input_string, from, result_length, x0,
&runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
StringCharAtGenerator generator(input_string, from, result_length, x0,
&runtime, &runtime, &runtime,
STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
generator.GenerateFast(masm);
__ Drop(3);
__ Ret();
@ -3792,6 +3833,22 @@ void SubStringStub::Generate(MacroAssembler* masm) {
}
// ToNumber conversion stub for ARM64.
// Smis and heap numbers are already numbers, so they are returned in x0
// unchanged without leaving generated code; every other value is pushed
// and handed off (as a tail call) to the TO_NUMBER JS builtin.
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in x0.
Label check_heap_number, call_builtin;
// Fast path 1: a smi is its own ToNumber result.
__ JumpIfNotSmi(x0, &check_heap_number);
__ Ret();
// Fast path 2: a heap number is likewise returned as-is.
__ bind(&check_heap_number);
__ JumpIfNotHeapNumber(x0, &call_builtin);
__ Ret();
// Generic case: let the TO_NUMBER builtin perform the full conversion.
// JUMP_FUNCTION makes this a tail call, so the builtin returns to our
// caller directly.
__ bind(&call_builtin);
__ push(x0);
__ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {

19
deps/v8/src/arm64/codegen-arm64.cc поставляемый
Просмотреть файл

@ -290,15 +290,28 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Register src_elements = x10;
Register dst_elements = x11;
Register dst_end = x12;
Register the_hole = x14;
__ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
__ Add(src_elements, elements,
FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_elements, array, FixedArray::kHeaderSize);
__ Add(array, array, kHeapObjectTag);
__ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
Register the_hole = x14;
// Allocating heap numbers in the loop below can fail and cause a jump to
// gc_required. We can't leave a partly initialized FixedArray behind,
// so pessimistically fill it with holes now.
Label initialization_loop, initialization_loop_entry;
__ B(&initialization_loop_entry);
__ bind(&initialization_loop);
__ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
__ bind(&initialization_loop_entry);
__ Cmp(dst_elements, dst_end);
__ B(lt, &initialization_loop);
__ Add(dst_elements, array, FixedArray::kHeaderSize);
__ Add(array, array, kHeapObjectTag);
Register heap_num_map = x15;
__ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
__ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
Label entry;

6
deps/v8/src/arm64/debug-arm64.cc поставляемый
Просмотреть файл

@ -238,7 +238,11 @@ void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
Generate_DebugBreakCallHelper(masm, receiver.Bit() | name.Bit(), 0, x10);
RegList regs = receiver.Bit() | name.Bit();
if (FLAG_vector_ics) {
regs |= VectorLoadICTrampolineDescriptor::SlotRegister().Bit();
}
Generate_DebugBreakCallHelper(masm, regs, 0, x10);
}

4
deps/v8/src/arm64/delayed-masm-arm64.cc поставляемый
Просмотреть файл

@ -16,8 +16,8 @@ namespace internal {
void DelayedMasm::StackSlotMove(LOperand* src, LOperand* dst) {
DCHECK(src->IsStackSlot());
DCHECK(dst->IsStackSlot());
DCHECK((src->IsStackSlot() && dst->IsStackSlot()) ||
(src->IsDoubleStackSlot() && dst->IsDoubleStackSlot()));
MemOperand src_operand = cgen_->ToMemOperand(src);
MemOperand dst_operand = cgen_->ToMemOperand(dst);
if (pending_ == kStackSlotMove) {

2
deps/v8/src/arm64/disasm-arm64.cc поставляемый
Просмотреть файл

@ -1695,7 +1695,7 @@ int Disassembler::SubstituteBarrierField(Instruction* instr,
DCHECK(format[0] == 'M');
USE(format);
static const char* options[4][4] = {
static const char* const options[4][4] = {
{ "sy (0b0000)", "oshld", "oshst", "osh" },
{ "sy (0b0100)", "nshld", "nshst", "nsh" },
{ "sy (0b1000)", "ishld", "ishst", "ish" },

499
deps/v8/src/arm64/full-codegen-arm64.cc поставляемый
Просмотреть файл

@ -299,24 +299,26 @@ void FullCodeGenerator::Generate() {
}
VisitDeclarations(scope()->declarations());
}
}
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
DCHECK(jssp.Is(__ StackPointer()));
__ CompareRoot(jssp, Heap::kStackLimitRootIndex);
__ B(hs, &ok);
PredictableCodeSizeScope predictable(masm_,
Assembler::kCallSizeWithRelocation);
__ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ Bind(&ok);
}
{
Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
DCHECK(jssp.Is(__ StackPointer()));
__ CompareRoot(jssp, Heap::kStackLimitRootIndex);
__ B(hs, &ok);
PredictableCodeSizeScope predictable(masm_,
Assembler::kCallSizeWithRelocation);
__ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ Bind(&ok);
}
{ Comment cmnt(masm_, "[ Body");
DCHECK(loop_depth() == 0);
VisitStatements(function()->body());
DCHECK(loop_depth() == 0);
{
Comment cmnt(masm_, "[ Body");
DCHECK(loop_depth() == 0);
VisitStatements(function()->body());
DCHECK(loop_depth() == 0);
}
}
// Always emit a 'return undefined' in case control fell off the end of
@ -1097,7 +1099,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ASM_LOCATION("FullCodeGenerator::VisitForInStatement");
Comment cmnt(masm_, "[ ForInStatement");
int slot = stmt->ForInFeedbackSlot();
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
// TODO(all): This visitor probably needs better comments and a revisit.
SetStatementPosition(stmt);
@ -1124,6 +1126,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(x0);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ Bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ Push(x0);
// Check for proxies.
@ -1147,6 +1150,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Bind(&call_runtime);
__ Push(x0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@ -1181,7 +1185,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ LoadObject(x1, FeedbackVector());
__ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot)));
int vector_index = FeedbackVector()->GetIndex(slot);
__ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(vector_index)));
__ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check.
__ Peek(x10, 0); // Get enumerated object.
@ -1350,7 +1355,13 @@ void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
__ Mov(LoadDescriptor::NameRegister(), Operand(home_object_symbol));
CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
SmiFromSlot(expr->HomeObjectFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
}
__ Mov(x10, Operand(isolate()->factory()->undefined_value()));
__ cmp(x0, x10);
@ -1408,7 +1419,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ Mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(proxy->VariableFeedbackSlot()));
SmiFromSlot(proxy->VariableFeedbackSlot()));
}
ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL
@ -1493,7 +1504,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(proxy->VariableFeedbackSlot()));
SmiFromSlot(proxy->VariableFeedbackSlot()));
}
CallLoadIC(CONTEXTUAL);
context()->Plug(x0);
@ -1671,6 +1682,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in x0.
@ -1699,6 +1711,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
@ -1732,7 +1746,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Peek(x0, 0);
__ Push(x0);
VisitForStackValue(value);
__ CallRuntime(Runtime::kSetPrototype, 2);
__ CallRuntime(Runtime::kInternalSetPrototype, 2);
} else {
VisitForEffect(value);
}
@ -1862,22 +1876,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
Comment cmnt(masm_, "[ Assignment");
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind {
VARIABLE,
NAMED_PROPERTY,
KEYED_PROPERTY,
NAMED_SUPER_PROPERTY
};
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
if (property != NULL) {
assign_type = (property->key()->IsPropertyName())
? (property->IsSuperAccess() ? NAMED_SUPER_PROPERTY
: NAMED_PROPERTY)
: KEYED_PROPERTY;
}
LhsKind assign_type = GetAssignType(property);
// Evaluate LHS expression.
switch (assign_type) {
@ -1903,6 +1903,20 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ Push(scratch, result_register());
}
break;
case KEYED_SUPER_PROPERTY:
VisitForStackValue(property->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(property->obj()->AsSuperReference());
__ Push(result_register());
VisitForAccumulatorValue(property->key());
__ Push(result_register());
if (expr->is_compound()) {
const Register scratch1 = x10;
const Register scratch2 = x11;
__ Peek(scratch1, 2 * kPointerSize);
__ Peek(scratch2, kPointerSize);
__ Push(scratch1, scratch2, result_register());
}
break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
VisitForStackValue(property->obj());
@ -1933,6 +1947,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedSuperPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
@ -1980,7 +1998,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedPropertyAssignment(expr);
break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyAssignment(expr);
EmitNamedSuperPropertyStore(property);
context()->Plug(x0);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyStore(property);
context()->Plug(x0);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
@ -1997,7 +2020,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ Mov(LoadDescriptor::NameRegister(), Operand(key->value()));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(prop->PropertyFeedbackSlot()));
SmiFromSlot(prop->PropertyFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
@ -2019,11 +2042,11 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
// Call keyed load IC. It has arguments key and receiver in x0 and x1.
Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(prop->PropertyFeedbackSlot()));
SmiFromSlot(prop->PropertyFeedbackSlot()));
CallIC(ic);
} else {
CallIC(ic, prop->PropertyFeedbackId());
@ -2031,6 +2054,14 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
// Loads super[key]. Expects the three runtime-call arguments
// (receiver, home_object, key) to already be on the stack; the runtime
// pops them and leaves the loaded value in the accumulator (x0).
void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetSourcePosition(prop->position());
__ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
}
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode,
@ -2146,19 +2177,67 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
}
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Constructor is in x0.
DCHECK(lit != NULL);
__ push(x0);
// No access check is needed here since the constructor is created by the
// class literal.
Register scratch = x1;
__ Ldr(scratch,
FieldMemOperand(x0, JSFunction::kPrototypeOrInitialMapOffset));
__ Push(scratch);
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
DCHECK(key != NULL);
if (property->is_static()) {
__ Peek(scratch, kPointerSize); // constructor
} else {
__ Peek(scratch, 0); // prototype
}
__ Push(scratch);
VisitForStackValue(key);
VisitForStackValue(value);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::PROTOTYPE:
__ CallRuntime(Runtime::kDefineClassMethod, 3);
break;
case ObjectLiteral::Property::GETTER:
__ CallRuntime(Runtime::kDefineClassGetter, 3);
break;
case ObjectLiteral::Property::SETTER:
__ CallRuntime(Runtime::kDefineClassSetter, 3);
break;
default:
UNREACHABLE();
}
}
// prototype
__ CallRuntime(Runtime::kToFastProperties, 1);
// constructor
__ CallRuntime(Runtime::kToFastProperties, 1);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
DCHECK(expr->IsValidReferenceExpression());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
if (prop != NULL) {
assign_type = (prop->key()->IsPropertyName())
? NAMED_PROPERTY
: KEYED_PROPERTY;
}
LhsKind assign_type = GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
@ -2179,6 +2258,42 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
CallStoreIC();
break;
}
case NAMED_SUPER_PROPERTY: {
__ Push(x0);
VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(prop->obj()->AsSuperReference());
// stack: value, this; x0: home_object
Register scratch = x10;
Register scratch2 = x11;
__ mov(scratch, result_register()); // home_object
__ Peek(x0, kPointerSize); // value
__ Peek(scratch2, 0); // this
__ Poke(scratch2, kPointerSize); // this
__ Poke(scratch, 0); // home_object
// stack: this, home_object; x0: value
EmitNamedSuperPropertyStore(prop);
break;
}
case KEYED_SUPER_PROPERTY: {
__ Push(x0);
VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(prop->obj()->AsSuperReference());
__ Push(result_register());
VisitForAccumulatorValue(prop->key());
Register scratch = x10;
Register scratch2 = x11;
__ Peek(scratch2, 2 * kPointerSize); // value
// stack: value, this, home_object; x0: key, x11: value
__ Peek(scratch, kPointerSize); // this
__ Poke(scratch, 2 * kPointerSize);
__ Peek(scratch, 0); // home_object
__ Poke(scratch, kPointerSize);
__ Poke(x0, 0);
__ Move(x0, scratch2);
// stack: this, home_object, key; x0: value.
EmitKeyedSuperPropertyStore(prop);
break;
}
case KEYED_PROPERTY: {
__ Push(x0); // Preserve value.
VisitForStackValue(prop->obj());
@ -2297,21 +2412,32 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
void FullCodeGenerator::EmitNamedSuperPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
// Assignment to named property of super.
// x0 : value
// stack : receiver ('this'), home_object
Property* prop = expr->target()->AsProperty();
DCHECK(prop != NULL);
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
__ Push(x0);
__ Push(key->value());
__ Push(x0);
__ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
: Runtime::kStoreToSuper_Sloppy),
4);
context()->Plug(x0);
}
void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// Assignment to named property of super.
// x0 : value
// stack : receiver ('this'), home_object, key
DCHECK(prop != NULL);
__ Push(x0);
__ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
: Runtime::kStoreKeyedToSuper_Sloppy),
4);
}
@ -2351,11 +2477,19 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(x0);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ Move(LoadDescriptor::NameRegister(), x0);
__ Pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
if (!expr->IsSuperAccess()) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ Move(LoadDescriptor::NameRegister(), x0);
__ Pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(expr->obj()->AsSuperReference());
__ Push(result_register());
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
context()->Plug(x0);
}
}
@ -2464,6 +2598,43 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
}
// Emits a keyed super method call: super[key](...). First materializes
// home_object and the receiver ('this'), then loads the callee with the
// LoadKeyedFromSuper runtime function, and finally dispatches through the
// common EmitCall path with [target function, receiver] on the stack.
void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
SetSourcePosition(prop->position());
// Load the function from the receiver.
const Register scratch = x10;
SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
// Build the stack layout documented below: the lower two slots survive
// the runtime call as the eventual [function, receiver] pair, the upper
// three are consumed by LoadKeyedFromSuper.
EmitLoadHomeObject(super_ref);
__ Push(x0);
VisitForAccumulatorValue(super_ref->this_var());
__ Push(x0);
__ Peek(scratch, kPointerSize);
__ Push(x0, scratch);
VisitForStackValue(prop->key());
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
__ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
// Replace home_object with target function.
__ Poke(x0, kPointerSize);
// Stack here:
// - target function
// - this (receiver)
EmitCall(expr, CallICState::METHOD);
}
void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@ -2478,7 +2649,7 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
Handle<Code> ic = CallIC::initialize_stub(
isolate(), arg_count, call_type);
__ Mov(x3, Smi::FromInt(expr->CallFeedbackSlot()));
__ Mov(x3, SmiFromSlot(expr->CallFeedbackSlot()));
__ Peek(x1, (arg_count + 1) * kXRegSize);
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
@ -2496,11 +2667,12 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Prepare to push a copy of the first argument or undefined if it doesn't
// exist.
if (arg_count > 0) {
__ Peek(x10, arg_count * kXRegSize);
__ Peek(x9, arg_count * kXRegSize);
} else {
__ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
__ LoadRoot(x9, Heap::kUndefinedValueRootIndex);
}
__ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// Prepare to push the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
__ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize));
@ -2511,10 +2683,18 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ Mov(x13, Smi::FromInt(scope()->start_position()));
// Push.
__ Push(x10, x11, x12, x13);
__ Push(x9, x10, x11, x12, x13);
// Do the runtime call.
__ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
__ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
}
// Loads the [[Prototype]] of the currently executing function — i.e. the
// super constructor — leaving it in the accumulator (x0) via the
// kGetPrototype runtime call.
void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
DCHECK(super_ref != NULL);
// The active function is at a fixed slot in the JavaScript frame.
__ ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(x0);
__ CallRuntime(Runtime::kGetPrototype, 1);
}
@ -2557,6 +2737,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// The runtime call returns a pair of values in x0 (function) and
// x1 (receiver). Touch up the stack with the right values.
__ PokePair(x1, x0, arg_count * kPointerSize);
PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
}
// Record source position for debugger.
@ -2592,6 +2774,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ Push(context_register(), x10);
__ CallRuntime(Runtime::kLoadLookupSlot, 2);
__ Push(x0, x1); // Receiver, function.
PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
// If fast case code has been generated, emit code to push the
// function and receiver and have the slow path jump around this
@ -2614,9 +2797,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
bool is_named_call = property->key()->IsPropertyName();
// super.x() is handled in EmitCallWithLoadIC.
if (property->IsSuperAccess() && is_named_call) {
EmitSuperCallWithLoadIC(expr);
if (property->IsSuperAccess()) {
if (is_named_call) {
EmitSuperCallWithLoadIC(expr);
} else {
EmitKeyedSuperCallWithLoadIC(expr);
}
} else {
{
PreservePositionScope scope(masm()->positions_recorder());
@ -2628,6 +2814,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitKeyedCallWithLoadIC(expr, property->key());
}
}
} else if (call_type == Call::SUPER_CALL) {
SuperReference* super_ref = callee->AsSuperReference();
EmitLoadSuperConstructor(super_ref);
__ Push(result_register());
VisitForStackValue(super_ref->this_var());
EmitCall(expr, CallICState::METHOD);
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@ -2656,7 +2848,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
VisitForStackValue(expr->expression());
if (expr->expression()->IsSuperReference()) {
EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
__ Push(result_register());
} else {
VisitForStackValue(expr->expression());
}
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@ -2676,12 +2873,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
if (FLAG_pretenuring_call_new) {
EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
DCHECK(expr->AllocationSiteFeedbackSlot() ==
expr->CallNewFeedbackSlot() + 1);
DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
expr->CallNewFeedbackSlot().ToInt() + 1);
}
__ LoadObject(x2, FeedbackVector());
__ Mov(x3, Smi::FromInt(expr->CallNewFeedbackSlot()));
__ Mov(x3, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
@ -3004,6 +3201,32 @@ void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
}
// Inline implementation of the %_IsJSProxy intrinsic: tests whether the
// single argument is a JSProxy (or JSFunctionProxy) and plugs the boolean
// outcome into the current expression context.
void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
&if_false, &fall_through);
// Smis have no map and therefore cannot be proxies.
__ JumpIfSmi(x0, if_false);
Register map = x10;
Register type_reg = x11;
__ Ldr(map, FieldMemOperand(x0, HeapObject::kMapOffset));
__ Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
// Unsigned range check: FIRST_JS_PROXY_TYPE <= type <= LAST_JS_PROXY_TYPE,
// done by biasing the type and comparing against the range width ('ls').
__ Sub(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
__ Cmp(type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ls, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
@ -3883,7 +4106,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ Mov(LoadDescriptor::NameRegister(), Operand(name));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->CallRuntimeFeedbackSlot()));
SmiFromSlot(expr->CallRuntimeFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
@ -4037,22 +4260,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->expression()->AsProperty();
// In case of a property we use the uninitialized expression context
// of the key to detect a named property.
if (prop != NULL) {
assign_type =
(prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
if (prop->IsSuperAccess()) {
// throw exception.
VisitSuperReference(prop->obj()->AsSuperReference());
return;
}
}
LhsKind assign_type = GetAssignType(prop);
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
@ -4064,18 +4273,52 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (expr->is_postfix() && !context()->IsEffect()) {
__ Push(xzr);
}
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the register.
VisitForStackValue(prop->obj());
__ Peek(LoadDescriptor::ReceiverRegister(), 0);
EmitNamedPropertyLoad(prop);
} else {
// KEYED_PROPERTY
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
__ Peek(LoadDescriptor::ReceiverRegister(), 1 * kPointerSize);
__ Peek(LoadDescriptor::NameRegister(), 0);
EmitKeyedPropertyLoad(prop);
switch (assign_type) {
case NAMED_PROPERTY: {
// Put the object both on the stack and in the register.
VisitForStackValue(prop->obj());
__ Peek(LoadDescriptor::ReceiverRegister(), 0);
EmitNamedPropertyLoad(prop);
break;
}
case NAMED_SUPER_PROPERTY: {
VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(prop->obj()->AsSuperReference());
__ Push(result_register());
const Register scratch = x10;
__ Peek(scratch, kPointerSize);
__ Push(scratch, result_register());
EmitNamedSuperPropertyLoad(prop);
break;
}
case KEYED_SUPER_PROPERTY: {
VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(prop->obj()->AsSuperReference());
__ Push(result_register());
VisitForAccumulatorValue(prop->key());
__ Push(result_register());
const Register scratch1 = x10;
const Register scratch2 = x11;
__ Peek(scratch1, 2 * kPointerSize);
__ Peek(scratch2, kPointerSize);
__ Push(scratch1, scratch2, result_register());
EmitKeyedSuperPropertyLoad(prop);
break;
}
case KEYED_PROPERTY: {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
__ Peek(LoadDescriptor::ReceiverRegister(), 1 * kPointerSize);
__ Peek(LoadDescriptor::NameRegister(), 0);
EmitKeyedPropertyLoad(prop);
break;
}
case VARIABLE:
UNREACHABLE();
}
}
@ -4109,9 +4352,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ Poke(x0, kPointerSize);
break;
case NAMED_SUPER_PROPERTY:
__ Poke(x0, kPointerSize * 2);
break;
case KEYED_PROPERTY:
__ Poke(x0, kPointerSize * 2);
break;
case KEYED_SUPER_PROPERTY:
__ Poke(x0, kPointerSize * 3);
break;
}
}
}
@ -4139,9 +4388,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ Poke(x0, kXRegSize);
break;
case NAMED_SUPER_PROPERTY:
__ Poke(x0, 2 * kXRegSize);
break;
case KEYED_PROPERTY:
__ Poke(x0, 2 * kXRegSize);
break;
case KEYED_SUPER_PROPERTY:
__ Poke(x0, 3 * kXRegSize);
break;
}
}
}
@ -4199,6 +4454,28 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
case NAMED_SUPER_PROPERTY: {
EmitNamedSuperPropertyStore(prop);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
}
} else {
context()->Plug(x0);
}
break;
}
case KEYED_SUPER_PROPERTY: {
EmitKeyedSuperPropertyStore(prop);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
}
} else {
context()->Plug(x0);
}
break;
}
case KEYED_PROPERTY: {
__ Pop(StoreDescriptor::NameRegister());
__ Pop(StoreDescriptor::ReceiverRegister());
@ -4229,7 +4506,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ Mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(proxy->VariableFeedbackSlot()));
SmiFromSlot(proxy->VariableFeedbackSlot()));
}
// Use a regular load, not a contextual load, to avoid a reference
// error.
@ -4585,7 +4862,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Peek(load_name, 2 * kPointerSize);
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->KeyedLoadFeedbackSlot()));
SmiFromSlot(expr->KeyedLoadFeedbackSlot()));
}
Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallIC(ic, TypeFeedbackId::None());
@ -4605,7 +4882,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->DoneFeedbackSlot()));
SmiFromSlot(expr->DoneFeedbackSlot()));
}
CallLoadIC(NOT_CONTEXTUAL); // x0=result.done
// The ToBooleanStub argument (result.done) is in x0.
@ -4618,7 +4895,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->ValueFeedbackSlot()));
SmiFromSlot(expr->ValueFeedbackSlot()));
}
CallLoadIC(NOT_CONTEXTUAL); // x0=result.value
context()->DropAndPlug(2, x0); // drop iter and g
@ -4640,7 +4917,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// The value stays in x0, and is ultimately read by the resumed generator, as
// if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
// is read to throw the value when the resumed generator is already closed. r1
// is read to throw the value when the resumed generator is already closed. x1
// will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
@ -4754,16 +5031,18 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label gc_required;
Label allocated;
Handle<Map> map(isolate()->native_context()->iterator_result_map());
const int instance_size = 5 * kPointerSize;
DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
instance_size);
// Allocate and populate an object with this form: { value: VAL, done: DONE }
Register result = x0;
__ Allocate(map->instance_size(), result, x10, x11, &gc_required, TAG_OBJECT);
__ Allocate(instance_size, result, x10, x11, &gc_required, TAG_OBJECT);
__ B(&allocated);
__ Bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
__ Push(Smi::FromInt(instance_size));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
__ Ldr(context_register(),
MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -4774,11 +5053,13 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Register boolean_done = x3;
Register empty_fixed_array = x4;
Register untagged_result = x5;
__ Mov(map_reg, Operand(map));
__ Ldr(map_reg, GlobalObjectMemOperand());
__ Ldr(map_reg, FieldMemOperand(map_reg, GlobalObject::kNativeContextOffset));
__ Ldr(map_reg,
ContextMemOperand(map_reg, Context::ITERATOR_RESULT_MAP_INDEX));
__ Pop(result_value);
__ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done)));
__ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array()));
DCHECK_EQ(map->instance_size(), 5 * kPointerSize);
STATIC_ASSERT(JSObject::kPropertiesOffset + kPointerSize ==
JSObject::kElementsOffset);
STATIC_ASSERT(JSGeneratorObject::kResultValuePropertyOffset + kPointerSize ==

Просмотреть файл

@ -29,6 +29,9 @@ const Register StoreDescriptor::NameRegister() { return x2; }
const Register StoreDescriptor::ValueRegister() { return x0; }
const Register StoreTransitionDescriptor::MapRegister() { return x3; }
const Register ElementTransitionAndStoreDescriptor::MapRegister() { return x3; }
@ -182,6 +185,14 @@ void TransitionElementsKindDescriptor::Initialize(
}
void AllocateHeapNumberDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
// cp: context
Register registers[] = {cp};
data->Initialize(arraysize(registers), registers, nullptr);
}
void ArrayConstructorConstantArgCountDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
// cp: context

12
deps/v8/src/arm64/lithium-arm64.cc поставляемый
Просмотреть файл

@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <sstream>
#include "src/v8.h"
#include "src/arm64/lithium-codegen-arm64.h"
@ -282,9 +284,9 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
OStringStream os;
std::ostringstream os;
os << hydrogen()->access();
stream->Add(os.c_str());
stream->Add(os.str().c_str());
stream->Add(" <- ");
value()->PrintTo(stream);
}
@ -2234,11 +2236,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
(JSShiftAmountFromHConstant(instr->right()) == 0);
bool can_deopt = false;
if ((op == Token::SHR) && right_can_be_zero) {
if (FLAG_opt_safe_uint32_operations) {
can_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
can_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
can_deopt = !instr->CheckFlag(HInstruction::kUint32);
}
LInstruction* result;

18
deps/v8/src/arm64/lithium-codegen-arm64.cc поставляемый
Просмотреть файл

@ -3371,13 +3371,14 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector = ToRegister(instr->temp_vector());
DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
__ Mov(vector, instr->hydrogen()->feedback_vector());
Register vector_register = ToRegister(instr->temp_vector());
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Mov(vector_register, vector);
// No need to allocate this register.
DCHECK(VectorLoadICDescriptor::SlotRegister().is(x0));
__ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(instr->hydrogen()->slot()));
int index = vector->GetIndex(instr->hydrogen()->slot());
__ Mov(VectorLoadICDescriptor::SlotRegister(), Smi::FromInt(index));
}
@ -3391,7 +3392,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@ -3645,7 +3646,7 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).Is(x0));
@ -3701,7 +3702,8 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).is(x0));

14
deps/v8/src/arm64/macro-assembler-arm64-inl.h поставляемый
Просмотреть файл

@ -825,6 +825,12 @@ void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
}
void MacroAssembler::Frintp(const FPRegister& fd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
frintp(fd, fn);
}
void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
frintz(fd, fn);
@ -1120,6 +1126,14 @@ void MacroAssembler::Smulh(const Register& rd,
}
void MacroAssembler::Umull(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(allow_macro_instructions_);
DCHECK(!rd.IsZero());
umaddl(rd, rn, rm, xzr);
}
void MacroAssembler::Stnp(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& dst) {

7
deps/v8/src/arm64/macro-assembler-arm64.cc поставляемый
Просмотреть файл

@ -3064,6 +3064,13 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
}
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// Out-of-line constant pool not implemented on arm64.
UNREACHABLE();
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
DCHECK(jssp.Is(StackPointer()));
UseScratchRegisterScope temps(this);

3
deps/v8/src/arm64/macro-assembler-arm64.h поставляемый
Просмотреть файл

@ -422,6 +422,7 @@ class MacroAssembler : public Assembler {
inline void Frinta(const FPRegister& fd, const FPRegister& fn);
inline void Frintm(const FPRegister& fd, const FPRegister& fn);
inline void Frintn(const FPRegister& fd, const FPRegister& fn);
inline void Frintp(const FPRegister& fd, const FPRegister& fn);
inline void Frintz(const FPRegister& fd, const FPRegister& fn);
inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
inline void Fsub(const FPRegister& fd,
@ -489,6 +490,7 @@ class MacroAssembler : public Assembler {
inline void Smulh(const Register& rd,
const Register& rn,
const Register& rm);
inline void Umull(const Register& rd, const Register& rn, const Register& rm);
inline void Stnp(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& dst);
@ -1627,6 +1629,7 @@ class MacroAssembler : public Assembler {
// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);
// Returns map with validated enum cache in object register.

10
deps/v8/src/arm64/simulator-arm64.cc поставляемый
Просмотреть файл

@ -2463,6 +2463,12 @@ void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
set_sreg(fd, FPRoundInt(sreg(fn), FPNegativeInfinity)); break;
case FRINTM_d:
set_dreg(fd, FPRoundInt(dreg(fn), FPNegativeInfinity)); break;
case FRINTP_s:
set_sreg(fd, FPRoundInt(sreg(fn), FPPositiveInfinity));
break;
case FRINTP_d:
set_dreg(fd, FPRoundInt(dreg(fn), FPPositiveInfinity));
break;
case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break;
case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break;
case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break;
@ -2767,6 +2773,10 @@ double Simulator::FPRoundInt(double value, FPRounding round_mode) {
// We always use floor(value).
break;
}
case FPPositiveInfinity: {
int_result = ceil(value);
break;
}
default: UNIMPLEMENTED();
}
return int_result;

4
deps/v8/src/array-iterator.js поставляемый
Просмотреть файл

@ -112,6 +112,8 @@ function SetUpArrayIterator() {
%FunctionSetName(ArrayIteratorIterator, '[Symbol.iterator]');
%AddNamedProperty(ArrayIterator.prototype, symbolIterator,
ArrayIteratorIterator, DONT_ENUM);
%AddNamedProperty(ArrayIterator.prototype, symbolToStringTag,
"Array Iterator", READ_ONLY | DONT_ENUM);
}
SetUpArrayIterator();
@ -120,8 +122,8 @@ function ExtendArrayPrototype() {
%CheckIsBootstrapping();
InstallFunctions($Array.prototype, DONT_ENUM, $Array(
// No 'values' since it breaks webcompat: http://crbug.com/409858
'entries', ArrayEntries,
'values', ArrayValues,
'keys', ArrayKeys
));

117
deps/v8/src/array.js поставляемый
Просмотреть файл

@ -90,7 +90,8 @@ function UseSparseVariant(array, length, is_array, touched) {
// Only use the sparse variant on arrays that are likely to be sparse and the
// number of elements touched in the operation is relatively small compared to
// the overall size of the array.
if (!is_array || length < 1000 || %IsObserved(array)) {
if (!is_array || length < 1000 || %IsObserved(array) ||
%HasComplexElements(array)) {
return false;
}
if (!%_IsSmi(length)) {
@ -203,7 +204,7 @@ function ConvertToLocaleString(e) {
// This function implements the optimized splice implementation that can use
// special array operations to handle sparse arrays in a sensible fashion.
function SmartSlice(array, start_i, del_count, len, deleted_elements) {
function SparseSlice(array, start_i, del_count, len, deleted_elements) {
// Move deleted elements to a new array (the return value from splice).
var indices = %GetArrayKeys(array, start_i + del_count);
if (IS_NUMBER(indices)) {
@ -211,7 +212,7 @@ function SmartSlice(array, start_i, del_count, len, deleted_elements) {
for (var i = start_i; i < limit; ++i) {
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
deleted_elements[i - start_i] = current;
%AddElement(deleted_elements, i - start_i, current, NONE);
}
}
} else {
@ -222,7 +223,7 @@ function SmartSlice(array, start_i, del_count, len, deleted_elements) {
if (key >= start_i) {
var current = array[key];
if (!IS_UNDEFINED(current) || key in array) {
deleted_elements[key - start_i] = current;
%AddElement(deleted_elements, key - start_i, current, NONE);
}
}
}
@ -233,7 +234,9 @@ function SmartSlice(array, start_i, del_count, len, deleted_elements) {
// This function implements the optimized splice implementation that can use
// special array operations to handle sparse arrays in a sensible fashion.
function SmartMove(array, start_i, del_count, len, num_additional_args) {
function SparseMove(array, start_i, del_count, len, num_additional_args) {
// Bail out if no moving is necessary.
if (num_additional_args === del_count) return;
// Move data to new array.
var new_array = new InternalArray(len - del_count + num_additional_args);
var indices = %GetArrayKeys(array, len);
@ -281,12 +284,11 @@ function SmartMove(array, start_i, del_count, len, num_additional_args) {
function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
for (var i = 0; i < del_count; i++) {
var index = start_i + i;
// The spec could also be interpreted such that %HasOwnProperty
// would be the appropriate test. We follow KJS in consulting the
// prototype.
var current = array[index];
if (!IS_UNDEFINED(current) || index in array) {
deleted_elements[i] = current;
if (index in array) {
var current = array[index];
// The spec requires [[DefineOwnProperty]] here, %AddElement is close
// enough (in that it ignores the prototype).
%AddElement(deleted_elements, i, current, NONE);
}
}
}
@ -300,12 +302,8 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
for (var i = len - del_count; i > start_i; i--) {
var from_index = i + del_count - 1;
var to_index = i + num_additional_args - 1;
// The spec could also be interpreted such that
// %HasOwnProperty would be the appropriate test. We follow
// KJS in consulting the prototype.
var current = array[from_index];
if (!IS_UNDEFINED(current) || from_index in array) {
array[to_index] = current;
if (from_index in array) {
array[to_index] = array[from_index];
} else {
delete array[to_index];
}
@ -314,12 +312,8 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
for (var i = start_i; i < len - del_count; i++) {
var from_index = i + del_count;
var to_index = i + num_additional_args;
// The spec could also be interpreted such that
// %HasOwnProperty would be the appropriate test. We follow
// KJS in consulting the prototype.
var current = array[from_index];
if (!IS_UNDEFINED(current) || from_index in array) {
array[to_index] = current;
if (from_index in array) {
array[to_index] = array[from_index];
} else {
delete array[to_index];
}
@ -349,7 +343,7 @@ function ArrayToString() {
func = array.join;
}
if (!IS_SPEC_FUNCTION(func)) {
return %_CallFunction(array, ObjectToString);
return %_CallFunction(array, NoSideEffectsObjectToString);
}
return %_CallFunction(array, func);
}
@ -378,6 +372,14 @@ function ArrayJoin(separator) {
var result = %_FastOneByteArrayJoin(array, separator);
if (!IS_UNDEFINED(result)) return result;
// Fast case for one-element arrays.
if (length === 1) {
var e = array[0];
if (IS_STRING(e)) return e;
if (IS_NULL_OR_UNDEFINED(e)) return '';
return NonStringToString(e);
}
return Join(array, length, separator, ConvertToString);
}
@ -596,8 +598,8 @@ function ArrayShift() {
var first = array[0];
if (IS_ARRAY(array)) {
SmartMove(array, 0, 1, len, 0);
if (UseSparseVariant(array, len, IS_ARRAY(array), len)) {
SparseMove(array, 0, 1, len, 0);
} else {
SimpleMove(array, 0, 1, len, 0);
}
@ -636,10 +638,10 @@ function ArrayUnshift(arg1) { // length == 1
var array = TO_OBJECT_INLINE(this);
var len = TO_UINT32(array.length);
var num_arguments = %_ArgumentsLength();
var is_sealed = ObjectIsSealed(array);
if (IS_ARRAY(array) && !is_sealed && len > 0) {
SmartMove(array, 0, 0, len, num_arguments);
if (len > 0 && UseSparseVariant(array, len, IS_ARRAY(array), len) &&
!ObjectIsSealed(array)) {
SparseMove(array, 0, 0, len, num_arguments);
} else {
SimpleMove(array, 0, 0, len, num_arguments);
}
@ -685,7 +687,7 @@ function ArraySlice(start, end) {
if (UseSparseVariant(array, len, IS_ARRAY(array), end_i - start_i)) {
%NormalizeElements(array);
%NormalizeElements(result);
SmartSlice(array, start_i, end_i - start_i, len, result);
SparseSlice(array, start_i, end_i - start_i, len, result);
} else {
SimpleSlice(array, start_i, end_i - start_i, len, result);
}
@ -801,8 +803,8 @@ function ArraySplice(start, delete_count) {
if (UseSparseVariant(array, len, IS_ARRAY(array), changed_elements)) {
%NormalizeElements(array);
%NormalizeElements(deleted_elements);
SmartSlice(array, start_i, del_count, len, deleted_elements);
SmartMove(array, start_i, del_count, len, num_elements_to_add);
SparseSlice(array, start_i, del_count, len, deleted_elements);
SparseMove(array, start_i, del_count, len, num_elements_to_add);
} else {
SimpleSlice(array, start_i, del_count, len, deleted_elements);
SimpleMove(array, start_i, del_count, len, num_elements_to_add);
@ -1125,10 +1127,11 @@ function ArrayFilter(f, receiver) {
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
var needs_wrapper = false;
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
} else {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
var result = new $Array();
@ -1140,7 +1143,8 @@ function ArrayFilter(f, receiver) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
if (%_CallFunction(receiver, element, i, array, f)) {
var new_receiver = needs_wrapper ? ToObject(receiver) : receiver;
if (%_CallFunction(new_receiver, element, i, array, f)) {
accumulator[accumulator_length++] = element;
}
}
@ -1161,10 +1165,11 @@ function ArrayForEach(f, receiver) {
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
var needs_wrapper = false;
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
} else {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
@ -1173,7 +1178,8 @@ function ArrayForEach(f, receiver) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
%_CallFunction(receiver, element, i, array, f);
var new_receiver = needs_wrapper ? ToObject(receiver) : receiver;
%_CallFunction(new_receiver, element, i, array, f);
}
}
}
@ -1192,10 +1198,11 @@ function ArraySome(f, receiver) {
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
var needs_wrapper = false;
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
} else {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
@ -1204,7 +1211,8 @@ function ArraySome(f, receiver) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
if (%_CallFunction(receiver, element, i, array, f)) return true;
var new_receiver = needs_wrapper ? ToObject(receiver) : receiver;
if (%_CallFunction(new_receiver, element, i, array, f)) return true;
}
}
return false;
@ -1222,10 +1230,11 @@ function ArrayEvery(f, receiver) {
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
var needs_wrapper = false;
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
} else {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
@ -1234,7 +1243,8 @@ function ArrayEvery(f, receiver) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
if (!%_CallFunction(receiver, element, i, array, f)) return false;
var new_receiver = needs_wrapper ? ToObject(receiver) : receiver;
if (!%_CallFunction(new_receiver, element, i, array, f)) return false;
}
}
return true;
@ -1251,10 +1261,11 @@ function ArrayMap(f, receiver) {
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
var needs_wrapper = false;
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
} else {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
var result = new $Array();
@ -1265,7 +1276,8 @@ function ArrayMap(f, receiver) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
accumulator[i] = %_CallFunction(receiver, element, i, array, f);
var new_receiver = needs_wrapper ? ToObject(receiver) : receiver;
accumulator[i] = %_CallFunction(new_receiver, element, i, array, f);
}
}
%MoveArrayContents(accumulator, result);
@ -1398,9 +1410,8 @@ function ArrayReduce(callback, current) {
var i = 0;
find_initial: if (%_ArgumentsLength() < 2) {
for (; i < length; i++) {
current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
i++;
if (i in array) {
current = array[i++];
break find_initial;
}
}
@ -1435,9 +1446,8 @@ function ArrayReduceRight(callback, current) {
var i = length - 1;
find_initial: if (%_ArgumentsLength() < 2) {
for (; i >= 0; i--) {
current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
i--;
if (i in array) {
current = array[i--];
break find_initial;
}
}
@ -1481,7 +1491,6 @@ function SetUpArray() {
find: true,
findIndex: true,
keys: true,
values: true,
};
%AddNamedProperty($Array.prototype, symbolUnscopables, unscopables,
DONT_ENUM | READ_ONLY);

3
deps/v8/src/arraybuffer.js поставляемый
Просмотреть файл

@ -77,6 +77,9 @@ function SetUpArrayBuffer() {
%AddNamedProperty(
$ArrayBuffer.prototype, "constructor", $ArrayBuffer, DONT_ENUM);
%AddNamedProperty($ArrayBuffer.prototype,
symbolToStringTag, "ArrayBuffer", DONT_ENUM | READ_ONLY);
InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLen);
InstallFunctions($ArrayBuffer, DONT_ENUM, $Array(

37
deps/v8/src/assembler.cc поставляемый
Просмотреть файл

@ -37,6 +37,7 @@
#include <cmath>
#include "src/api.h"
#include "src/base/cpu.h"
#include "src/base/functional.h"
#include "src/base/lazy-instance.h"
#include "src/base/platform/platform.h"
#include "src/builtins.h"
@ -130,7 +131,8 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false),
// We may use the assembler without an isolate.
serializer_enabled_(isolate && isolate->serializer_enabled()) {
serializer_enabled_(isolate && isolate->serializer_enabled()),
ool_constant_pool_available_(false) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
@ -794,8 +796,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
}
void RelocInfo::Print(Isolate* isolate, OStream& os) { // NOLINT
os << pc_ << " " << RelocModeName(rmode_);
void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
os << static_cast<const void*>(pc_) << " " << RelocModeName(rmode_);
if (IsComment(rmode_)) {
os << " (" << reinterpret_cast<char*>(data_) << ")";
} else if (rmode_ == EMBEDDED_OBJECT) {
@ -803,11 +805,11 @@ void RelocInfo::Print(Isolate* isolate, OStream& os) { // NOLINT
} else if (rmode_ == EXTERNAL_REFERENCE) {
ExternalReferenceEncoder ref_encoder(isolate);
os << " (" << ref_encoder.NameOfAddress(target_reference()) << ") ("
<< target_reference() << ")";
<< static_cast<const void*>(target_reference()) << ")";
} else if (IsCodeTarget(rmode_)) {
Code* code = Code::GetCodeFromTargetAddress(target_address());
os << " (" << Code::Kind2String(code->kind()) << ") (" << target_address()
<< ")";
os << " (" << Code::Kind2String(code->kind()) << ") ("
<< static_cast<const void*>(target_address()) << ")";
if (rmode_ == CODE_TARGET_WITH_ID) {
os << " (id=" << static_cast<int>(data_) << ")";
}
@ -1521,6 +1523,29 @@ ExternalReference ExternalReference::debug_step_in_fp_address(
}
bool operator==(ExternalReference lhs, ExternalReference rhs) {
return lhs.address() == rhs.address();
}
bool operator!=(ExternalReference lhs, ExternalReference rhs) {
return !(lhs == rhs);
}
size_t hash_value(ExternalReference reference) {
return base::hash<Address>()(reference.address());
}
std::ostream& operator<<(std::ostream& os, ExternalReference reference) {
os << static_cast<const void*>(reference.address());
const Runtime::Function* fn = Runtime::FunctionForEntry(reference.address());
if (fn) os << "<" << fn->name << ".entry>";
return os;
}
void PositionsRecorder::RecordPosition(int pos) {
DCHECK(pos != RelocInfo::kNoPosition);
DCHECK(pos >= 0);

46
deps/v8/src/assembler.h поставляемый
Просмотреть файл

@ -79,6 +79,16 @@ class AssemblerBase: public Malloced {
return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
}
bool is_ool_constant_pool_available() const {
if (FLAG_enable_ool_constant_pool) {
return ool_constant_pool_available_;
} else {
// Out-of-line constant pool not supported on this architecture.
UNREACHABLE();
return false;
}
}
// Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
// cross-snapshotting.
static void QuietNaN(HeapObject* nan) { }
@ -98,6 +108,15 @@ class AssemblerBase: public Malloced {
int buffer_size_;
bool own_buffer_;
void set_ool_constant_pool_available(bool available) {
if (FLAG_enable_ool_constant_pool) {
ool_constant_pool_available_ = available;
} else {
// Out-of-line constant pool not supported on this architecture.
UNREACHABLE();
}
}
// The program counter, which points into the buffer above and moves forward.
byte* pc_;
@ -108,6 +127,14 @@ class AssemblerBase: public Malloced {
bool emit_debug_code_;
bool predictable_code_size_;
bool serializer_enabled_;
// Indicates whether the constant pool can be accessed, which is only possible
// if the pp register points to the current code object's constant pool.
bool ool_constant_pool_available_;
// Constant pool.
friend class FrameAndConstantPoolScope;
friend class ConstantPoolUnavailableScope;
};
@ -216,7 +243,7 @@ class CpuFeatures : public AllStatic {
// unknown pc location. Assembler::bind() is used to bind a label to the
// current pc. A label can be bound only once.
class Label BASE_EMBEDDED {
class Label {
public:
enum Distance {
kNear, kFar
@ -578,7 +605,7 @@ class RelocInfo {
#ifdef ENABLE_DISASSEMBLER
// Printing
static const char* RelocModeName(Mode rmode);
void Print(Isolate* isolate, OStream& os); // NOLINT
void Print(Isolate* isolate, std::ostream& os); // NOLINT
#endif // ENABLE_DISASSEMBLER
#ifdef VERIFY_HEAP
void Verify(Isolate* isolate);
@ -959,14 +986,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference stress_deopt_count(Isolate* isolate);
bool operator==(const ExternalReference& other) const {
return address_ == other.address_;
}
bool operator!=(const ExternalReference& other) const {
return !(*this == other);
}
private:
explicit ExternalReference(void* address)
: address_(address) {}
@ -987,6 +1006,13 @@ class ExternalReference BASE_EMBEDDED {
void* address_;
};
bool operator==(ExternalReference, ExternalReference);
bool operator!=(ExternalReference, ExternalReference);
size_t hash_value(ExternalReference);
std::ostream& operator<<(std::ostream&, ExternalReference);
// -----------------------------------------------------------------------------
// Position recording support

2
deps/v8/src/assert-scope.h поставляемый
Просмотреть файл

@ -5,7 +5,7 @@
#ifndef V8_ASSERT_SCOPE_H_
#define V8_ASSERT_SCOPE_H_
#include "include/v8stdint.h"
#include <stdint.h>
#include "src/base/macros.h"
namespace v8 {

479
deps/v8/src/ast-numbering.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,479 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/ast.h"
#include "src/ast-numbering.h"
#include "src/compiler.h"
#include "src/scopes.h"
namespace v8 {
namespace internal {
// Walks a function's AST and assigns each node its bailout/type-feedback ID
// range.  IDs are handed out in visitation order, starting from
// BailoutId::FirstUsable(), so the traversal order below is load-bearing.
class AstNumberingVisitor FINAL : public AstVisitor {
 public:
  explicit AstNumberingVisitor(Zone* zone)
      : AstVisitor(), next_id_(BailoutId::FirstUsable().ToInt()) {
    InitializeAstVisitor(zone);
  }

  // Entry point: numbers the nodes of |node| (but not nested function
  // literals) and stores the accumulated AstProperties on it.
  void Renumber(FunctionLiteral* node);

 private:
  // AST node visitor interface, one Visit##type per node kind in
  // AST_NODE_LIST.
#define DEFINE_VISIT(type) virtual void Visit##type(type* node) OVERRIDE;
  AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT

  void VisitStatements(ZoneList<Statement*>* statements) OVERRIDE;
  void VisitDeclarations(ZoneList<Declaration*>* declarations) OVERRIDE;
  void VisitArguments(ZoneList<Expression*>* arguments);
  void VisitObjectLiteralProperty(ObjectLiteralProperty* property);

  // Reserves a contiguous range of |n| IDs and returns the first one.
  int ReserveIdRange(int n) {
    int tmp = next_id_;
    next_id_ += n;
    return tmp;
  }

  void IncrementNodeCount() { properties_.add_node_count(1); }

  int next_id_;                // next unassigned ID
  AstProperties properties_;   // node count / feedback-slot totals, see Renumber()

  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
  DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
};
// Simple nodes: each visit bumps the node count and, for node types that
// carry IDs, reserves that type's ID range via set_base_id() before (where
// applicable) descending into children.

void AstNumberingVisitor::VisitVariableDeclaration(VariableDeclaration* node) {
  IncrementNodeCount();
  VisitVariableProxy(node->proxy());
}


void AstNumberingVisitor::VisitExportDeclaration(ExportDeclaration* node) {
  IncrementNodeCount();
  VisitVariableProxy(node->proxy());
}


void AstNumberingVisitor::VisitModuleUrl(ModuleUrl* node) {
  IncrementNodeCount();
}


void AstNumberingVisitor::VisitEmptyStatement(EmptyStatement* node) {
  IncrementNodeCount();
}


void AstNumberingVisitor::VisitContinueStatement(ContinueStatement* node) {
  IncrementNodeCount();
}


void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
  IncrementNodeCount();
}


void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(DebuggerStatement::num_ids()));
}


void AstNumberingVisitor::VisitNativeFunctionLiteral(
    NativeFunctionLiteral* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(NativeFunctionLiteral::num_ids()));
}


void AstNumberingVisitor::VisitLiteral(Literal* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(Literal::num_ids()));
}


void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(RegExpLiteral::num_ids()));
}


void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(VariableProxy::num_ids()));
}


void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(ThisFunction::num_ids()));
}


void AstNumberingVisitor::VisitSuperReference(SuperReference* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(SuperReference::num_ids()));
  // The implicit 'this' variable proxy is numbered as part of this node.
  Visit(node->this_var());
}
// Module-related nodes and single-operand expressions.  Child-visit order
// determines the IDs the children receive, so it must not be changed.

void AstNumberingVisitor::VisitModuleDeclaration(ModuleDeclaration* node) {
  IncrementNodeCount();
  VisitVariableProxy(node->proxy());
  Visit(node->module());
}


void AstNumberingVisitor::VisitImportDeclaration(ImportDeclaration* node) {
  IncrementNodeCount();
  VisitVariableProxy(node->proxy());
  Visit(node->module());
}


void AstNumberingVisitor::VisitModuleVariable(ModuleVariable* node) {
  IncrementNodeCount();
  Visit(node->proxy());
}


void AstNumberingVisitor::VisitModulePath(ModulePath* node) {
  IncrementNodeCount();
  Visit(node->module());
}


void AstNumberingVisitor::VisitModuleStatement(ModuleStatement* node) {
  IncrementNodeCount();
  Visit(node->body());
}


void AstNumberingVisitor::VisitExpressionStatement(ExpressionStatement* node) {
  IncrementNodeCount();
  Visit(node->expression());
}


void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
  IncrementNodeCount();
  Visit(node->expression());
}


void AstNumberingVisitor::VisitYield(Yield* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(Yield::num_ids()));
  // The generator object is numbered before the yielded expression.
  Visit(node->generator_object());
  Visit(node->expression());
}


void AstNumberingVisitor::VisitThrow(Throw* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(Throw::num_ids()));
  Visit(node->exception());
}


void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(UnaryOperation::num_ids()));
  Visit(node->expression());
}


void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(CountOperation::num_ids()));
  Visit(node->expression());
}
// Compound statements and binary/ternary expressions.  Note that the order
// in which children are visited differs per node kind (e.g. do-while visits
// the body before the condition, while visits the condition first); this
// mirrors the evaluation/ID layout the rest of the pipeline expects.

void AstNumberingVisitor::VisitBlock(Block* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(Block::num_ids()));
  // A block may carry its own scope (e.g. for lexical declarations).
  if (node->scope() != NULL) VisitDeclarations(node->scope()->declarations());
  VisitStatements(node->statements());
}


void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
  IncrementNodeCount();
  VisitVariableProxy(node->proxy());
  VisitFunctionLiteral(node->fun());
}


void AstNumberingVisitor::VisitModuleLiteral(ModuleLiteral* node) {
  IncrementNodeCount();
  VisitBlock(node->body());
}


void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(CallRuntime::num_ids()));
  VisitArguments(node->arguments());
}


void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
  IncrementNodeCount();
  Visit(node->expression());
  Visit(node->statement());
}


void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(DoWhileStatement::num_ids()));
  // Body first: in a do-while the body executes before the condition.
  Visit(node->body());
  Visit(node->cond());
}


void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(WhileStatement::num_ids()));
  Visit(node->cond());
  Visit(node->body());
}


void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
  IncrementNodeCount();
  Visit(node->try_block());
  Visit(node->catch_block());
}


void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
  IncrementNodeCount();
  Visit(node->try_block());
  Visit(node->finally_block());
}


void AstNumberingVisitor::VisitProperty(Property* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(Property::num_ids()));
  // Key is numbered before the object being accessed.
  Visit(node->key());
  Visit(node->obj());
}


void AstNumberingVisitor::VisitAssignment(Assignment* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(Assignment::num_ids()));
  // Compound assignments (e.g. "+=") embed a BinaryOperation node, which is
  // numbered before the target and value.
  if (node->is_compound()) VisitBinaryOperation(node->binary_operation());
  Visit(node->target());
  Visit(node->value());
}


void AstNumberingVisitor::VisitBinaryOperation(BinaryOperation* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(BinaryOperation::num_ids()));
  Visit(node->left());
  Visit(node->right());
}


void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(CompareOperation::num_ids()));
  Visit(node->left());
  Visit(node->right());
}


void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(ForInStatement::num_ids()));
  Visit(node->each());
  Visit(node->enumerable());
  Visit(node->body());
}


void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
  // The desugared iterator-protocol sub-expressions are numbered in the
  // order they run: fetch iterator, call next(), test done, assign value.
  Visit(node->assign_iterator());
  Visit(node->next_result());
  Visit(node->result_done());
  Visit(node->assign_each());
  Visit(node->body());
}


void AstNumberingVisitor::VisitConditional(Conditional* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(Conditional::num_ids()));
  Visit(node->condition());
  Visit(node->then_expression());
  Visit(node->else_expression());
}


void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(IfStatement::num_ids()));
  Visit(node->condition());
  Visit(node->then_statement());
  if (node->HasElseStatement()) {
    Visit(node->else_statement());
  }
}


void AstNumberingVisitor::VisitSwitchStatement(SwitchStatement* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(SwitchStatement::num_ids()));
  Visit(node->tag());
  ZoneList<CaseClause*>* cases = node->cases();
  for (int i = 0; i < cases->length(); i++) {
    VisitCaseClause(cases->at(i));
  }
}


void AstNumberingVisitor::VisitCaseClause(CaseClause* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(CaseClause::num_ids()));
  // The default clause has no label expression to number.
  if (!node->is_default()) Visit(node->label());
  VisitStatements(node->statements());
}


void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(ForStatement::num_ids()));
  // All three header clauses are optional in a for statement.
  if (node->init() != NULL) Visit(node->init());
  if (node->cond() != NULL) Visit(node->cond());
  if (node->next() != NULL) Visit(node->next());
  Visit(node->body());
}
// Composite literals, calls, and the list-walking helpers.

void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(ClassLiteral::num_ids()));
  // Both the extends clause and the constructor are optional.
  if (node->extends()) Visit(node->extends());
  if (node->constructor()) Visit(node->constructor());
  for (int i = 0; i < node->properties()->length(); i++) {
    VisitObjectLiteralProperty(node->properties()->at(i));
  }
}


void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(ObjectLiteral::num_ids()));
  for (int i = 0; i < node->properties()->length(); i++) {
    VisitObjectLiteralProperty(node->properties()->at(i));
  }
}


// Note: a property is not counted as a node of its own here; only its key
// and value sub-expressions are visited.
void AstNumberingVisitor::VisitObjectLiteralProperty(
    ObjectLiteralProperty* node) {
  Visit(node->key());
  Visit(node->value());
}


void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
  IncrementNodeCount();
  // ArrayLiteral's ID count depends on the instance (element count), hence
  // the non-static num_ids() call, unlike the other node types above.
  node->set_base_id(ReserveIdRange(node->num_ids()));
  for (int i = 0; i < node->values()->length(); i++) {
    Visit(node->values()->at(i));
  }
}


void AstNumberingVisitor::VisitCall(Call* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(Call::num_ids()));
  Visit(node->expression());
  VisitArguments(node->arguments());
}


void AstNumberingVisitor::VisitCallNew(CallNew* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(CallNew::num_ids()));
  Visit(node->expression());
  VisitArguments(node->arguments());
}


// Visits each statement in order; tolerates a NULL list.
void AstNumberingVisitor::VisitStatements(ZoneList<Statement*>* statements) {
  if (statements == NULL) return;
  for (int i = 0; i < statements->length(); i++) {
    Visit(statements->at(i));
  }
}


void AstNumberingVisitor::VisitDeclarations(
    ZoneList<Declaration*>* declarations) {
  for (int i = 0; i < declarations->length(); i++) {
    Visit(declarations->at(i));
  }
}


void AstNumberingVisitor::VisitArguments(ZoneList<Expression*>* arguments) {
  for (int i = 0; i < arguments->length(); i++) {
    Visit(arguments->at(i));
  }
}
void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
  IncrementNodeCount();
  node->set_base_id(ReserveIdRange(FunctionLiteral::num_ids()));
  // We don't recurse into the declarations or body of the function literal:
  // you have to separately Renumber() each FunctionLiteral that you compile.
}


// Numbers the nodes of |node|'s body and records the collected
// AstProperties (node count, flags, feedback-slot totals) on the literal.
void AstNumberingVisitor::Renumber(FunctionLiteral* node) {
  // Seed the properties with the literal's own flags and feedback-slot
  // requirements before walking its body.
  properties_.flags()->Add(*node->flags());
  properties_.increase_feedback_slots(node->slot_count());
  properties_.increase_ic_feedback_slots(node->ic_slot_count());

  // A scope with an illegal redeclaration compiles to the redeclaration
  // error only, so that is all that needs numbering.
  if (node->scope()->HasIllegalRedeclaration()) {
    node->scope()->VisitIllegalRedeclaration(this);
    return;
  }

  Scope* scope = node->scope();
  VisitDeclarations(scope->declarations());
  if (scope->is_function_scope() && scope->function() != NULL) {
    // Visit the name of the named function expression.
    Visit(scope->function());
  }
  VisitStatements(node->body());

  node->set_ast_properties(&properties_);
}


// Public entry point.  Returns false if the traversal hit a stack overflow
// (in which case numbering is incomplete).
bool AstNumbering::Renumber(FunctionLiteral* function, Zone* zone) {
  AstNumberingVisitor visitor(zone);
  visitor.Renumber(function);
  return !visitor.HasStackOverflow();
}
}
} // namespace v8::internal

19
deps/v8/src/ast-numbering.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,19 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_AST_NUMBERING_H_
#define V8_AST_NUMBERING_H_

namespace v8 {
namespace internal {

namespace AstNumbering {
// Assign type feedback IDs and bailout IDs to an AST node tree; nested
// function literals must be renumbered separately.  Returns false if the
// traversal could not complete (stack overflow) — see ast-numbering.cc.
bool Renumber(FunctionLiteral* function, Zone* zone);
}
}
}  // namespace v8::internal

#endif  // V8_AST_NUMBERING_H_

121
deps/v8/src/ast-value-factory.cc поставляемый
Просмотреть файл

@ -117,14 +117,15 @@ bool AstRawString::IsOneByteEqualTo(const char* data) const {
bool AstRawString::Compare(void* a, void* b) {
AstRawString* string1 = reinterpret_cast<AstRawString*>(a);
AstRawString* string2 = reinterpret_cast<AstRawString*>(b);
if (string1->is_one_byte_ != string2->is_one_byte_) return false;
if (string1->hash_ != string2->hash_) return false;
int length = string1->literal_bytes_.length();
if (string2->literal_bytes_.length() != length) return false;
return memcmp(string1->literal_bytes_.start(),
string2->literal_bytes_.start(), length) == 0;
return *static_cast<AstRawString*>(a) == *static_cast<AstRawString*>(b);
}
bool AstRawString::operator==(const AstRawString& rhs) const {
if (is_one_byte_ != rhs.is_one_byte_) return false;
if (hash_ != rhs.hash_) return false;
int len = literal_bytes_.length();
if (rhs.literal_bytes_.length() != len) return false;
return memcmp(literal_bytes_.start(), rhs.literal_bytes_.start(), len) == 0;
}
@ -158,9 +159,6 @@ bool AstValue::BooleanValue() const {
return DoubleToBoolean(number_);
case SMI:
return smi_ != 0;
case STRING_ARRAY:
UNREACHABLE();
break;
case BOOLEAN:
return bool_;
case NULL_TYPE:
@ -201,22 +199,6 @@ void AstValue::Internalize(Isolate* isolate) {
value_ = isolate->factory()->false_value();
}
break;
case STRING_ARRAY: {
DCHECK(strings_ != NULL);
Factory* factory = isolate->factory();
int len = strings_->length();
Handle<FixedArray> elements = factory->NewFixedArray(len, TENURED);
for (int i = 0; i < len; i++) {
const AstRawString* string = (*strings_)[i];
Handle<Object> element = string->string();
// Strings are already internalized.
DCHECK(!element.is_null());
elements->set(i, *element);
}
value_ =
factory->NewJSArrayWithElements(elements, FAST_ELEMENTS, TENURED);
break;
}
case NULL_TYPE:
value_ = isolate->factory()->null_value();
break;
@ -230,7 +212,7 @@ void AstValue::Internalize(Isolate* isolate) {
}
const AstRawString* AstValueFactory::GetOneByteString(
AstRawString* AstValueFactory::GetOneByteStringInternal(
Vector<const uint8_t> literal) {
uint32_t hash = StringHasher::HashSequentialString<uint8_t>(
literal.start(), literal.length(), hash_seed_);
@ -238,7 +220,7 @@ const AstRawString* AstValueFactory::GetOneByteString(
}
const AstRawString* AstValueFactory::GetTwoByteString(
AstRawString* AstValueFactory::GetTwoByteStringInternal(
Vector<const uint16_t> literal) {
uint32_t hash = StringHasher::HashSequentialString<uint16_t>(
literal.start(), literal.length(), hash_seed_);
@ -247,13 +229,24 @@ const AstRawString* AstValueFactory::GetTwoByteString(
const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
DisallowHeapAllocation no_gc;
String::FlatContent content = literal->GetFlatContent();
if (content.IsOneByte()) {
return GetOneByteString(content.ToOneByteVector());
// For the FlatContent to stay valid, we shouldn't do any heap
// allocation. Make sure we won't try to internalize the string in GetString.
AstRawString* result = NULL;
Isolate* saved_isolate = isolate_;
isolate_ = NULL;
{
DisallowHeapAllocation no_gc;
String::FlatContent content = literal->GetFlatContent();
if (content.IsOneByte()) {
result = GetOneByteStringInternal(content.ToOneByteVector());
} else {
DCHECK(content.IsTwoByte());
result = GetTwoByteStringInternal(content.ToUC16Vector());
}
}
DCHECK(content.IsTwoByte());
return GetTwoByteString(content.ToUC16Vector());
isolate_ = saved_isolate;
if (isolate_) result->Internalize(isolate_);
return result;
}
@ -329,59 +322,45 @@ const AstValue* AstValueFactory::NewSmi(int number) {
}
#define GENERATE_VALUE_GETTER(value, initializer) \
if (!value) { \
value = new (zone_) AstValue(initializer); \
if (isolate_) { \
value->Internalize(isolate_); \
} \
values_.Add(value); \
} \
return value;
const AstValue* AstValueFactory::NewBoolean(bool b) {
AstValue* value = new (zone_) AstValue(b);
if (isolate_) {
value->Internalize(isolate_);
if (b) {
GENERATE_VALUE_GETTER(true_value_, true);
} else {
GENERATE_VALUE_GETTER(false_value_, false);
}
values_.Add(value);
return value;
}
const AstValue* AstValueFactory::NewStringList(
ZoneList<const AstRawString*>* strings) {
AstValue* value = new (zone_) AstValue(strings);
if (isolate_) {
value->Internalize(isolate_);
}
values_.Add(value);
return value;
}
const AstValue* AstValueFactory::NewNull() {
AstValue* value = new (zone_) AstValue(AstValue::NULL_TYPE);
if (isolate_) {
value->Internalize(isolate_);
}
values_.Add(value);
return value;
GENERATE_VALUE_GETTER(null_value_, AstValue::NULL_TYPE);
}
const AstValue* AstValueFactory::NewUndefined() {
AstValue* value = new (zone_) AstValue(AstValue::UNDEFINED);
if (isolate_) {
value->Internalize(isolate_);
}
values_.Add(value);
return value;
GENERATE_VALUE_GETTER(undefined_value_, AstValue::UNDEFINED);
}
const AstValue* AstValueFactory::NewTheHole() {
AstValue* value = new (zone_) AstValue(AstValue::THE_HOLE);
if (isolate_) {
value->Internalize(isolate_);
}
values_.Add(value);
return value;
GENERATE_VALUE_GETTER(the_hole_value_, AstValue::THE_HOLE);
}
const AstRawString* AstValueFactory::GetString(
uint32_t hash, bool is_one_byte, Vector<const byte> literal_bytes) {
#undef GENERATE_VALUE_GETTER
AstRawString* AstValueFactory::GetString(uint32_t hash, bool is_one_byte,
Vector<const byte> literal_bytes) {
// literal_bytes here points to whatever the user passed, and this is OK
// because we use vector_compare (which checks the contents) to compare
// against the AstRawStrings which are in the string_table_. We should not

121
deps/v8/src/ast-value-factory.h поставляемый
Просмотреть файл

@ -88,12 +88,16 @@ class AstRawString : public AstString {
return *c;
}
V8_INLINE bool IsArguments(AstValueFactory* ast_value_factory) const;
// For storing AstRawStrings in a hash map.
uint32_t hash() const {
return hash_;
}
static bool Compare(void* a, void* b);
bool operator==(const AstRawString& rhs) const;
private:
friend class AstValueFactory;
friend class AstRawStringInternalizationKey;
@ -190,7 +194,6 @@ class AstValue : public ZoneObject {
NUMBER,
SMI,
BOOLEAN,
STRING_ARRAY,
NULL_TYPE,
UNDEFINED,
THE_HOLE
@ -209,10 +212,6 @@ class AstValue : public ZoneObject {
explicit AstValue(bool b) : type_(BOOLEAN) { bool_ = b; }
explicit AstValue(ZoneList<const AstRawString*>* s) : type_(STRING_ARRAY) {
strings_ = s;
}
explicit AstValue(Type t) : type_(t) {
DCHECK(t == NULL_TYPE || t == UNDEFINED || t == THE_HOLE);
}
@ -234,36 +233,42 @@ class AstValue : public ZoneObject {
};
// For generating string constants.
#define STRING_CONSTANTS(F) \
F(anonymous_function, "(anonymous function)") \
F(arguments, "arguments") \
F(constructor, "constructor") \
F(done, "done") \
F(dot, ".") \
F(dot_for, ".for") \
F(dot_generator, ".generator") \
F(dot_generator_object, ".generator_object") \
F(dot_iterator, ".iterator") \
F(dot_module, ".module") \
F(dot_result, ".result") \
F(empty, "") \
F(eval, "eval") \
F(initialize_const_global, "initializeConstGlobal") \
F(initialize_var_global, "initializeVarGlobal") \
F(make_reference_error, "MakeReferenceError") \
F(make_syntax_error, "MakeSyntaxError") \
F(make_type_error, "MakeTypeError") \
F(module, "module") \
F(native, "native") \
F(next, "next") \
F(proto, "__proto__") \
F(prototype, "prototype") \
F(this, "this") \
F(use_asm, "use asm") \
F(use_strict, "use strict") \
// For generating constants.
#define STRING_CONSTANTS(F) \
F(anonymous_function, "(anonymous function)") \
F(arguments, "arguments") \
F(constructor, "constructor") \
F(done, "done") \
F(dot, ".") \
F(dot_for, ".for") \
F(dot_generator, ".generator") \
F(dot_generator_object, ".generator_object") \
F(dot_iterator, ".iterator") \
F(dot_module, ".module") \
F(dot_result, ".result") \
F(empty, "") \
F(eval, "eval") \
F(initialize_const_global, "initializeConstGlobal") \
F(initialize_var_global, "initializeVarGlobal") \
F(make_reference_error, "MakeReferenceErrorEmbedded") \
F(make_syntax_error, "MakeSyntaxErrorEmbedded") \
F(make_type_error, "MakeTypeErrorEmbedded") \
F(module, "module") \
F(native, "native") \
F(next, "next") \
F(proto, "__proto__") \
F(prototype, "prototype") \
F(this, "this") \
F(use_asm, "use asm") \
F(use_strict, "use strict") \
F(value, "value")
#define OTHER_CONSTANTS(F) \
F(true_value) \
F(false_value) \
F(null_value) \
F(undefined_value) \
F(the_hole_value)
class AstValueFactory {
public:
@ -272,18 +277,26 @@ class AstValueFactory {
zone_(zone),
isolate_(NULL),
hash_seed_(hash_seed) {
#define F(name, str) \
name##_string_ = NULL;
#define F(name, str) name##_string_ = NULL;
STRING_CONSTANTS(F)
#undef F
#define F(name) name##_ = NULL;
OTHER_CONSTANTS(F)
#undef F
}
const AstRawString* GetOneByteString(Vector<const uint8_t> literal);
Zone* zone() const { return zone_; }
const AstRawString* GetOneByteString(Vector<const uint8_t> literal) {
return GetOneByteStringInternal(literal);
}
const AstRawString* GetOneByteString(const char* string) {
return GetOneByteString(Vector<const uint8_t>(
reinterpret_cast<const uint8_t*>(string), StrLength(string)));
}
const AstRawString* GetTwoByteString(Vector<const uint16_t> literal);
const AstRawString* GetTwoByteString(Vector<const uint16_t> literal) {
return GetTwoByteStringInternal(literal);
}
const AstRawString* GetString(Handle<String> literal);
const AstConsString* NewConsString(const AstString* left,
const AstString* right);
@ -293,15 +306,15 @@ class AstValueFactory {
return isolate_ != NULL;
}
#define F(name, str) \
const AstRawString* name##_string() { \
if (name##_string_ == NULL) { \
const char* data = str; \
name##_string_ = GetOneByteString( \
#define F(name, str) \
const AstRawString* name##_string() { \
if (name##_string_ == NULL) { \
const char* data = str; \
name##_string_ = GetOneByteString( \
Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), \
static_cast<int>(strlen(data)))); \
} \
return name##_string_; \
static_cast<int>(strlen(data)))); \
} \
return name##_string_; \
}
STRING_CONSTANTS(F)
#undef F
@ -318,8 +331,10 @@ class AstValueFactory {
const AstValue* NewTheHole();
private:
const AstRawString* GetString(uint32_t hash, bool is_one_byte,
Vector<const byte> literal_bytes);
AstRawString* GetOneByteStringInternal(Vector<const uint8_t> literal);
AstRawString* GetTwoByteStringInternal(Vector<const uint16_t> literal);
AstRawString* GetString(uint32_t hash, bool is_one_byte,
Vector<const byte> literal_bytes);
// All strings are copied here, one after another (no NULLs inbetween).
HashMap string_table_;
@ -332,14 +347,22 @@ class AstValueFactory {
uint32_t hash_seed_;
#define F(name, str) \
const AstRawString* name##_string_;
#define F(name, str) const AstRawString* name##_string_;
STRING_CONSTANTS(F)
#undef F
#define F(name) AstValue* name##_;
OTHER_CONSTANTS(F)
#undef F
};
bool AstRawString::IsArguments(AstValueFactory* ast_value_factory) const {
return ast_value_factory->arguments_string() == this;
}
} } // namespace v8::internal
#undef STRING_CONSTANTS
#undef OTHER_CONSTANTS
#endif // V8_AST_VALUE_FACTORY_H_

130
deps/v8/src/ast.cc поставляемый
Просмотреть файл

@ -59,59 +59,56 @@ bool Expression::IsUndefinedLiteral(Isolate* isolate) const {
}
VariableProxy::VariableProxy(Zone* zone, Variable* var, int position,
IdGen* id_gen)
: Expression(zone, position, id_gen),
name_(var->raw_name()),
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
is_assigned_(false),
interface_(var->interface()),
variable_feedback_slot_(kInvalidFeedbackSlot) {
VariableProxy::VariableProxy(Zone* zone, Variable* var, int position)
: Expression(zone, position),
bit_field_(IsThisField::encode(var->is_this()) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false)),
variable_feedback_slot_(FeedbackVectorICSlot::Invalid()),
raw_name_(var->raw_name()),
interface_(var->interface()) {
BindTo(var);
}
VariableProxy::VariableProxy(Zone* zone, const AstRawString* name, bool is_this,
Interface* interface, int position, IdGen* id_gen)
: Expression(zone, position, id_gen),
name_(name),
var_(NULL),
is_this_(is_this),
is_assigned_(false),
interface_(interface),
variable_feedback_slot_(kInvalidFeedbackSlot) {}
Interface* interface, int position)
: Expression(zone, position),
bit_field_(IsThisField::encode(is_this) | IsAssignedField::encode(false) |
IsResolvedField::encode(false)),
variable_feedback_slot_(FeedbackVectorICSlot::Invalid()),
raw_name_(name),
interface_(interface) {}
void VariableProxy::BindTo(Variable* var) {
DCHECK(var_ == NULL); // must be bound only once
DCHECK(var != NULL); // must bind
DCHECK(!FLAG_harmony_modules || interface_->IsUnified(var->interface()));
DCHECK((is_this() && var->is_this()) || name_ == var->raw_name());
DCHECK((is_this() && var->is_this()) || raw_name() == var->raw_name());
// Ideally CONST-ness should match. However, this is very hard to achieve
// because we don't know the exact semantics of conflicting (const and
// non-const) multiple variable declarations, const vars introduced via
// eval() etc. Const-ness and variable declarations are a complete mess
// in JS. Sigh...
var_ = var;
set_var(var);
set_is_resolved();
var->set_is_used();
}
Assignment::Assignment(Zone* zone, Token::Value op, Expression* target,
Expression* value, int pos, IdGen* id_gen)
: Expression(zone, pos, id_gen),
op_(op),
Expression* value, int pos)
: Expression(zone, pos),
bit_field_(IsUninitializedField::encode(false) |
KeyTypeField::encode(ELEMENT) |
StoreModeField::encode(STANDARD_STORE) |
TokenField::encode(op)),
target_(target),
value_(value),
binary_operation_(NULL),
assignment_id_(id_gen->GetNextId()),
is_uninitialized_(false),
store_mode_(STANDARD_STORE) {}
binary_operation_(NULL) {}
Token::Value Assignment::binary_op() const {
switch (op_) {
switch (op()) {
case Token::ASSIGN_BIT_OR: return Token::BIT_OR;
case Token::ASSIGN_BIT_XOR: return Token::BIT_XOR;
case Token::ASSIGN_BIT_AND: return Token::BIT_AND;
@ -436,7 +433,7 @@ void BinaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
bool BinaryOperation::ResultOverwriteAllowed() const {
switch (op_) {
switch (op()) {
case Token::COMMA:
case Token::OR:
case Token::AND:
@ -560,7 +557,7 @@ bool FunctionDeclaration::IsInlineable() const {
// once we use the common type field in the AST consistently.
void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
to_boolean_types_ = oracle->ToBooleanTypes(test_id());
set_to_boolean_types(oracle->ToBooleanTypes(test_id()));
}
@ -582,6 +579,8 @@ Call::CallType Call::GetCallType(Isolate* isolate) const {
}
}
if (expression()->AsSuperReference() != NULL) return SUPER_CALL;
Property* property = expression()->AsProperty();
return property != NULL ? PROPERTY_CALL : OTHER_CALL;
}
@ -607,9 +606,9 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
int allocation_site_feedback_slot = FLAG_pretenuring_call_new
? AllocationSiteFeedbackSlot()
: CallNewFeedbackSlot();
FeedbackVectorSlot allocation_site_feedback_slot =
FLAG_pretenuring_call_new ? AllocationSiteFeedbackSlot()
: CallNewFeedbackSlot();
allocation_site_ =
oracle->GetCallNewAllocationSite(allocation_site_feedback_slot);
is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackSlot());
@ -794,14 +793,14 @@ bool RegExpCapture::IsAnchoredAtEnd() {
// output formats are alike.
class RegExpUnparser FINAL : public RegExpVisitor {
public:
RegExpUnparser(OStream& os, Zone* zone) : os_(os), zone_(zone) {}
RegExpUnparser(std::ostream& os, Zone* zone) : os_(os), zone_(zone) {}
void VisitCharacterRange(CharacterRange that);
#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, \
void* data) OVERRIDE;
FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
#undef MAKE_CASE
private:
OStream& os_;
std::ostream& os_;
Zone* zone_;
};
@ -944,7 +943,7 @@ void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) {
}
OStream& RegExpTree::Print(OStream& os, Zone* zone) { // NOLINT
std::ostream& RegExpTree::Print(std::ostream& os, Zone* zone) { // NOLINT
RegExpUnparser unparser(os, zone);
Accept(&unparser, NULL);
return os;
@ -989,58 +988,55 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
CaseClause::CaseClause(Zone* zone, Expression* label,
ZoneList<Statement*>* statements, int pos, IdGen* id_gen)
: Expression(zone, pos, id_gen),
ZoneList<Statement*>* statements, int pos)
: Expression(zone, pos),
label_(label),
statements_(statements),
compare_type_(Type::None(zone)),
compare_id_(id_gen->GetNextId()),
entry_id_(id_gen->GetNextId()) {}
compare_type_(Type::None(zone)) {}
#define REGULAR_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
}
#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
add_slot_node(node); \
}
#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
set_dont_crankshaft_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \
}
#define DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
add_slot_node(node); \
set_dont_crankshaft_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \
}
#define DONT_TURBOFAN_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
set_dont_crankshaft_reason(k##NodeType); \
set_dont_turbofan_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \
}
#define DONT_TURBOFAN_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
add_slot_node(node); \
set_dont_crankshaft_reason(k##NodeType); \
set_dont_turbofan_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \
}
#define DONT_SELFOPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
add_flag(kDontSelfOptimize); \
}
#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
add_slot_node(node); \
add_flag(kDontSelfOptimize); \
}
#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
set_dont_crankshaft_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \
add_flag(kDontCache); \
@ -1091,16 +1087,18 @@ DONT_OPTIMIZE_NODE(ModuleUrl)
DONT_OPTIMIZE_NODE(ModuleStatement)
DONT_OPTIMIZE_NODE(WithStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement)
DONT_OPTIMIZE_NODE(ClassLiteral)
DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
DONT_OPTIMIZE_NODE(SuperReference)
DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(Yield)
// TODO(turbofan): Remove the dont_turbofan_reason once this list is empty.
// This list must be kept in sync with Pipeline::GenerateCode.
DONT_TURBOFAN_NODE(ForOfStatement)
DONT_TURBOFAN_NODE(TryCatchStatement)
DONT_TURBOFAN_NODE(TryFinallyStatement)
DONT_TURBOFAN_NODE(ClassLiteral)
DONT_TURBOFAN_NODE_WITH_FEEDBACK_SLOTS(SuperReference)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
@ -1112,7 +1110,6 @@ DONT_CACHE_NODE(ModuleLiteral)
void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
increase_node_count();
add_slot_node(node);
if (node->is_jsruntime()) {
// Don't try to optimize JS runtime calls because we bailout on them.
@ -1126,20 +1123,19 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
#undef DONT_CACHE_NODE
Handle<String> Literal::ToString() {
if (value_->IsString()) return value_->AsString()->string();
DCHECK(value_->IsNumber());
char arr[100];
Vector<char> buffer(arr, arraysize(arr));
const char* str;
if (value()->IsSmi()) {
// Optimization only, the heap number case would subsume this.
SNPrintF(buffer, "%d", Smi::cast(*value())->value());
str = arr;
} else {
str = DoubleToCString(value()->Number(), buffer);
}
return isolate_->factory()->NewStringFromAsciiChecked(str);
uint32_t Literal::Hash() {
return raw_value()->IsString()
? raw_value()->AsString()->hash()
: ComputeLongHash(double_to_uint64(raw_value()->AsNumber()));
}
// static
bool Literal::Match(void* literal1, void* literal2) {
const AstValue* x = static_cast<Literal*>(literal1)->raw_value();
const AstValue* y = static_cast<Literal*>(literal2)->raw_value();
return (x->IsString() && y->IsString() && *x->AsString() == *y->AsString()) ||
(x->IsNumber() && y->IsNumber() && x->AsNumber() == y->AsNumber());
}

1155
deps/v8/src/ast.h поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

12
deps/v8/src/base/atomicops.h поставляемый
Просмотреть файл

@ -25,7 +25,7 @@
#ifndef V8_BASE_ATOMICOPS_H_
#define V8_BASE_ATOMICOPS_H_
#include "include/v8stdint.h"
#include <stdint.h>
#include "src/base/build_config.h"
#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT)
@ -42,15 +42,17 @@ namespace base {
typedef char Atomic8;
typedef int32_t Atomic32;
#ifdef V8_HOST_ARCH_64_BIT
#if defined(__native_client__)
typedef int64_t Atomic64;
#elif defined(V8_HOST_ARCH_64_BIT)
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__ILP32__)
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
#endif
#endif
#endif // defined(V8_HOST_ARCH_64_BIT)
#endif // defined(__native_client__)
// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
@ -140,6 +142,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "src/base/atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__)
#include "src/base/atomicops_internals_mac.h"
#elif defined(__native_client__)
#include "src/base/atomicops_internals_portable.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
#include "src/base/atomicops_internals_arm64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM

23
deps/v8/src/base/atomicops_internals_mac.h поставляемый
Просмотреть файл

@ -12,6 +12,20 @@
namespace v8 {
namespace base {
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
inline void MemoryBarrier() { OSMemoryBarrier(); }
inline void AcquireMemoryBarrier() {
// On x86 processors, loads already have acquire semantics, so
// there is no need to put a full barrier here.
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
ATOMICOPS_COMPILER_BARRIER();
#else
MemoryBarrier();
#endif
}
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
@ -46,10 +60,6 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}
inline void MemoryBarrier() {
OSMemoryBarrier();
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
@ -98,7 +108,7 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
AcquireMemoryBarrier();
return value;
}
@ -188,7 +198,7 @@ inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
AcquireMemoryBarrier();
return value;
}
@ -199,6 +209,7 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
#endif // defined(__LP64__)
#undef ATOMICOPS_COMPILER_BARRIER
} } // namespace v8::base
#endif // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_

138
deps/v8/src/base/atomicops_internals_portable.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,138 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
namespace v8 {
namespace base {
inline void MemoryBarrier() { __sync_synchronize(); }
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return __sync_val_compare_and_swap(ptr, old_value, new_value);
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __sync_lock_test_and_set(ptr, new_value);
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return __sync_add_and_fetch(ptr, increment);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return __sync_add_and_fetch(ptr, increment);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
return __sync_val_compare_and_swap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
return __sync_val_compare_and_swap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
__sync_lock_test_and_set(ptr, value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
__sync_lock_test_and_set(ptr, value);
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
__sync_lock_test_and_set(ptr, value);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__sync_lock_test_and_set(ptr, value);
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
return __sync_add_and_fetch(ptr, 0);
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return __sync_add_and_fetch(ptr, 0);
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return __sync_add_and_fetch(ptr, 0);
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return __sync_add_and_fetch(ptr, 0);
}
// 64-bit versions of the operations.
// See the 32-bit versions for comments.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return __sync_val_compare_and_swap(ptr, old_value, new_value);
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __sync_lock_test_and_set(ptr, new_value);
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return __sync_add_and_fetch(ptr, increment);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return __sync_add_and_fetch(ptr, increment);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
return __sync_val_compare_and_swap(ptr, old_value, new_value);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
return __sync_val_compare_and_swap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__sync_lock_test_and_set(ptr, value);
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
__sync_lock_test_and_set(ptr, value);
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__sync_lock_test_and_set(ptr, value);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return __sync_add_and_fetch(ptr, 0);
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return __sync_add_and_fetch(ptr, 0);
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
return __sync_add_and_fetch(ptr, 0);
}
}
} // namespace v8::base
#endif // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

46
deps/v8/src/base/base.gyp поставляемый
Просмотреть файл

@ -1,46 +0,0 @@
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'v8_code': 1,
},
'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
'targets': [
{
'target_name': 'base-unittests',
'type': 'executable',
'dependencies': [
'../../testing/gtest.gyp:gtest',
'../../testing/gtest.gyp:gtest_main',
'../../tools/gyp/v8.gyp:v8_libbase',
],
'include_dirs': [
'../..',
],
'sources': [ ### gcmole(all) ###
'bits-unittest.cc',
'cpu-unittest.cc',
'division-by-constant-unittest.cc',
'flags-unittest.cc',
'platform/condition-variable-unittest.cc',
'platform/mutex-unittest.cc',
'platform/platform-unittest.cc',
'platform/semaphore-unittest.cc',
'platform/time-unittest.cc',
'sys-info-unittest.cc',
'utils/random-number-generator-unittest.cc',
],
'conditions': [
['os_posix == 1', {
# TODO(svenpanne): This is a temporary work-around to fix the warnings
# that show up because we use -std=gnu++0x instead of -std=c++11.
'cflags!': [
'-pedantic',
],
}],
],
},
],
}

28
deps/v8/src/base/bits.cc поставляемый
Просмотреть файл

@ -3,6 +3,9 @@
// found in the LICENSE file.
#include "src/base/bits.h"
#include <limits>
#include "src/base/logging.h"
namespace v8 {
@ -20,6 +23,31 @@ uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
return value + 1;
}
int32_t SignedMulHigh32(int32_t lhs, int32_t rhs) {
int64_t const value = static_cast<int64_t>(lhs) * static_cast<int64_t>(rhs);
return bit_cast<int32_t, uint32_t>(bit_cast<uint64_t>(value) >> 32u);
}
int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs, int32_t acc) {
return bit_cast<int32_t>(bit_cast<uint32_t>(acc) +
bit_cast<uint32_t>(SignedMulHigh32(lhs, rhs)));
}
int32_t SignedDiv32(int32_t lhs, int32_t rhs) {
if (rhs == 0) return 0;
if (rhs == -1) return -lhs;
return lhs / rhs;
}
int32_t SignedMod32(int32_t lhs, int32_t rhs) {
if (rhs == 0 || rhs == -1) return 0;
return lhs % rhs;
}
} // namespace bits
} // namespace base
} // namespace v8

96
deps/v8/src/base/bits.h поставляемый
Просмотреть файл

@ -5,7 +5,7 @@
#ifndef V8_BASE_BITS_H_
#define V8_BASE_BITS_H_
#include "include/v8stdint.h"
#include <stdint.h>
#include "src/base/macros.h"
#if V8_CC_MSVC
#include <intrin.h>
@ -19,7 +19,7 @@ namespace base {
namespace bits {
// CountPopulation32(value) returns the number of bits set in |value|.
inline uint32_t CountPopulation32(uint32_t value) {
inline unsigned CountPopulation32(uint32_t value) {
#if V8_HAS_BUILTIN_POPCOUNT
return __builtin_popcount(value);
#else
@ -28,20 +28,31 @@ inline uint32_t CountPopulation32(uint32_t value) {
value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
return value;
return static_cast<unsigned>(value);
#endif
}
// CountPopulation64(value) returns the number of bits set in |value|.
inline unsigned CountPopulation64(uint64_t value) {
#if V8_HAS_BUILTIN_POPCOUNT
return __builtin_popcountll(value);
#else
return CountPopulation32(static_cast<uint32_t>(value)) +
CountPopulation32(static_cast<uint32_t>(value >> 32));
#endif
}
// CountLeadingZeros32(value) returns the number of zero bits following the most
// significant 1 bit in |value| if |value| is non-zero, otherwise it returns 32.
inline uint32_t CountLeadingZeros32(uint32_t value) {
inline unsigned CountLeadingZeros32(uint32_t value) {
#if V8_HAS_BUILTIN_CLZ
return value ? __builtin_clz(value) : 32;
#elif V8_CC_MSVC
unsigned long result; // NOLINT(runtime/int)
if (!_BitScanReverse(&result, value)) return 32;
return static_cast<uint32_t>(31 - result);
return static_cast<unsigned>(31 - result);
#else
value = value | (value >> 1);
value = value | (value >> 2);
@ -53,16 +64,33 @@ inline uint32_t CountLeadingZeros32(uint32_t value) {
}
// CountLeadingZeros64(value) returns the number of zero bits following the most
// significant 1 bit in |value| if |value| is non-zero, otherwise it returns 64.
inline unsigned CountLeadingZeros64(uint64_t value) {
#if V8_HAS_BUILTIN_CLZ
return value ? __builtin_clzll(value) : 64;
#else
value = value | (value >> 1);
value = value | (value >> 2);
value = value | (value >> 4);
value = value | (value >> 8);
value = value | (value >> 16);
value = value | (value >> 32);
return CountPopulation64(~value);
#endif
}
// CountTrailingZeros32(value) returns the number of zero bits preceding the
// least significant 1 bit in |value| if |value| is non-zero, otherwise it
// returns 32.
inline uint32_t CountTrailingZeros32(uint32_t value) {
inline unsigned CountTrailingZeros32(uint32_t value) {
#if V8_HAS_BUILTIN_CTZ
return value ? __builtin_ctz(value) : 32;
#elif V8_CC_MSVC
unsigned long result; // NOLINT(runtime/int)
if (!_BitScanForward(&result, value)) return 32;
return static_cast<uint32_t>(result);
return static_cast<unsigned>(result);
#else
if (value == 0) return 32;
unsigned count = 0;
@ -73,6 +101,22 @@ inline uint32_t CountTrailingZeros32(uint32_t value) {
}
// CountTrailingZeros64(value) returns the number of zero bits preceding the
// least significant 1 bit in |value| if |value| is non-zero, otherwise it
// returns 64.
inline unsigned CountTrailingZeros64(uint64_t value) {
#if V8_HAS_BUILTIN_CTZ
return value ? __builtin_ctzll(value) : 64;
#else
if (value == 0) return 64;
unsigned count = 0;
for (value ^= value - 1; value >>= 1; ++count)
;
return count;
#endif
}
// Returns true iff |value| is a power of 2.
inline bool IsPowerOfTwo32(uint32_t value) {
return value && !(value & (value - 1));
@ -143,6 +187,44 @@ inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
#endif
}
// SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and
// |rhs|, extracts the most significant 32 bits of the result, and returns
// those.
int32_t SignedMulHigh32(int32_t lhs, int32_t rhs);
// SignedMulHighAndAdd32(lhs, rhs, acc) multiplies two signed 32-bit values
// |lhs| and |rhs|, extracts the most significant 32 bits of the result, and
// adds the accumulate value |acc|.
int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs, int32_t acc);
// SignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
// truncated to int32. If |rhs| is zero, then zero is returned. If |lhs|
// is minint and |rhs| is -1, it returns minint.
int32_t SignedDiv32(int32_t lhs, int32_t rhs);
// SignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
// truncated to int32. If either |rhs| is zero or |lhs| is minint and |rhs|
// is -1, it returns zero.
int32_t SignedMod32(int32_t lhs, int32_t rhs);
// UnsignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
// truncated to uint32. If |rhs| is zero, then zero is returned.
inline uint32_t UnsignedDiv32(uint32_t lhs, uint32_t rhs) {
return rhs ? lhs / rhs : 0u;
}
// UnsignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
// truncated to uint32. If |rhs| is zero, then zero is returned.
inline uint32_t UnsignedMod32(uint32_t lhs, uint32_t rhs) {
return rhs ? lhs % rhs : 0u;
}
} // namespace bits
} // namespace base
} // namespace v8

4
deps/v8/src/base/build_config.h поставляемый
Просмотреть файл

@ -29,6 +29,10 @@
#define V8_HOST_ARCH_64_BIT 1
#endif
#endif // __native_client__
#elif defined(__pnacl__)
// PNaCl is also ILP-32.
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1

12
deps/v8/src/base/compiler-specific.h поставляемый
Просмотреть файл

@ -7,15 +7,13 @@
#include "include/v8config.h"
// Annotate a variable indicating it's ok if the variable is not used.
// (Typically used to silence a compiler warning when the assignment
// is important for some other reason.)
// Annotate a typedef or function indicating it's ok if it's not used.
// Use like:
// int x ALLOW_UNUSED = ...;
// typedef Foo Bar ALLOW_UNUSED_TYPE;
#if V8_HAS_ATTRIBUTE_UNUSED
#define ALLOW_UNUSED __attribute__((unused))
#define ALLOW_UNUSED_TYPE __attribute__((unused))
#else
#define ALLOW_UNUSED
#define ALLOW_UNUSED_TYPE
#endif
@ -39,8 +37,6 @@
#define FINAL final
#elif V8_HAS___FINAL
#define FINAL __final
#elif V8_HAS_SEALED
#define FINAL sealed
#else
#define FINAL /* NOT SUPPORTED */
#endif

66
deps/v8/src/base/cpu.cc поставляемый
Просмотреть файл

@ -7,12 +7,18 @@
#if V8_LIBC_MSVCRT
#include <intrin.h> // __cpuid()
#endif
#if V8_OS_POSIX
#include <unistd.h> // sysconf()
#if V8_OS_LINUX
#include <linux/auxvec.h> // AT_HWCAP
#endif
#if V8_GLIBC_PREREQ(2, 16)
#include <sys/auxv.h> // getauxval()
#endif
#if V8_OS_QNX
#include <sys/syspage.h> // cpuinfo
#endif
#if V8_OS_POSIX
#include <unistd.h> // sysconf()
#endif
#include <ctype.h>
#include <limits.h>
@ -29,7 +35,9 @@
namespace v8 {
namespace base {
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
#if defined(__pnacl__)
// Portable host shouldn't do feature detection.
#elif V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
// Define __cpuid() for non-MSVC libraries.
#if !V8_LIBC_MSVCRT
@ -90,11 +98,12 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
#define HWCAP_LPAE (1 << 20)
#define AT_HWCAP 16
// Read the ELF HWCAP flags by parsing /proc/self/auxv.
static uint32_t ReadELFHWCaps() {
uint32_t result = 0;
#if V8_GLIBC_PREREQ(2, 16)
result = static_cast<uint32_t>(getauxval(AT_HWCAP));
#else
// Read the ELF HWCAP flags by parsing /proc/self/auxv.
FILE* fp = fopen("/proc/self/auxv", "r");
if (fp != NULL) {
struct { uint32_t tag; uint32_t value; } entry;
@ -110,6 +119,7 @@ static uint32_t ReadELFHWCaps() {
}
fclose(fp);
}
#endif
return result;
}
@ -119,13 +129,18 @@ static uint32_t ReadELFHWCaps() {
int __detect_fp64_mode(void) {
double result = 0;
// Bit representation of (double)1 is 0x3FF0000000000000.
asm(
"lui $t0, 0x3FF0\n\t"
"ldc1 $f0, %0\n\t"
"mtc1 $t0, $f1\n\t"
"sdc1 $f0, %0\n\t"
: "+m" (result)
: : "t0", "$f0", "$f1", "memory");
__asm__ volatile(
".set push\n\t"
".set noreorder\n\t"
".set oddspreg\n\t"
"lui $t0, 0x3FF0\n\t"
"ldc1 $f0, %0\n\t"
"mtc1 $t0, $f1\n\t"
"sdc1 $f0, %0\n\t"
".set pop\n\t"
: "+m"(result)
:
: "t0", "$f0", "$f1", "memory");
return !(result == 1);
}
@ -133,9 +148,22 @@ int __detect_fp64_mode(void) {
int __detect_mips_arch_revision(void) {
// TODO(dusmil): Do the specific syscall as soon as it is implemented in mips
// kernel. Currently fail-back to the least common denominator which is
// mips32 revision 1.
return 1;
// kernel.
uint32_t result = 0;
__asm__ volatile(
"move $v0, $zero\n\t"
// Encoding for "addi $v0, $v0, 1" on non-r6,
// which is encoding for "bovc $v0, %v0, 1" on r6.
// Use machine code directly to avoid compilation errors with different
// toolchains and maintain compatibility.
".word 0x20420001\n\t"
"sw $v0, %0\n\t"
: "=m"(result)
:
: "v0", "memory");
// Result is 0 on r6 architectures, 1 on other architecture revisions.
// Fall-back to the least common denominator which is mips32 revision 1.
return result ? 1 : 6;
}
#endif
@ -290,7 +318,11 @@ CPU::CPU() : stepping_(0),
has_vfp3_d32_(false),
is_fp64_mode_(false) {
memcpy(vendor_, "Unknown", 8);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
#if V8_OS_NACL
// Portable host shouldn't do feature detection.
// TODO(jfb): Remove the hardcoded ARM simulator flags in the build, and
// hardcode them here instead.
#elif V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
int cpu_info[4];
// __cpuid with an InfoType argument of 0 returns the number of

4
deps/v8/src/base/division-by-constant.cc поставляемый
Просмотреть файл

@ -52,7 +52,7 @@ MagicNumbersForDivision<T> SignedDivisionByConstant(T d) {
delta = ad - r2;
} while (q1 < delta || (q1 == delta && r1 == 0));
T mul = q2 + 1;
return {neg ? (0 - mul) : mul, p - bits, false};
return MagicNumbersForDivision<T>(neg ? (0 - mul) : mul, p - bits, false);
}
@ -93,7 +93,7 @@ MagicNumbersForDivision<T> UnsignedDivisionByConstant(T d,
}
delta = d - 1 - r2;
} while (p < bits * 2 && (q1 < delta || (q1 == delta && r1 == 0)));
return {q2 + 1, p - bits, a};
return MagicNumbersForDivision<T>(q2 + 1, p - bits, a);
}

79
deps/v8/src/base/flags.h поставляемый
Просмотреть файл

@ -26,8 +26,9 @@ class Flags FINAL {
typedef S mask_type;
Flags() : mask_(0) {}
Flags(flag_type flag) : mask_(flag) {} // NOLINT(runtime/explicit)
explicit Flags(mask_type mask) : mask_(mask) {}
Flags(flag_type flag) // NOLINT(runtime/explicit)
: mask_(static_cast<S>(flag)) {}
explicit Flags(mask_type mask) : mask_(static_cast<S>(mask)) {}
Flags& operator&=(const Flags& flags) {
mask_ &= flags.mask_;
@ -64,42 +65,44 @@ class Flags FINAL {
};
#define DEFINE_OPERATORS_FOR_FLAGS(Type) \
inline Type operator&(Type::flag_type lhs, \
Type::flag_type rhs)ALLOW_UNUSED WARN_UNUSED_RESULT; \
inline Type operator&(Type::flag_type lhs, Type::flag_type rhs) { \
return Type(lhs) & rhs; \
} \
inline Type operator&(Type::flag_type lhs, \
const Type& rhs)ALLOW_UNUSED WARN_UNUSED_RESULT; \
inline Type operator&(Type::flag_type lhs, const Type& rhs) { \
return rhs & lhs; \
} \
inline void operator&(Type::flag_type lhs, Type::mask_type rhs)ALLOW_UNUSED; \
inline void operator&(Type::flag_type lhs, Type::mask_type rhs) {} \
inline Type operator|(Type::flag_type lhs, Type::flag_type rhs) \
ALLOW_UNUSED WARN_UNUSED_RESULT; \
inline Type operator|(Type::flag_type lhs, Type::flag_type rhs) { \
return Type(lhs) | rhs; \
} \
inline Type operator|(Type::flag_type lhs, const Type& rhs) \
ALLOW_UNUSED WARN_UNUSED_RESULT; \
inline Type operator|(Type::flag_type lhs, const Type& rhs) { \
return rhs | lhs; \
} \
inline void operator|(Type::flag_type lhs, Type::mask_type rhs) \
ALLOW_UNUSED; \
inline void operator|(Type::flag_type lhs, Type::mask_type rhs) {} \
inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) \
ALLOW_UNUSED WARN_UNUSED_RESULT; \
inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) { \
return Type(lhs) ^ rhs; \
} inline Type operator^(Type::flag_type lhs, const Type& rhs) \
ALLOW_UNUSED WARN_UNUSED_RESULT; \
inline Type operator^(Type::flag_type lhs, const Type& rhs) { \
return rhs ^ lhs; \
} inline void operator^(Type::flag_type lhs, Type::mask_type rhs) \
ALLOW_UNUSED; \
#define DEFINE_OPERATORS_FOR_FLAGS(Type) \
inline Type operator&( \
Type::flag_type lhs, \
Type::flag_type rhs)ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
inline Type operator&(Type::flag_type lhs, Type::flag_type rhs) { \
return Type(lhs) & rhs; \
} \
inline Type operator&(Type::flag_type lhs, \
const Type& rhs)ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
inline Type operator&(Type::flag_type lhs, const Type& rhs) { \
return rhs & lhs; \
} \
inline void operator&(Type::flag_type lhs, \
Type::mask_type rhs)ALLOW_UNUSED_TYPE; \
inline void operator&(Type::flag_type lhs, Type::mask_type rhs) {} \
inline Type operator|(Type::flag_type lhs, Type::flag_type rhs) \
ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
inline Type operator|(Type::flag_type lhs, Type::flag_type rhs) { \
return Type(lhs) | rhs; \
} \
inline Type operator|(Type::flag_type lhs, const Type& rhs) \
ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
inline Type operator|(Type::flag_type lhs, const Type& rhs) { \
return rhs | lhs; \
} \
inline void operator|(Type::flag_type lhs, Type::mask_type rhs) \
ALLOW_UNUSED_TYPE; \
inline void operator|(Type::flag_type lhs, Type::mask_type rhs) {} \
inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) \
ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) { \
return Type(lhs) ^ rhs; \
} inline Type operator^(Type::flag_type lhs, const Type& rhs) \
ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
inline Type operator^(Type::flag_type lhs, const Type& rhs) { \
return rhs ^ lhs; \
} inline void operator^(Type::flag_type lhs, Type::mask_type rhs) \
ALLOW_UNUSED_TYPE; \
inline void operator^(Type::flag_type lhs, Type::mask_type rhs) {}
} // namespace base

111
deps/v8/src/base/functional.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,111 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// This also contains public domain code from MurmurHash. From the
// MurmurHash header:
//
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
#include "src/base/functional.h"
#include <limits>
#include "src/base/bits.h"
namespace v8 {
namespace base {
namespace {
// Thomas Wang, Integer Hash Functions.
// https://gist.github.com/badboy/6267743
template <typename T>
V8_INLINE size_t hash_value_unsigned(T v) {
switch (sizeof(T)) {
case 4: {
// "32 bit Mix Functions"
v = ~v + (v << 15); // v = (v << 15) - v - 1;
v = v ^ (v >> 12);
v = v + (v << 2);
v = v ^ (v >> 4);
v = v * 2057; // v = (v + (v << 3)) + (v << 11);
v = v ^ (v >> 16);
return static_cast<size_t>(v);
}
case 8: {
switch (sizeof(size_t)) {
case 4: {
// "64 bit to 32 bit Hash Functions"
v = ~v + (v << 18); // v = (v << 18) - v - 1;
v = v ^ (v >> 31);
v = v * 21; // v = (v + (v << 2)) + (v << 4);
v = v ^ (v >> 11);
v = v + (v << 6);
v = v ^ (v >> 22);
return static_cast<size_t>(v);
}
case 8: {
// "64 bit Mix Functions"
v = ~v + (v << 21); // v = (v << 21) - v - 1;
v = v ^ (v >> 24);
v = (v + (v << 3)) + (v << 8); // v * 265
v = v ^ (v >> 14);
v = (v + (v << 2)) + (v << 4); // v * 21
v = v ^ (v >> 28);
v = v + (v << 31);
return static_cast<size_t>(v);
}
}
}
}
UNREACHABLE();
return static_cast<size_t>(v);
}
} // namespace
// This code was taken from MurmurHash.
size_t hash_combine(size_t seed, size_t value) {
#if V8_HOST_ARCH_32_BIT
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
value *= c1;
value = bits::RotateRight32(value, 15);
value *= c2;
seed ^= value;
seed = bits::RotateRight32(seed, 13);
seed = seed * 5 + 0xe6546b64;
#else
const uint64_t m = V8_UINT64_C(0xc6a4a7935bd1e995);
const uint32_t r = 47;
value *= m;
value ^= value >> r;
value *= m;
seed ^= value;
seed *= m;
#endif // V8_HOST_ARCH_32_BIT
return seed;
}
size_t hash_value(unsigned int v) { return hash_value_unsigned(v); }
size_t hash_value(unsigned long v) { // NOLINT(runtime/int)
return hash_value_unsigned(v);
}
size_t hash_value(unsigned long long v) { // NOLINT(runtime/int)
return hash_value_unsigned(v);
}
} // namespace base
} // namespace v8

227
deps/v8/src/base/functional.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,227 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_FUNCTIONAL_H_
#define V8_BASE_FUNCTIONAL_H_
#include <stddef.h>
#include <stdint.h>
#include <cstddef>
#include <cstring>
#include <functional>
#include <utility>
#include "src/base/macros.h"
namespace v8 {
namespace base {
// base::hash is an implementation of the hash function object specified by
// C++11. It was designed to be compatible with std::hash (in C++11) and
// boost:hash (which in turn is based on the hash function object specified by
// the Draft Technical Report on C++ Library Extensions (TR1)).
//
// base::hash is implemented by calling the hash_value function. The namespace
// isn't specified so that it can detect overloads via argument dependant
// lookup. So if there is a free function hash_value in the same namespace as a
// custom type, it will get called.
//
// If users are asked to implement a hash function for their own types with no
// guidance, they generally write bad hash functions. Instead, we provide a
// simple function base::hash_combine to pass hash-relevant member variables
// into, in order to define a decent hash function. base::hash_combine is
// declared as:
//
// template<typename T, typename... Ts>
// size_t hash_combine(const T& v, const Ts& ...vs);
//
// Consider the following example:
//
// namespace v8 {
// namespace bar {
// struct Point { int x; int y; };
// size_t hash_value(Point const& p) {
// return base::hash_combine(p.x, p.y);
// }
// }
//
// namespace foo {
// void DoSomeWork(bar::Point const& p) {
// base::hash<bar::Point> h;
// ...
// size_t hash_code = h(p); // calls bar::hash_value(Point const&)
// ...
// }
// }
// }
//
// Based on the "Hashing User-Defined Types in C++1y" proposal from Jeffrey
// Yasskin and Chandler Carruth, see
// http://www.open-std.org/Jtc1/sc22/wg21/docs/papers/2012/n3333.html.
template <typename>
struct hash;
V8_INLINE size_t hash_combine() { return 0u; }
V8_INLINE size_t hash_combine(size_t seed) { return seed; }
size_t hash_combine(size_t seed, size_t value);
template <typename T, typename... Ts>
V8_INLINE size_t hash_combine(T const& v, Ts const&... vs) {
return hash_combine(hash_combine(vs...), hash<T>()(v));
}
template <typename Iterator>
V8_INLINE size_t hash_range(Iterator first, Iterator last) {
size_t seed = 0;
for (; first != last; ++first) {
seed = hash_combine(seed, *first);
}
return seed;
}
#define V8_BASE_HASH_VALUE_TRIVIAL(type) \
V8_INLINE size_t hash_value(type v) { return static_cast<size_t>(v); }
V8_BASE_HASH_VALUE_TRIVIAL(bool)
V8_BASE_HASH_VALUE_TRIVIAL(unsigned char)
V8_BASE_HASH_VALUE_TRIVIAL(unsigned short) // NOLINT(runtime/int)
#undef V8_BASE_HASH_VALUE_TRIVIAL
size_t hash_value(unsigned int);
size_t hash_value(unsigned long); // NOLINT(runtime/int)
size_t hash_value(unsigned long long); // NOLINT(runtime/int)
#define V8_BASE_HASH_VALUE_SIGNED(type) \
V8_INLINE size_t hash_value(signed type v) { \
return hash_value(bit_cast<unsigned type>(v)); \
}
V8_BASE_HASH_VALUE_SIGNED(char)
V8_BASE_HASH_VALUE_SIGNED(short) // NOLINT(runtime/int)
V8_BASE_HASH_VALUE_SIGNED(int) // NOLINT(runtime/int)
V8_BASE_HASH_VALUE_SIGNED(long) // NOLINT(runtime/int)
V8_BASE_HASH_VALUE_SIGNED(long long) // NOLINT(runtime/int)
#undef V8_BASE_HASH_VALUE_SIGNED
V8_INLINE size_t hash_value(float v) {
// 0 and -0 both hash to zero.
return v != 0.0f ? hash_value(bit_cast<uint32_t>(v)) : 0;
}
V8_INLINE size_t hash_value(double v) {
// 0 and -0 both hash to zero.
return v != 0.0 ? hash_value(bit_cast<uint64_t>(v)) : 0;
}
template <typename T, size_t N>
V8_INLINE size_t hash_value(const T (&v)[N]) {
return hash_range(v, v + N);
}
template <typename T, size_t N>
V8_INLINE size_t hash_value(T (&v)[N]) {
return hash_range(v, v + N);
}
template <typename T>
V8_INLINE size_t hash_value(T* const& v) {
return hash_value(bit_cast<uintptr_t>(v));
}
template <typename T1, typename T2>
V8_INLINE size_t hash_value(std::pair<T1, T2> const& v) {
return hash_combine(v.first, v.second);
}
template <typename T>
struct hash : public std::unary_function<T, size_t> {
V8_INLINE size_t operator()(T const& v) const { return hash_value(v); }
};
#define V8_BASE_HASH_SPECIALIZE(type) \
template <> \
struct hash<type> : public std::unary_function<type, size_t> { \
V8_INLINE size_t operator()(type const v) const { \
return ::v8::base::hash_value(v); \
} \
};
V8_BASE_HASH_SPECIALIZE(bool)
V8_BASE_HASH_SPECIALIZE(signed char)
V8_BASE_HASH_SPECIALIZE(unsigned char)
V8_BASE_HASH_SPECIALIZE(short) // NOLINT(runtime/int)
V8_BASE_HASH_SPECIALIZE(unsigned short) // NOLINT(runtime/int)
V8_BASE_HASH_SPECIALIZE(int)
V8_BASE_HASH_SPECIALIZE(unsigned int)
V8_BASE_HASH_SPECIALIZE(long) // NOLINT(runtime/int)
V8_BASE_HASH_SPECIALIZE(unsigned long) // NOLINT(runtime/int)
V8_BASE_HASH_SPECIALIZE(long long) // NOLINT(runtime/int)
V8_BASE_HASH_SPECIALIZE(unsigned long long) // NOLINT(runtime/int)
V8_BASE_HASH_SPECIALIZE(float)
V8_BASE_HASH_SPECIALIZE(double)
#undef V8_BASE_HASH_SPECIALIZE
template <typename T>
struct hash<T*> : public std::unary_function<T*, size_t> {
V8_INLINE size_t operator()(T* const v) const {
return ::v8::base::hash_value(v);
}
};
// base::bit_equal_to is a function object class for bitwise equality
// comparison, similar to std::equal_to, except that the comparison is performed
// on the bit representation of the operands.
//
// base::bit_hash is a function object class for bitwise hashing, similar to
// base::hash. It can be used together with base::bit_equal_to to implement a
// hash data structure based on the bitwise representation of types.
template <typename T>
struct bit_equal_to : public std::binary_function<T, T, bool> {};
template <typename T>
struct bit_hash : public std::unary_function<T, size_t> {};
#define V8_BASE_BIT_SPECIALIZE_TRIVIAL(type) \
template <> \
struct bit_equal_to<type> : public std::equal_to<type> {}; \
template <> \
struct bit_hash<type> : public hash<type> {};
V8_BASE_BIT_SPECIALIZE_TRIVIAL(signed char)
V8_BASE_BIT_SPECIALIZE_TRIVIAL(unsigned char)
V8_BASE_BIT_SPECIALIZE_TRIVIAL(short) // NOLINT(runtime/int)
V8_BASE_BIT_SPECIALIZE_TRIVIAL(unsigned short) // NOLINT(runtime/int)
V8_BASE_BIT_SPECIALIZE_TRIVIAL(int)
V8_BASE_BIT_SPECIALIZE_TRIVIAL(unsigned int)
V8_BASE_BIT_SPECIALIZE_TRIVIAL(long) // NOLINT(runtime/int)
V8_BASE_BIT_SPECIALIZE_TRIVIAL(unsigned long) // NOLINT(runtime/int)
V8_BASE_BIT_SPECIALIZE_TRIVIAL(long long) // NOLINT(runtime/int)
V8_BASE_BIT_SPECIALIZE_TRIVIAL(unsigned long long) // NOLINT(runtime/int)
#undef V8_BASE_BIT_SPECIALIZE_TRIVIAL
#define V8_BASE_BIT_SPECIALIZE_BIT_CAST(type, btype) \
template <> \
struct bit_equal_to<type> : public std::binary_function<type, type, bool> { \
V8_INLINE bool operator()(type lhs, type rhs) const { \
return bit_cast<btype>(lhs) == bit_cast<btype>(rhs); \
} \
}; \
template <> \
struct bit_hash<type> : public std::unary_function<type, size_t> { \
V8_INLINE size_t operator()(type v) const { \
hash<btype> h; \
return h(bit_cast<btype>(v)); \
} \
};
V8_BASE_BIT_SPECIALIZE_BIT_CAST(float, uint32_t)
V8_BASE_BIT_SPECIALIZE_BIT_CAST(double, uint64_t)
#undef V8_BASE_BIT_SPECIALIZE_BIT_CAST
} // namespace base
} // namespace v8
#endif // V8_BASE_FUNCTIONAL_H_

2
deps/v8/src/base/logging.h поставляемый
Просмотреть файл

@ -5,9 +5,9 @@
#ifndef V8_BASE_LOGGING_H_
#define V8_BASE_LOGGING_H_
#include <stdint.h>
#include <string.h>
#include "include/v8stdint.h"
#include "src/base/build_config.h"
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);

53
deps/v8/src/base/macros.h поставляемый
Просмотреть файл

@ -5,9 +5,11 @@
#ifndef V8_BASE_MACROS_H_
#define V8_BASE_MACROS_H_
#include <stddef.h>
#include <stdint.h>
#include <cstring>
#include "include/v8stdint.h"
#include "src/base/build_config.h"
#include "src/base/compiler-specific.h"
#include "src/base/logging.h"
@ -23,6 +25,8 @@
(reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
#if V8_OS_NACL
// ARRAYSIZE_UNSAFE performs essentially the same calculation as arraysize,
// but can be used on anonymous types or types defined inside
// functions. It's less safe than arraysize as it accepts some
@ -63,9 +67,6 @@
((sizeof(a) / sizeof(*(a))) / \
static_cast<size_t>(!(sizeof(a) % sizeof(*(a))))) // NOLINT
#if V8_OS_NACL
// TODO(bmeurer): For some reason, the NaCl toolchain cannot handle the correct
// definition of arraysize() below, so we have to use the unsafe version for
// now.
@ -130,7 +131,7 @@ struct CompileAssert {};
#define COMPILE_ASSERT(expr, msg) \
typedef CompileAssert<static_cast<bool>(expr)> \
msg[static_cast<bool>(expr) ? 1 : -1] ALLOW_UNUSED
msg[static_cast<bool>(expr) ? 1 : -1] ALLOW_UNUSED_TYPE
// Implementation details of COMPILE_ASSERT:
//
@ -150,23 +151,11 @@ struct CompileAssert {};
// COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
// // not a compile-time constant.
//
// - By using the type CompileAssert<(bool(expr))>, we ensures that
// - By using the type CompileAssert<static_cast<bool>(expr)>, we ensure that
// expr is a compile-time constant. (Template arguments must be
// determined at compile-time.)
//
// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written
//
// CompileAssert<bool(expr)>
//
// instead, these compilers will refuse to compile
//
// COMPILE_ASSERT(5 > 0, some_message);
//
// (They seem to think the ">" in "5 > 0" marks the end of the
// template argument list.)
//
// - The array size is (bool(expr) ? 1 : -1), instead of simply
// - The array size is (static_cast<bool>(expr) ? 1 : -1), instead of simply
//
// ((expr) ? 1 : -1).
//
@ -308,10 +297,10 @@ template <> class StaticAssertion<true> { };
// actually causes each use to introduce a new defined type with a
// name depending on the source line.
template <int> class StaticAssertionHelper { };
#define STATIC_ASSERT(test) \
typedef \
StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) ALLOW_UNUSED
#define STATIC_ASSERT(test) \
typedef StaticAssertionHelper< \
sizeof(StaticAssertion<static_cast<bool>((test))>)> \
SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) ALLOW_UNUSED_TYPE
#endif
@ -408,4 +397,22 @@ inline T RoundUp(T x, intptr_t m) {
return RoundDown<T>(static_cast<T>(x + m - 1), m);
}
namespace v8 {
namespace base {
// TODO(yangguo): This is a poor man's replacement for std::is_fundamental,
// which requires C++11. Switch to std::is_fundamental once possible.
template <typename T>
inline bool is_fundamental() {
return false;
}
template <>
inline bool is_fundamental<uint8_t>() {
return true;
}
}
} // namespace v8::base
#endif // V8_BASE_MACROS_H_

2
deps/v8/src/base/once.h поставляемый
Просмотреть файл

@ -52,6 +52,8 @@
#ifndef V8_BASE_ONCE_H_
#define V8_BASE_ONCE_H_
#include <stddef.h>
#include "src/base/atomicops.h"
namespace v8 {

2
deps/v8/src/base/platform/platform-linux.cc поставляемый
Просмотреть файл

@ -109,7 +109,7 @@ const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
if (!t || !t->tm_zone) return "";
return t->tm_zone;
#endif
}

12
deps/v8/src/base/platform/platform-posix.cc поставляемый
Просмотреть файл

@ -253,14 +253,14 @@ int OS::GetCurrentProcessId() {
int OS::GetCurrentThreadId() {
#if defined(ANDROID)
#if V8_OS_MACOSX
return static_cast<int>(pthread_mach_thread_np(pthread_self()));
#elif V8_OS_LINUX
return static_cast<int>(syscall(__NR_gettid));
#elif defined(SYS_gettid)
return static_cast<int>(syscall(SYS_gettid));
#elif V8_OS_ANDROID
return static_cast<int>(gettid());
#else
// PNaCL doesn't have a way to get an integral thread ID, but it doesn't
// really matter, because we only need it in PerfJitLogger::LogRecordedBuffer.
return 0;
return static_cast<int>(pthread_self());
#endif
}

20
deps/v8/src/base/platform/platform-solaris.cc поставляемый
Просмотреть файл

@ -31,26 +31,6 @@
#include "src/base/platform/platform.h"
// It seems there is a bug in some Solaris distributions (experienced in
// SunOS 5.10 Generic_141445-09) which make it difficult or impossible to
// access signbit() despite the availability of other C99 math functions.
#ifndef signbit
namespace std {
// Test sign - usually defined in math.h
int signbit(double x) {
// We need to take care of the special case of both positive and negative
// versions of zero.
if (x == 0) {
return fpclass(x) & FP_NZERO;
} else {
// This won't detect negative NaN but that should be okay since we don't
// assume that behavior.
return x < 0;
}
}
} // namespace std
#endif // signbit
namespace v8 {
namespace base {

61
deps/v8/src/base/platform/platform-win32.cc поставляемый
Просмотреть файл

@ -15,9 +15,7 @@
#endif // MINGW_HAS_SECURE_API
#endif // __MINGW32__
#ifdef _MSC_VER
#include <limits>
#endif
#include "src/base/win32-headers.h"
@ -28,16 +26,6 @@
#include "src/base/platform/time.h"
#include "src/base/utils/random-number-generator.h"
#ifdef _MSC_VER
// Case-insensitive bounded string comparisons. Use stricmp() on Win32. Usually
// defined in strings.h.
int strncasecmp(const char* s1, const char* s2, int n) {
return _strnicmp(s1, s2, n);
}
#endif // _MSC_VER
// Extra functions for MinGW. Most of these are the _s functions which are in
// the Microsoft Visual Studio C++ CRT.
@ -358,41 +346,26 @@ void Win32Time::SetToCurrentTime() {
}
int64_t FileTimeToInt64(FILETIME ft) {
ULARGE_INTEGER result;
result.LowPart = ft.dwLowDateTime;
result.HighPart = ft.dwHighDateTime;
return static_cast<int64_t>(result.QuadPart);
}
// Return the local timezone offset in milliseconds east of UTC. This
// takes into account whether daylight saving is in effect at the time.
// Only times in the 32-bit Unix range may be passed to this function.
// Also, adding the time-zone offset to the input must not overflow.
// The function EquivalentTime() in date.js guarantees this.
int64_t Win32Time::LocalOffset(TimezoneCache* cache) {
cache->InitializeIfNeeded();
Win32Time rounded_to_second(*this);
rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
1000 * kTimeScaler;
// Convert to local time using POSIX localtime function.
// Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime()
// very slow. Other browsers use localtime().
// Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to
// POSIX seconds past 1/1/1970 0:00:00.
double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000;
if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) {
return 0;
}
// Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int.
time_t posix_time = static_cast<time_t>(unchecked_posix_time);
// Convert to local time, as struct with fields for day, hour, year, etc.
tm posix_local_time_struct;
if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
if (posix_local_time_struct.tm_isdst > 0) {
return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute;
} else if (posix_local_time_struct.tm_isdst == 0) {
return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute;
} else {
return cache->tzinfo_.Bias * -kMsPerMinute;
}
FILETIME local;
SYSTEMTIME system_utc, system_local;
FileTimeToSystemTime(&time_.ft_, &system_utc);
SystemTimeToTzSpecificLocalTime(NULL, &system_utc, &system_local);
SystemTimeToFileTime(&system_local, &local);
return (FileTimeToInt64(local) - FileTimeToInt64(time_.ft_)) / kTimeScaler;
}
@ -832,7 +805,7 @@ void OS::Abort() {
void OS::DebugBreak() {
#ifdef _MSC_VER
#if V8_CC_MSVC
// To avoid Visual Studio runtime support the following code can be used
// instead
// __asm { int 3 }
@ -1175,11 +1148,7 @@ void OS::SignalCodeMovingGC() { }
double OS::nan_value() {
#ifdef _MSC_VER
return std::numeric_limits<double>::quiet_NaN();
#else // _MSC_VER
return NAN;
#endif // _MSC_VER
}

42
deps/v8/src/base/platform/platform.h поставляемый
Просмотреть файл

@ -21,7 +21,7 @@
#ifndef V8_BASE_PLATFORM_PLATFORM_H_
#define V8_BASE_PLATFORM_PLATFORM_H_
#include <stdarg.h>
#include <cstdarg>
#include <string>
#include <vector>
@ -29,48 +29,10 @@
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#ifdef __sun
# ifndef signbit
namespace std {
int signbit(double x);
}
# endif
#endif
#if V8_OS_QNX
#include "src/base/qnx-math.h"
#endif
// Microsoft Visual C++ specific stuff.
#if V8_LIBC_MSVCRT
#include "src/base/win32-headers.h"
#include "src/base/win32-math.h"
int strncasecmp(const char* s1, const char* s2, int n);
// Visual C++ 2013 and higher implement this function.
#if (_MSC_VER < 1800)
inline int lrint(double flt) {
int intgr;
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
__asm {
fld flt
fistp intgr
};
#else
intgr = static_cast<int>(flt + 0.5);
if ((intgr & 1) != 0 && intgr - flt == 0.5) {
// If the number is halfway between two integers, round to the even one.
intgr--;
}
#endif
return intgr;
}
#endif // _MSC_VER < 1800
#endif // V8_LIBC_MSVCRT
namespace v8 {
namespace base {
@ -79,7 +41,7 @@ namespace base {
#ifndef V8_NO_FAST_TLS
#if defined(_MSC_VER) && (V8_HOST_ARCH_IA32)
#if V8_CC_MSVC && V8_HOST_ARCH_IA32
#define V8_FAST_TLS_SUPPORTED 1

2
deps/v8/src/base/sys-info.h поставляемый
Просмотреть файл

@ -5,7 +5,7 @@
#ifndef V8_BASE_SYS_INFO_H_
#define V8_BASE_SYS_INFO_H_
#include "include/v8stdint.h"
#include <stdint.h>
#include "src/base/compiler-specific.h"
namespace v8 {

Просмотреть файл

@ -102,6 +102,13 @@ double RandomNumberGenerator::NextDouble() {
}
int64_t RandomNumberGenerator::NextInt64() {
uint64_t lo = bit_cast<unsigned>(Next(32));
uint64_t hi = bit_cast<unsigned>(Next(32));
return lo | (hi << 32);
}
void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
for (size_t n = 0; n < buflen; ++n) {
static_cast<uint8_t*>(buffer)[n] = static_cast<uint8_t>(Next(8));

Просмотреть файл

@ -68,6 +68,13 @@ class RandomNumberGenerator FINAL {
// (exclusive), is pseudorandomly generated and returned.
double NextDouble() WARN_UNUSED_RESULT;
// Returns the next pseudorandom, uniformly distributed int64 value from this
// random number generator's sequence. The general contract of |NextInt64()|
// is that one 64-bit int value is pseudorandomly generated and returned.
// All 2^64 possible integer values are produced with (approximately) equal
// probability.
int64_t NextInt64() WARN_UNUSED_RESULT;
// Fills the elements of a specified array of bytes with random numbers.
void NextBytes(void* buffer, size_t buflen);

82
deps/v8/src/base/win32-math.cc поставляемый
Просмотреть файл

@ -1,82 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
// refer to The Open Group Base Specification for specification of the correct
// semantics for these functions.
// (http://www.opengroup.org/onlinepubs/000095399/)
#if defined(_MSC_VER) && (_MSC_VER < 1800)
#include "src/base/win32-headers.h"
#include <float.h> // Required for DBL_MAX and on Win32 for finite()
#include <limits.h> // Required for INT_MAX etc.
#include <cmath>
#include "src/base/win32-math.h"
#include "src/base/logging.h"
namespace std {
// Test for a NaN (not a number) value - usually defined in math.h
int isnan(double x) {
return _isnan(x);
}
// Test for infinity - usually defined in math.h
int isinf(double x) {
return (_fpclass(x) & (_FPCLASS_PINF | _FPCLASS_NINF)) != 0;
}
// Test for finite value - usually defined in math.h
int isfinite(double x) {
return _finite(x);
}
// Test if x is less than y and both nominal - usually defined in math.h
int isless(double x, double y) {
return isnan(x) || isnan(y) ? 0 : x < y;
}
// Test if x is greater than y and both nominal - usually defined in math.h
int isgreater(double x, double y) {
return isnan(x) || isnan(y) ? 0 : x > y;
}
// Classify floating point number - usually defined in math.h
int fpclassify(double x) {
// Use the MS-specific _fpclass() for classification.
int flags = _fpclass(x);
// Determine class. We cannot use a switch statement because
// the _FPCLASS_ constants are defined as flags.
if (flags & (_FPCLASS_PN | _FPCLASS_NN)) return FP_NORMAL;
if (flags & (_FPCLASS_PZ | _FPCLASS_NZ)) return FP_ZERO;
if (flags & (_FPCLASS_PD | _FPCLASS_ND)) return FP_SUBNORMAL;
if (flags & (_FPCLASS_PINF | _FPCLASS_NINF)) return FP_INFINITE;
// All cases should be covered by the code above.
DCHECK(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN));
return FP_NAN;
}
// Test sign - usually defined in math.h
int signbit(double x) {
// We need to take care of the special case of both positive
// and negative versions of zero.
if (x == 0)
return _fpclass(x) & _FPCLASS_NZ;
else
return x < 0;
}
} // namespace std
#endif // _MSC_VER

42
deps/v8/src/base/win32-math.h поставляемый
Просмотреть файл

@ -1,42 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
// refer to The Open Group Base Specification for specification of the correct
// semantics for these functions.
// (http://www.opengroup.org/onlinepubs/000095399/)
#ifndef V8_BASE_WIN32_MATH_H_
#define V8_BASE_WIN32_MATH_H_
#ifndef _MSC_VER
#error Wrong environment, expected MSVC.
#endif // _MSC_VER
// MSVC 2013+ provides implementations of all standard math functions.
#if (_MSC_VER < 1800)
enum {
FP_NAN,
FP_INFINITE,
FP_ZERO,
FP_SUBNORMAL,
FP_NORMAL
};
namespace std {
int isfinite(double x);
int isinf(double x);
int isnan(double x);
int isless(double x, double y);
int isgreater(double x, double y);
int fpclassify(double x);
int signbit(double x);
} // namespace std
#endif // _MSC_VER < 1800
#endif // V8_BASE_WIN32_MATH_H_

36
deps/v8/src/basic-block-profiler.cc поставляемый
Просмотреть файл

@ -4,37 +4,39 @@
#include "src/basic-block-profiler.h"
#include <sstream>
namespace v8 {
namespace internal {
BasicBlockProfiler::Data::Data(size_t n_blocks)
: n_blocks_(n_blocks), block_ids_(n_blocks_, -1), counts_(n_blocks_, 0) {}
: n_blocks_(n_blocks), block_ids_(n_blocks_), counts_(n_blocks_, 0) {}
BasicBlockProfiler::Data::~Data() {}
static void InsertIntoString(OStringStream* os, std::string* string) {
string->insert(string->begin(), os->c_str(), &os->c_str()[os->size()]);
static void InsertIntoString(std::ostringstream* os, std::string* string) {
string->insert(0, os->str());
}
void BasicBlockProfiler::Data::SetCode(OStringStream* os) {
void BasicBlockProfiler::Data::SetCode(std::ostringstream* os) {
InsertIntoString(os, &code_);
}
void BasicBlockProfiler::Data::SetFunctionName(OStringStream* os) {
void BasicBlockProfiler::Data::SetFunctionName(std::ostringstream* os) {
InsertIntoString(os, &function_name_);
}
void BasicBlockProfiler::Data::SetSchedule(OStringStream* os) {
void BasicBlockProfiler::Data::SetSchedule(std::ostringstream* os) {
InsertIntoString(os, &schedule_);
}
void BasicBlockProfiler::Data::SetBlockId(size_t offset, int block_id) {
void BasicBlockProfiler::Data::SetBlockId(size_t offset, size_t block_id) {
DCHECK(offset < n_blocks_);
block_ids_[offset] = block_id;
}
@ -77,33 +79,33 @@ void BasicBlockProfiler::ResetCounts() {
}
OStream& operator<<(OStream& os, const BasicBlockProfiler& p) {
os << "---- Start Profiling Data ----" << endl;
std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler& p) {
os << "---- Start Profiling Data ----" << std::endl;
typedef BasicBlockProfiler::DataList::const_iterator iterator;
for (iterator i = p.data_list_.begin(); i != p.data_list_.end(); ++i) {
os << **i;
}
os << "---- End Profiling Data ----" << endl;
os << "---- End Profiling Data ----" << std::endl;
return os;
}
OStream& operator<<(OStream& os, const BasicBlockProfiler::Data& d) {
std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler::Data& d) {
const char* name = "unknown function";
if (!d.function_name_.empty()) {
name = d.function_name_.c_str();
}
if (!d.schedule_.empty()) {
os << "schedule for " << name << endl;
os << d.schedule_.c_str() << endl;
os << "schedule for " << name << std::endl;
os << d.schedule_.c_str() << std::endl;
}
os << "block counts for " << name << ":" << endl;
os << "block counts for " << name << ":" << std::endl;
for (size_t i = 0; i < d.n_blocks_; ++i) {
os << "block " << d.block_ids_[i] << " : " << d.counts_[i] << endl;
os << "block " << d.block_ids_[i] << " : " << d.counts_[i] << std::endl;
}
os << endl;
os << std::endl;
if (!d.code_.empty()) {
os << d.code_.c_str() << endl;
os << d.code_.c_str() << std::endl;
}
return os;
}

22
deps/v8/src/basic-block-profiler.h поставляемый
Просмотреть файл

@ -5,7 +5,9 @@
#ifndef V8_BASIC_BLOCK_PROFILER_H_
#define V8_BASIC_BLOCK_PROFILER_H_
#include <iosfwd>
#include <list>
#include <string>
#include "src/v8.h"
@ -22,15 +24,16 @@ class BasicBlockProfiler {
size_t n_blocks() const { return n_blocks_; }
const uint32_t* counts() const { return &counts_[0]; }
void SetCode(OStringStream* os);
void SetFunctionName(OStringStream* os);
void SetSchedule(OStringStream* os);
void SetBlockId(size_t offset, int block_id);
void SetCode(std::ostringstream* os);
void SetFunctionName(std::ostringstream* os);
void SetSchedule(std::ostringstream* os);
void SetBlockId(size_t offset, size_t block_id);
uint32_t* GetCounterAddress(size_t offset);
private:
friend class BasicBlockProfiler;
friend OStream& operator<<(OStream& os, const BasicBlockProfiler::Data& s);
friend std::ostream& operator<<(std::ostream& os,
const BasicBlockProfiler::Data& s);
explicit Data(size_t n_blocks);
~Data();
@ -38,7 +41,7 @@ class BasicBlockProfiler {
void ResetCounts();
const size_t n_blocks_;
std::vector<int> block_ids_;
std::vector<size_t> block_ids_;
std::vector<uint32_t> counts_;
std::string function_name_;
std::string schedule_;
@ -57,15 +60,16 @@ class BasicBlockProfiler {
const DataList* data_list() { return &data_list_; }
private:
friend OStream& operator<<(OStream& os, const BasicBlockProfiler& s);
friend std::ostream& operator<<(std::ostream& os,
const BasicBlockProfiler& s);
DataList data_list_;
DISALLOW_COPY_AND_ASSIGN(BasicBlockProfiler);
};
OStream& operator<<(OStream& os, const BasicBlockProfiler& s);
OStream& operator<<(OStream& os, const BasicBlockProfiler::Data& s);
std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler& s);
std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler::Data& s);
} // namespace internal
} // namespace v8

1
deps/v8/src/bignum-dtoa.cc поставляемый
Просмотреть файл

@ -4,7 +4,6 @@
#include <cmath>
#include "include/v8stdint.h"
#include "src/base/logging.h"
#include "src/utils.h"

Просмотреть файл

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/data-flow.h"
#include "src/bit-vector.h"
#include "src/base/bits.h"
#include "src/scopes.h"
@ -28,12 +28,12 @@ void BitVector::Print() {
void BitVector::Iterator::Advance() {
current_++;
uint32_t val = current_value_;
uintptr_t val = current_value_;
while (val == 0) {
current_index_++;
if (Done()) return;
val = target_->data_[current_index_];
current_ = current_index_ << 5;
current_ = current_index_ << kDataBitShift;
}
val = SkipZeroBytes(val);
val = SkipZeroBits(val);
@ -44,8 +44,12 @@ void BitVector::Iterator::Advance() {
int BitVector::Count() const {
int count = 0;
for (int i = 0; i < data_length_; i++) {
int data = data_[i];
if (data != 0) count += base::bits::CountPopulation32(data);
uintptr_t data = data_[i];
if (sizeof(data) == 8) {
count += base::bits::CountPopulation64(data);
} else {
count += base::bits::CountPopulation32(static_cast<uint32_t>(data));
}
}
return count;
}

Просмотреть файл

@ -15,7 +15,7 @@
namespace v8 {
namespace internal {
class BitVector: public ZoneObject {
class BitVector : public ZoneObject {
public:
// Iterator for the elements of this BitVector.
class Iterator BASE_EMBEDDED {
@ -28,7 +28,7 @@ class BitVector: public ZoneObject {
DCHECK(target->data_length_ > 0);
Advance();
}
~Iterator() { }
~Iterator() {}
bool Done() const { return current_index_ >= target_->data_length_; }
void Advance();
@ -39,14 +39,14 @@ class BitVector: public ZoneObject {
}
private:
uint32_t SkipZeroBytes(uint32_t val) {
uintptr_t SkipZeroBytes(uintptr_t val) {
while ((val & 0xFF) == 0) {
val >>= 8;
current_ += 8;
}
return val;
}
uint32_t SkipZeroBits(uint32_t val) {
uintptr_t SkipZeroBits(uintptr_t val) {
while ((val & 0x1) == 0) {
val >>= 1;
current_++;
@ -56,16 +56,20 @@ class BitVector: public ZoneObject {
BitVector* target_;
int current_index_;
uint32_t current_value_;
uintptr_t current_value_;
int current_;
friend class BitVector;
};
static const int kDataBits = kPointerSize * 8;
static const int kDataBitShift = kPointerSize == 8 ? 6 : 5;
static const uintptr_t kOne = 1; // This saves some static_casts.
BitVector(int length, Zone* zone)
: length_(length),
data_length_(SizeFor(length)),
data_(zone->NewArray<uint32_t>(data_length_)) {
data_(zone->NewArray<uintptr_t>(data_length_)) {
DCHECK(length > 0);
Clear();
}
@ -73,18 +77,11 @@ class BitVector: public ZoneObject {
BitVector(const BitVector& other, Zone* zone)
: length_(other.length()),
data_length_(SizeFor(length_)),
data_(zone->NewArray<uint32_t>(data_length_)) {
data_(zone->NewArray<uintptr_t>(data_length_)) {
CopyFrom(other);
}
static int SizeFor(int length) {
return 1 + ((length - 1) / 32);
}
BitVector& operator=(const BitVector& rhs) {
if (this != &rhs) CopyFrom(rhs);
return *this;
}
static int SizeFor(int length) { return 1 + ((length - 1) / kDataBits); }
void CopyFrom(const BitVector& other) {
DCHECK(other.length() <= length());
@ -98,18 +95,18 @@ class BitVector: public ZoneObject {
bool Contains(int i) const {
DCHECK(i >= 0 && i < length());
uint32_t block = data_[i / 32];
return (block & (1U << (i % 32))) != 0;
uintptr_t block = data_[i / kDataBits];
return (block & (kOne << (i % kDataBits))) != 0;
}
void Add(int i) {
DCHECK(i >= 0 && i < length());
data_[i / 32] |= (1U << (i % 32));
data_[i / kDataBits] |= (kOne << (i % kDataBits));
}
void Remove(int i) {
DCHECK(i >= 0 && i < length());
data_[i / 32] &= ~(1U << (i % 32));
data_[i / kDataBits] &= ~(kOne << (i % kDataBits));
}
void Union(const BitVector& other) {
@ -123,7 +120,7 @@ class BitVector: public ZoneObject {
DCHECK(other.length() == length());
bool changed = false;
for (int i = 0; i < data_length_; i++) {
uint32_t old_data = data_[i];
uintptr_t old_data = data_[i];
data_[i] |= other.data_[i];
if (data_[i] != old_data) changed = true;
}
@ -141,7 +138,7 @@ class BitVector: public ZoneObject {
DCHECK(other.length() == length());
bool changed = false;
for (int i = 0; i < data_length_; i++) {
uint32_t old_data = data_[i];
uintptr_t old_data = data_[i];
data_[i] &= other.data_[i];
if (data_[i] != old_data) changed = true;
}
@ -184,9 +181,11 @@ class BitVector: public ZoneObject {
#endif
private:
int length_;
int data_length_;
uint32_t* data_;
const int length_;
const int data_length_;
uintptr_t* const data_;
DISALLOW_COPY_AND_ASSIGN(BitVector);
};
@ -195,19 +194,19 @@ class GrowableBitVector BASE_EMBEDDED {
class Iterator BASE_EMBEDDED {
public:
Iterator(const GrowableBitVector* target, Zone* zone)
: it_(target->bits_ == NULL
? new(zone) BitVector(1, zone)
: target->bits_) { }
: it_(target->bits_ == NULL ? new (zone) BitVector(1, zone)
: target->bits_) {}
bool Done() const { return it_.Done(); }
void Advance() { it_.Advance(); }
int Current() const { return it_.Current(); }
private:
BitVector::Iterator it_;
};
GrowableBitVector() : bits_(NULL) { }
GrowableBitVector() : bits_(NULL) {}
GrowableBitVector(int length, Zone* zone)
: bits_(new(zone) BitVector(length, zone)) { }
: bits_(new (zone) BitVector(length, zone)) {}
bool Contains(int value) const {
if (!InBitsRange(value)) return false;
@ -225,7 +224,9 @@ class GrowableBitVector BASE_EMBEDDED {
}
}
void Clear() { if (bits_ != NULL) bits_->Clear(); }
void Clear() {
if (bits_ != NULL) bits_->Clear();
}
private:
static const int kInitialLength = 1024;
@ -238,7 +239,7 @@ class GrowableBitVector BASE_EMBEDDED {
if (InBitsRange(value)) return;
int new_length = bits_ == NULL ? kInitialLength : bits_->length();
while (new_length <= value) new_length *= 2;
BitVector* new_bits = new(zone) BitVector(new_length, zone);
BitVector* new_bits = new (zone) BitVector(new_length, zone);
if (bits_ != NULL) new_bits->CopyFrom(*bits_);
bits_ = new_bits;
}

181
deps/v8/src/bootstrapper.cc поставляемый
Просмотреть файл

@ -57,6 +57,8 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
Handle<String> source_code = isolate_->factory()
->NewExternalStringFromOneByte(resource)
.ToHandleChecked();
// Mark this external string with a special map.
source_code->set_map(isolate_->heap()->native_source_string_map());
heap->natives_source_cache()->set(index, *source_code);
}
Handle<Object> cached_source(heap->natives_source_cache()->get(index),
@ -126,7 +128,7 @@ char* Bootstrapper::AllocateAutoDeletedArray(int bytes) {
void Bootstrapper::TearDown() {
if (delete_these_non_arrays_on_tear_down_ != NULL) {
int len = delete_these_non_arrays_on_tear_down_->length();
DCHECK(len < 28); // Don't use this mechanism for unbounded allocations.
DCHECK(len < 1000); // Don't use this mechanism for unbounded allocations.
for (int i = 0; i < len; i++) {
delete delete_these_non_arrays_on_tear_down_->at(i);
delete_these_non_arrays_on_tear_down_->at(i) = NULL;
@ -208,6 +210,16 @@ class Genesis BASE_EMBEDDED {
// Used for creating a context from scratch.
void InstallNativeFunctions();
void InstallExperimentalNativeFunctions();
#define DECLARE_FEATURE_INITIALIZATION(id, descr) \
void InstallNativeFunctions_##id(); \
void InitializeGlobal_##id();
HARMONY_INPROGRESS(DECLARE_FEATURE_INITIALIZATION)
HARMONY_STAGED(DECLARE_FEATURE_INITIALIZATION)
HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
#undef DECLARE_FEATURE_INITIALIZATION
Handle<JSFunction> InstallInternalArray(Handle<JSBuiltinsObject> builtins,
const char* name,
ElementsKind elements_kind);
@ -507,7 +519,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// prototype, otherwise the missing initial_array_prototype will cause
// assertions during startup.
native_context()->set_initial_array_prototype(*prototype);
Accessors::FunctionSetPrototype(object_fun, prototype);
Accessors::FunctionSetPrototype(object_fun, prototype).Assert();
}
// Allocate the empty function as the prototype for function ECMAScript
@ -1328,6 +1340,11 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
delegate->shared()->DontAdaptArguments();
}
#define FEATURE_INITIALIZE_GLOBAL(id, descr) InitializeGlobal_##id();
HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
#undef FEATURE_INITIALIZE_GLOBAL
// Initialize the embedder data slot.
Handle<FixedArray> embedder_data = factory->NewFixedArray(3);
native_context()->set_embedder_data(*embedder_data);
@ -1358,16 +1375,11 @@ void Genesis::InstallTypedArray(
void Genesis::InitializeExperimentalGlobal() {
// TODO(erikcorry): Move this into Genesis::InitializeGlobal once we no
// longer need to live behind a flag.
Handle<JSObject> builtins(native_context()->builtins());
#define FEATURE_INITIALIZE_GLOBAL(id, descr) InitializeGlobal_##id();
Handle<HeapObject> flag(
FLAG_harmony_regexps ? heap()->true_value() : heap()->false_value());
PropertyAttributes attributes =
static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
Runtime::DefineObjectProperty(builtins, factory()->harmony_regexps_string(),
flag, attributes).Assert();
HARMONY_INPROGRESS(FEATURE_INITIALIZE_GLOBAL)
HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
#undef FEATURE_INITIALIZE_GLOBAL
}
@ -1503,12 +1515,6 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
.ToHandleChecked(); \
native_context()->set_##var(Type::cast(*var##_native));
#define INSTALL_NATIVE_MATH(name) \
{ \
Handle<Object> fun = \
ResolveBuiltinIdHolder(native_context(), "Math." #name); \
native_context()->set_math_##name##_fun(JSFunction::cast(*fun)); \
}
void Genesis::InstallNativeFunctions() {
HandleScope scope(isolate());
@ -1556,25 +1562,9 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(Symbol, "symbolUnscopables", unscopables_symbol);
INSTALL_NATIVE(JSFunction, "ArrayValues", array_values_iterator);
INSTALL_NATIVE_MATH(abs)
INSTALL_NATIVE_MATH(acos)
INSTALL_NATIVE_MATH(asin)
INSTALL_NATIVE_MATH(atan)
INSTALL_NATIVE_MATH(atan2)
INSTALL_NATIVE_MATH(ceil)
INSTALL_NATIVE_MATH(cos)
INSTALL_NATIVE_MATH(exp)
INSTALL_NATIVE_MATH(floor)
INSTALL_NATIVE_MATH(imul)
INSTALL_NATIVE_MATH(log)
INSTALL_NATIVE_MATH(max)
INSTALL_NATIVE_MATH(min)
INSTALL_NATIVE_MATH(pow)
INSTALL_NATIVE_MATH(random)
INSTALL_NATIVE_MATH(round)
INSTALL_NATIVE_MATH(sin)
INSTALL_NATIVE_MATH(sqrt)
INSTALL_NATIVE_MATH(tan)
#define INSTALL_NATIVE_FUNCTIONS_FOR(id, descr) InstallNativeFunctions_##id();
HARMONY_SHIPPING(INSTALL_NATIVE_FUNCTIONS_FOR)
#undef INSTALL_NATIVE_FUNCTIONS_FOR
}
@ -1585,10 +1575,65 @@ void Genesis::InstallExperimentalNativeFunctions() {
INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
}
#define INSTALL_NATIVE_FUNCTIONS_FOR(id, descr) InstallNativeFunctions_##id();
HARMONY_INPROGRESS(INSTALL_NATIVE_FUNCTIONS_FOR)
HARMONY_STAGED(INSTALL_NATIVE_FUNCTIONS_FOR)
#undef INSTALL_NATIVE_FUNCTIONS_FOR
}
#define EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(id) \
void Genesis::InstallNativeFunctions_##id() {}
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_scoping)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_modules)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_strings)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_arrays)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_classes)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_object_literals)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_regexps)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_arrow_functions)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_numeric_literals)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_tostring)
void Genesis::InstallNativeFunctions_harmony_proxies() {
if (FLAG_harmony_proxies) {
INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
}
}
#undef INSTALL_NATIVE
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_scoping)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_modules)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_strings)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrays)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_classes)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_literals)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrow_functions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_literals)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tostring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_proxies)
void Genesis::InitializeGlobal_harmony_regexps() {
Handle<JSObject> builtins(native_context()->builtins());
Handle<HeapObject> flag(FLAG_harmony_regexps ? heap()->true_value()
: heap()->false_value());
PropertyAttributes attributes =
static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
Runtime::DefineObjectProperty(builtins, factory()->harmony_regexps_string(),
flag, attributes).Assert();
}
Handle<JSFunction> Genesis::InstallInternalArray(
Handle<JSBuiltinsObject> builtins,
@ -1697,7 +1742,7 @@ bool Genesis::InstallNatives() {
isolate()->initial_object_prototype(), Builtins::kIllegal);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
Accessors::FunctionSetPrototype(script_fun, prototype);
Accessors::FunctionSetPrototype(script_fun, prototype).Assert();
native_context()->set_script_function(*script_fun);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
@ -1839,7 +1884,7 @@ bool Genesis::InstallNatives() {
isolate()->initial_object_prototype(), Builtins::kIllegal);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
Accessors::FunctionSetPrototype(opaque_reference_fun, prototype);
Accessors::FunctionSetPrototype(opaque_reference_fun, prototype).Assert();
native_context()->set_opaque_reference_function(*opaque_reference_fun);
}
@ -1980,7 +2025,7 @@ bool Genesis::InstallNatives() {
// Apply embeds an IC, so we need a type vector of size 1 in the shared
// function info.
Handle<TypeFeedbackVector> feedback_vector =
factory()->NewTypeFeedbackVector(1);
factory()->NewTypeFeedbackVector(0, 1);
apply->shared()->set_feedback_vector(*feedback_vector);
}
@ -2095,23 +2140,51 @@ bool Genesis::InstallNatives() {
}
#define INSTALL_EXPERIMENTAL_NATIVE(i, flag, file) \
if (FLAG_harmony_##flag && \
strcmp(ExperimentalNatives::GetScriptName(i).start(), \
"native " file) == 0) { \
if (!CompileExperimentalBuiltin(isolate(), i)) return false; \
#define INSTALL_EXPERIMENTAL_NATIVE(i, flag, file) \
if (FLAG_##flag && \
strcmp(ExperimentalNatives::GetScriptName(i).start(), "native " file) == \
0) { \
if (!CompileExperimentalBuiltin(isolate(), i)) return false; \
}
bool Genesis::InstallExperimentalNatives() {
static const char* harmony_arrays_natives[] = {
"native harmony-array.js", "native harmony-typedarray.js", NULL};
static const char* harmony_proxies_natives[] = {"native proxy.js", NULL};
static const char* harmony_strings_natives[] = {"native harmony-string.js",
NULL};
static const char* harmony_classes_natives[] = {"native harmony-classes.js",
NULL};
static const char* harmony_modules_natives[] = {NULL};
static const char* harmony_scoping_natives[] = {NULL};
static const char* harmony_object_literals_natives[] = {NULL};
static const char* harmony_regexps_natives[] = {NULL};
static const char* harmony_arrow_functions_natives[] = {NULL};
static const char* harmony_numeric_literals_natives[] = {NULL};
static const char* harmony_tostring_natives[] = {"native harmony-tostring.js",
NULL};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount();
i++) {
INSTALL_EXPERIMENTAL_NATIVE(i, proxies, "proxy.js")
INSTALL_EXPERIMENTAL_NATIVE(i, strings, "harmony-string.js")
INSTALL_EXPERIMENTAL_NATIVE(i, arrays, "harmony-array.js")
INSTALL_EXPERIMENTAL_NATIVE(i, classes, "harmony-classes.js")
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
#define INSTALL_EXPERIMENTAL_NATIVES(id, desc) \
if (FLAG_##id) { \
for (size_t j = 0; id##_natives[j] != NULL; j++) { \
if (strcmp(ExperimentalNatives::GetScriptName(i).start(), \
id##_natives[j]) == 0) { \
if (!CompileExperimentalBuiltin(isolate(), i)) return false; \
} \
} \
}
// Iterate over flags that are not enabled by default.
HARMONY_INPROGRESS(INSTALL_EXPERIMENTAL_NATIVES);
HARMONY_STAGED(INSTALL_EXPERIMENTAL_NATIVES);
#undef INSTALL_EXPERIMENTAL_NATIVES
}
#define USE_NATIVES_FOR_FEATURE(id, descr) USE(id##_natives);
HARMONY_SHIPPING(USE_NATIVES_FOR_FEATURE)
#undef USE_NATIVES_FOR_FEATURE
InstallExperimentalNativeFunctions();
return true;
@ -2588,20 +2661,24 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
class NoTrackDoubleFieldsForSerializerScope {
public:
explicit NoTrackDoubleFieldsForSerializerScope(Isolate* isolate)
: flag_(FLAG_track_double_fields) {
: flag_(FLAG_track_double_fields), enabled_(false) {
if (isolate->serializer_enabled()) {
// Disable tracking double fields because heap numbers treated as
// immutable by the serializer.
FLAG_track_double_fields = false;
enabled_ = true;
}
}
~NoTrackDoubleFieldsForSerializerScope() {
FLAG_track_double_fields = flag_;
if (enabled_) {
FLAG_track_double_fields = flag_;
}
}
private:
bool flag_;
bool enabled_;
};

89
deps/v8/src/builtins.cc поставляемый
Просмотреть файл

@ -182,23 +182,25 @@ static void MoveDoubleElements(FixedDoubleArray* dst, int dst_index,
}
static bool ArrayPrototypeHasNoElements(Heap* heap,
Context* native_context,
JSObject* array_proto) {
static bool ArrayPrototypeHasNoElements(Heap* heap, PrototypeIterator* iter) {
DisallowHeapAllocation no_gc;
// This method depends on non writability of Object and Array prototype
// fields.
if (array_proto->elements() != heap->empty_fixed_array()) return false;
// Object.prototype
PrototypeIterator iter(heap->isolate(), array_proto);
if (iter.IsAtEnd()) {
return false;
for (; !iter->IsAtEnd(); iter->Advance()) {
if (iter->GetCurrent()->IsJSProxy()) return false;
if (JSObject::cast(iter->GetCurrent())->elements() !=
heap->empty_fixed_array()) {
return false;
}
}
array_proto = JSObject::cast(iter.GetCurrent());
if (array_proto != native_context->initial_object_prototype()) return false;
if (array_proto->elements() != heap->empty_fixed_array()) return false;
iter.Advance();
return iter.IsAtEnd();
return true;
}
static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
JSArray* receiver) {
if (!FLAG_clever_optimizations) return false;
DisallowHeapAllocation no_gc;
PrototypeIterator iter(heap->isolate(), receiver);
return ArrayPrototypeHasNoElements(heap, &iter);
}
@ -213,13 +215,13 @@ static inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
// If there may be elements accessors in the prototype chain, the fast path
// cannot be used if there arguments to add to the array.
if (args != NULL && array->map()->DictionaryElementsInPrototypeChainOnly()) {
Heap* heap = isolate->heap();
if (args != NULL && !IsJSArrayFastElementMovingAllowed(heap, *array)) {
return MaybeHandle<FixedArrayBase>();
}
if (array->map()->is_observed()) return MaybeHandle<FixedArrayBase>();
if (!array->map()->is_extensible()) return MaybeHandle<FixedArrayBase>();
Handle<FixedArrayBase> elms(array->elements(), isolate);
Heap* heap = isolate->heap();
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
if (args == NULL || array->HasFastObjectElements()) return elms;
@ -264,19 +266,6 @@ static inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
}
static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
JSArray* receiver) {
if (!FLAG_clever_optimizations) return false;
DisallowHeapAllocation no_gc;
Context* native_context = heap->isolate()->context()->native_context();
JSObject* array_proto =
JSObject::cast(native_context->array_function()->prototype());
PrototypeIterator iter(heap->isolate(), receiver);
return iter.GetCurrent() == array_proto &&
ArrayPrototypeHasNoElements(heap, native_context, array_proto);
}
MUST_USE_RESULT static Object* CallJsBuiltin(
Isolate* isolate,
const char* name,
@ -453,8 +442,7 @@ BUILTIN(ArrayShift) {
EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
Handle<FixedArrayBase> elms_obj;
if (!maybe_elms_obj.ToHandle(&elms_obj) ||
!IsJSArrayFastElementMovingAllowed(heap,
*Handle<JSArray>::cast(receiver))) {
!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(*receiver))) {
return CallJsBuiltin(isolate, "ArrayShift", args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@ -499,11 +487,9 @@ BUILTIN(ArrayUnshift) {
Heap* heap = isolate->heap();
Handle<Object> receiver = args.receiver();
MaybeHandle<FixedArrayBase> maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
Handle<FixedArrayBase> elms_obj;
if (!maybe_elms_obj.ToHandle(&elms_obj) ||
!IsJSArrayFastElementMovingAllowed(heap,
*Handle<JSArray>::cast(receiver))) {
if (!maybe_elms_obj.ToHandle(&elms_obj)) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@ -524,9 +510,6 @@ BUILTIN(ArrayUnshift) {
Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
JSObject::EnsureCanContainElements(array, &args, 1, to_add,
DONT_ALLOW_DOUBLE_ELEMENTS);
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
@ -708,9 +691,7 @@ BUILTIN(ArraySplice) {
MaybeHandle<FixedArrayBase> maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3);
Handle<FixedArrayBase> elms_obj;
if (!maybe_elms_obj.ToHandle(&elms_obj) ||
!IsJSArrayFastElementMovingAllowed(heap,
*Handle<JSArray>::cast(receiver))) {
if (!maybe_elms_obj.ToHandle(&elms_obj)) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@ -928,9 +909,10 @@ BUILTIN(ArrayConcat) {
DisallowHeapAllocation no_gc;
Heap* heap = isolate->heap();
Context* native_context = isolate->context()->native_context();
JSObject* array_proto =
JSObject::cast(native_context->array_function()->prototype());
if (!ArrayPrototypeHasNoElements(heap, native_context, array_proto)) {
Object* array_proto = native_context->array_function()->prototype();
PrototypeIterator iter(isolate, array_proto,
PrototypeIterator::START_AT_RECEIVER);
if (!ArrayPrototypeHasNoElements(heap, &iter)) {
AllowHeapAllocation allow_allocation;
return CallJsBuiltin(isolate, "ArrayConcatJS", args);
}
@ -1279,11 +1261,6 @@ static void Generate_KeyedLoadIC_Generic(MacroAssembler* masm) {
}
static void Generate_KeyedLoadIC_String(MacroAssembler* masm) {
KeyedLoadIC::GenerateString(masm);
}
static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
KeyedLoadIC::GeneratePreMonomorphic(masm);
}
@ -1314,6 +1291,16 @@ static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
}
static void Generate_KeyedStoreIC_Megamorphic(MacroAssembler* masm) {
KeyedStoreIC::GenerateMegamorphic(masm, SLOPPY);
}
static void Generate_KeyedStoreIC_Megamorphic_Strict(MacroAssembler* masm) {
KeyedStoreIC::GenerateMegamorphic(masm, STRICT);
}
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
KeyedStoreIC::GenerateGeneric(masm, SLOPPY);
}
@ -1569,7 +1556,7 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
PROFILE(isolate,
CodeCreateEvent(Logger::BUILTIN_TAG, *code, functions[i].s_name));
builtins_[i] = *code;
if (code->kind() == Code::BUILTIN) code->set_builtin_index(i);
code->set_builtin_index(i);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {
CodeTracer::Scope trace_scope(isolate->GetCodeTracer());

4
deps/v8/src/builtins.h поставляемый
Просмотреть файл

@ -88,19 +88,21 @@ enum BuiltinExtraArguments {
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
kNoExtraICState) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC, kNoExtraICState) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState) \
\
V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, StoreIC::kStrictModeState) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, kNoExtraICState) \
V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
kNoExtraICState) \
V(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, MEGAMORPHIC, kNoExtraICState) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, GENERIC, kNoExtraICState) \
\
V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
StoreIC::kStrictModeState) \
V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
StoreIC::kStrictModeState) \
V(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
StoreIC::kStrictModeState) \
V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
StoreIC::kStrictModeState) \
V(KeyedStoreIC_SloppyArguments, KEYED_STORE_IC, MONOMORPHIC, \

2
deps/v8/src/cached-powers.cc поставляемый
Просмотреть файл

@ -4,9 +4,9 @@
#include <limits.h>
#include <stdarg.h>
#include <stdint.h>
#include <cmath>
#include "include/v8stdint.h"
#include "src/base/logging.h"
#include "src/cached-powers.h"
#include "src/globals.h"

42
deps/v8/src/char-predicates.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,42 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/char-predicates.h"
#ifdef V8_I18N_SUPPORT
#include "unicode/uchar.h"
#include "unicode/urename.h"
#endif // V8_I18N_SUPPORT
namespace v8 {
namespace internal {
bool SupplementaryPlanes::IsIDStart(uc32 c) {
DCHECK(c > 0xFFFF);
#ifdef V8_I18N_SUPPORT
// This only works for code points in the SMPs, since ICU does not exclude
// code points with properties 'Pattern_Syntax' or 'Pattern_White_Space'.
// Code points in the SMP do not have those properties.
return u_isIDStart(c);
#else
// This is incorrect, but if we don't have ICU, use this as fallback.
return false;
#endif // V8_I18N_SUPPORT
}
bool SupplementaryPlanes::IsIDPart(uc32 c) {
DCHECK(c > 0xFFFF);
#ifdef V8_I18N_SUPPORT
// This only works for code points in the SMPs, since ICU does not exclude
// code points with properties 'Pattern_Syntax' or 'Pattern_White_Space'.
// Code points in the SMP do not have those properties.
return u_isIDPart(c);
#else
// This is incorrect, but if we don't have ICU, use this as fallback.
return false;
#endif // V8_I18N_SUPPORT
}
}
} // namespace v8::internal

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше