Remove V8 backports that are no longer needed

Aleksei Kuzmin 2018-03-26 15:43:19 +03:00
Parent 1eb8618056
Commit 3257a5b437
11 changed files with 0 additions and 944 deletions

View file

@@ -12,50 +12,6 @@ patches:
owners: zcbenz
file: ostreams.patch
description: null
--
-  owners: codebytere
-  file: cherry_pick_37a3a15c3.patch
-  description: null
--
-  owners: alespergl
-  file: backport_3ecb047.patch
-  description: |
-    Fixes security bug
-    https://bugs.chromium.org/p/project-zero/issues/detail?id=1445
--
-  owners: alespergl
-  file: backport_a2b91ed.patch
-  description: |
-    Fixes security bug
-    https://bugs.chromium.org/p/project-zero/issues/detail?id=1446
--
-  owners: codebytere
-  file: cherry_pick_c690f54d95802.patch
-  description: null
--
-  owners: codebytere
-  file: cherry_pick_98c40a4bae915.patch
-  description: null
--
-  owners: codebytere
-  file: backport_14ac02c.patch
-  description: null
--
-  owners: codebytere
-  file: backport_76c3ac5.patch
-  description: null
--
-  owners: codebytere
-  file: cherry_pick_c3458a8.patch
-  description: null
--
-  owners: codebytere
-  file: cherry_pick_50f7455.patch
-  description: null
--
-  owners: codebytere
-  file: cherry_pick_ac0fe8ec.patch
-  description: null
-
owners: alexeykuzmin,alespergl
file: export_platform.patch

View file

@@ -1,110 +0,0 @@
diff --git a/src/profiler/profiler-listener.cc b/src/profiler/profiler-listener.cc
index 169b12da07..540d930024 100644
--- a/src/profiler/profiler-listener.cc
+++ b/src/profiler/profiler-listener.cc
@@ -16,11 +16,7 @@ namespace internal {
ProfilerListener::ProfilerListener(Isolate* isolate)
: function_and_resource_names_(isolate->heap()) {}
-ProfilerListener::~ProfilerListener() {
- for (auto code_entry : code_entries_) {
- delete code_entry;
- }
-}
+ProfilerListener::~ProfilerListener() = default;
void ProfilerListener::CallbackEvent(Name* name, Address entry_point) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
@@ -286,19 +282,23 @@ CodeEntry* ProfilerListener::NewCodeEntry(
CodeEventListener::LogEventsAndTags tag, const char* name,
const char* name_prefix, const char* resource_name, int line_number,
int column_number, JITLineInfoTable* line_info, Address instruction_start) {
- CodeEntry* code_entry =
- new CodeEntry(tag, name, name_prefix, resource_name, line_number,
- column_number, line_info, instruction_start);
- code_entries_.push_back(code_entry);
- return code_entry;
+ std::unique_ptr<CodeEntry> code_entry = base::make_unique<CodeEntry>(
+ tag, name, name_prefix, resource_name, line_number, column_number,
+ line_info, instruction_start);
+ CodeEntry* raw_code_entry = code_entry.get();
+ code_entries_.push_back(std::move(code_entry));
+ return raw_code_entry;
}
void ProfilerListener::AddObserver(CodeEventObserver* observer) {
base::LockGuard<base::Mutex> guard(&mutex_);
- if (std::find(observers_.begin(), observers_.end(), observer) !=
- observers_.end())
- return;
- observers_.push_back(observer);
+ if (observers_.empty()) {
+ code_entries_.clear();
+ }
+ if (std::find(observers_.begin(), observers_.end(), observer) ==
+ observers_.end()) {
+ observers_.push_back(observer);
+ }
}
void ProfilerListener::RemoveObserver(CodeEventObserver* observer) {
diff --git a/src/profiler/profiler-listener.h b/src/profiler/profiler-listener.h
index f4a9e24c7d..440afd87a2 100644
--- a/src/profiler/profiler-listener.h
+++ b/src/profiler/profiler-listener.h
@@ -74,6 +74,7 @@ class ProfilerListener : public CodeEventListener {
const char* GetFunctionName(const char* name) {
return function_and_resource_names_.GetFunctionName(name);
}
+ size_t entries_count_for_test() const { return code_entries_.size(); }
private:
void RecordInliningInfo(CodeEntry* entry, AbstractCode* abstract_code);
@@ -87,7 +88,7 @@ class ProfilerListener : public CodeEventListener {
}
StringsStorage function_and_resource_names_;
- std::vector<CodeEntry*> code_entries_;
+ std::vector<std::unique_ptr<CodeEntry>> code_entries_;
std::vector<CodeEventObserver*> observers_;
base::Mutex mutex_;
diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc
index 0a297d9f0c..7930606d37 100644
--- a/test/cctest/test-cpu-profiler.cc
+++ b/test/cctest/test-cpu-profiler.cc
@@ -2185,6 +2185,34 @@ TEST(TracingCpuProfiler) {
i::V8::SetPlatformForTesting(old_platform);
}
+TEST(CodeEntriesMemoryLeak) {
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
+
+ std::string source = "function start() {}\n";
+ for (int i = 0; i < 1000; ++i) {
+ source += "function foo" + std::to_string(i) + "() { return " +
+ std::to_string(i) +
+ "; }\n"
+ "foo" +
+ std::to_string(i) + "();\n";
+ }
+ CompileRun(source.c_str());
+ v8::Local<v8::Function> function = GetFunction(env, "start");
+
+ ProfilerHelper helper(env);
+
+ for (int j = 0; j < 100; ++j) {
+ v8::CpuProfile* profile = helper.Run(function, nullptr, 0);
+ profile->Delete();
+ }
+ ProfilerListener* profiler_listener =
+ CcTest::i_isolate()->logger()->profiler_listener();
+
+ CHECK_GE(10000ul, profiler_listener->entries_count_for_test());
+}
+
TEST(Issue763073) {
class AllowNativesSyntax {
public:

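The ownership change above is the whole leak fix: code entries move from a raw-pointer vector that was freed only in the destructor to a std::vector of std::unique_ptr that can also be cleared when observers come and go. A minimal standalone sketch of that pattern, with illustrative names rather than V8's (V8 used base::make_unique because it targeted pre-C++14 toolchains):

#include <memory>
#include <vector>

struct CodeEntry { /* payload elided */ };

class EntryPool {
 public:
  // Hands out a non-owning pointer; the vector retains ownership, so
  // everything is released by Clear() or by the pool's destructor.
  CodeEntry* NewEntry() {
    std::unique_ptr<CodeEntry> entry = std::make_unique<CodeEntry>();
    CodeEntry* raw = entry.get();
    entries_.push_back(std::move(entry));
    return raw;
  }
  void Clear() { entries_.clear(); }
  size_t size() const { return entries_.size(); }

 private:
  std::vector<std::unique_ptr<CodeEntry>> entries_;
};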
View file

@@ -1,40 +0,0 @@
diff --git a/src/objects.h b/src/objects.h
index 895d92ba31..700cb84a51 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1952,6 +1952,7 @@ class PropertyArray : public HeapObject {
static const int kLengthFieldSize = 10;
class LengthField : public BitField<int, 0, kLengthFieldSize> {};
+ static const int kMaxLength = LengthField::kMax;
class HashField : public BitField<int, kLengthFieldSize,
kSmiValueSize - kLengthFieldSize - 1> {};
@@ -2651,6 +2652,8 @@ class JSObject: public JSReceiver {
// its size by more than the 1 entry necessary, so sequentially adding fields
// to the same object requires fewer allocations and copies.
static const int kFieldsAdded = 3;
+ STATIC_ASSERT(kMaxNumberOfDescriptors + kFieldsAdded <=
+ PropertyArray::kMaxLength);
// Layout description.
static const int kElementsOffset = JSReceiver::kHeaderSize;
diff --git a/src/property-details.h b/src/property-details.h
index d007a0414c..5e0ecc0424 100644
--- a/src/property-details.h
+++ b/src/property-details.h
@@ -197,10 +197,10 @@ class Representation {
static const int kDescriptorIndexBitCount = 10;
-// The maximum number of descriptors we want in a descriptor array (should
-// fit in a page).
-static const int kMaxNumberOfDescriptors =
- (1 << kDescriptorIndexBitCount) - 2;
+// The maximum number of descriptors we want in a descriptor array. It should
+// fit in a page and also the following should hold:
+// kMaxNumberOfDescriptors + kFieldsAdded <= PropertyArray::kMaxLength.
+static const int kMaxNumberOfDescriptors = (1 << kDescriptorIndexBitCount) - 4;
static const int kInvalidEnumCacheSentinel =
(1 << kDescriptorIndexBitCount) - 1;

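The arithmetic behind the patch above can be checked in isolation: a 10-bit length field tops out at 1023, so the old kMaxNumberOfDescriptors of 1022 plus kFieldsAdded (3) overflowed it, and lowering the constant to 1020 restores the invariant. A sketch with the constants mirrored from the diff:

// Constants mirrored from the diff; BitField<int, 0, 10>::kMax == (1 << 10) - 1.
constexpr int kDescriptorIndexBitCount = 10;
constexpr int kLengthFieldSize = 10;
constexpr int kMaxLength = (1 << kLengthFieldSize) - 1;       // 1023
constexpr int kFieldsAdded = 3;

constexpr int kOldMax = (1 << kDescriptorIndexBitCount) - 2;  // 1022: 1022 + 3 > 1023
constexpr int kNewMax = (1 << kDescriptorIndexBitCount) - 4;  // 1020: 1020 + 3 <= 1023

static_assert(kNewMax + kFieldsAdded <= kMaxLength,
              "descriptors plus added fields must fit in PropertyArray's length field");
static_assert(kOldMax + kFieldsAdded > kMaxLength,
              "the old bound really did overflow, which is what the patch fixes");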
View file

@@ -1,118 +0,0 @@
diff --git a/src/profiler/profiler-listener.cc b/src/profiler/profiler-listener.cc
index 540d930024..b90f5a4894 100644
--- a/src/profiler/profiler-listener.cc
+++ b/src/profiler/profiler-listener.cc
@@ -226,11 +226,18 @@ void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
SharedFunctionInfo* shared_info = SharedFunctionInfo::cast(
deopt_input_data->LiteralArray()->get(shared_info_id));
if (!depth++) continue; // Skip the current function itself.
- CodeEntry* inline_entry = new CodeEntry(
- entry->tag(), GetFunctionName(shared_info->DebugName()),
- CodeEntry::kEmptyNamePrefix, entry->resource_name(),
- CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ const char* resource_name =
+ (shared_info->script()->IsScript() &&
+ Script::cast(shared_info->script())->name()->IsName())
+ ? GetName(Name::cast(Script::cast(shared_info->script())->name()))
+ : CodeEntry::kEmptyResourceName;
+
+ CodeEntry* inline_entry =
+ new CodeEntry(entry->tag(), GetFunctionName(shared_info->DebugName()),
+ CodeEntry::kEmptyNamePrefix, resource_name,
+ CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, nullptr,
+ code->instruction_start());
inline_entry->FillFunctionInfo(shared_info);
inline_stack.push_back(inline_entry);
}
diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc
index f22a42a977..b441d04fdd 100644
--- a/test/cctest/test-cpu-profiler.cc
+++ b/test/cctest/test-cpu-profiler.cc
@@ -1745,6 +1745,85 @@ TEST(FunctionDetails) {
script_a->GetUnboundScript()->GetId(), 5, 14);
}
+TEST(FunctionDetailsInlining) {
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
+ ProfilerHelper helper(env);
+
+ // alpha is in a_script, beta in b_script. beta is
+ // inlined in alpha, but it should be attributed to b_script.
+
+ v8::Local<v8::Script> script_b = CompileWithOrigin(
+ "function beta(k) {\n"
+ " let sum = 2;\n"
+ " for(let i = 0; i < k; i ++) {\n"
+ " sum += i;\n"
+ " sum = sum + 'a';\n"
+ " }\n"
+ " return sum;\n"
+ "}\n"
+ "\n",
+ "script_b");
+
+ v8::Local<v8::Script> script_a = CompileWithOrigin(
+ "function alpha(p) {\n"
+ " let res = beta(p);\n"
+ " res = res + res;\n"
+ " return res;\n"
+ "}\n"
+ "let p = 2;\n"
+ "\n"
+ "\n"
+ "// Warm up before profiling or the inlining doesn't happen.\n"
+ "p = alpha(p);\n"
+ "p = alpha(p);\n"
+ "%OptimizeFunctionOnNextCall(alpha);\n"
+ "p = alpha(p);\n"
+ "\n"
+ "\n"
+ "startProfiling();\n"
+ "for(let i = 0; i < 10000; i++) {\n"
+ " p = alpha(p);\n"
+ "}\n"
+ "stopProfiling();\n"
+ "\n"
+ "\n",
+ "script_a");
+
+ script_b->Run(env).ToLocalChecked();
+ script_a->Run(env).ToLocalChecked();
+
+ const v8::CpuProfile* profile = i::ProfilerExtension::last_profile;
+ const v8::CpuProfileNode* current = profile->GetTopDownRoot();
+ reinterpret_cast<ProfileNode*>(const_cast<v8::CpuProfileNode*>(current))
+ ->Print(0);
+ // The tree should look like this:
+ // 0 (root) 0 #1
+ // 5 (program) 0 #6
+ // 2 14 #2 script_a:1
+ // ;;; deopted at script_id: 14 position: 299 with reason 'Insufficient
+ // type feedback for call'.
+ // 1 alpha 14 #4 script_a:1
+ // 9 beta 13 #5 script_b:0
+ // 0 startProfiling 0 #3
+
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+ const v8::CpuProfileNode* script = GetChild(env, root, "");
+ CheckFunctionDetails(env->GetIsolate(), script, "", "script_a",
+ script_a->GetUnboundScript()->GetId(), 1, 1);
+ const v8::CpuProfileNode* alpha = FindChild(env, script, "alpha");
+ // Return early if profiling didn't sample alpha.
+ if (!alpha) return;
+ CheckFunctionDetails(env->GetIsolate(), alpha, "alpha", "script_a",
+ script_a->GetUnboundScript()->GetId(), 1, 15);
+ const v8::CpuProfileNode* beta = FindChild(env, alpha, "beta");
+ if (!beta) return;
+ CheckFunctionDetails(env->GetIsolate(), beta, "beta", "script_b",
+ script_b->GetUnboundScript()->GetId(), 0, 0);
+}
TEST(DontStopOnFinishedProfileDelete) {
v8::HandleScope scope(CcTest::isolate());

View file

@@ -1,20 +0,0 @@
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 125ca932f7..dd8e4a1745 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -3550,6 +3550,7 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case JS_SET_VALUE_ITERATOR_TYPE: {
Handle<JSSetIterator> object = Handle<JSSetIterator>::cast(
isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+ slot->value_ = object;
Handle<Object> properties = materializer.FieldAt(value_index);
Handle<Object> elements = materializer.FieldAt(value_index);
Handle<Object> table = materializer.FieldAt(value_index);
@@ -3565,6 +3566,7 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case JS_MAP_VALUE_ITERATOR_TYPE: {
Handle<JSMapIterator> object = Handle<JSMapIterator>::cast(
isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+ slot->value_ = object;
Handle<Object> properties = materializer.FieldAt(value_index);
Handle<Object> elements = materializer.FieldAt(value_index);
Handle<Object> table = materializer.FieldAt(value_index);

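Both one-line insertions above follow the same cycle-breaking idiom: publish the freshly allocated object into its translated-state slot before materializing its fields, so a captured object whose fields (transitively) reference the object itself resolves to the one instance instead of allocating again. A stripped-down sketch of the idiom, using illustrative types rather than V8's:

#include <memory>
#include <vector>

struct Obj { Obj* field = nullptr; };
struct Slot { std::unique_ptr<Obj> value; };       // plays the role of slot->value_

Obj* MaterializeAt(std::vector<Slot>& slots, size_t i) {
  if (slots[i].value) return slots[i].value.get(); // already materialized
  slots[i].value = std::make_unique<Obj>();        // "slot->value_ = object;" first...
  slots[i].value->field = MaterializeAt(slots, i); // ...then fields, which may loop back
  return slots[i].value.get();
}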
View file

@@ -1,85 +0,0 @@
diff --git a/src/lookup.h b/src/lookup.h
index 25c5a6cc3b..9ea2d77cf6 100644
--- a/src/lookup.h
+++ b/src/lookup.h
@@ -22,7 +22,7 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
kInterceptor = 1 << 0,
kPrototypeChain = 1 << 1,
- // Convience combinations of bits.
+ // Convenience combinations of bits.
OWN_SKIP_INTERCEPTOR = 0,
OWN = kInterceptor,
PROTOTYPE_CHAIN_SKIP_INTERCEPTOR = kPrototypeChain,
diff --git a/src/objects.cc b/src/objects.cc
index c1a2e41bf1..28c1cd681f 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -6713,17 +6713,6 @@ Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(Isolate* isolate,
it.Next();
}
- // Handle interceptor
- if (it.state() == LookupIterator::INTERCEPTOR) {
- if (it.HolderIsReceiverOrHiddenPrototype()) {
- Maybe<bool> result = DefinePropertyWithInterceptorInternal(
- &it, it.GetInterceptor(), should_throw, *desc);
- if (result.IsNothing() || result.FromJust()) {
- return result;
- }
- }
- }
-
return OrdinaryDefineOwnProperty(&it, desc, should_throw);
}
@@ -6739,6 +6728,20 @@ Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
PropertyDescriptor current;
MAYBE_RETURN(GetOwnPropertyDescriptor(it, &current), Nothing<bool>());
+ it->Restart();
+ // Handle interceptor
+ for (; it->IsFound(); it->Next()) {
+ if (it->state() == LookupIterator::INTERCEPTOR) {
+ if (it->HolderIsReceiverOrHiddenPrototype()) {
+ Maybe<bool> result = DefinePropertyWithInterceptorInternal(
+ it, it->GetInterceptor(), should_throw, *desc);
+ if (result.IsNothing() || result.FromJust()) {
+ return result;
+ }
+ }
+ }
+ }
+
// TODO(jkummerow/verwaest): It would be nice if we didn't have to reset
// the iterator every time. Currently, the reasons why we need it are:
// - handle interceptors correctly
diff --git a/test/cctest/test-api-interceptors.cc b/test/cctest/test-api-interceptors.cc
index 265698d131..ca9b18016d 100644
--- a/test/cctest/test-api-interceptors.cc
+++ b/test/cctest/test-api-interceptors.cc
@@ -716,20 +716,21 @@ bool define_was_called_in_order = false;
void GetterCallbackOrder(Local<Name> property,
const v8::PropertyCallbackInfo<v8::Value>& info) {
get_was_called_in_order = true;
- CHECK(define_was_called_in_order);
+ CHECK(!define_was_called_in_order);
info.GetReturnValue().Set(property);
}
void DefinerCallbackOrder(Local<Name> property,
const v8::PropertyDescriptor& desc,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- CHECK(!get_was_called_in_order); // Define called before get.
+ // Get called before DefineProperty because we query the descriptor first.
+ CHECK(get_was_called_in_order);
define_was_called_in_order = true;
}
} // namespace
-// Check that definer callback is called before getter callback.
+// Check that getter callback is called before definer callback.
THREADED_TEST(DefinerCallbackGetAndDefine) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::FunctionTemplate> templ =

View file

@@ -1,124 +0,0 @@
diff --git a/src/api.cc b/src/api.cc
index 147ef1f755..83b4c4c447 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -10092,6 +10092,25 @@ void debug::QueryObjects(v8::Local<v8::Context> v8_context,
predicate, objects);
}
+void debug::GlobalLexicalScopeNames(
+ v8::Local<v8::Context> v8_context,
+ v8::PersistentValueVector<v8::String>* names) {
+ i::Handle<i::Context> context = Utils::OpenHandle(*v8_context);
+ i::Handle<i::ScriptContextTable> table(
+ context->global_object()->native_context()->script_context_table());
+ for (int i = 0; i < table->used(); i++) {
+ i::Handle<i::Context> context = i::ScriptContextTable::GetContext(table, i);
+ DCHECK(context->IsScriptContext());
+ i::Handle<i::ScopeInfo> scope_info(context->scope_info());
+ int local_count = scope_info->ContextLocalCount();
+ for (int j = 0; j < local_count; ++j) {
+ i::String* name = scope_info->ContextLocalName(j);
+ if (i::ScopeInfo::VariableIsSynthetic(name)) continue;
+ names->Append(Utils::ToLocal(handle(name)));
+ }
+ }
+}
+
Local<String> CpuProfileNode::GetFunctionName() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
i::Isolate* isolate = node->isolate();
diff --git a/src/debug/debug-interface.h b/src/debug/debug-interface.h
index 78001524b2..887208ad72 100644
--- a/src/debug/debug-interface.h
+++ b/src/debug/debug-interface.h
@@ -401,6 +401,9 @@ void QueryObjects(v8::Local<v8::Context> context,
QueryObjectPredicate* predicate,
v8::PersistentValueVector<v8::Object>* objects);
+void GlobalLexicalScopeNames(v8::Local<v8::Context> context,
+ v8::PersistentValueVector<v8::String>* names);
+
} // namespace debug
} // namespace v8
diff --git a/src/inspector/js_protocol.json b/src/inspector/js_protocol.json
index 439de72c10..0be9bd2bcc 100644
--- a/src/inspector/js_protocol.json
+++ b/src/inspector/js_protocol.json
@@ -353,6 +353,17 @@
{ "name": "objects", "$ref": "RemoteObject", "description": "Array with objects." }
],
"experimental": true
+ },
+ {
+ "name": "globalLexicalScopeNames",
+ "parameters": [
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to lookup global scope variables." }
+ ],
+ "returns": [
+ { "name": "names", "type": "array", "items": { "type": "string" } }
+ ],
+ "description": "Returns all let, const and class variables from global scope.",
+ "experimental": true
}
],
"events": [
diff --git a/src/inspector/v8-runtime-agent-impl.cc b/src/inspector/v8-runtime-agent-impl.cc
index bdae0ef7a1..e9159451f7 100644
--- a/src/inspector/v8-runtime-agent-impl.cc
+++ b/src/inspector/v8-runtime-agent-impl.cc
@@ -537,6 +537,27 @@ Response V8RuntimeAgentImpl::queryObjects(
resultArray, scope.objectGroupName(), false, false, objects);
}
+Response V8RuntimeAgentImpl::globalLexicalScopeNames(
+ Maybe<int> executionContextId,
+ std::unique_ptr<protocol::Array<String16>>* outNames) {
+ int contextId = 0;
+ Response response = ensureContext(m_inspector, m_session->contextGroupId(),
+ std::move(executionContextId), &contextId);
+ if (!response.isSuccess()) return response;
+
+ InjectedScript::ContextScope scope(m_session, contextId);
+ response = scope.initialize();
+ if (!response.isSuccess()) return response;
+
+ v8::PersistentValueVector<v8::String> names(m_inspector->isolate());
+ v8::debug::GlobalLexicalScopeNames(scope.context(), &names);
+ *outNames = protocol::Array<String16>::create();
+ for (size_t i = 0; i < names.Size(); ++i) {
+ (*outNames)->addItem(toProtocolString(names.Get(i)));
+ }
+ return Response::OK();
+}
+
void V8RuntimeAgentImpl::restore() {
if (!m_state->booleanProperty(V8RuntimeAgentImplState::runtimeEnabled, false))
return;
diff --git a/src/inspector/v8-runtime-agent-impl.h b/src/inspector/v8-runtime-agent-impl.h
index 6f3b98cf44..5f2be2b8ab 100644
--- a/src/inspector/v8-runtime-agent-impl.h
+++ b/src/inspector/v8-runtime-agent-impl.h
@@ -100,6 +100,9 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
Response queryObjects(
const String16& prototypeObjectId,
std::unique_ptr<protocol::Runtime::RemoteObject>* objects) override;
+ Response globalLexicalScopeNames(
+ Maybe<int> executionContextId,
+ std::unique_ptr<protocol::Array<String16>>* outNames) override;
void reset();
void reportExecutionContextCreated(InspectedContext*);
diff --git a/test/inspector/runtime/runtime-restore.js b/test/inspector/runtime/runtime-restore.js
index 09e44677e5..77fa823e7e 100644
--- a/test/inspector/runtime/runtime-restore.js
+++ b/test/inspector/runtime/runtime-restore.js
@@ -1,6 +1,6 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.v8
+// found in the LICENSE file.
let {session, contextGroup, Protocol} = InspectorTest.start('Checks that Runtime agent correctly restore its state.');

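Beneath the protocol plumbing, this patch exposes one embedder-visible entry point, v8::debug::GlobalLexicalScopeNames. A hedged usage sketch (assumes an entered isolate and context inside the V8 tree, since debug-interface.h is an internal header; note that in this era v8::String::Utf8Value took a single argument, while later V8 versions also require the isolate):

#include <cstdio>
#include "src/debug/debug-interface.h"

// Prints the names of global `let`/`const`/`class` bindings, mirroring what
// Runtime.globalLexicalScopeNames returns over the inspector protocol.
void DumpGlobalLexicalNames(v8::Isolate* isolate,
                            v8::Local<v8::Context> context) {
  v8::PersistentValueVector<v8::String> names(isolate);
  v8::debug::GlobalLexicalScopeNames(context, &names);
  for (size_t i = 0; i < names.Size(); ++i) {
    v8::String::Utf8Value utf8(names.Get(i));
    printf("global lexical binding: %s\n", *utf8);
  }
}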
View file

@@ -1,22 +0,0 @@
diff --git a/include/v8-platform.h b/include/v8-platform.h
index b1a57c9e2c..6c3c4292c5 100644
--- a/include/v8-platform.h
+++ b/include/v8-platform.h
@@ -199,7 +199,7 @@ class Platform {
* Returns a TaskRunner which can be used to post a task on the foreground.
* This function should only be called from a foreground thread.
*/
- virtual std::unique_ptr<v8::TaskRunner> GetForegroundTaskRunner(
+ virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
Isolate* isolate) {
// TODO(ahaas): Make this function abstract after it got implemented on all
// platforms.
@@ -210,7 +210,7 @@ class Platform {
* Returns a TaskRunner which can be used to post a task on a background.
* This function should only be called from a foreground thread.
*/
- virtual std::unique_ptr<v8::TaskRunner> GetBackgroundTaskRunner(
+ virtual std::shared_ptr<v8::TaskRunner> GetBackgroundTaskRunner(
Isolate* isolate) {
// TODO(ahaas): Make this function abstract after it got implemented on all
// platforms.

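The unique_ptr-to-shared_ptr switch above is about lifetime, not syntax: with unique_ptr every call had to surrender a distinct runner, whereas shared_ptr lets the platform cache one runner per isolate and hand out extra references that V8 and the embedder can hold concurrently. A minimal sketch of the resulting pattern (illustrative types, not the real Platform override):

#include <memory>

struct TaskRunner { /* PostTask etc. elided */ };

struct PlatformLike {
  // One cached runner; each call returns another reference to it, and the
  // runner stays alive as long as either side still holds a copy.
  std::shared_ptr<TaskRunner> GetForegroundTaskRunner() {
    if (!runner_) runner_ = std::make_shared<TaskRunner>();
    return runner_;
  }
  std::shared_ptr<TaskRunner> runner_;
};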
View file

@@ -1,284 +0,0 @@
diff --git a/src/builtins/arm/builtins-arm.cc b/src/builtins/arm/builtins-arm.cc
index 0af87f2c8a..7ca98251a9 100644
--- a/src/builtins/arm/builtins-arm.cc
+++ b/src/builtins/arm/builtins-arm.cc
@@ -1073,22 +1073,15 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Runtime::kCompileOptimized_Concurrent);
{
- // Otherwise, the marker is InOptimizationQueue.
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ cmp(
optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(eq, kExpectedOptimizationSentinel);
}
- // Checking whether the queued function is ready for install is
- // optional, since we come across interrupts and stack checks elsewhere.
- // However, not checking may delay installing ready functions, and
- // always checking would be quite expensive. A good compromise is to
- // first check against stack limit as a cue for an interrupt signal.
- __ LoadRoot(scratch2, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(scratch2));
- __ b(hs, &fallthrough);
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ __ jmp(&fallthrough);
}
}
diff --git a/src/builtins/arm64/builtins-arm64.cc b/src/builtins/arm64/builtins-arm64.cc
index 03a4995f75..ca8da67b85 100644
--- a/src/builtins/arm64/builtins-arm64.cc
+++ b/src/builtins/arm64/builtins-arm64.cc
@@ -1084,22 +1084,15 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Runtime::kCompileOptimized_Concurrent);
{
- // Otherwise, the marker is InOptimizationQueue.
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ Cmp(
optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(eq, kExpectedOptimizationSentinel);
}
-
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
- __ B(hs, &fallthrough);
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ __ B(&fallthrough);
}
}
diff --git a/src/builtins/ia32/builtins-ia32.cc b/src/builtins/ia32/builtins-ia32.cc
index 03db488b7e..9d3178dc89 100644
--- a/src/builtins/ia32/builtins-ia32.cc
+++ b/src/builtins/ia32/builtins-ia32.cc
@@ -715,24 +715,15 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Runtime::kCompileOptimized_Concurrent);
{
- // Otherwise, the marker is InOptimizationQueue.
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ cmp(
optimized_code_entry,
Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(equal, kExpectedOptimizationSentinel);
}
-
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &fallthrough);
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ __ jmp(&fallthrough);
}
}
diff --git a/src/builtins/mips/builtins-mips.cc b/src/builtins/mips/builtins-mips.cc
index b280c161d6..af214cb4b9 100644
--- a/src/builtins/mips/builtins-mips.cc
+++ b/src/builtins/mips/builtins-mips.cc
@@ -1052,21 +1052,14 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Runtime::kCompileOptimized_Concurrent);
{
- // Otherwise, the marker is InOptimizationQueue.
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ Assert(
eq, kExpectedOptimizationSentinel, optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
}
-
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- __ LoadRoot(at, Heap::kStackLimitRootIndex);
- __ Branch(&fallthrough, hs, sp, Operand(at));
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ __ jmp(&fallthrough);
}
}
diff --git a/src/builtins/mips64/builtins-mips64.cc b/src/builtins/mips64/builtins-mips64.cc
index b65a796785..fd014cc902 100644
--- a/src/builtins/mips64/builtins-mips64.cc
+++ b/src/builtins/mips64/builtins-mips64.cc
@@ -1054,21 +1054,14 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Runtime::kCompileOptimized_Concurrent);
{
- // Otherwise, the marker is InOptimizationQueue.
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ Assert(
eq, kExpectedOptimizationSentinel, optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
}
-
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- __ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ Branch(&fallthrough, hs, sp, Operand(t0));
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ __ jmp(&fallthrough);
}
}
diff --git a/src/builtins/ppc/builtins-ppc.cc b/src/builtins/ppc/builtins-ppc.cc
index 646f7f62bc..8a5ec14b42 100644
--- a/src/builtins/ppc/builtins-ppc.cc
+++ b/src/builtins/ppc/builtins-ppc.cc
@@ -1081,23 +1081,15 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Runtime::kCompileOptimized_Concurrent);
{
- // Otherwise, the marker is InOptimizationQueue.
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ CmpSmiLiteral(
optimized_code_entry,
Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
__ Assert(eq, kExpectedOptimizationSentinel);
}
-
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmpl(sp, ip);
- __ bge(&fallthrough);
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ __ b(&fallthrough);
}
}
diff --git a/src/builtins/s390/builtins-s390.cc b/src/builtins/s390/builtins-s390.cc
index c965805fc7..c9800fa287 100644
--- a/src/builtins/s390/builtins-s390.cc
+++ b/src/builtins/s390/builtins-s390.cc
@@ -1081,22 +1081,15 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Runtime::kCompileOptimized_Concurrent);
{
- // Otherwise, the marker is InOptimizationQueue.
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ CmpSmiLiteral(
optimized_code_entry,
Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
__ Assert(eq, kExpectedOptimizationSentinel);
}
-
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
- __ bge(&fallthrough, Label::kNear);
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ __ b(&fallthrough, Label::kNear);
}
}
diff --git a/src/builtins/x64/builtins-x64.cc b/src/builtins/x64/builtins-x64.cc
index 981bb65fd1..047c128106 100644
--- a/src/builtins/x64/builtins-x64.cc
+++ b/src/builtins/x64/builtins-x64.cc
@@ -798,21 +798,14 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Runtime::kCompileOptimized_Concurrent);
{
- // Otherwise, the marker is InOptimizationQueue.
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ SmiCompare(optimized_code_entry,
Smi::FromEnum(OptimizationMarker::kInOptimizationQueue));
__ Assert(equal, kExpectedOptimizationSentinel);
}
-
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &fallthrough);
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ __ jmp(&fallthrough);
}
}
diff --git a/src/runtime/runtime-compiler.cc b/src/runtime/runtime-compiler.cc
index 4b57593227..fd2df5afe3 100644
--- a/src/runtime/runtime-compiler.cc
+++ b/src/runtime/runtime-compiler.cc
@@ -340,27 +340,6 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
return NULL;
}
-
-RUNTIME_FUNCTION(Runtime_TryInstallOptimizedCode) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-
- // First check if this is a real stack overflow.
- StackLimitCheck check(isolate);
- if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
- return isolate->StackOverflow();
- }
-
- // Only try to install optimized functions if the interrupt was InstallCode.
- if (isolate->stack_guard()->CheckAndClearInstallCode()) {
- isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
- }
-
- return (function->IsOptimized()) ? function->code()
- : function->shared()->code();
-}
-
static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
Handle<SharedFunctionInfo> outer_info,
LanguageMode language_mode,
diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h
index e7084a8cca..a11d274d25 100644
--- a/src/runtime/runtime.h
+++ b/src/runtime/runtime.h
@@ -120,7 +120,6 @@ namespace internal {
F(NotifyStubFailure, 0, 1) \
F(NotifyDeoptimized, 0, 1) \
F(CompileForOnStackReplacement, 1, 1) \
- F(TryInstallOptimizedCode, 1, 1) \
F(ResolvePossiblyDirectEval, 6, 1) \
F(InstantiateAsmJs, 4, 1)

View file

@@ -1,12 +0,0 @@
diff --git a/src/parsing/parser-base.h b/src/parsing/parser-base.h
index f555dbdbe0..1788eba51c 100644
--- a/src/parsing/parser-base.h
+++ b/src/parsing/parser-base.h
@@ -3634,6 +3634,7 @@ void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters,
// BindingElement[?Yield, ?GeneratorParameter]
bool is_rest = parameters->has_rest;
+ FuncNameInferrer::State fni_state(fni_);
ExpressionT pattern = ParsePrimaryExpression(CHECK_OK_CUSTOM(Void));
ValidateBindingPattern(CHECK_OK_CUSTOM(Void));

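FuncNameInferrer::State is an RAII scope: its constructor records the inferrer's current position and its destructor rolls back, so names collected while parsing one formal parameter do not leak into the next. The generic shape of that pattern, as an illustrative sketch rather than V8's actual class:

class Inferrer {
 public:
  class State {
   public:
    explicit State(Inferrer* inf) : inf_(inf), saved_depth_(inf->depth_) {}
    ~State() { inf_->depth_ = saved_depth_; }  // roll back on scope exit
    State(const State&) = delete;
    State& operator=(const State&) = delete;

   private:
    Inferrer* inf_;
    int saved_depth_;
  };

  void Push() { ++depth_; }  // stand-in for collecting a candidate name

 private:
  int depth_ = 0;
};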
View file

@@ -1,85 +0,0 @@
diff --git a/include/v8-platform.h b/include/v8-platform.h
index ed2acc3a74..b1a57c9e2c 100644
--- a/include/v8-platform.h
+++ b/include/v8-platform.h
@@ -36,6 +36,51 @@ class IdleTask {
virtual void Run(double deadline_in_seconds) = 0;
};
+/**
+ * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used to
+ * post tasks after the isolate gets destructed, but these tasks may not get
+ * executed anymore. All tasks posted to a given TaskRunner will be invoked in
+ * sequence. Tasks can be posted from any thread.
+ */
+class TaskRunner {
+ public:
+ /**
+ * Schedules a task to be invoked by this TaskRunner. The TaskRunner
+ * implementation takes ownership of |task|.
+ */
+ virtual void PostTask(std::unique_ptr<Task> task) = 0;
+
+ /**
+ * Schedules a task to be invoked by this TaskRunner. The task is scheduled
+ * after the given number of seconds |delay_in_seconds|. The TaskRunner
+ * implementation takes ownership of |task|.
+ */
+ virtual void PostDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) = 0;
+
+ /**
+ * Schedules an idle task to be invoked by this TaskRunner. The task is
+ * scheduled when the embedder is idle. Requires that
+ * TaskRunner::SupportsIdleTasks(isolate) is true. Idle tasks may be reordered
+ * relative to other task types and may be starved for an arbitrarily long
+ * time if no idle time is available. The TaskRunner implementation takes
+ * ownership of |task|.
+ */
+ virtual void PostIdleTask(std::unique_ptr<IdleTask> task) = 0;
+
+ /**
+ * Returns true if idle tasks are enabled for this TaskRunner.
+ */
+ virtual bool IdleTasksEnabled() = 0;
+
+ TaskRunner() = default;
+ virtual ~TaskRunner() = default;
+
+ private:
+ TaskRunner(const TaskRunner&) = delete;
+ TaskRunner& operator=(const TaskRunner&) = delete;
+};
+
/**
* The interface represents complex arguments to trace events.
*/
@@ -150,6 +195,28 @@ class Platform {
*/
virtual size_t NumberOfAvailableBackgroundThreads() { return 0; }
+ /**
+ * Returns a TaskRunner which can be used to post a task on the foreground.
+ * This function should only be called from a foreground thread.
+ */
+ virtual std::unique_ptr<v8::TaskRunner> GetForegroundTaskRunner(
+ Isolate* isolate) {
+ // TODO(ahaas): Make this function abstract after it got implemented on all
+ // platforms.
+ return {};
+ }
+
+ /**
+ * Returns a TaskRunner which can be used to post a task on a background.
+ * This function should only be called from a foreground thread.
+ */
+ virtual std::unique_ptr<v8::TaskRunner> GetBackgroundTaskRunner(
+ Isolate* isolate) {
+ // TODO(ahaas): Make this function abstract after it got implemented on all
+ // platforms.
+ return {};
+ }
+
/**
* Schedules a task to be invoked on a background thread. |expected_runtime|
* indicates that the task will run a long time. The Platform implementation
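For reference, the smallest thing that satisfies the TaskRunner contract introduced above is a runner that executes tasks inline on the calling thread — acceptable for tests, not for production scheduling. A hedged sketch against this era's v8-platform.h:

#include <memory>
#include <v8-platform.h>

class ImmediateTaskRunner final : public v8::TaskRunner {
 public:
  void PostTask(std::unique_ptr<v8::Task> task) override { task->Run(); }
  void PostDelayedTask(std::unique_ptr<v8::Task> task,
                       double /* delay_in_seconds */) override {
    task->Run();  // the delay is ignored in this toy runner
  }
  void PostIdleTask(std::unique_ptr<v8::IdleTask> /* task */) override {
    // Unreachable if callers honor IdleTasksEnabled() == false.
  }
  bool IdleTasksEnabled() override { return false; }
};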