diff --git a/README.md b/README.md index c56db86..7a5395e 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ zone.execute( return text; }, ['hello world']) - .then((result: napa.zone.ExecuteResponse) => { + .then((result: napa.zone.Result) => { console.log(result.value); }); ``` diff --git a/cpp-test/api/zone-tests.cpp b/cpp-test/api/zone-tests.cpp index 9f3f491..be49b83 100644 --- a/cpp-test/api/zone-tests.cpp +++ b/cpp-test/api/zone-tests.cpp @@ -20,102 +20,102 @@ TEST_CASE("zone apis", "[api]") { SECTION("broadcast valid javascript") { napa::Zone zone("zone1"); - auto response = zone.BroadcastSync("var i = 3 + 5;"); + auto result = zone.BroadcastSync("var i = 3 + 5;"); - REQUIRE(response == NAPA_RESPONSE_SUCCESS); + REQUIRE(result == NAPA_RESULT_SUCCESS); } SECTION("broadcast illegal javascript") { napa::Zone zone("zone1"); - auto response = zone.BroadcastSync("var i = 3 +"); + auto result = zone.BroadcastSync("var i = 3 +"); - REQUIRE(response == NAPA_RESPONSE_BROADCAST_SCRIPT_ERROR); + REQUIRE(result == NAPA_RESULT_BROADCAST_SCRIPT_ERROR); } SECTION("broadcast and execute javascript") { napa::Zone zone("zone1"); - auto responseCode = zone.BroadcastSync("function func(a, b) { return Number(a) + Number(b); }"); - REQUIRE(responseCode == NAPA_RESPONSE_SUCCESS); + auto resultCode = zone.BroadcastSync("function func(a, b) { return Number(a) + Number(b); }"); + REQUIRE(resultCode == NAPA_RESULT_SUCCESS); - napa::ExecuteRequest request; - request.function = NAPA_STRING_REF("func"); - request.arguments = { NAPA_STRING_REF("2"), NAPA_STRING_REF("3") }; + napa::FunctionSpec spec; + spec.function = NAPA_STRING_REF("func"); + spec.arguments = { NAPA_STRING_REF("2"), NAPA_STRING_REF("3") }; - auto response = zone.ExecuteSync(request); - REQUIRE(response.code == NAPA_RESPONSE_SUCCESS); - REQUIRE(response.returnValue == "5"); + auto result = zone.ExecuteSync(spec); + REQUIRE(result.code == NAPA_RESULT_SUCCESS); + REQUIRE(result.returnValue == "5"); } SECTION("broadcast 
and execute javascript async") { napa::Zone zone("zone1"); - std::promise promise; + std::promise promise; auto future = promise.get_future(); - zone.Broadcast("function func(a, b) { return Number(a) + Number(b); }", [&promise, &zone](napa::ResponseCode) { - napa::ExecuteRequest request; - request.function = NAPA_STRING_REF("func"); - request.arguments = { NAPA_STRING_REF("2"), NAPA_STRING_REF("3") }; + zone.Broadcast("function func(a, b) { return Number(a) + Number(b); }", [&promise, &zone](napa::ResultCode) { + napa::FunctionSpec spec; + spec.function = NAPA_STRING_REF("func"); + spec.arguments = { NAPA_STRING_REF("2"), NAPA_STRING_REF("3") }; - zone.Execute(request, [&promise](napa::ExecuteResponse response) { - promise.set_value(std::move(response)); + zone.Execute(spec, [&promise](napa::Result result) { + promise.set_value(std::move(result)); }); }); - auto response = future.get(); - REQUIRE(response.code == NAPA_RESPONSE_SUCCESS); - REQUIRE(response.returnValue == "5"); + auto result = future.get(); + REQUIRE(result.code == NAPA_RESULT_SUCCESS); + REQUIRE(result.returnValue == "5"); } SECTION("broadcast and execute javascript without timing out") { napa::Zone zone("zone1"); - std::promise promise; + std::promise promise; auto future = promise.get_future(); // Warmup to avoid loading napajs on first call zone.BroadcastSync("require('napajs');"); - zone.Broadcast("function func(a, b) { return Number(a) + Number(b); }", [&promise, &zone](napa::ResponseCode) { - napa::ExecuteRequest request; - request.function = NAPA_STRING_REF("func"); - request.arguments = { NAPA_STRING_REF("2"), NAPA_STRING_REF("3") }; - request.options.timeout = 100; + zone.Broadcast("function func(a, b) { return Number(a) + Number(b); }", [&promise, &zone](napa::ResultCode) { + napa::FunctionSpec spec; + spec.function = NAPA_STRING_REF("func"); + spec.arguments = { NAPA_STRING_REF("2"), NAPA_STRING_REF("3") }; + spec.options.timeout = 100; - zone.Execute(request, 
[&promise](napa::ExecuteResponse response) { - promise.set_value(std::move(response)); + zone.Execute(spec, [&promise](napa::Result result) { + promise.set_value(std::move(result)); }); }); - auto response = future.get(); - REQUIRE(response.code == NAPA_RESPONSE_SUCCESS); - REQUIRE(response.returnValue == "5"); + auto result = future.get(); + REQUIRE(result.code == NAPA_RESULT_SUCCESS); + REQUIRE(result.returnValue == "5"); } SECTION("broadcast and execute javascript with exceeded timeout") { napa::Zone zone("zone1"); - std::promise promise; + std::promise promise; auto future = promise.get_future(); // Warmup to avoid loading napajs on first call zone.BroadcastSync("require('napajs');"); - zone.Broadcast("function func() { while(true) {} }", [&promise, &zone](napa::ResponseCode) { - napa::ExecuteRequest request; - request.function = NAPA_STRING_REF("func"); - request.options.timeout = 200; + zone.Broadcast("function func() { while(true) {} }", [&promise, &zone](napa::ResultCode) { + napa::FunctionSpec spec; + spec.function = NAPA_STRING_REF("func"); + spec.options.timeout = 200; - zone.Execute(request, [&promise](napa::ExecuteResponse response) { - promise.set_value(std::move(response)); + zone.Execute(spec, [&promise](napa::Result result) { + promise.set_value(std::move(result)); }); }); - auto response = future.get(); - REQUIRE(response.code == NAPA_RESPONSE_TIMEOUT); - REQUIRE(response.errorMessage == "Execute exceeded timeout"); + auto result = future.get(); + REQUIRE(result.code == NAPA_RESULT_TIMEOUT); + REQUIRE(result.errorMessage == "Terminated due to timeout"); } SECTION("execute 2 javascript functions, one succeeds and one times out") { @@ -125,66 +125,66 @@ TEST_CASE("zone apis", "[api]") { zone.BroadcastSync("require('napajs');"); auto res = zone.BroadcastSync("function f1(a, b) { return Number(a) + Number(b); }"); - REQUIRE(res == NAPA_RESPONSE_SUCCESS); + REQUIRE(res == NAPA_RESULT_SUCCESS); res = zone.BroadcastSync("function f2() { while(true) {} 
}"); - REQUIRE(res == NAPA_RESPONSE_SUCCESS); + REQUIRE(res == NAPA_RESULT_SUCCESS); - std::promise promise1; + std::promise promise1; auto future1 = promise1.get_future(); - std::promise promise2; + std::promise promise2; auto future2 = promise2.get_future(); - napa::ExecuteRequest request1; + napa::FunctionSpec request1; request1.function = NAPA_STRING_REF("f1"); request1.arguments = { NAPA_STRING_REF("2"), NAPA_STRING_REF("3") }; request1.options.timeout = 100; - napa::ExecuteRequest request2; + napa::FunctionSpec request2; request2.function = NAPA_STRING_REF("f2"); request2.options.timeout = 100; - zone.Execute(request1, [&promise1](napa::ExecuteResponse response) { - promise1.set_value(std::move(response)); + zone.Execute(request1, [&promise1](napa::Result result) { + promise1.set_value(std::move(result)); }); - zone.Execute(request2, [&promise2](napa::ExecuteResponse response) { - promise2.set_value(std::move(response)); + zone.Execute(request2, [&promise2](napa::Result result) { + promise2.set_value(std::move(result)); }); - auto response = future1.get(); - REQUIRE(response.code == NAPA_RESPONSE_SUCCESS); - REQUIRE(response.returnValue == "5"); + auto result = future1.get(); + REQUIRE(result.code == NAPA_RESULT_SUCCESS); + REQUIRE(result.returnValue == "5"); - response = future2.get(); - REQUIRE(response.code == NAPA_RESPONSE_TIMEOUT); - REQUIRE(response.errorMessage == "Execute exceeded timeout"); + result = future2.get(); + REQUIRE(result.code == NAPA_RESULT_TIMEOUT); + REQUIRE(result.errorMessage == "Terminated due to timeout"); } SECTION("broadcast javascript requiring a module") { napa::Zone zone("zone1"); - auto responseCode = zone.BroadcastSync("var path = require('path'); function func() { return path.extname('test.txt'); }"); - REQUIRE(responseCode == NAPA_RESPONSE_SUCCESS); + auto resultCode = zone.BroadcastSync("var path = require('path'); function func() { return path.extname('test.txt'); }"); + REQUIRE(resultCode == NAPA_RESULT_SUCCESS); - 
napa::ExecuteRequest request; - request.function = NAPA_STRING_REF("func"); + napa::FunctionSpec spec; + spec.function = NAPA_STRING_REF("func"); - auto response = zone.ExecuteSync(request); - REQUIRE(response.code == NAPA_RESPONSE_SUCCESS); - REQUIRE(response.returnValue == "\".txt\""); + auto result = zone.ExecuteSync(spec); + REQUIRE(result.code == NAPA_RESULT_SUCCESS); + REQUIRE(result.returnValue == "\".txt\""); } SECTION("execute function in a module") { napa::Zone zone("zone1"); - napa::ExecuteRequest request; - request.module = NAPA_STRING_REF("path"); - request.function = NAPA_STRING_REF("extname"); - request.arguments = { NAPA_STRING_REF("\"test.txt\"") }; + napa::FunctionSpec spec; + spec.module = NAPA_STRING_REF("path"); + spec.function = NAPA_STRING_REF("extname"); + spec.arguments = { NAPA_STRING_REF("\"test.txt\"") }; - auto response = zone.ExecuteSync(request); - REQUIRE(response.code == NAPA_RESPONSE_SUCCESS); - REQUIRE(response.returnValue == "\".txt\""); + auto result = zone.ExecuteSync(spec); + REQUIRE(result.code == NAPA_RESULT_SUCCESS); + REQUIRE(result.returnValue == "\".txt\""); } } diff --git a/cpp-test/component/napa-component-tests.vcxproj b/cpp-test/component/napa-component-tests.vcxproj index 2cea8be..27f1053 100644 --- a/cpp-test/component/napa-component-tests.vcxproj +++ b/cpp-test/component/napa-component-tests.vcxproj @@ -30,7 +30,7 @@ 4100;4251;4459;4996 - BUILDING_NAPA_EXTENSION;NAPA_EXPORTS;BUILDING_V8_SHARED;%(PreprocessorDefinitions) + NAPA_EXPORTS;NAPA_BINDING_EXPORTS;BUILDING_NAPA_EXTENSION;BUILDING_V8_SHARED;%(PreprocessorDefinitions) $(NapaVanillaRoot)\inc;%(AdditionalIncludeDirectories) $(NapaVanillaRoot)\src;%(AdditionalIncludeDirectories) $(NapaVanillaRoot)\test\component;%(AdditionalIncludeDirectories) @@ -49,30 +49,32 @@ - - - - - - - - - - + + + + + + + + + - - - - - - - - + + + + + + + + + + + @@ -82,8 +84,9 @@ - + + diff --git a/cpp-test/component/napa-initialization-guard.h 
b/cpp-test/component/napa-initialization-guard.h index 884f235..0b4567e 100644 --- a/cpp-test/component/napa-initialization-guard.h +++ b/cpp-test/component/napa-initialization-guard.h @@ -14,7 +14,7 @@ private: class NapaInitialization { public: NapaInitialization() { - napa::PlatformSettings settings; + napa::settings::PlatformSettings settings; settings.loggingProvider = "console"; napa::providers::Initialize(settings); diff --git a/cpp-test/component/node-zone-tests.cpp b/cpp-test/component/node-zone-tests.cpp new file mode 100644 index 0000000..b337095 --- /dev/null +++ b/cpp-test/component/node-zone-tests.cpp @@ -0,0 +1,53 @@ +#include "catch.hpp" + +#include "zone/node-zone.h" + +#include + +using namespace napa; +using namespace napa::zone; + +TEST_CASE("node zone not available before init", "[node-zone]") { + auto zone = NodeZone::Get(); + REQUIRE(zone == nullptr); +} + +TEST_CASE("node zone delegate should work after init", "[node-zone]") { + auto broadcast = [](const std::string& source, BroadcastCallback callback){ + callback(NAPA_RESULT_SUCCESS); + }; + + auto execute = [](const FunctionSpec& spec, ExecuteCallback callback) { + callback({ NAPA_RESULT_SUCCESS, "", std::string("hello world"), nullptr }); + }; + + NodeZone::Init(broadcast, execute); + + auto zone = NodeZone::Get(); + REQUIRE(zone != nullptr); + + { + std::promise promise; + auto future = promise.get_future(); + + zone->Broadcast("", [&promise](ResultCode resultCode) { + promise.set_value(NAPA_RESULT_SUCCESS); + }); + + REQUIRE(future.get() == NAPA_RESULT_SUCCESS); + } + + { + std::promise promise; + auto future = promise.get_future(); + + FunctionSpec spec; + zone->Execute(spec, [&promise](Result result) { + promise.set_value(std::move(result)); + }); + + auto result = future.get(); + REQUIRE(result.code == NAPA_RESULT_SUCCESS); + REQUIRE(result.returnValue == "hello world"); + } +} diff --git a/cpp-test/component/tasks-tests.cpp b/cpp-test/component/tasks-tests.cpp index f37d169..48b5746 
100644 --- a/cpp-test/component/tasks-tests.cpp +++ b/cpp-test/component/tasks-tests.cpp @@ -1,9 +1,11 @@ #include "catch.hpp" +#include "module/loader/module-loader.h" #include "providers/providers.h" -#include "scheduler/broadcast-task.h" -#include "scheduler/execute-task.h" -#include "scheduler/task-decorators.h" +#include "zone/eval-task.h" +#include "zone/call-task.h" +#include "zone/task-decorators.h" +#include "zone/worker-context.h" #include "settings/settings.h" #include "v8/array-buffer-allocator.h" #include "napa-initialization-guard.h" @@ -13,7 +15,7 @@ #include using namespace napa; -using namespace napa::scheduler; +using namespace napa::zone; using namespace std::chrono_literals; // Make sure V8 it initialized exactly once. @@ -31,139 +33,142 @@ TEST_CASE("tasks", "[tasks]") { v8::Isolate::Scope isolateScope(isolate); v8::HandleScope handleScope(isolate); v8::Local context = v8::Context::New(isolate); + context->SetSecurityToken(v8::Undefined(isolate)); v8::Context::Scope contextScope(context); - // Set a simple zone main function - BroadcastTask("function __zone_execute__(module, func, args) { return this[func].apply(this, args); }").Execute(); + INIT_WORKER_CONTEXT(); + CREATE_MODULE_LOADER(); + + EvalTask("require('../lib/index');").Execute(); SECTION("load valid javascript") { - ResponseCode loadResponseCode; - BroadcastTask("var i = 3 + 5;", "", [&loadResponseCode](ResponseCode code) { + ResultCode loadResponseCode; + EvalTask("var i = 3 + 5;", "", [&loadResponseCode](ResultCode code) { loadResponseCode = code; }).Execute(); - REQUIRE(loadResponseCode == NAPA_RESPONSE_SUCCESS); + REQUIRE(loadResponseCode == NAPA_RESULT_SUCCESS); } SECTION("load fails when javascript is malformed") { - ResponseCode loadResponseCode; - BroadcastTask("var j = 3 +", "", [&loadResponseCode](ResponseCode code) { + ResultCode loadResponseCode; + EvalTask("var j = 3 +", "", [&loadResponseCode](ResultCode code) { loadResponseCode = code; }).Execute(); - 
REQUIRE(loadResponseCode == NAPA_RESPONSE_BROADCAST_SCRIPT_ERROR); + REQUIRE(loadResponseCode == NAPA_RESULT_BROADCAST_SCRIPT_ERROR); } SECTION("load fails when javascript exception is thrown") { - ResponseCode loadResponseCode; - BroadcastTask("throw Error('error');", "", [&loadResponseCode](ResponseCode code) { + ResultCode loadResponseCode; + EvalTask("throw Error('error');", "", [&loadResponseCode](ResultCode code) { loadResponseCode = code; }).Execute(); - REQUIRE(loadResponseCode == NAPA_RESPONSE_BROADCAST_SCRIPT_ERROR); + REQUIRE(loadResponseCode == NAPA_RESULT_BROADCAST_SCRIPT_ERROR); } SECTION("execute succeeds with a valid and existing function") { - BroadcastTask("function foo(a, b) { return Number(a) + Number(b); }").Execute(); + EvalTask("function foo(a, b) { return a + b; }").Execute(); - ExecuteRequest request; - request.function = NAPA_STRING_REF("foo"); - request.arguments = { NAPA_STRING_REF("3"), NAPA_STRING_REF("5") }; + FunctionSpec spec; + spec.function = NAPA_STRING_REF("foo"); + spec.arguments = { NAPA_STRING_REF("3"), NAPA_STRING_REF("5") }; - ExecuteResponse response; - ExecuteTask(request, [&](ExecuteResponse res) { - response = std::move(res); - }).Execute(); + Result result; + CallTask(std::make_shared(spec, [&](Result res) { + result = std::move(res); + })).Execute(); - REQUIRE(response.code == NAPA_RESPONSE_SUCCESS); - REQUIRE(response.returnValue == "8"); + REQUIRE(result.code == NAPA_RESULT_SUCCESS); + REQUIRE(result.returnValue == "8"); } SECTION("execute fails for non-existing function") { - ExecuteRequest request; - request.function = NAPA_STRING_REF("bar"); - request.arguments = { NAPA_STRING_REF("3"), NAPA_STRING_REF("5") }; + FunctionSpec spec; + spec.function = NAPA_STRING_REF("bar"); + spec.arguments = { NAPA_STRING_REF("3"), NAPA_STRING_REF("5") }; - ExecuteResponse response; - ExecuteTask(request, [&](ExecuteResponse res) { - response = std::move(res); - }).Execute(); + Result result; + CallTask(std::make_shared(spec, 
[&](Result res) { + result = std::move(res); + })).Execute(); - REQUIRE(response.code == NAPA_RESPONSE_EXECUTE_FUNC_ERROR); + REQUIRE(result.code == NAPA_RESULT_EXECUTE_FUNC_ERROR); } SECTION("execute fails when function throws exception") { - BroadcastTask("function f1(a, b) { throw 'an error' }").Execute(); + EvalTask("function f1(a, b) { throw 'an error' }").Execute(); - ExecuteRequest request; - request.function = NAPA_STRING_REF("f1"); - request.arguments = { NAPA_STRING_REF("3"), NAPA_STRING_REF("5") }; + FunctionSpec spec; + spec.function = NAPA_STRING_REF("f1"); + spec.arguments = { NAPA_STRING_REF("3"), NAPA_STRING_REF("5") }; - ExecuteResponse response; - ExecuteTask(request, [&](ExecuteResponse res) { - response = std::move(res); - }).Execute(); + Result result; + CallTask(std::make_shared(spec, [&](Result res) { + result = std::move(res); + })).Execute(); - REQUIRE(response.code == NAPA_RESPONSE_EXECUTE_FUNC_ERROR); - REQUIRE(response.errorMessage == "an error"); + REQUIRE(result.code == NAPA_RESULT_EXECUTE_FUNC_ERROR); + REQUIRE(result.errorMessage == "an error"); } SECTION("execute succeeds when timeout was not exceeded") { - BroadcastTask("function f2(a, b) { return Number(a) + Number(b); }").Execute(); + EvalTask("function f2(a, b) { return a + b; }").Execute(); - ExecuteRequest request; - request.function = NAPA_STRING_REF("f2"); - request.arguments = { NAPA_STRING_REF("3"), NAPA_STRING_REF("5") }; + FunctionSpec spec; + spec.function = NAPA_STRING_REF("f2"); + spec.arguments = { NAPA_STRING_REF("3"), NAPA_STRING_REF("5") }; - ExecuteResponse response; - TimeoutTaskDecorator(100ms, request, [&](ExecuteResponse res) { - response = std::move(res); - }).Execute(); + Result result; + TimeoutTaskDecorator(100ms, std::make_shared(spec, [&](Result res) { + result = std::move(res); + })).Execute(); - REQUIRE(response.code == NAPA_RESPONSE_SUCCESS); - REQUIRE(response.returnValue == "8"); + REQUIRE(result.code == NAPA_RESULT_SUCCESS); + 
REQUIRE(result.returnValue == "8"); } SECTION("execute fails when timeout exceeded") { - BroadcastTask("function f3() { while(true) {} }").Execute(); + EvalTask("function f3() { while(true) {} }").Execute(); - ExecuteRequest request; - request.function = NAPA_STRING_REF("f3"); + FunctionSpec spec; + spec.function = NAPA_STRING_REF("f3"); - ExecuteResponse response; - TimeoutTaskDecorator(30ms, request, [&](ExecuteResponse res) { - response = std::move(res); - }).Execute(); + Result result; + TimeoutTaskDecorator(30ms, std::make_shared(spec, [&](Result res) { + result = std::move(res); + })).Execute(); - REQUIRE(response.code == NAPA_RESPONSE_TIMEOUT); - REQUIRE(response.errorMessage == "Execute exceeded timeout"); + REQUIRE(result.code == NAPA_RESULT_TIMEOUT); + REQUIRE(result.errorMessage == "Terminated due to timeout"); } SECTION("execute succeeds after a failed task") { - BroadcastTask("function f4() { while(true) {} }").Execute(); - BroadcastTask("function f5(a, b) { return Number(a) + Number(b); }").Execute(); + EvalTask("function f4() { while(true) {} }").Execute(); + EvalTask("function f5(a, b) { return Number(a) + Number(b); }").Execute(); - ExecuteRequest request1; + FunctionSpec request1; request1.function = NAPA_STRING_REF("f4"); - ExecuteResponse response1; - TimeoutTaskDecorator(30ms, request1, [&](ExecuteResponse res) { + Result response1; + TimeoutTaskDecorator(30ms, std::make_shared(request1, [&](Result res) { response1 = std::move(res); - }).Execute(); + })).Execute(); - REQUIRE(response1.code == NAPA_RESPONSE_TIMEOUT); - REQUIRE(response1.errorMessage == "Execute exceeded timeout"); + REQUIRE(response1.code == NAPA_RESULT_TIMEOUT); + REQUIRE(response1.errorMessage == "Terminated due to timeout"); - ExecuteRequest request2; + FunctionSpec request2; request2.function = NAPA_STRING_REF("f5"); request2.arguments = { NAPA_STRING_REF("3"), NAPA_STRING_REF("5") }; - ExecuteResponse response2; - TimeoutTaskDecorator(100ms, request2, [&](ExecuteResponse 
res) { + Result response2; + TimeoutTaskDecorator(100ms, std::make_shared(request2, [&](Result res) { response2 = std::move(res); - }).Execute(); + })).Execute(); - REQUIRE(response2.code == NAPA_RESPONSE_SUCCESS); + REQUIRE(response2.code == NAPA_RESULT_SUCCESS); REQUIRE(response2.returnValue == "8"); } } diff --git a/cpp-test/component/worker-tests.cpp b/cpp-test/component/worker-tests.cpp index 4b34e1c..3326b71 100644 --- a/cpp-test/component/worker-tests.cpp +++ b/cpp-test/component/worker-tests.cpp @@ -1,6 +1,6 @@ #include "catch.hpp" -#include "scheduler/worker.h" +#include "zone/worker.h" #include "napa-initialization-guard.h" #include "v8.h" @@ -10,7 +10,9 @@ #include using namespace napa; -using namespace napa::scheduler; +using namespace napa::settings; +using namespace napa::zone; + class TestTask : public Task { diff --git a/cpp-test/module/async/async.cpp b/cpp-test/module/async/async.cpp index c38483e..ec03ec8 100644 --- a/cpp-test/module/async/async.cpp +++ b/cpp-test/module/async/async.cpp @@ -1,4 +1,5 @@ #include +#include #include #include @@ -22,7 +23,7 @@ void Increase(const FunctionCallbackInfo& args) { auto value = args[0]->Uint32Value(); - napa::module::PostAsyncWork(Local::Cast(args[1]), + napa::zone::PostAsyncWork(Local::Cast(args[1]), [value]() { // This runs at the separate thread. _now += value; @@ -51,7 +52,7 @@ void IncreaseSync(const FunctionCallbackInfo& args) { auto value = args[0]->Uint32Value(); - napa::module::DoAsyncWork(Local::Cast(args[1]), + napa::zone::DoAsyncWork(Local::Cast(args[1]), [value](auto complete) { // This runs at the same thread. 
_now += value; diff --git a/cpp-test/module/module-loader/module-loader-tests.cpp b/cpp-test/module/module-loader/module-loader-tests.cpp index 20b8117..1827cd4 100644 --- a/cpp-test/module/module-loader/module-loader-tests.cpp +++ b/cpp-test/module/module-loader/module-loader-tests.cpp @@ -1,11 +1,11 @@ #include -#include -#include +#include #include #include -#include -#include +#include +#include +#include #include #include @@ -22,7 +22,9 @@ using namespace napa; using namespace napa::module; -using namespace napa::scheduler; +using namespace napa::settings; +using namespace napa::zone; + class V8InitializationGuard { public: @@ -311,7 +313,7 @@ TEST_CASE("resolve full path modules", "[module-loader]") { class AsyncTestTask : public Task { public: - AsyncTestTask(ZoneImpl* zone, std::string filename) + AsyncTestTask(zone::NapaZone* zone, std::string filename) : _zone(zone), _filename(std::move(filename)), _succeeded(false) {} void Execute() override { @@ -361,7 +363,7 @@ public: private: - ZoneImpl* _zone; + zone::NapaZone* _zone; std::string _filename; bool _succeeded = false; @@ -375,7 +377,7 @@ TEST_CASE("async", "[module-loader]") { settings.id = "zone"; settings.workers = 1; - auto zone = ZoneImpl::Create(settings); + auto zone = zone::NapaZone::Create(settings); auto scheduler = zone->GetScheduler(); scheduler->ScheduleOnAllWorkers(std::make_shared(zone.get(), std::string())); diff --git a/cpp-test/module/module-loader/napa-module-loader-tests.vcxproj b/cpp-test/module/module-loader/napa-module-loader-tests.vcxproj index 591d960..53205a2 100644 --- a/cpp-test/module/module-loader/napa-module-loader-tests.vcxproj +++ b/cpp-test/module/module-loader/napa-module-loader-tests.vcxproj @@ -38,7 +38,7 @@ 4100;4251;4996 - BUILDING_NAPA_EXTENSION;NAPA_EXPORTS;BUILDING_V8_SHARED;%(PreprocessorDefinitions) + NAPA_EXPORTS;NAPA_BINDING_EXPORTS;BUILDING_NAPA_EXTENSION;BUILDING_V8_SHARED;%(PreprocessorDefinitions) $(NapaVanillaRoot)\inc;%(AdditionalIncludeDirectories) 
$(NapaVanillaRoot)\src;%(AdditionalIncludeDirectories) @@ -49,26 +49,29 @@ - - - - - - - - - + + + + + + + + + - - - - - - - - + + + + + + + + + + + diff --git a/cpp-test/unit/module/file-system-helpers-tests.cpp b/cpp-test/unit/module/file-system-helpers-tests.cpp index bde5b2a..3f38bd4 100644 --- a/cpp-test/unit/module/file-system-helpers-tests.cpp +++ b/cpp-test/unit/module/file-system-helpers-tests.cpp @@ -1,8 +1,9 @@ #include "catch.hpp" -#include "napa/module/platform.h" -#include "module/core-modules/node/file-system-helpers.h" +#include +#include +using namespace napa; using namespace napa::module; TEST_CASE("File system helpers reads/writes a file correctly.", "[file-system-helpers]") { diff --git a/cpp-test/unit/module/module-resolver-tests.cpp b/cpp-test/unit/module/module-resolver-tests.cpp index d726026..65d3017 100644 --- a/cpp-test/unit/module/module-resolver-tests.cpp +++ b/cpp-test/unit/module/module-resolver-tests.cpp @@ -1,14 +1,16 @@ #include "catch.hpp" -#include -#include +#include +#include #include #include +using namespace napa; using namespace napa::module; + namespace { ModuleResolver& GetModuleResolver() { diff --git a/cpp-test/unit/napa-unit-tests.vcxproj b/cpp-test/unit/napa-unit-tests.vcxproj index 3dc637d..2029962 100644 --- a/cpp-test/unit/napa-unit-tests.vcxproj +++ b/cpp-test/unit/napa-unit-tests.vcxproj @@ -28,7 +28,7 @@ 4100;4996;4459 - NAPA_EXPORTS;NAPA_LOG_DISABLED;%(PreprocessorDefinitions) + NAPA_EXPORTS;NAPA_BINDING_EXPORTS;NAPA_LOG_DISABLED;%(PreprocessorDefinitions) $(NapaVanillaRoot)\inc;%(AdditionalIncludeDirectories) $(NapaVanillaRoot)\src;%(AdditionalIncludeDirectories) @@ -37,13 +37,13 @@ - - + - - + + + @@ -51,8 +51,8 @@ - - + + diff --git a/cpp-test/unit/settings/parser-tests.cpp b/cpp-test/unit/settings/parser-tests.cpp index 081582b..729b3f1 100644 --- a/cpp-test/unit/settings/parser-tests.cpp +++ b/cpp-test/unit/settings/parser-tests.cpp @@ -7,68 +7,68 @@ using namespace napa; TEST_CASE("Parsing nothing doesn't 
fail", "[settings-parser]") { - PlatformSettings settings; + settings::PlatformSettings settings; - REQUIRE(settings_parser::ParseFromString("", settings)); - REQUIRE(settings_parser::ParseFromConsole(0, nullptr, settings)); + REQUIRE(settings::ParseFromString("", settings)); + REQUIRE(settings::ParseFromConsole(0, nullptr, settings)); } TEST_CASE("Parsing from string", "[settings-parser]") { - PlatformSettings settings; + settings::PlatformSettings settings; settings.workers = 1; settings.loggingProvider = ""; - REQUIRE(settings_parser::ParseFromString("--workers 5 --loggingProvider myProvider", settings)); + REQUIRE(settings::ParseFromString("--workers 5 --loggingProvider myProvider", settings)); REQUIRE(settings.workers == 5); REQUIRE(settings.loggingProvider == "myProvider"); } TEST_CASE("Parsing from console", "[settings-parser]") { - PlatformSettings settings; + settings::PlatformSettings settings; settings.workers = 1; settings.loggingProvider = ""; std::vector args = { "--workers", "5", "--loggingProvider", "myProvider" }; - REQUIRE(settings_parser::ParseFromConsole(static_cast(args.size()), args.data(), settings)); + REQUIRE(settings::ParseFromConsole(static_cast(args.size()), args.data(), settings)); REQUIRE(settings.workers == 5); REQUIRE(settings.loggingProvider == "myProvider"); } TEST_CASE("Parsing non existing setting fails", "[settings-parser]") { - PlatformSettings settings; + settings::PlatformSettings settings; - REQUIRE(settings_parser::ParseFromString("--thisSettingDoesNotExist noValue", settings) == false); + REQUIRE(settings::ParseFromString("--thisSettingDoesNotExist noValue", settings) == false); } TEST_CASE("Parsing does not change defaults if setting is not provided", "[settings-parser]") { - PlatformSettings settings; + settings::PlatformSettings settings; settings.workers = 2412; - REQUIRE(settings_parser::ParseFromString("--loggingProvider myProvider", settings)); + REQUIRE(settings::ParseFromString("--loggingProvider myProvider", 
settings)); REQUIRE(settings.workers == 2412); } TEST_CASE("Parsing with extra white spaces succeeds", "[settings-parser]") { - PlatformSettings settings; + settings::PlatformSettings settings; settings.workers = 1; settings.loggingProvider = ""; - REQUIRE(settings_parser::ParseFromString(" --workers 5 --loggingProvider \t myProvider \t\t ", settings)); + REQUIRE(settings::ParseFromString(" --workers 5 --loggingProvider \t myProvider \t\t ", settings)); REQUIRE(settings.workers == 5); REQUIRE(settings.loggingProvider == "myProvider"); } TEST_CASE("Parsing with multiple values for one setting", "[settings-parser]") { - PlatformSettings settings; + settings::PlatformSettings settings; settings.workers = 1; settings.v8Flags = {}; - REQUIRE(settings_parser::ParseFromString("--v8Flags one two three --workers 5", settings)); + REQUIRE(settings::ParseFromString("--v8Flags one two three --workers 5", settings)); REQUIRE(settings.workers == 5); REQUIRE(settings.v8Flags.size() == 3); @@ -78,13 +78,13 @@ TEST_CASE("Parsing with multiple values for one setting", "[settings-parser]") { } TEST_CASE("Parsing with empty string succeeds", "[settings-parser]") { - PlatformSettings settings; + settings::PlatformSettings settings; - REQUIRE(settings_parser::ParseFromString("", settings) == true); + REQUIRE(settings::ParseFromString("", settings) == true); } TEST_CASE("Parsing with different value type fails", "[settings-parser]") { - PlatformSettings settings; + settings::PlatformSettings settings; - REQUIRE(settings_parser::ParseFromString("--workers five", settings) == false); + REQUIRE(settings::ParseFromString("--workers five", settings) == false); } diff --git a/cpp-test/unit/scheduler/scheduler-tests.cpp b/cpp-test/unit/zone/scheduler-tests.cpp similarity index 97% rename from cpp-test/unit/scheduler/scheduler-tests.cpp rename to cpp-test/unit/zone/scheduler-tests.cpp index b5f05fd..1e2412a 100644 --- a/cpp-test/unit/scheduler/scheduler-tests.cpp +++ 
b/cpp-test/unit/zone/scheduler-tests.cpp @@ -1,13 +1,13 @@ #include "catch.hpp" -#include "scheduler/scheduler.h" +#include "zone/scheduler.h" #include #include using namespace napa; -using namespace napa::scheduler; - +using namespace napa::zone; +using namespace napa::settings; class TestTask : public Task { public: diff --git a/cpp-test/unit/scheduler/timeout-service-tests.cpp b/cpp-test/unit/zone/timeout-service-tests.cpp similarity index 95% rename from cpp-test/unit/scheduler/timeout-service-tests.cpp rename to cpp-test/unit/zone/timeout-service-tests.cpp index 232d887..eb8d8b1 100644 --- a/cpp-test/unit/scheduler/timeout-service-tests.cpp +++ b/cpp-test/unit/zone/timeout-service-tests.cpp @@ -1,11 +1,11 @@ #include "catch.hpp" -#include "scheduler/timeout-service.h" +#include "zone/timeout-service.h" #include #include -using namespace napa::scheduler; +using namespace napa::zone; using namespace std::chrono; using namespace std::chrono_literals; diff --git a/docs/api/module.md b/docs/api/module.md index a3fc74d..c607eb0 100644 --- a/docs/api/module.md +++ b/docs/api/module.md @@ -40,7 +40,7 @@ As you can count on NPM, most modules are pure JavaScript. 
These are only a few | ------------------------------------------------------------ | ---------- | ------------- | -------------- | ------------ | | Export JavaScript function only | | | | hello-world [[.md](../../examples/modules/hello-world/README.md) [.cpp](../../examples/modules/hello-world/node/addon.cpp) [test](../../examples/modules/hello-world/test/test.ts)] | | Export JavaScript object (ObjectWrap) | X | | | plus-number [[.md](../../examples/modules/plus-number/README.md) [.cpp](../../examples/modules/plus-number/node/addon.cpp) [test](../../examples/modules/plus-number/test/module-test/test.ts)] | -| Share C++ object across isolates | X | X | | allocator-wrap [[.h](../../src/module/core-modules/napa-wraps/allocator-wrap.h) [.cpp](../../src/module/core-modules/napa-wraps/allocator-wrap.cpp)] | +| Share C++ object across isolates | X | X | | allocator-wrap [[.h](../../src/module/core-modules/napa/allocator-wrap.h) [.cpp](../../src/module/core-modules/napa/allocator-wrap.cpp)] | | Export asynchronous JavaScript function | X | | X | async-number [[.md](../../examples/modules/async-number/README.md) [.cpp](../../examples/modules/async-number/node/addon.cpp) [test](../../examples/modules/async-number/test/test.ts)] | ## Special topics diff --git a/docs/api/zone.md b/docs/api/zone.md index 966e888..090ab22 100644 --- a/docs/api/zone.md +++ b/docs/api/zone.md @@ -2,7 +2,6 @@ ## Table of Contents - [`create(id: string, settings: ZoneSettings = DEFAULT_SETTINGS): Zone`](#create-id-string-settings-zonesettings-default_settings-zone) -- [`getOrCreate(id: string, settings: ZoneSettings = DEFAULT_SETTINGS): Zone`](#getOrCreate-id-string-settings-zonesettings-default_settings-zone) - [`get(id: string): Zone`](#get-id-string-zone) - [`current: Zone`](#current-zone) - [`node: Zone`](#node-zone) @@ -15,11 +14,11 @@ - [`zone.broadcast(function: (...args: any[]) => void, args: any[]): Promise`](#zone-broadcast-function-args-any-void-args-any-promise-void) - 
[`zone.broadcastSync(code: string): void`](#zone-broadcastsync-code-string-void) - [`zone.broadcastSync(function: (...args: any[]) => void, args: any[]): void`](#zone-broadcastsync-function-args-any-void-args-any-void) - - [`zone.execute(moduleName: string, functionName: string, args: any[], timeout: number): Promise`](#zone-execute-modulename-string-functionname-string-args-any-timeout-number-promise-executeresult) - - [`zone.execute(function: (...args[]) => any, args: any[], timeout: number): Promise`](#zone-execute-function-args-any-args-any-timeout-number-promise-executeresult) - - [`zone.executeSync(moduleName: string, functionName: string, args: any[], timeout: number): ExecuteResult`](#zone-executesync-modulename-string-functionname-string-args-any-timeout-number-executeresult) - - [`zone.executeSync(function: (...args: any[]) => any, args: any[], timeout: number): ExecuteResult`](#zone-executesync-function-args-any-any-args-any-timeout-number-executeresult) -- interface [`ExecuteResult`](#interface-executeresult) + - [`zone.execute(moduleName: string, functionName: string, args: any[], timeout: number): Promise`](#zone-execute-modulename-string-functionname-string-args-any-timeout-number-promise-result) + - [`zone.execute(function: (...args[]) => any, args: any[], timeout: number): Promise`](#zone-execute-function-args-any-args-any-timeout-number-promise-result) + - [`zone.executeSync(moduleName: string, functionName: string, args: any[], timeout: number): Result`](#zone-executesync-modulename-string-functionname-string-args-any-timeout-number-result) + - [`zone.executeSync(function: (...args: any[]) => any, args: any[], timeout: number): Result`](#zone-executesync-function-args-any-any-args-any-timeout-number-result) +- interface [`Result`](#interface-result) - [`result.value: any`](#result-value-any) - [`result.payload: string`](#result-payload-string) - [`result.transportContext: 
transport.TransportContext`](#result-transportcontext-transport-transportcontext) @@ -40,15 +39,6 @@ let zone = napa.zone.create('zone1', { workers: 1 }); ``` -### getOrCreate(id: string, settings: ZoneSettings): Zone -It gets a reference of zone by an id if a zone with the id already exists, otherwise create a new one and return its reference. - -Example: -```ts -let zone = napa.zone.getOrCreate('zone1', { - workers: 4 -}); -``` ### get(id: string): Zone It gets a reference of zone by an id. Error will be thrown if the zone doesn't exist. @@ -154,12 +144,12 @@ catch (error) { } ``` ### zone.execute(moduleName: string, functionName: string, args: any[], timeout: number = 0): Promise\ -Execute a function asynchronously on arbitrary worker via module name and function name. Arguments can be of any JavaScript type that is [transportable](transport.md#transportable-types). It returns a Promise of [`ExecuteResult`](#interface-executeresult). If error happens, either bad code, user exception, or timeout is reached, promise will be rejected. +Execute a function asynchronously on arbitrary worker via module name and function name. Arguments can be of any JavaScript type that is [transportable](transport.md#transportable-types). It returns a Promise of [`Result`](#interface-result). If error happens, either bad code, user exception, or timeout is reached, promise will be rejected. Example: Execute function 'bar' in module 'foo', with arguments [1, 'hello', { field1: 1 }]. 300ms timeout is applied. ```ts zone.execute('foo', 'bar', [1, "hello", {field1: 1}], 300) - .then((result: ExecuteResult) => { + .then((result: Result) => { console.log('execute succeeded:', result.value); }) .catch((error) => { @@ -170,14 +160,14 @@ zone.execute('foo', 'bar', [1, "hello", {field1: 1}], 300) ### zone.execute(function: (...args: any[]) => any, args: any[], timeout: number = 0): Promise\ -Execute an anonymous function asynchronously on arbitrary worker. 
Arguments can be of any JavaScript type that is [transportable](transport.md#transportable-types). It returns a Promise of [`ExecuteResult`](#interface-executeresult). If error happens, either bad code, user exception, or timeout is reached, promise will be rejected. +Execute an anonymous function asynchronously on arbitrary worker. Arguments can be of any JavaScript type that is [transportable](transport.md#transportable-types). It returns a Promise of [`Result`](#interface-result). If error happens, either bad code, user exception, or timeout is reached, promise will be rejected. Example: ```ts zone.execute((a: number, b: string, c: object) => { return a + b + JSON.stringify(c); }, [1, "hello", {field1: 1}]) - .then((result: ExecuteResult) => { + .then((result: Result) => { console.log('execute succeeded:', result.value); }) .catch((error) => { @@ -188,7 +178,7 @@ zone.execute((a: number, b: string, c: object) => { ### zone.executeSync(moduleName: string, functionName: string, args: any[], timeout: number = 0): any -Execute a function synchronously on arbitrary worker via module name and function name. Arguments can be of any JavaScript type that is [transportable](transport.md#transportable-types). It returns an [`ExecuteResult`](#interface-executeresult). If error happens, either bad code, user exception, or timeout is reached, error will be thrown. +Execute a function synchronously on arbitrary worker via module name and function name. Arguments can be of any JavaScript type that is [transportable](transport.md#transportable-types). It returns an [`Result`](#interface-result). If error happens, either bad code, user exception, or timeout is reached, error will be thrown. Example: Execute function 'bar' in module 'foo', with arguments [1, 'hello', { field1: 1 }]. 300ms timeout is applied. 
```ts @@ -203,7 +193,7 @@ catch (error) { ``` ### zone.executeSync(function: (...args: any[]) => any, args: any[], timeout: number = 0): any -Execute an annoymouse function synchronously on arbitrary worker. Arguments can be of any JavaScript type that is [transportable](transport.md#transportable-types). It returns an [`ExecuteResult`](#interface-executeresult). If error happens, either bad code, user exception, or timeout is reached, error will be thrown. +Execute an annoymouse function synchronously on arbitrary worker. Arguments can be of any JavaScript type that is [transportable](transport.md#transportable-types). It returns an [`Result`](#interface-result). If error happens, either bad code, user exception, or timeout is reached, error will be thrown. Example: Execute annoymouse function sychronously, with arguments [1, 'hello', { field1: 1 }]. No timeout is applied. ```ts @@ -219,7 +209,7 @@ catch (error) { ``` -## Interface `ExecuteResult` +## Interface `Result` Interface to access return value of `zone.execute` or `zone.executeSync`. ### result.value: any diff --git a/examples/modules/async-number/node/addon.cpp b/examples/modules/async-number/node/addon.cpp index 2a87001..88f6c93 100644 --- a/examples/modules/async-number/node/addon.cpp +++ b/examples/modules/async-number/node/addon.cpp @@ -1,4 +1,5 @@ #include +#include #include #include @@ -22,7 +23,7 @@ void Increase(const FunctionCallbackInfo& args) { auto value = args[0]->Uint32Value(); - napa::module::PostAsyncWork(Local::Cast(args[1]), + napa::zone::PostAsyncWork(Local::Cast(args[1]), [value]() { // This runs at the separate thread. _now += value; @@ -51,7 +52,7 @@ void IncreaseSync(const FunctionCallbackInfo& args) { auto value = args[0]->Uint32Value(); - napa::module::DoAsyncWork(Local::Cast(args[1]), + napa::zone::DoAsyncWork(Local::Cast(args[1]), [value](auto complete) { // This runs at the same thread. 
_now += value; diff --git a/examples/modules/dirs.proj b/examples/modules/dirs.proj new file mode 100644 index 0000000..fd4b7d0 --- /dev/null +++ b/examples/modules/dirs.proj @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/inc/napa-async.h b/inc/napa-async.h new file mode 100644 index 0000000..557323b --- /dev/null +++ b/inc/napa-async.h @@ -0,0 +1,8 @@ +#pragma once + +#ifdef BUILDING_NAPA_EXTENSION +#include "napa/zone/napa-async-runner.h" +#else +#include "napa/zone/node-async-runner.h" +#endif + diff --git a/inc/napa-c.h b/inc/napa-c.h index e233a78..f087b45 100644 --- a/inc/napa-c.h +++ b/inc/napa-c.h @@ -30,7 +30,7 @@ EXTERN_C NAPA_API napa_zone_handle napa_zone_get_current(); /// Releases the zone handle. When all handles for a zone are released the zone is destoryed. /// The zone handle. -EXTERN_C NAPA_API napa_response_code napa_zone_release(napa_zone_handle handle); +EXTERN_C NAPA_API napa_result_code napa_zone_release(napa_zone_handle handle); /// /// Initializes the napa zone, providing specific settings. @@ -39,7 +39,7 @@ EXTERN_C NAPA_API napa_response_code napa_zone_release(napa_zone_handle handle); /// /// The zone handle. /// The settings string. -EXTERN_C NAPA_API napa_response_code napa_zone_init( +EXTERN_C NAPA_API napa_result_code napa_zone_init( napa_zone_handle handle, napa_string_ref settings); @@ -60,12 +60,12 @@ EXTERN_C NAPA_API void napa_zone_broadcast( /// Executes a pre-loaded function asynchronously in a single zone wroker. /// The zone handle. -/// The execution request. +/// The function spec to call. /// A callback that is triggered when execution is done. /// An opaque pointer that is passed back in the callback. EXTERN_C NAPA_API void napa_zone_execute( napa_zone_handle handle, - napa_zone_execute_request request, + napa_zone_function_spec spec, napa_zone_execute_callback callback, void* context); @@ -76,23 +76,23 @@ EXTERN_C NAPA_API void napa_zone_execute( /// TODO: specify public settings here. /// /// The settings string. 
-EXTERN_C NAPA_API napa_response_code napa_initialize(napa_string_ref settings); +EXTERN_C NAPA_API napa_result_code napa_initialize(napa_string_ref settings); /// /// Same as napa_initialize only accepts arguments as provided by console /// /// Number of arguments. /// The arguments. -EXTERN_C NAPA_API napa_response_code napa_initialize_from_console( +EXTERN_C NAPA_API napa_result_code napa_initialize_from_console( int argc, char* argv[]); /// Invokes napa shutdown steps. All non released zones will be destroyed. -EXTERN_C NAPA_API napa_response_code napa_shutdown(); +EXTERN_C NAPA_API napa_result_code napa_shutdown(); -/// Convert the napa response code to its string representation. -/// The response code. -EXTERN_C NAPA_API const char* napa_response_code_to_string(napa_response_code code); +/// Convert the napa result code to its string representation. +/// The result code. +EXTERN_C NAPA_API const char* napa_result_code_to_string(napa_result_code code); /// Set customized allocator, which will be used for napa_allocate and napa_deallocate. /// If user doesn't call napa_allocator_set, C runtime malloc/free from napa.dll will be used. diff --git a/inc/napa-module.h b/inc/napa-module.h index 0231c37..0a6ddc7 100644 --- a/inc/napa-module.h +++ b/inc/napa-module.h @@ -7,10 +7,8 @@ #ifdef BUILDING_NAPA_EXTENSION #include "napa/module/module-internal.h" #include "napa/module/module-node-compat.h" -#include "napa/module/napa-async-runner.h" #include "napa/module/object-wrap.h" #else -#include "napa/module/node-async-runner.h" #include #include #endif diff --git a/inc/napa.h b/inc/napa.h index f498e2b..922aeb1 100644 --- a/inc/napa.h +++ b/inc/napa.h @@ -8,17 +8,17 @@ namespace napa { /// Initializes napa with global scope settings. 
- inline ResponseCode Initialize(const std::string& settings = "") { + inline ResultCode Initialize(const std::string& settings = "") { return napa_initialize(STD_STRING_TO_NAPA_STRING_REF(settings)); } /// Initialize napa using console provided arguments. - inline ResponseCode InitializeFromConsole(int argc, char* argv[]) { + inline ResultCode InitializeFromConsole(int argc, char* argv[]) { return napa_initialize_from_console(argc, argv); } /// Shut down napa. - inline ResponseCode Shutdown() { + inline ResultCode Shutdown() { return napa_shutdown(); } @@ -33,9 +33,9 @@ namespace napa { _handle = napa_zone_create(STD_STRING_TO_NAPA_STRING_REF(id)); auto res = napa_zone_init(_handle, STD_STRING_TO_NAPA_STRING_REF(settings)); - if (res != NAPA_RESPONSE_SUCCESS) { + if (res != NAPA_RESULT_SUCCESS) { napa_zone_release(_handle); - throw std::runtime_error(napa_response_code_to_string(res)); + throw std::runtime_error(napa_result_code_to_string(res)); } } @@ -59,7 +59,7 @@ namespace napa { napa_zone_broadcast( _handle, STD_STRING_TO_NAPA_STRING_REF(source), - [](napa_response_code code, void* context) { + [](napa_result_code code, void* context) { // Ensures the context is deleted when this scope ends. std::unique_ptr callback(reinterpret_cast(context)); @@ -69,11 +69,11 @@ namespace napa { /// Compiles and run the provided source code on all zone workers synchronously. /// The source code. - ResponseCode BroadcastSync(const std::string& source) { - std::promise prom; + ResultCode BroadcastSync(const std::string& source) { + std::promise prom; auto fut = prom.get_future(); - Broadcast(source, [&prom](ResponseCode code) { + Broadcast(source, [&prom](ResultCode code) { prom.set_value(code); }); @@ -81,47 +81,47 @@ namespace napa { } /// Executes a pre-loaded JS function asynchronously. - /// The execution request. + /// A function spec to call. /// A callback that is triggered when execution is done. 
- void Execute(const ExecuteRequest& request, ExecuteCallback callback) { + void Execute(const FunctionSpec& spec, ExecuteCallback callback) { // Will be deleted on when the callback scope ends. auto context = new ExecuteCallback(std::move(callback)); - napa_zone_execute_request req; - req.module = request.module; - req.function = request.function; - req.arguments = request.arguments.data(); - req.arguments_count = request.arguments.size(); - req.options = request.options; + napa_zone_function_spec req; + req.module = spec.module; + req.function = spec.function; + req.arguments = spec.arguments.data(); + req.arguments_count = spec.arguments.size(); + req.options = spec.options; // Release ownership of transport context - req.transport_context = reinterpret_cast(request.transportContext.release()); + req.transport_context = reinterpret_cast(spec.transportContext.release()); - napa_zone_execute(_handle, req, [](napa_zone_execute_response response, void* context) { + napa_zone_execute(_handle, req, [](napa_zone_result result, void* context) { // Ensures the context is deleted when this scope ends. std::unique_ptr callback(reinterpret_cast(context)); - ExecuteResponse res; - res.code = response.code; - res.errorMessage = NAPA_STRING_REF_TO_STD_STRING(response.error_message); - res.returnValue = NAPA_STRING_REF_TO_STD_STRING(response.return_value); + Result res; + res.code = result.code; + res.errorMessage = NAPA_STRING_REF_TO_STD_STRING(result.error_message); + res.returnValue = NAPA_STRING_REF_TO_STD_STRING(result.return_value); // Assume ownership of transport context res.transportContext.reset( - reinterpret_cast(response.transport_context)); + reinterpret_cast(result.transport_context)); (*callback)(std::move(res)); }, context); } /// Executes a pre-loaded JS function synchronously. - /// The execution request. - ExecuteResponse ExecuteSync(const ExecuteRequest& request) { - std::promise prom; + /// The function spec to call. 
+ Result ExecuteSync(const FunctionSpec& spec) { + std::promise prom; auto fut = prom.get_future(); - Execute(request, [&prom](ExecuteResponse response) { - prom.set_value(std::move(response)); + Execute(spec, [&prom](Result result) { + prom.set_value(std::move(result)); }); return fut.get(); diff --git a/inc/napa/exports.h b/inc/napa/exports.h index 0b6e1fa..24b3cd5 100644 --- a/inc/napa/exports.h +++ b/inc/napa/exports.h @@ -1,11 +1,19 @@ #pragma once +// API exported from napa.dll #ifdef NAPA_EXPORTS #define NAPA_API __declspec(dllexport) #else #define NAPA_API __declspec(dllimport) #endif // NAPA_EXPORTS +// API exported from napa-binding. (both napa.dll and napa-binding.node) +#ifdef NAPA_BINDING_EXPORTS +#define NAPA_BINDING_API __declspec(dllexport) +#else +#define NAPA_BINDING_API __declspec(dllimport) +#endif // NAPA_BINDING_EXPORTS + #ifdef __cplusplus #define EXTERN_C extern "C" #else diff --git a/inc/napa/module/binding.h b/inc/napa/module/binding.h index 19aa21d..fd8f87d 100644 --- a/inc/napa/module/binding.h +++ b/inc/napa/module/binding.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include #include @@ -14,14 +14,7 @@ namespace binding { /// Get 'module' object of napa binding, which is napa-binding.node in Node.JS isolate or napa-binding from core-modules in Napa isolate. /// 'module' object for napa binding (napajs/bin/napa-binding.node or napa.dll) - inline v8::Local GetModule() { - auto persistentModule = - reinterpret_cast*>( - WorkerContext::Get(WorkerContextItem::NAPA_BINDING)); - - NAPA_ASSERT(persistentModule != nullptr, "\"napajs\" must be required before napa::module::binding::GetModule() can be called from C++."); - return v8::Local::New(v8::Isolate::GetCurrent(), *persistentModule); - } + NAPA_BINDING_API v8::Local GetModule(); /// Get 'module.exports' from napa binding. 
/// 'module.exports' object for napa binding (napajs/bin/napa-binding.node or napa.dll) diff --git a/inc/napa/module/module-internal.h b/inc/napa/module/module-internal.h index dd0b34a..5a205d1 100644 --- a/inc/napa/module/module-internal.h +++ b/inc/napa/module/module-internal.h @@ -1,7 +1,5 @@ #pragma once -#include "worker-context.h" - #include #include @@ -91,43 +89,12 @@ namespace module { /// It sets the persistent constructor at the current V8 isolate. /// Unique constructor name. It's recommended to use the same name as module. /// V8 persistent function to constructor V8 object. - inline void SetPersistentConstructor(const char* name, - v8::Local constructor) { - auto isolate = v8::Isolate::GetCurrent(); - v8::HandleScope scope(isolate); - - auto constructorInfo = - static_cast(WorkerContext::Get(WorkerContextItem::CONSTRUCTOR)); - if (constructorInfo == nullptr) { - constructorInfo = new ConstructorInfo(); - WorkerContext::Set(WorkerContextItem::CONSTRUCTOR, constructorInfo); - } - - constructorInfo->constructorMap.emplace(std::piecewise_construct, - std::forward_as_tuple(name), - std::forward_as_tuple(isolate, constructor)); - } + NAPA_API void SetPersistentConstructor(const char* name, v8::Local constructor); /// It gets the given persistent constructor from the current V8 isolate. /// Unique constructor name given at SetPersistentConstructor() call. /// V8 local function object. 
- inline v8::Local GetPersistentConstructor(const char* name) { - auto isolate = v8::Isolate::GetCurrent(); - v8::EscapableHandleScope scope(isolate); - - auto constructorInfo = - static_cast(WorkerContext::Get(WorkerContextItem::CONSTRUCTOR)); - if (constructorInfo == nullptr) { - return scope.Escape(v8::Local()); - } - - auto iter = constructorInfo->constructorMap.find(name); - if (iter != constructorInfo->constructorMap.end()) { - auto constructor = v8::Local::New(isolate, iter->second); - return scope.Escape(constructor); - } else { - return scope.Escape(v8::Local()); - } - } + NAPA_API v8::Local GetPersistentConstructor(const char* name); + } // End of namespace module. } // End of namespace napa. \ No newline at end of file diff --git a/inc/napa/module/shareable-wrap.h b/inc/napa/module/shareable-wrap.h index 2b36e71..9269267 100644 --- a/inc/napa/module/shareable-wrap.h +++ b/inc/napa/module/shareable-wrap.h @@ -59,10 +59,16 @@ namespace module { return _object; } + /// Get reference of T, which is the type of contained native object. + template + typename std::enable_if_t::value, T&> GetRef() { + return *std::static_pointer_cast(_object); + } + /// It creates a new instance of WrapType of shared_ptr, WrapType is a sub-class of ShareableWrap. /// shared_ptr of object. /// V8 object of type ShareableWrap. 
- template + template static v8::Local NewInstance(std::shared_ptr object) { auto instance = napa::module::NewInstance().ToLocalChecked(); Set(instance, std::move(object)); @@ -110,9 +116,9 @@ namespace module { /// It implements TransportableObject.load(payload: object, transportContext: TransportContext): void static void LoadCallback(const v8::FunctionCallbackInfo& args) { auto isolate = v8::Isolate::GetCurrent(); - auto context = isolate->GetCurrentContext(); v8::HandleScope scope(isolate); - + auto context = isolate->GetCurrentContext(); + CHECK_ARG(isolate, args.Length() == 2, "2 arguments are required for \"load\"."); CHECK_ARG(isolate, args[0]->IsObject(), "Argument \"payload\" shall be 'Object' type."); CHECK_ARG(isolate, args[1]->IsObject(), "Argument \"transportContext\" shall be 'TransportContextWrap' type."); @@ -134,8 +140,8 @@ namespace module { /// It implements TransportableObject.save(payload: object, transportContext: TransportContext): void static void SaveCallback(const v8::FunctionCallbackInfo& args) { auto isolate = v8::Isolate::GetCurrent(); - auto context = isolate->GetCurrentContext(); v8::HandleScope scope(isolate); + auto context = isolate->GetCurrentContext(); CHECK_ARG(isolate, args.Length() == 2, "2 arguments are required for \"save\"."); CHECK_ARG(isolate, args[0]->IsObject(), "Argument \"payload\" should be 'Object' type."); diff --git a/inc/napa/response-codes.inc b/inc/napa/response-codes.inc deleted file mode 100644 index 42d3efe..0000000 --- a/inc/napa/response-codes.inc +++ /dev/null @@ -1,29 +0,0 @@ -/// -/// Napa response codes definition file! -/// -/// Guidelines: -/// 1. Use NAPA_RESPONSE_CODE_DEF to define new codes. -/// 2. Add new codes at the end of the list. -/// 3. 
Make sure to add a comma at the end of the previous response code defintion -/// -/// |----------------------- symbol name ---------- string representation --| -/// NAPA_RESPONSE_CODE_DEF( EXAMPLE_NAME1, "Example string message1"), -/// NAPA_RESPONSE_CODE_DEF( EXAMPLE_NAME2, "Example string message2") -/// Always add news codes -/// - -#ifndef NAPA_RESPONSE_CODE_DEF -#error NAPA_RESPONSE_CODE_DEF must be defined before including response_code.inc -#endif - -NAPA_RESPONSE_CODE_DEF( SUCCESS, "Success"), -NAPA_RESPONSE_CODE_DEF( UNDEFINED, "Undefined"), -NAPA_RESPONSE_CODE_DEF( INTERNAL_ERROR, "Napa internal error"), -NAPA_RESPONSE_CODE_DEF( TIMEOUT, "The request timed out"), -NAPA_RESPONSE_CODE_DEF( ZONE_INIT_ERROR, "Failed to initialize zone"), -NAPA_RESPONSE_CODE_DEF( BROADCAST_SCRIPT_ERROR, "Failed to broadcast JavaScript code in zone"), -NAPA_RESPONSE_CODE_DEF( EXECUTE_FUNC_ERROR, "Failed to execute the JavaScript function"), -NAPA_RESPONSE_CODE_DEF( SETTINGS_PARSER_ERROR, "Failed to parse settings"), -NAPA_RESPONSE_CODE_DEF( PROVIDERS_INIT_ERROR, "Failed to initialize providers"), -NAPA_RESPONSE_CODE_DEF( V8_INIT_ERROR, "Failed to initialize V8"), -NAPA_RESPONSE_CODE_DEF( GLOBAL_VALUE_ERROR, "Failed to set global value") diff --git a/inc/napa/result-codes.inc b/inc/napa/result-codes.inc new file mode 100644 index 0000000..5ddb133 --- /dev/null +++ b/inc/napa/result-codes.inc @@ -0,0 +1,29 @@ +/// +/// Napa result codes definition file! +/// +/// Guidelines: +/// 1. Use NAPA_RESULT_CODE_DEF to define new codes. +/// 2. Add new codes at the end of the list. +/// 3. 
Make sure to add a comma at the end of the previous result code definition +/// +/// |----------------------- symbol name ---------- string representation --| +/// NAPA_RESULT_CODE_DEF( EXAMPLE_NAME1, "Example string message1"), +/// NAPA_RESULT_CODE_DEF( EXAMPLE_NAME2, "Example string message2") +/// Always add new codes +/// + +#ifndef NAPA_RESULT_CODE_DEF +#error NAPA_RESULT_CODE_DEF must be defined before including result-codes.inc +#endif + +NAPA_RESULT_CODE_DEF( SUCCESS, "Success"), +NAPA_RESULT_CODE_DEF( UNDEFINED, "Undefined"), +NAPA_RESULT_CODE_DEF( INTERNAL_ERROR, "Napa internal error"), +NAPA_RESULT_CODE_DEF( TIMEOUT, "The request timed out"), +NAPA_RESULT_CODE_DEF( ZONE_INIT_ERROR, "Failed to initialize zone"), +NAPA_RESULT_CODE_DEF( BROADCAST_SCRIPT_ERROR, "Failed to broadcast JavaScript code in zone"), +NAPA_RESULT_CODE_DEF( EXECUTE_FUNC_ERROR, "Failed to execute the JavaScript function"), +NAPA_RESULT_CODE_DEF( SETTINGS_PARSER_ERROR, "Failed to parse settings"), +NAPA_RESULT_CODE_DEF( PROVIDERS_INIT_ERROR, "Failed to initialize providers"), +NAPA_RESULT_CODE_DEF( V8_INIT_ERROR, "Failed to initialize V8"), +NAPA_RESULT_CODE_DEF( GLOBAL_VALUE_ERROR, "Failed to set global value") diff --git a/inc/napa/transport/transportable.h b/inc/napa/transport/transportable.h index 1343025..3066d8b 100644 --- a/inc/napa/transport/transportable.h +++ b/inc/napa/transport/transportable.h @@ -42,8 +42,8 @@ namespace transport { /// It implements Transportable.marshall(context: TransportContext): object static void MarshallCallback(const v8::FunctionCallbackInfo& args) { auto isolate = v8::Isolate::GetCurrent(); - auto context = isolate->GetCurrentContext(); v8::HandleScope scope(isolate); + auto context = isolate->GetCurrentContext(); CHECK_ARG(isolate, args.Length() == 1, "1 argument is required for calling 'marshall'."); CHECK_ARG(isolate, args[0]->IsObject(), "The 1st argument of 'marshall' shall be object of TransportContext."); @@ -86,8 +86,8 @@ namespace
transport { /// It implements Transportable.unmarshall(payload: object, context: TransportContext): void static void UnmarshallCallback(const v8::FunctionCallbackInfo& args) { auto isolate = v8::Isolate::GetCurrent(); - auto context = isolate->GetCurrentContext(); v8::HandleScope scope(isolate); + auto context = isolate->GetCurrentContext(); CHECK_ARG(isolate, args.Length() == 2, "Two arguments are required for calling 'unmarshall'. "); diff --git a/inc/napa/types.h b/inc/napa/types.h index 1267adf..c74e5e7 100644 --- a/inc/napa/types.h +++ b/inc/napa/types.h @@ -25,21 +25,21 @@ namespace napa { #endif // __cplusplus -/// Represents response code in napa zone apis. -#define NAPA_RESPONSE_CODE_DEF(symbol, ...) NAPA_RESPONSE_##symbol +/// Represents result code in napa zone apis. +#define NAPA_RESULT_CODE_DEF(symbol, ...) NAPA_RESULT_##symbol typedef enum { -#include "napa/response-codes.inc" +#include "napa/result-codes.inc" -} napa_response_code; +} napa_result_code; -#undef NAPA_RESPONSE_CODE_DEF +#undef NAPA_RESULT_CODE_DEF #ifdef __cplusplus namespace napa { - typedef napa_response_code ResponseCode; + typedef napa_result_code ResultCode; } #endif // __cplusplus @@ -66,7 +66,7 @@ namespace napa { #endif // __cplusplus -/// Represents options for an execution request. +/// Represents options for calling a function. typedef struct { /// Timeout in milliseconds - Use 0 for inifinite. @@ -74,17 +74,17 @@ typedef struct { /// Arguments transport option. Default is AUTO. napa_transport_option transport; -} napa_zone_execute_options; +} napa_zone_call_options; #ifdef __cplusplus namespace napa { - typedef napa_zone_execute_options ExecuteOptions; + typedef napa_zone_call_options CallOptions; } #endif // __cplusplus -/// Represents an execution request for a zone. +/// Represents a function to run within a zone, with binded aruments . typedef struct { /// The module that exports the function to execute. 
@@ -100,11 +100,11 @@ typedef struct { size_t arguments_count; /// Options. - napa_zone_execute_options options; + napa_zone_call_options options; /// A context used for transporting handles across zones/workers. void* transport_context; -} napa_zone_execute_request; +} napa_zone_function_spec; #ifdef __cplusplus @@ -114,8 +114,8 @@ typedef struct { #include namespace napa { - /// Represents an execution request. - struct ExecuteRequest { + /// Represents a function to call with its arguments. + struct FunctionSpec { /// The module that exports the function to execute. StringRef module = EMPTY_NAPA_STRING_REF; @@ -127,7 +127,7 @@ namespace napa { std::vector arguments; /// Execute options. - ExecuteOptions options = { 0, AUTO }; + CallOptions options = { 0, AUTO }; /// Used for transporting shared_ptr and unique_ptr across zones/workers. mutable std::unique_ptr transportContext; @@ -136,11 +136,11 @@ namespace napa { #endif // __cplusplus -/// Represents a response from executing in a zone. +/// Represents a result from executing in a zone. typedef struct { - /// A response code. - napa_response_code code; + /// A result code. + napa_result_code code; /// The error message in case of an error. napa_string_ref error_message; @@ -150,16 +150,16 @@ typedef struct { /// A context used for transporting handles across zones/workers. void* transport_context; -} napa_zone_execute_response; +} napa_zone_result; #ifdef __cplusplus namespace napa { - /// Represents an execution response. - struct ExecuteResponse { + /// Represents a function call result. + struct Result { - /// A response code. - ResponseCode code; + /// A result code. + ResultCode code; /// The error message in case of an error. std::string errorMessage; @@ -175,16 +175,16 @@ namespace napa { #endif // __cplusplus /// Callback signatures. 
-typedef void(*napa_zone_broadcast_callback)(napa_response_code code, void* context); -typedef void(*napa_zone_execute_callback)(napa_zone_execute_response response, void* context); +typedef void(*napa_zone_broadcast_callback)(napa_result_code code, void* context); +typedef void(*napa_zone_execute_callback)(napa_zone_result result, void* context); #ifdef __cplusplus #include namespace napa { - typedef std::function BroadcastCallback; - typedef std::function ExecuteCallback; + typedef std::function BroadcastCallback; + typedef std::function ExecuteCallback; } #endif // __cplusplus diff --git a/inc/napa/v8-helpers.h b/inc/napa/v8-helpers.h index d86f8cd..53ff164 100644 --- a/inc/napa/v8-helpers.h +++ b/inc/napa/v8-helpers.h @@ -8,4 +8,5 @@ #include #include #include -#include \ No newline at end of file +#include +#include \ No newline at end of file diff --git a/inc/napa/v8-helpers/console.h b/inc/napa/v8-helpers/console.h index 5b5daef..9aa70af 100644 --- a/inc/napa/v8-helpers/console.h +++ b/inc/napa/v8-helpers/console.h @@ -12,8 +12,8 @@ namespace v8_helpers { /// Actual arguments. 
static void Log(int argc, v8::Local argv[]) { auto isolate = v8::Isolate::GetCurrent(); - auto context = isolate->GetCurrentContext(); v8::HandleScope scope(isolate); + auto context = isolate->GetCurrentContext(); auto console = v8::Local::Cast(context->Global()->Get( v8_helpers::MakeV8String(isolate, "console"))); diff --git a/inc/napa/v8-helpers/function.h b/inc/napa/v8-helpers/function.h index f330152..af2cc01 100644 --- a/inc/napa/v8-helpers/function.h +++ b/inc/napa/v8-helpers/function.h @@ -19,9 +19,9 @@ namespace v8_helpers { v8::Local argv[] = nullptr) { auto isolate = v8::Isolate::GetCurrent(); - auto context = isolate->GetCurrentContext(); v8::EscapableHandleScope scope(isolate); - + + auto context = isolate->GetCurrentContext(); auto function = context->Global()->Get(v8_helpers::MakeV8String(isolate, functionName)); JS_ENSURE_WITH_RETURN( @@ -49,9 +49,9 @@ namespace v8_helpers { v8::Local argv[] = nullptr) { auto isolate = v8::Isolate::GetCurrent(); - auto context = isolate->GetCurrentContext(); v8::EscapableHandleScope scope(isolate); - + + auto context = isolate->GetCurrentContext(); auto function = object->Get(v8_helpers::MakeV8String(isolate, functionName)); JS_ENSURE_WITH_RETURN( diff --git a/inc/napa/v8-helpers/json.h b/inc/napa/v8-helpers/json.h index fc19323..990659d 100644 --- a/inc/napa/v8-helpers/json.h +++ b/inc/napa/v8-helpers/json.h @@ -10,8 +10,8 @@ namespace v8_helpers { /// JSON.stringify /// TODO @asib: Use v8::JSON::Stringify when available inline v8::MaybeLocal Stringify(v8::Isolate* isolate, const v8::Local& value) { - auto context = isolate->GetCurrentContext(); v8::EscapableHandleScope scope(isolate); + auto context = isolate->GetCurrentContext(); auto json = context->Global() ->Get(context, v8::String::NewFromUtf8(isolate, "JSON")) diff --git a/inc/napa/v8-helpers/time.h b/inc/napa/v8-helpers/time.h new file mode 100644 index 0000000..a31743c --- /dev/null +++ b/inc/napa/v8-helpers/time.h @@ -0,0 +1,46 @@ +#pragma once + +#include 
+#include + +namespace napa { +namespace v8_helpers { + + static const uint32_t NANOS_PER_SECOND = 1000000000; + + /// Make a v8 array from high-resolution time. + inline v8::Local HrtimeToV8Uint32Array(v8::Isolate* isolate, uint64_t time) { + v8::EscapableHandleScope scope(isolate); + auto context = isolate->GetCurrentContext(); + + v8::Local res = v8::Array::New(isolate, 2); + (void)res->CreateDataProperty( + context, + 0, + v8::Integer::NewFromUnsigned(isolate, static_cast(time / NANOS_PER_SECOND))); + + (void)res->CreateDataProperty( + context, + 1, + v8::Integer::NewFromUnsigned(isolate, static_cast(time % NANOS_PER_SECOND))); + + return scope.Escape(res); + } + + /// Convert a 2-element v8 array to high-resolution time in nano-seconds. + inline std::pair V8Uint32ArrayToHrtime(v8::Isolate* isolate, v8::Local value) { + v8::EscapableHandleScope scope(isolate); + auto context = isolate->GetCurrentContext(); + + if (value.IsEmpty() || !value->IsArray()) { + return std::make_pair(0, false); + } + + auto array = v8::Local::Cast(value); + if (array->Length() != 2) { + return std::make_pair(0, false); + } + return std::make_pair(static_cast(array->Get(0)->Uint32Value()) * NANOS_PER_SECOND + array->Get(1)->Uint32Value(), true); + } +} +} \ No newline at end of file diff --git a/inc/napa/module/napa-async-runner.h b/inc/napa/zone/napa-async-runner.h similarity index 97% rename from inc/napa/module/napa-async-runner.h rename to inc/napa/zone/napa-async-runner.h index d33d7e0..086d46f 100644 --- a/inc/napa/module/napa-async-runner.h +++ b/inc/napa/zone/napa-async-runner.h @@ -7,7 +7,7 @@ #include namespace napa { -namespace module { +namespace zone { /// Function to run asynchronously in separate thread. /// Return value will be the input to 'AsyncCompleteCallback'. 
diff --git a/inc/napa/module/node-async-runner.h b/inc/napa/zone/node-async-runner.h similarity index 99% rename from inc/napa/module/node-async-runner.h rename to inc/napa/zone/node-async-runner.h index a87e506..0fdaaf8 100644 --- a/inc/napa/module/node-async-runner.h +++ b/inc/napa/zone/node-async-runner.h @@ -8,7 +8,7 @@ #include namespace napa { -namespace module { +namespace zone { /// Function to run asynchronously in separate thread. /// Return value will be the input to 'AsyncCompleteCallback'. diff --git a/lib/index.ts b/lib/index.ts index e7f437e..d4af662 100644 --- a/lib/index.ts +++ b/lib/index.ts @@ -6,4 +6,8 @@ import * as store from './store'; import * as transport from './transport'; import * as zone from './zone'; -export { log, memory, metric, runtime, store, transport, zone }; \ No newline at end of file +export { log, memory, metric, runtime, store, transport, zone }; + +// Add execute proxy to global context. +import { call } from './zone/function-call'; +((global))["__napa_zone_call__"] = call; \ No newline at end of file diff --git a/lib/zone.ts b/lib/zone.ts index 0231125..0ed6d7e 100644 --- a/lib/zone.ts +++ b/lib/zone.ts @@ -1,6 +1,5 @@ -import * as napa from './zone/napa-zone'; -import * as node from './zone/node-zone'; import * as zone from './zone/zone'; +import * as impl from './zone/zone-impl'; import * as platform from './runtime/platform'; @@ -14,31 +13,32 @@ declare var __in_napa: boolean; /// The settings of the new zone. export function create(id: string, settings: zone.ZoneSettings = zone.DEFAULT_SETTINGS) : zone.Zone { platform.initialize(); - return new napa.NapaZone(binding.createZone(id, settings)); + return new impl.ZoneImpl(binding.createZone(id, settings)); } /// Returns the zone associated with the provided id. 
export function get(id: string) : zone.Zone { platform.initialize(); - if (id === "node") { - return new node.NodeZone(); - } - - return new napa.NapaZone(binding.getZone(id)); + return new impl.ZoneImpl(binding.getZone(id)); } /// TODO: add function getOrCreate(id: string, settings: zone.ZoneSettings): Zone. -/// Define a getter property 'current' to retrieves the current zone. +/// Define a getter property 'current' to retrieve the current zone. export declare let current: zone.Zone; Object.defineProperty(exports, "current", { get: function () : zone.Zone { platform.initialize(); - if (typeof __in_napa !== 'undefined') { - return new napa.NapaZone(binding.getCurrentZone()); - } + return new impl.ZoneImpl(binding.getCurrentZone()); + } +}); - return new node.NodeZone(); +/// Define a getter property 'node' to retrieve node zone. +export declare let node: zone.Zone; +Object.defineProperty(exports, "node", { + get: function () : zone.Zone { + platform.initialize(); + return new impl.ZoneImpl(binding.getZone('node')); } }); diff --git a/lib/zone/function-call.ts b/lib/zone/function-call.ts new file mode 100644 index 0000000..6e47f47 --- /dev/null +++ b/lib/zone/function-call.ts @@ -0,0 +1,145 @@ +import * as transport from '../transport'; +import { CallOptions } from './zone'; + +/// Rejection type +/// TODO: we need a better mapping between error code and result code. +export enum RejectionType { + TIMEOUT = 3, + APP_ERROR = 6 +} + +/// Interface for Call context. +export interface CallContext { + + /// Resolve task with marshalled result. + resolve(result: string): void; + + /// Reject task with reason. + reject(reason: any): void; + + /// Reject task with a rejection type and reason. + reject(type: RejectionType, reason: any): void; + + /// Returns whether task has finished (either completed or cancelled). + readonly finished: boolean; + + /// Elapse in nano-seconds since task started. + readonly elapse: [number, number]; + + /// Module name to select function. 
+ readonly module: string; + + /// Function name to execute. + readonly function: string; + + /// Marshalled arguments. + readonly args: string[]; + + /// Transport context. + readonly transportContext: transport.TransportContext; + + /// Execute options. + readonly options: CallOptions; +} + +/// +/// Proxy function for __napa_zone_call__. +/// 1) calling a global function: +/// module name: undefined or empty string +/// function name: global function name +/// 2) calling an anonymous function at client side: +/// module name: literal '__function' +/// function name: hash returned from transport.saveFunction(). +/// 3) calling a function from a module: +/// module name: target module path. +/// function name: target function name from the module. +/// +/// function name can have multiple levels like 'foo.bar'. +/// +export function call(context: CallContext): void { + // Cache the context since every call to context.transportContext will create a new wrap upon inner TransportContext pointer. + let transportContext = context.transportContext; + let result: any = undefined; + try { + result = callFunction( + context.module, + context.function, + context.args, + transportContext, + context.options); + } + catch(error) { + context.reject(error); + return; + } + + if (result != null + && typeof result === 'object' + && typeof result['then'] === 'function') { + // Delay completion if return value is a promise. + result.then((value: any) => { + finishCall(context, transportContext, value); + }) + .catch((error: any) => { + context.reject(error); + }); + return; + } + finishCall(context, transportContext, result); +} + +/// Call a function. 
+function callFunction( + moduleName: string, + functionName: string, + marshalledArgs: string[], + transportContext: transport.TransportContext, + options: CallOptions): any { + + let module: any = null; + if (moduleName == null || moduleName.length === 0) { + module = global; + } else if (moduleName !== '__function') { + module = require(moduleName); + } + + let func = null; + if (module != null) { + func = module; + if (functionName != null && functionName.length != 0) { + var path = functionName.split('.'); + for (let item of path) { + func = func[item]; + if (func === undefined) { + throw new Error("Cannot find function '" + functionName + "' in module '" + moduleName + "'"); + } + } + } + if (typeof func !== 'function') { + throw new Error("'" + functionName + "' in module '" + moduleName + "' is not a function"); + } + } else { + // Anonymous function. + func = transport.loadFunction(functionName); + } + + let args = marshalledArgs.map((arg) => { return transport.unmarshall(arg, transportContext); }); + return func.apply(this, args); +} + +/// Finish call with result. +function finishCall( + context: CallContext, + transportContext: transport.TransportContext, + result: any) { + + let payload: string = undefined; + try { + payload = transport.marshall(result, transportContext); + } + catch (error) { + context.reject(error); + return; + } + context.resolve(payload); +} \ No newline at end of file diff --git a/lib/zone/node-zone.ts b/lib/zone/node-zone.ts deleted file mode 100644 index 612e42c..0000000 --- a/lib/zone/node-zone.ts +++ /dev/null @@ -1,32 +0,0 @@ -import * as zone from "./zone"; - -/// A virtual Zone consists only 1 worker, which is Node event loop. 
-export class NodeZone implements zone.Zone { - - public get id(): string { - return "node"; - } - - public toJSON(): any { - return { id: this.id, type: "node" }; - } - - public broadcast(arg1: any, arg2?: any) : Promise { - // TODO @asib: add implementation - return undefined; - } - - public broadcastSync(arg1: any, arg2?: any) : void { - // TODO @asib: add implementation - } - - public execute(arg1: any, arg2: any, arg3?: any, arg4?: any) : Promise { - // TODO @asib: add implementation - return undefined; - } - - public executeSync(arg1: any, arg2: any, arg3?: any, arg4?: any) : zone.ExecuteResult { - // TODO @asib: add implementation - return undefined; - } -} diff --git a/lib/zone/napa-zone.ts b/lib/zone/zone-impl.ts similarity index 68% rename from lib/zone/napa-zone.ts rename to lib/zone/zone-impl.ts index 92b6cb4..c41258c 100644 --- a/lib/zone/napa-zone.ts +++ b/lib/zone/zone-impl.ts @@ -1,17 +1,15 @@ import * as zone from './zone'; import * as transport from '../transport'; -declare var __in_napa: boolean; - -interface ExecuteRequest { +interface FunctionSpec { module: string; function: string; arguments: any[]; - options: zone.ExecuteOptions; + options: zone.CallOptions; transportContext: transport.TransportContext; } -export class NapaExecuteResult implements zone.ExecuteResult{ +class ExecuteResult implements zone.Result{ constructor(payload: string, transportContext: transport.TransportContext) { this._payload = payload; @@ -40,7 +38,7 @@ export class NapaExecuteResult implements zone.ExecuteResult{ }; /// Zone consists of Napa isolates. -export class NapaZone implements zone.Zone { +export class ZoneImpl implements zone.Zone { private _nativeZone: any; constructor(nativeZone: any) { @@ -52,18 +50,18 @@ export class NapaZone implements zone.Zone { } public toJSON(): any { - return { id: this.id, type: "napa" }; + return { id: this.id, type: this.id === 'node'? 
'node': 'napa' }; } public broadcast(arg1: any, arg2?: any) : Promise { let source: string = this.createBroadcastSource(arg1, arg2); return new Promise((resolve, reject) => { - this._nativeZone.broadcast(source, (responseCode: number) => { - if (responseCode === 0) { + this._nativeZone.broadcast(source, (resultCode: number) => { + if (resultCode === 0) { resolve(); } else { - reject("broadcast failed with response code: " + responseCode); + reject("broadcast failed with result code: " + resultCode); } }); }); @@ -72,36 +70,36 @@ export class NapaZone implements zone.Zone { public broadcastSync(arg1: any, arg2?: any) : void { let source: string = this.createBroadcastSource(arg1, arg2); - let responseCode: number = this._nativeZone.broadcastSync(source); - if (responseCode !== 0) { - throw new Error("broadcast failed with response code: " + responseCode); + let resultCode: number = this._nativeZone.broadcastSync(source); + if (resultCode !== 0) { + throw new Error("broadcast failed with result code: " + resultCode); } } - public execute(arg1: any, arg2: any, arg3?: any, arg4?: any) : Promise { - let request : ExecuteRequest = this.createExecuteRequest(arg1, arg2, arg3, arg4); + public execute(arg1: any, arg2: any, arg3?: any, arg4?: any) : Promise { + let spec : FunctionSpec = this.createExecuteRequest(arg1, arg2, arg3, arg4); - return new Promise((resolve, reject) => { - this._nativeZone.execute(request, (response: any) => { - if (response.code === 0) { - resolve(new NapaExecuteResult( - response.returnValue, - transport.createTransportContext(response.contextHandle))); + return new Promise((resolve, reject) => { + this._nativeZone.execute(spec, (result: any) => { + if (result.code === 0) { + resolve(new ExecuteResult( + result.returnValue, + transport.createTransportContext(result.contextHandle))); } else { - reject(response.errorMessage); + reject(result.errorMessage); } }); }); } - public executeSync(arg1: any, arg2: any, arg3?: any, arg4?: any) : 
zone.ExecuteResult { - let request : ExecuteRequest = this.createExecuteRequest(arg1, arg2, arg3, arg4); + public executeSync(arg1: any, arg2: any, arg3?: any, arg4?: any) : zone.Result { + let spec : FunctionSpec = this.createExecuteRequest(arg1, arg2, arg3, arg4); - let response = this._nativeZone.executeSync(request); - if (response.code === 0) { - return new NapaExecuteResult(response.returnValue, transport.createTransportContext(response.contextHandle)); + let result = this._nativeZone.executeSync(spec); + if (result.code === 0) { + return new ExecuteResult(result.returnValue, transport.createTransportContext(result.contextHandle)); } else { - throw new Error(response.errorMessage); + throw new Error(result.errorMessage); } } @@ -135,12 +133,12 @@ export class NapaZone implements zone.Zone { return source; } - private createExecuteRequest(arg1: any, arg2: any, arg3?: any, arg4?: any) : ExecuteRequest { + private createExecuteRequest(arg1: any, arg2: any, arg3?: any, arg4?: any) : FunctionSpec { let moduleName: string = null; let functionName: string = null; let args: any[] = null; - let options: zone.ExecuteOptions = undefined; + let options: zone.CallOptions = undefined; if (typeof arg1 === 'function') { moduleName = "__function"; @@ -160,7 +158,7 @@ export class NapaZone implements zone.Zone { module: moduleName, function: functionName, arguments: (>args).map(arg => { return transport.marshall(arg, transportContext); }), - options: options != null? options: zone.DEFAULT_EXECUTE_OPTIONS, + options: options != null? 
options: zone.DEFAULT_CALL_OPTIONS, transportContext: transportContext }; } diff --git a/lib/zone/zone-main.js b/lib/zone/zone-main.js deleted file mode 100644 index 1edada6..0000000 --- a/lib/zone/zone-main.js +++ /dev/null @@ -1,34 +0,0 @@ -var transport = require('../lib/transport'); - -function __zone_execute__(moduleName, functionName, args, transportContextHandle, options) { - var module = null; - if (moduleName == null || moduleName.length === 0) { - module = this; - } else if (moduleName !== '__function') { - module = require(moduleName); - } - - var func = null; - if (module != null) { - func = module; - if (functionName != null && functionName.length != 0) { - var path = functionName.split('.'); - for (item of path) { - func = func[item]; - if (func === undefined) { - throw new Error("Cannot find function '" + functionName + "' in module '" + moduleName + "'"); - } - } - } - if (typeof func !== 'function') { - throw new Error("'" + functionName + "' in module '" + moduleName + "' is not a function"); - } - } else { - // Anonymous function. - func = transport.loadFunction(functionName); - } - - var transportContext = transport.createTransportContext(transportContextHandle); - var args = args.map((arg) => { return transport.unmarshall(arg, transportContext); }); - return transport.marshall(func.apply(this, args), transportContext); -} \ No newline at end of file diff --git a/lib/zone/zone.ts b/lib/zone/zone.ts index 373e2bb..d7155b4 100644 --- a/lib/zone/zone.ts +++ b/lib/zone/zone.ts @@ -27,8 +27,8 @@ export enum TransportOption { MANUAL, } -/// Represent the options of an execute call. -export interface ExecuteOptions { +/// Represent the options of calling a function. +export interface CallOptions { /// Timeout in milliseconds. By default set to 0 if timeout is not needed. timeout?: number, @@ -38,7 +38,7 @@ export interface ExecuteOptions { } /// Default execution options. 
-export let DEFAULT_EXECUTE_OPTIONS: ExecuteOptions = { +export let DEFAULT_CALL_OPTIONS: CallOptions = { /// No timeout. timeout: 0, @@ -48,7 +48,7 @@ export let DEFAULT_EXECUTE_OPTIONS: ExecuteOptions = { } /// Represent the result of an execute call. -export interface ExecuteResult { +export interface Result { /// The unmarshalled result value. readonly value : any; @@ -81,15 +81,15 @@ export interface Zone { /// The module name that contains the function to execute. /// The function name to execute. /// The arguments that will pass to the function. - /// Execute options, defaults to DEFAULT_EXECUTE_OPTIONS. - execute(module: string, func: string, args: any[], options?: ExecuteOptions) : Promise; - executeSync(module: string, func: string, args: any[], options?: ExecuteOptions) : ExecuteResult; + /// Call options, defaults to DEFAULT_CALL_OPTIONS. + execute(module: string, func: string, args: any[], options?: CallOptions) : Promise; + executeSync(module: string, func: string, args: any[], options?: CallOptions) : Result; /// Executes the function on one of the zone workers. /// The JS function to execute. /// The arguments that will pass to the function. - /// Execute options, defaults to DEFAULT_EXECUTE_OPTIONS. - execute(func: Function, args: any[], timeout?: number) : Promise; - executeSync(func: Function, args: any[], timeout?: number) : ExecuteResult; + /// Call options, defaults to DEFAULT_CALL_OPTIONS. 
+ execute(func: Function, args: any[], timeout?: number) : Promise; + executeSync(func: Function, args: any[], timeout?: number) : Result; } diff --git a/node/CMakeLists.txt b/node/CMakeLists.txt index d314631..85623c5 100644 --- a/node/CMakeLists.txt +++ b/node/CMakeLists.txt @@ -1,5 +1,12 @@ # Files to compile -file(GLOB SOURCE_FILES "addon.cpp" "../src/module/core-modules/napa/*.cpp") +file(GLOB SOURCE_FILES + "addon.cpp" + "node-zone-delegates.cpp" + "${PROJECT_SOURCE_DIR}/src/zone/call-context.cpp" + "${PROJECT_SOURCE_DIR}/src/zone/call-task.cpp" + "${PROJECT_SOURCE_DIR}/src/zone/eval-task.cpp" + "${PROJECT_SOURCE_DIR}/src/zone/terminable-task.cpp" + "${PROJECT_SOURCE_DIR}/src/module/core-modules/napa/*.cpp") # The addon name set(TARGET_NAME "${PROJECT_NAME}-binding") @@ -13,10 +20,11 @@ set_target_properties(${TARGET_NAME} PROPERTIES PREFIX "" SUFFIX ".node") target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_JS_INC} ${Boost_INCLUDE_DIRS} + ${PROJECT_SOURCE_DIR}/src ${PROJECT_SOURCE_DIR}/src/module/core-modules/napa) # Compiler definitions -target_compile_definitions(${TARGET_NAME} PRIVATE BUILDING_NODE_EXTENSION) +target_compile_definitions(${TARGET_NAME} PRIVATE BUILDING_NODE_EXTENSION NAPA_BINDING_EXPORTS) # Link libraries target_link_libraries(${TARGET_NAME} PRIVATE diff --git a/node/addon.cpp b/node/addon.cpp index 32fdd7c..8b8c852 100644 --- a/node/addon.cpp +++ b/node/addon.cpp @@ -1,8 +1,11 @@ #include "napa-binding.h" +#include "node-zone-delegates.h" #include #include +#include + void Initialize(const v8::FunctionCallbackInfo& args) { auto isolate = v8::Isolate::GetCurrent(); v8::HandleScope scope(isolate); @@ -34,6 +37,8 @@ void Shutdown(const v8::FunctionCallbackInfo&) { void InitAll(v8::Local exports, v8::Local module) { napa::module::binding::Init(exports, module); + napa::zone::NodeZone::Init(napa::node_zone::Broadcast, napa::node_zone::Execute); + // Only node addon can initialize/shutdown napa. 
NAPA_SET_METHOD(exports, "initialize", Initialize); NAPA_SET_METHOD(exports, "shutdown", Shutdown); diff --git a/node/node-zone-delegates.cpp b/node/node-zone-delegates.cpp new file mode 100644 index 0000000..a787630 --- /dev/null +++ b/node/node-zone-delegates.cpp @@ -0,0 +1,52 @@ +#include "node-zone-delegates.h" + +#include +#include + +#include + +struct AsyncContext { + /// libuv request. + uv_async_t work; + + /// Callback that will be running in Node event loop. + std::function callback; +}; + +/// Run an async work item in Node. +void Run(uv_async_t* work) { + auto context = static_cast(work->data); + + context->callback(); + + uv_close(reinterpret_cast(work), [](auto work) { + auto context = static_cast(work->data); + delete context; + }); +} + +/// Schedule a function in Node event loop. +void ScheduleInNode(std::function callback) { + auto context = new AsyncContext(); + context->work.data = context; + context->callback = std::move(callback); + + uv_async_init(uv_default_loop(), &context->work, Run); + uv_async_send(&context->work); +} + +void napa::node_zone::Broadcast(const std::string& source, napa::BroadcastCallback callback) { + std::string sourceCopy = source; + ScheduleInNode([sourceCopy = std::move(sourceCopy), callback = std::move(callback)]() { + napa::zone::EvalTask task(std::move(sourceCopy), "", std::move(callback)); + task.Execute(); + }); +} + +void napa::node_zone::Execute(const napa::FunctionSpec& spec, napa::ExecuteCallback callback) { + auto requestContext = std::make_shared(spec, callback); + ScheduleInNode([requestContext = std::move(requestContext)]() { + napa::zone::CallTask task(std::move(requestContext)); + task.Execute(); + }); +} diff --git a/node/node-zone-delegates.h b/node/node-zone-delegates.h new file mode 100644 index 0000000..0c928e6 --- /dev/null +++ b/node/node-zone-delegates.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +namespace napa { +namespace node_zone { + + /// Broadcast to Node zone. 
+ void Broadcast(const std::string& source, napa::BroadcastCallback callback); + + /// Execute in Node zone. + void Execute(const napa::FunctionSpec& spec, napa::ExecuteCallback callback); +} +} diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index c06936d..8209a9f 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -17,7 +17,7 @@ target_include_directories(${TARGET_NAME} ${PROJECT_SOURCE_DIR}/inc) # Compiler definitions -target_compile_definitions(${TARGET_NAME} PRIVATE NAPA_EXPORTS BUILDING_NAPA_EXTENSION) +target_compile_definitions(${TARGET_NAME} PRIVATE NAPA_EXPORTS BUILDING_NAPA_EXTENSION NAPA_BINDING_EXPORTS) # Link libraries target_link_libraries(${TARGET_NAME} PRIVATE ${Boost_LIBRARIES}) diff --git a/src/api/api.cpp b/src/api/api.cpp index 3efda44..571857a 100644 --- a/src/api/api.cpp +++ b/src/api/api.cpp @@ -3,7 +3,8 @@ #include "providers/providers.h" #include "settings/settings-parser.h" #include "v8/v8-common.h" -#include "zone/zone-impl.h" +#include "zone/napa-zone.h" +#include "zone/node-zone.h" #include @@ -20,12 +21,12 @@ using namespace napa; static std::atomic _initialized(false); -static PlatformSettings _platformSettings; +static settings::PlatformSettings _platformSettings; /// a simple wrapper around Zone for managing lifetime using shared_ptr. 
struct napa_zone { std::string id; - std::shared_ptr zone; + std::shared_ptr zone; }; napa_zone_handle napa_zone_create(napa_string_ref id) { @@ -39,7 +40,13 @@ napa_zone_handle napa_zone_get(napa_string_ref id) { NAPA_ASSERT(_initialized, "Napa wasn't initialized"); auto zoneId = NAPA_STRING_REF_TO_STD_STRING(id); - auto zone = ZoneImpl::Get(zoneId); + std::shared_ptr zone; + if (zoneId == "node") { + zone = zone::NodeZone::Get(); + } else { + zone = zone::NapaZone::Get(zoneId); + } + if (!zone) { return nullptr; } @@ -50,7 +57,7 @@ napa_zone_handle napa_zone_get(napa_string_ref id) { napa_zone_handle napa_zone_get_current() { NAPA_ASSERT(_initialized, "Napa wasn't initialized"); - auto zone = reinterpret_cast(napa::module::WorkerContext::Get(napa::module::WorkerContextItem::ZONE)); + auto zone = reinterpret_cast(zone::WorkerContext::Get(zone::WorkerContextItem::ZONE)); if (zone == nullptr) { LOG_WARNING("Api", "Trying to get current zone from a thread that is not associated with a zone"); return nullptr; @@ -59,39 +66,39 @@ napa_zone_handle napa_zone_get_current() { return napa_zone_get(STD_STRING_TO_NAPA_STRING_REF(zone->GetId())); } -napa_response_code napa_zone_init(napa_zone_handle handle, napa_string_ref settings) { +napa_result_code napa_zone_init(napa_zone_handle handle, napa_string_ref settings) { NAPA_ASSERT(_initialized, "Napa wasn't initialized"); NAPA_ASSERT(handle, "Zone handle is null"); // Zone settings are based on platform settings. 
- ZoneSettings zoneSettings = _platformSettings; - if (!napa::settings_parser::ParseFromString(NAPA_STRING_REF_TO_STD_STRING(settings), zoneSettings)) { + settings::ZoneSettings zoneSettings = _platformSettings; + if (!napa::settings::ParseFromString(NAPA_STRING_REF_TO_STD_STRING(settings), zoneSettings)) { LOG_ERROR("Api", "Failed to parse zone settings: %s", settings.data); - return NAPA_RESPONSE_SETTINGS_PARSER_ERROR; + return NAPA_RESULT_SETTINGS_PARSER_ERROR; } zoneSettings.id = handle->id; // Create the actual zone. - handle->zone = ZoneImpl::Create(zoneSettings); + handle->zone = zone::NapaZone::Create(zoneSettings); if (handle->zone == nullptr) { LOG_ERROR("Api", "Failed to initialize zone: %s", handle->id.c_str()); - return NAPA_RESPONSE_ZONE_INIT_ERROR; + return NAPA_RESULT_ZONE_INIT_ERROR; } LOG_INFO("Api", "Napa zone '%s' initialized successfully", handle->id.c_str()); - return NAPA_RESPONSE_SUCCESS; + return NAPA_RESULT_SUCCESS; } -napa_response_code napa_zone_release(napa_zone_handle handle) { +napa_result_code napa_zone_release(napa_zone_handle handle) { NAPA_ASSERT(_initialized, "Napa wasn't initialized"); NAPA_ASSERT(handle, "Zone handle is null"); handle->zone = nullptr; delete handle; - return NAPA_RESPONSE_SUCCESS; + return NAPA_RESULT_SUCCESS; } napa_string_ref napa_zone_get_id(napa_zone_handle handle) { @@ -109,54 +116,54 @@ void napa_zone_broadcast(napa_zone_handle handle, NAPA_ASSERT(handle, "Zone handle is null"); NAPA_ASSERT(handle->zone, "Zone handle wasn't initialized"); - handle->zone->Broadcast(NAPA_STRING_REF_TO_STD_STRING(source), [callback, context](napa_response_code code) { + handle->zone->Broadcast(NAPA_STRING_REF_TO_STD_STRING(source), [callback, context](napa_result_code code) { callback(code, context); }); } void napa_zone_execute(napa_zone_handle handle, - napa_zone_execute_request request, + napa_zone_function_spec spec, napa_zone_execute_callback callback, void* context) { NAPA_ASSERT(_initialized, "Napa wasn't 
initialized"); NAPA_ASSERT(handle, "Zone handle is null"); NAPA_ASSERT(handle->zone, "Zone handle wasn't initialized"); - ExecuteRequest req; - req.module = request.module; - req.function = request.function; + FunctionSpec req; + req.module = spec.module; + req.function = spec.function; - req.arguments.reserve(request.arguments_count); - for (size_t i = 0; i < request.arguments_count; i++) { - req.arguments.emplace_back(request.arguments[i]); + req.arguments.reserve(spec.arguments_count); + for (size_t i = 0; i < spec.arguments_count; i++) { + req.arguments.emplace_back(spec.arguments[i]); } - req.options = request.options; + req.options = spec.options; // Assume ownership of transport context - req.transportContext.reset(reinterpret_cast(request.transport_context)); + req.transportContext.reset(reinterpret_cast(spec.transport_context)); - handle->zone->Execute(req, [callback, context](ExecuteResponse response) { - napa_zone_execute_response res; - res.code = response.code; - res.error_message = STD_STRING_TO_NAPA_STRING_REF(response.errorMessage); - res.return_value = STD_STRING_TO_NAPA_STRING_REF(response.returnValue); + handle->zone->Execute(req, [callback, context](Result result) { + napa_zone_result res; + res.code = result.code; + res.error_message = STD_STRING_TO_NAPA_STRING_REF(result.errorMessage); + res.return_value = STD_STRING_TO_NAPA_STRING_REF(result.returnValue); // Release ownership of transport context - res.transport_context = reinterpret_cast(response.transportContext.release()); + res.transport_context = reinterpret_cast(result.transportContext.release()); callback(res, context); }); } -static napa_response_code napa_initialize_common() { +static napa_result_code napa_initialize_common() { if (!napa::providers::Initialize(_platformSettings)) { - return NAPA_RESPONSE_PROVIDERS_INIT_ERROR; + return NAPA_RESULT_PROVIDERS_INIT_ERROR; } if (_platformSettings.initV8) { if (!napa::v8_common::Initialize()) { - return NAPA_RESPONSE_V8_INIT_ERROR; + 
return NAPA_RESULT_V8_INIT_ERROR; } } @@ -164,30 +171,30 @@ static napa_response_code napa_initialize_common() { LOG_INFO("Api", "Napa initialized successfully"); - return NAPA_RESPONSE_SUCCESS; + return NAPA_RESULT_SUCCESS; } -napa_response_code napa_initialize(napa_string_ref settings) { +napa_result_code napa_initialize(napa_string_ref settings) { NAPA_ASSERT(!_initialized, "Napa was already initialized"); - if (!napa::settings_parser::ParseFromString(NAPA_STRING_REF_TO_STD_STRING(settings), _platformSettings)) { - return NAPA_RESPONSE_SETTINGS_PARSER_ERROR; + if (!napa::settings::ParseFromString(NAPA_STRING_REF_TO_STD_STRING(settings), _platformSettings)) { + return NAPA_RESULT_SETTINGS_PARSER_ERROR; } return napa_initialize_common(); } -napa_response_code napa_initialize_from_console(int argc, char* argv[]) { +napa_result_code napa_initialize_from_console(int argc, char* argv[]) { NAPA_ASSERT(!_initialized, "Napa was already initialized"); - if (!napa::settings_parser::ParseFromConsole(argc, argv, _platformSettings)) { - return NAPA_RESPONSE_SETTINGS_PARSER_ERROR; + if (!napa::settings::ParseFromConsole(argc, argv, _platformSettings)) { + return NAPA_RESULT_SETTINGS_PARSER_ERROR; } return napa_initialize_common(); } -napa_response_code napa_shutdown() { +napa_result_code napa_shutdown() { NAPA_ASSERT(_initialized, "Napa wasn't initialized"); napa::providers::Shutdown(); @@ -198,23 +205,23 @@ napa_response_code napa_shutdown() { LOG_INFO("Api", "Napa shutdown successfully"); - return NAPA_RESPONSE_SUCCESS; + return NAPA_RESULT_SUCCESS; } -#define NAPA_RESPONSE_CODE_DEF(symbol, string_rep) string_rep +#define NAPA_RESULT_CODE_DEF(symbol, string_rep) string_rep static const char* NAPA_REPONSE_CODE_STRINGS[] = { -#include "napa/response-codes.inc" +#include "napa/result-codes.inc" }; -#undef NAPA_RESPONSE_CODE_DEF +#undef NAPA_RESULT_CODE_DEF template constexpr size_t size(T(&)[N]) { return N; } -const char* napa_response_code_to_string(napa_response_code code) { 
- NAPA_ASSERT(code < size(NAPA_REPONSE_CODE_STRINGS), "response code out of range"); +const char* napa_result_code_to_string(napa_result_code code) { + NAPA_ASSERT(code < size(NAPA_REPONSE_CODE_STRINGS), "result code out of range"); return NAPA_REPONSE_CODE_STRINGS[code]; } diff --git a/src/module/core-modules/napa/call-context-wrap.cpp b/src/module/core-modules/napa/call-context-wrap.cpp new file mode 100644 index 0000000..0c0858b --- /dev/null +++ b/src/module/core-modules/napa/call-context-wrap.cpp @@ -0,0 +1,144 @@ +#include "call-context-wrap.h" +#include "transport-context-wrap-impl.h" + +#include +#include + +using namespace napa; +using namespace napa::module; + +NAPA_DEFINE_PERSISTENT_CONSTRUCTOR(CallContextWrap) + +void CallContextWrap::Init() { + auto isolate = v8::Isolate::GetCurrent(); + auto constructorTemplate = v8::FunctionTemplate::New(isolate, DefaultConstructorCallback); + constructorTemplate->SetClassName(v8_helpers::MakeV8String(isolate, exportName)); + constructorTemplate->InstanceTemplate()->SetInternalFieldCount(1); + + InitConstructorTemplate(constructorTemplate); + + NAPA_SET_PROTOTYPE_METHOD(constructorTemplate, "resolve", ResolveCallback); + NAPA_SET_PROTOTYPE_METHOD(constructorTemplate, "reject", RejectCallback); + NAPA_SET_ACCESSOR(constructorTemplate, "finished", IsFinishedCallback, nullptr); + NAPA_SET_ACCESSOR(constructorTemplate, "elapse", GetElapseCallback, nullptr); + NAPA_SET_ACCESSOR(constructorTemplate, "module", GetModuleCallback, nullptr); + NAPA_SET_ACCESSOR(constructorTemplate, "function", GetFunctionCallback, nullptr); + NAPA_SET_ACCESSOR(constructorTemplate, "args", GetArgumentsCallback, nullptr); + NAPA_SET_ACCESSOR(constructorTemplate, "transportContext", GetTransportContextCallback, nullptr); + NAPA_SET_ACCESSOR(constructorTemplate, "options", GetOptionsCallback, nullptr); + + auto constructor = constructorTemplate->GetFunction(); + InitConstructor("", constructor); + NAPA_SET_PERSISTENT_CONSTRUCTOR(exportName, 
constructor); +} + +v8::Local CallContextWrap::NewInstance(std::shared_ptr call) { + return ShareableWrap::NewInstance(call); +} + +zone::CallContext& CallContextWrap::GetRef() { + return ShareableWrap::GetRef(); +} + +void CallContextWrap::ResolveCallback(const v8::FunctionCallbackInfo& args) { + auto isolate = v8::Isolate::GetCurrent(); + v8::HandleScope scope(isolate); + + CHECK_ARG(isolate, args.Length() == 1, "1 argument of 'result' is required for \"resolve\"."); + + v8::String::Utf8Value result(args[0]); + auto thisObject = NAPA_OBJECTWRAP::Unwrap(args.Holder()); + auto success = thisObject->GetRef().Resolve(std::string(*result, result.length())); + + JS_ENSURE(isolate, success, "Resolve call failed: Already finished."); +} + +void CallContextWrap::RejectCallback(const v8::FunctionCallbackInfo& args) { + auto isolate = v8::Isolate::GetCurrent(); + v8::HandleScope scope(isolate); + + CHECK_ARG(isolate, args.Length() == 1 || args.Length() == 2, "at least 1 argument of 'reason' is required for \"reject\"."); + + napa::ResultCode code = NAPA_RESULT_EXECUTE_FUNC_ERROR; + v8::Local reason; + + if (args.Length() == 1) { + reason = args[0]; + } else { + CHECK_ARG(isolate, args[0]->IsUint32(), "arg 'resultCode' should be a number type."); + code = static_cast(args[0]->Uint32Value()); + + reason = args[1]; + } + + v8::String::Utf8Value reasonStr(reason); + + auto thisObject = NAPA_OBJECTWRAP::Unwrap(args.Holder()); + auto success = thisObject->GetRef().Reject(code, std::string(*reasonStr, reasonStr.length())); + JS_ENSURE(isolate, success, "Reject call failed: Already finished."); +} + +void CallContextWrap::IsFinishedCallback(v8::Local /*propertyName*/, const v8::PropertyCallbackInfo& args){ + auto isolate = v8::Isolate::GetCurrent(); + v8::HandleScope scope(isolate); + auto thisObject = NAPA_OBJECTWRAP::Unwrap(args.Holder()); + args.GetReturnValue().Set(thisObject->GetRef().IsFinished()); +} + +void CallContextWrap::GetElapseCallback(v8::Local /*propertyName*/, 
const v8::PropertyCallbackInfo& args){ + auto isolate = v8::Isolate::GetCurrent(); + v8::HandleScope scope(isolate); + auto thisObject = NAPA_OBJECTWRAP::Unwrap(args.Holder()); + args.GetReturnValue().Set(v8_helpers::HrtimeToV8Uint32Array(isolate, thisObject->GetRef().GetElapse().count())); +} + +void CallContextWrap::GetModuleCallback(v8::Local /*propertyName*/, const v8::PropertyCallbackInfo& args) { + auto isolate = v8::Isolate::GetCurrent(); + v8::HandleScope scope(isolate); + auto thisObject = NAPA_OBJECTWRAP::Unwrap(args.Holder()); + args.GetReturnValue().Set(v8_helpers::MakeV8String(isolate, thisObject->GetRef().GetModule())); +} + +void CallContextWrap::GetFunctionCallback(v8::Local /*propertyName*/, const v8::PropertyCallbackInfo& args) { + auto isolate = v8::Isolate::GetCurrent(); + v8::HandleScope scope(isolate); + auto thisObject = NAPA_OBJECTWRAP::Unwrap(args.Holder()); + args.GetReturnValue().Set(v8_helpers::MakeV8String(isolate, thisObject->GetRef().GetFunction())); +} + +void CallContextWrap::GetArgumentsCallback(v8::Local /*propertyName*/, const v8::PropertyCallbackInfo& args) { + auto isolate = v8::Isolate::GetCurrent(); + v8::HandleScope scope(isolate); + auto context = isolate->GetCurrentContext(); + auto thisObject = NAPA_OBJECTWRAP::Unwrap(args.Holder()); + + auto& cppArgs = thisObject->GetRef().GetArguments(); + auto jsArgs = v8::Array::New(isolate, static_cast(cppArgs.size())); + for (size_t i = 0; i < cppArgs.size(); ++i) { + (void)jsArgs->CreateDataProperty(context, static_cast(i), v8_helpers::MakeExternalV8String(isolate, cppArgs[i])); + } + args.GetReturnValue().Set(jsArgs); +} + +void CallContextWrap::GetTransportContextCallback(v8::Local /*propertyName*/, const v8::PropertyCallbackInfo& args) { + auto isolate = v8::Isolate::GetCurrent(); + v8::HandleScope scope(isolate); + auto context = isolate->GetCurrentContext(); + auto thisObject = NAPA_OBJECTWRAP::Unwrap(args.Holder()); + + auto& transportContext = 
thisObject->GetRef().GetTransportContext(); + auto wrap = TransportContextWrapImpl::NewInstance(&transportContext); + args.GetReturnValue().Set(wrap); +} + +void CallContextWrap::GetOptionsCallback(v8::Local /*propertyName*/, const v8::PropertyCallbackInfo& args) { + auto isolate = v8::Isolate::GetCurrent(); + v8::HandleScope scope(isolate); + + // Prepare execute options. + // NOTE: export necessary fields from CallContext.GetOptions to jsOptions object here. Now it's empty. + auto jsOptions = v8::ObjectTemplate::New(isolate)->NewInstance(); + + args.GetReturnValue().Set(jsOptions); +} + diff --git a/src/module/core-modules/napa/call-context-wrap.h b/src/module/core-modules/napa/call-context-wrap.h new file mode 100644 index 0000000..ac5b8e3 --- /dev/null +++ b/src/module/core-modules/napa/call-context-wrap.h @@ -0,0 +1,63 @@ +#pragma once + +#include +#include +#include + +#include + +namespace napa { +namespace module { + + /// + /// Class that wraps zone::CallContext, which enables JavaScript world to + /// resolve or reject a function call. + /// + class CallContextWrap: public ShareableWrap { + public: + /// It creates a persistent constructor for CallContextWrap instance. + static void Init(); + + /// Create a new instance of wrap associating with specific call context. + static v8::Local NewInstance(std::shared_ptr call); + + /// Get call context. + zone::CallContext& GetRef(); + + /// Exported class name. + static constexpr const char* exportName = "CallContextWrap"; + + /// Declare constructor in public, so we can export class constructor in JavaScript world. 
+ NAPA_DECLARE_PERSISTENT_CONSTRUCTOR + + protected: + /// It implements CallContext.resolve(result: any): void + static void ResolveCallback(const v8::FunctionCallbackInfo& args); + + /// It implements CallContext.reject(reason: string): void + static void RejectCallback(const v8::FunctionCallbackInfo& args); + + /// It implements CallContext.finished: boolean + static void IsFinishedCallback(v8::Local propertyName, const v8::PropertyCallbackInfo& args); + + /// It implements CallContext.module: string + static void GetModuleCallback(v8::Local propertyName, const v8::PropertyCallbackInfo& args); + + /// It implements CallContext.function: string + static void GetFunctionCallback(v8::Local propertyName, const v8::PropertyCallbackInfo& args); + + /// It implements CallContext.args: string[] + static void GetArgumentsCallback(v8::Local propertyName, const v8::PropertyCallbackInfo& args); + + /// It implements CallContext.transportContext: TransportContext + static void GetTransportContextCallback(v8::Local propertyName, const v8::PropertyCallbackInfo& args); + + /// It implements CallContext.options: CallOptions + static void GetOptionsCallback(v8::Local propertyName, const v8::PropertyCallbackInfo& args); + + /// It implements CallContext.elapse: [number, number] (precision in nano-second) + static void GetElapseCallback(v8::Local propertyName, const v8::PropertyCallbackInfo& args); + }; +} +} + \ No newline at end of file diff --git a/src/module/core-modules/napa/napa-binding.cpp b/src/module/core-modules/napa/napa-binding.cpp index 935f640..a6250df 100644 --- a/src/module/core-modules/napa/napa-binding.cpp +++ b/src/module/core-modules/napa/napa-binding.cpp @@ -3,15 +3,18 @@ #include "metric-wrap.h" #include "allocator-debugger-wrap.h" #include "allocator-wrap.h" +#include "call-context-wrap.h" #include "shared-ptr-wrap.h" #include "store-wrap.h" #include "transport-context-wrap-impl.h" #include "zone-wrap.h" +#include + #include #include +#include #include 
-#include #include #include @@ -23,7 +26,16 @@ static void RegisterBinding(v8::Local module) { v8::HandleScope scope(isolate); auto persistentModule = new v8::Persistent(isolate, module); - WorkerContext::Set(WorkerContextItem::NAPA_BINDING, persistentModule); + zone::WorkerContext::Set(zone::WorkerContextItem::NAPA_BINDING, persistentModule); +} + +v8::Local napa::module::binding::GetModule() { + auto persistentModule = + reinterpret_cast*>( + zone::WorkerContext::Get(zone::WorkerContextItem::NAPA_BINDING)); + + NAPA_ASSERT(persistentModule != nullptr, "\"napajs\" must be required before napa::module::binding::GetModule() can be called from C++."); + return v8::Local::New(v8::Isolate::GetCurrent(), *persistentModule); } static void CreateZone(const v8::FunctionCallbackInfo& args) { @@ -194,6 +206,7 @@ void binding::Init(v8::Local exports, v8::Local module) AllocatorDebuggerWrap::Init(); AllocatorWrap::Init(); MetricWrap::Init(); + CallContextWrap::Init(); SharedPtrWrap::Init(); StoreWrap::Init(); TransportContextWrapImpl::Init(); @@ -202,6 +215,7 @@ void binding::Init(v8::Local exports, v8::Local module) NAPA_EXPORT_OBJECTWRAP(exports, "AllocatorDebuggerWrap", AllocatorDebuggerWrap); NAPA_EXPORT_OBJECTWRAP(exports, "AllocatorWrap", AllocatorWrap); NAPA_EXPORT_OBJECTWRAP(exports, "MetricWrap", MetricWrap); + NAPA_EXPORT_OBJECTWRAP(exports, "CallContextWrap", CallContextWrap); NAPA_EXPORT_OBJECTWRAP(exports, "SharedPtrWrap", SharedPtrWrap); NAPA_EXPORT_OBJECTWRAP(exports, "TransportContextWrap", TransportContextWrapImpl); diff --git a/src/module/core-modules/napa/napa-core-modules.proj b/src/module/core-modules/napa/napa-core-modules.proj index 0fc5ec3..2a38145 100644 --- a/src/module/core-modules/napa/napa-core-modules.proj +++ b/src/module/core-modules/napa/napa-core-modules.proj @@ -2,6 +2,7 @@ + diff --git a/src/module/core-modules/napa/transport-context-wrap-impl.cpp b/src/module/core-modules/napa/transport-context-wrap-impl.cpp index f7e1a0b..54c51d1 100644 
--- a/src/module/core-modules/napa/transport-context-wrap-impl.cpp +++ b/src/module/core-modules/napa/transport-context-wrap-impl.cpp @@ -12,6 +12,14 @@ TransportContextWrapImpl::TransportContextWrapImpl(TransportContext* context) { _context = context; } +v8::Local TransportContextWrapImpl::NewInstance(napa::transport::TransportContext* context) { + auto object = napa::module::NewInstance().ToLocalChecked(); + auto wrap = NAPA_OBJECTWRAP::Unwrap(object); + wrap->_context = context; + + return object; +} + TransportContext* TransportContextWrapImpl::Get() { return _context; } @@ -19,14 +27,14 @@ TransportContext* TransportContextWrapImpl::Get() { void TransportContextWrapImpl::Init() { auto isolate = v8::Isolate::GetCurrent(); auto constructorTemplate = v8::FunctionTemplate::New(isolate, TransportContextWrapImpl::ConstructorCallback); - constructorTemplate->SetClassName(MakeV8String(isolate, _exportName)); + constructorTemplate->SetClassName(MakeV8String(isolate, exportName)); constructorTemplate->InstanceTemplate()->SetInternalFieldCount(1); NAPA_SET_PROTOTYPE_METHOD(constructorTemplate, "saveShared", SaveSharedCallback); NAPA_SET_PROTOTYPE_METHOD(constructorTemplate, "loadShared", LoadSharedCallback); NAPA_SET_ACCESSOR(constructorTemplate, "sharedCount", GetSharedCountCallback, nullptr); - NAPA_SET_PERSISTENT_CONSTRUCTOR(_exportName, constructorTemplate->GetFunction()); + NAPA_SET_PERSISTENT_CONSTRUCTOR(exportName, constructorTemplate->GetFunction()); } void TransportContextWrapImpl::GetSharedCountCallback(v8::Local, const v8::PropertyCallbackInfo& args){ diff --git a/src/module/core-modules/napa/transport-context-wrap-impl.h b/src/module/core-modules/napa/transport-context-wrap-impl.h index aa3ebf7..8c5cb9c 100644 --- a/src/module/core-modules/napa/transport-context-wrap-impl.h +++ b/src/module/core-modules/napa/transport-context-wrap-impl.h @@ -13,12 +13,18 @@ namespace module { /// Init this wrap. 
static void Init(); + /// Create a non-owning transport context wrap. + static v8::Local NewInstance(napa::transport::TransportContext* context); + /// Get transport context. napa::transport::TransportContext* Get() override; /// Declare constructor in public, so we can export class constructor to JavaScript world. NAPA_DECLARE_PERSISTENT_CONSTRUCTOR + /// Exported class name. + static constexpr const char* exportName = "TransportContextWrap"; + private: /// Constructor. TransportContextWrapImpl(napa::transport::TransportContext* context); @@ -39,9 +45,6 @@ namespace module { /// It implements TransportContext.loadShared(handle: Handle): napajs.memory.ShareableWrap) static void LoadSharedCallback(const v8::FunctionCallbackInfo& args); - /// Exported class name. - static constexpr const char* _exportName = "TransportContextWrap"; - /// Non-owning transport context. napa::transport::TransportContext* _context; }; diff --git a/src/module/core-modules/napa/zone-wrap.cpp b/src/module/core-modules/napa/zone-wrap.cpp index a9ae1c5..c373cc7 100644 --- a/src/module/core-modules/napa/zone-wrap.cpp +++ b/src/module/core-modules/napa/zone-wrap.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -15,7 +16,7 @@ using namespace napa::v8_helpers; NAPA_DEFINE_PERSISTENT_CONSTRUCTOR(ZoneWrap); // Forward declaration. 
-static v8::Local CreateResponseObject(const napa::ExecuteResponse& response); +static v8::Local CreateResponseObject(const napa::Result& result); template static void CreateRequestAndExecute(v8::Local obj, Func&& func); @@ -66,23 +67,22 @@ void ZoneWrap::Broadcast(const v8::FunctionCallbackInfo& args) { v8::String::Utf8Value source(args[0]->ToString()); - napa::module::DoAsyncWork(v8::Local::Cast(args[1]), + napa::zone::DoAsyncWork(v8::Local::Cast(args[1]), [&args, &source](std::function complete) { auto wrap = ObjectWrap::Unwrap(args.Holder()); - wrap->_zoneProxy->Broadcast(*source, [complete = std::move(complete)](ResponseCode responseCode) { - complete(reinterpret_cast(static_cast(responseCode))); + wrap->_zoneProxy->Broadcast(*source, [complete = std::move(complete)](ResultCode resultCode) { + complete(reinterpret_cast(static_cast(resultCode))); }); }, [](auto jsCallback, void* result) { auto isolate = v8::Isolate::GetCurrent(); + v8::HandleScope scope(isolate); auto context = isolate->GetCurrentContext(); - v8::HandleScope scope(isolate); - std::vector> argv; - auto responseCode = static_cast(reinterpret_cast(result)); - argv.emplace_back(v8::Uint32::NewFromUnsigned(isolate, responseCode)); + auto resultCode = static_cast(reinterpret_cast(result)); + argv.emplace_back(v8::Uint32::NewFromUnsigned(isolate, resultCode)); (void)jsCallback->Call(context, context->Global(), static_cast(argv.size()), argv.data()); } @@ -97,41 +97,41 @@ void ZoneWrap::BroadcastSync(const v8::FunctionCallbackInfo& args) { v8::String::Utf8Value source(args[0]->ToString()); auto wrap = ObjectWrap::Unwrap(args.Holder()); - auto responseCode = wrap->_zoneProxy->BroadcastSync(*source); + auto resultCode = wrap->_zoneProxy->BroadcastSync(*source); - args.GetReturnValue().Set(v8::Uint32::NewFromUnsigned(isolate, responseCode)); + args.GetReturnValue().Set(v8::Uint32::NewFromUnsigned(isolate, resultCode)); } void ZoneWrap::Execute(const v8::FunctionCallbackInfo& args) { auto isolate = 
v8::Isolate::GetCurrent(); - CHECK_ARG(isolate, args[0]->IsObject(), "first argument to zone.execute must be the execution request object"); + CHECK_ARG(isolate, args[0]->IsObject(), "first argument to zone.execute must be the function spec object"); CHECK_ARG(isolate, args[1]->IsFunction(), "second argument to zone.execute must be the callback"); - napa::module::DoAsyncWork(v8::Local::Cast(args[1]), + napa::zone::DoAsyncWork(v8::Local::Cast(args[1]), [&args](std::function complete) { - CreateRequestAndExecute(args[0]->ToObject(), [&args, &complete](const napa::ExecuteRequest& request) { + CreateRequestAndExecute(args[0]->ToObject(), [&args, &complete](const napa::FunctionSpec& spec) { auto wrap = ObjectWrap::Unwrap(args.Holder()); - wrap->_zoneProxy->Execute(request, [complete = std::move(complete)](napa::ExecuteResponse response) { - complete(new napa::ExecuteResponse(std::move(response))); + wrap->_zoneProxy->Execute(spec, [complete = std::move(complete)](napa::Result result) { + complete(new napa::Result(std::move(result))); }); }); }, - [](auto jsCallback, void* result) { + [](auto jsCallback, void* res) { auto isolate = v8::Isolate::GetCurrent(); auto context = isolate->GetCurrentContext(); - auto response = static_cast(result); + auto result = static_cast(res); v8::HandleScope scope(isolate); std::vector> argv; - argv.emplace_back(CreateResponseObject(*response)); + argv.emplace_back(CreateResponseObject(*result)); (void)jsCallback->Call(context, context->Global(), static_cast(argv.size()), argv.data()); - delete response; + delete result; } ); } @@ -139,17 +139,17 @@ void ZoneWrap::Execute(const v8::FunctionCallbackInfo& args) { void ZoneWrap::ExecuteSync(const v8::FunctionCallbackInfo& args) { auto isolate = v8::Isolate::GetCurrent(); - CHECK_ARG(isolate, args[0]->IsObject(), "first argument to zone.execute must be the execution request object"); + CHECK_ARG(isolate, args[0]->IsObject(), "first argument to zone.execute must be the function spec object"); - 
CreateRequestAndExecute(args[0]->ToObject(), [&args](const napa::ExecuteRequest& request) { + CreateRequestAndExecute(args[0]->ToObject(), [&args](const napa::FunctionSpec& spec) { auto wrap = ObjectWrap::Unwrap(args.Holder()); - napa::ExecuteResponse response = wrap->_zoneProxy->ExecuteSync(request); - args.GetReturnValue().Set(CreateResponseObject(response)); + napa::Result result = wrap->_zoneProxy->ExecuteSync(spec); + args.GetReturnValue().Set(CreateResponseObject(result)); }); } -static v8::Local CreateResponseObject(const napa::ExecuteResponse& response) { +static v8::Local CreateResponseObject(const napa::Result& result) { auto isolate = v8::Isolate::GetCurrent(); auto context = isolate->GetCurrentContext(); @@ -158,23 +158,23 @@ static v8::Local CreateResponseObject(const napa::ExecuteResponse& r (void)responseObject->CreateDataProperty( context, MakeV8String(isolate, "code"), - v8::Uint32::NewFromUnsigned(isolate, response.code)); + v8::Uint32::NewFromUnsigned(isolate, result.code)); (void)responseObject->CreateDataProperty( context, MakeV8String(isolate, "errorMessage"), - MakeV8String(isolate, response.errorMessage)); + MakeV8String(isolate, result.errorMessage)); (void)responseObject->CreateDataProperty( context, MakeV8String(isolate, "returnValue"), - MakeV8String(isolate, response.returnValue)); + MakeV8String(isolate, result.returnValue)); // Transport context handle (void)responseObject->CreateDataProperty( context, MakeV8String(isolate, "contextHandle"), - PtrToV8Uint32Array(isolate, response.transportContext.release())); + PtrToV8Uint32Array(isolate, result.transportContext.release())); return responseObject; } @@ -184,35 +184,35 @@ static void CreateRequestAndExecute(v8::Local obj, Func&& func) { auto isolate = v8::Isolate::GetCurrent(); auto context = isolate->GetCurrentContext(); - napa::ExecuteRequest request; + napa::FunctionSpec spec; - // module property is optional in a request + // module property is optional in a spec Utf8String module; 
auto maybe = obj->Get(context, MakeV8String(isolate, "module")); if (!maybe.IsEmpty()) { module = Utf8String(maybe.ToLocalChecked()); - request.module = NAPA_STRING_REF_WITH_SIZE(module.Data(), module.Length()); + spec.module = NAPA_STRING_REF_WITH_SIZE(module.Data(), module.Length()); } - // function property is mandatory in a request + // function property is mandatory in a spec maybe = obj->Get(context, MakeV8String(isolate, "function")); - CHECK_ARG(isolate, !maybe.IsEmpty(), "function property is missing in execution request object"); + CHECK_ARG(isolate, !maybe.IsEmpty(), "function property is missing in function spec object"); auto functionValue = maybe.ToLocalChecked(); - CHECK_ARG(isolate, functionValue->IsString(), "function property in execution request object must be a string"); + CHECK_ARG(isolate, functionValue->IsString(), "function property in function spec object must be a string"); v8::String::Utf8Value function(functionValue->ToString()); - request.function = NAPA_STRING_REF_WITH_SIZE(*function, static_cast(function.length())); + spec.function = NAPA_STRING_REF_WITH_SIZE(*function, static_cast(function.length())); - // arguments are optional in a request + // arguments are optional in a spec maybe = obj->Get(context, MakeV8String(isolate, "arguments")); std::vector arguments; if (!maybe.IsEmpty()) { arguments = V8ArrayToVector(isolate, v8::Local::Cast(maybe.ToLocalChecked())); - request.arguments.reserve(arguments.size()); + spec.arguments.reserve(arguments.size()); for (const auto& arg : arguments) { - request.arguments.emplace_back(NAPA_STRING_REF_WITH_SIZE(arg.Data(), arg.Length())); + spec.arguments.emplace_back(NAPA_STRING_REF_WITH_SIZE(arg.Data(), arg.Length())); } } @@ -226,23 +226,23 @@ static void CreateRequestAndExecute(v8::Local obj, Func&& func) { // timeout is optional. 
maybe = options->Get(context, MakeV8String(isolate, "timeout")); if (!maybe.IsEmpty()) { - request.options.timeout = maybe.ToLocalChecked()->Uint32Value(context).FromJust(); + spec.options.timeout = maybe.ToLocalChecked()->Uint32Value(context).FromJust(); } // transport option is optional. maybe = options->Get(context, MakeV8String(isolate, "transport")); if (!maybe.IsEmpty()) { - request.options.transport = static_cast(maybe.ToLocalChecked()->Uint32Value(context).FromJust()); + spec.options.transport = static_cast(maybe.ToLocalChecked()->Uint32Value(context).FromJust()); } } - // transportContext property is mandatory in a request + // transportContext property is mandatory in a spec maybe = obj->Get(context, MakeV8String(isolate, "transportContext")); - CHECK_ARG(isolate, !maybe.IsEmpty(), "transportContext property is missing in execution request object"); + CHECK_ARG(isolate, !maybe.IsEmpty(), "transportContext property is missing in function spec object"); auto transportContextWrap = NAPA_OBJECTWRAP::Unwrap(maybe.ToLocalChecked()->ToObject()); - request.transportContext.reset(transportContextWrap->Get()); + spec.transportContext.reset(transportContextWrap->Get()); // Execute - func(request); + func(spec); } diff --git a/src/module/core-modules/node/os.cpp b/src/module/core-modules/node/os.cpp index bc14d87..d0ffc62 100644 --- a/src/module/core-modules/node/os.cpp +++ b/src/module/core-modules/node/os.cpp @@ -1,7 +1,7 @@ #include "os.h" #include -#include +#include using namespace napa; using namespace napa::module; diff --git a/src/module/core-modules/node/path.cpp b/src/module/core-modules/node/path.cpp index d5ec1dc..7098a0c 100644 --- a/src/module/core-modules/node/path.cpp +++ b/src/module/core-modules/node/path.cpp @@ -1,7 +1,7 @@ #include "path.h" #include -#include +#include #include #include diff --git a/src/module/core-modules/node/process.cpp b/src/module/core-modules/node/process.cpp index ed9a194..1c6ee21 100644 --- 
a/src/module/core-modules/node/process.cpp +++ b/src/module/core-modules/node/process.cpp @@ -1,7 +1,7 @@ #include "process.h" #include -#include +#include #include @@ -122,8 +122,6 @@ namespace { } void HrtimeCallback(const v8::FunctionCallbackInfo& args) { - const static uint32_t NANOS_PER_SECOND = 1000000000; - auto isolate = v8::Isolate::GetCurrent(); v8::HandleScope scope(isolate); auto context = isolate->GetCurrentContext(); @@ -132,30 +130,13 @@ namespace { uint64_t time = std::chrono::high_resolution_clock::now().time_since_epoch().count(); if (args.Length() == 1) { - CHECK_ARG(isolate, args[0]->IsArray(), "process.hrtime only accepts an Array tuple"); - - auto arr = v8::Local::Cast(args[0]); - CHECK_ARG(isolate, arr->Length() == 2, "process.hrtime only accepts an Array tuple of size 2"); - - uint64_t prev = (static_cast(arr->Get(0)->Uint32Value()) * NANOS_PER_SECOND) - + arr->Get(1)->Uint32Value(); + auto result = v8_helpers::V8Uint32ArrayToHrtime(isolate, args[0]); + JS_ENSURE(isolate, result.second, "The 1st argument of hrtime must be a two-element uint32 array."); // Calculate the delta - time -= prev; + time -= result.first; } - - v8::Local res = v8::Array::New(isolate, 2); - (void)res->CreateDataProperty( - context, - 0, - v8::Integer::NewFromUnsigned(isolate, static_cast(time / NANOS_PER_SECOND))); - - (void)res->CreateDataProperty( - context, - 1, - v8::Integer::NewFromUnsigned(isolate, static_cast(time % NANOS_PER_SECOND))); - - args.GetReturnValue().Set(res); + args.GetReturnValue().Set(v8_helpers::HrtimeToV8Uint32Array(isolate, time)); } void UmaskCallback(const v8::FunctionCallbackInfo& args) { diff --git a/src/module/core-modules/node/tty-wrap.cpp b/src/module/core-modules/node/tty-wrap.cpp index 9e94cc6..f7fb316 100644 --- a/src/module/core-modules/node/tty-wrap.cpp +++ b/src/module/core-modules/node/tty-wrap.cpp @@ -1,7 +1,7 @@ #include "tty-wrap.h" #include -#include +#include using namespace napa; using namespace napa::module; diff --git 
a/src/module/binary-module-loader.cpp b/src/module/loader/binary-module-loader.cpp similarity index 100% rename from src/module/binary-module-loader.cpp rename to src/module/loader/binary-module-loader.cpp diff --git a/src/module/binary-module-loader.h b/src/module/loader/binary-module-loader.h similarity index 100% rename from src/module/binary-module-loader.h rename to src/module/loader/binary-module-loader.h diff --git a/src/module/core-module-loader.cpp b/src/module/loader/core-module-loader.cpp similarity index 100% rename from src/module/core-module-loader.cpp rename to src/module/loader/core-module-loader.cpp diff --git a/src/module/core-module-loader.h b/src/module/loader/core-module-loader.h similarity index 100% rename from src/module/core-module-loader.h rename to src/module/loader/core-module-loader.h diff --git a/src/module/javascript-module-loader.cpp b/src/module/loader/javascript-module-loader.cpp similarity index 100% rename from src/module/javascript-module-loader.cpp rename to src/module/loader/javascript-module-loader.cpp diff --git a/src/module/javascript-module-loader.h b/src/module/loader/javascript-module-loader.h similarity index 100% rename from src/module/javascript-module-loader.h rename to src/module/loader/javascript-module-loader.h diff --git a/src/module/json-module-loader.cpp b/src/module/loader/json-module-loader.cpp similarity index 100% rename from src/module/json-module-loader.cpp rename to src/module/loader/json-module-loader.cpp diff --git a/src/module/json-module-loader.h b/src/module/loader/json-module-loader.h similarity index 100% rename from src/module/json-module-loader.h rename to src/module/loader/json-module-loader.h diff --git a/src/module/module-cache.cpp b/src/module/loader/module-cache.cpp similarity index 100% rename from src/module/module-cache.cpp rename to src/module/loader/module-cache.cpp diff --git a/src/module/module-cache.h b/src/module/loader/module-cache.h similarity index 100% rename from 
src/module/module-cache.h rename to src/module/loader/module-cache.h diff --git a/src/module/module-file-loader.h b/src/module/loader/module-file-loader.h similarity index 100% rename from src/module/module-file-loader.h rename to src/module/loader/module-file-loader.h diff --git a/src/module/module-loader-helpers.cpp b/src/module/loader/module-loader-helpers.cpp similarity index 99% rename from src/module/module-loader-helpers.cpp rename to src/module/loader/module-loader-helpers.cpp index dceaa0a..f8d6994 100644 --- a/src/module/module-loader-helpers.cpp +++ b/src/module/loader/module-loader-helpers.cpp @@ -1,5 +1,5 @@ #include "module-loader-helpers.h" -#include "core-modules/node/file-system-helpers.h" +#include #include #include diff --git a/src/module/module-loader-helpers.h b/src/module/loader/module-loader-helpers.h similarity index 100% rename from src/module/module-loader-helpers.h rename to src/module/loader/module-loader-helpers.h diff --git a/src/module/module-loader.cpp b/src/module/loader/module-loader.cpp similarity index 95% rename from src/module/module-loader.cpp rename to src/module/loader/module-loader.cpp index a0d0447..d641375 100644 --- a/src/module/module-loader.cpp +++ b/src/module/loader/module-loader.cpp @@ -8,7 +8,10 @@ #include "module-loader-helpers.h" #include "module-resolver.h" -#include "core-modules/core-modules.h" +#include + +// TODO: decouple dependencies between moduler-loader and zone. 
+#include #include #include @@ -104,10 +107,10 @@ private: }; void ModuleLoader::CreateModuleLoader() { - auto moduleLoader = reinterpret_cast(WorkerContext::Get(WorkerContextItem::MODULE_LOADER)); + auto moduleLoader = reinterpret_cast(zone::WorkerContext::Get(zone::WorkerContextItem::MODULE_LOADER)); if (moduleLoader == nullptr) { moduleLoader = new ModuleLoader(); - WorkerContext::Set(WorkerContextItem::MODULE_LOADER, moduleLoader); + zone::WorkerContext::Set(zone::WorkerContextItem::MODULE_LOADER, moduleLoader); // Now, Javascript core module's 'require' can find module loader instance correctly. moduleLoader->_impl->Bootstrap(); @@ -184,7 +187,7 @@ void ModuleLoader::ModuleLoaderImpl::RequireCallback(const v8::FunctionCallbackI args.Length() == 1 || args.Length() == 2 || args[0]->IsString(), "Invalid arguments"); - auto moduleLoader = reinterpret_cast(WorkerContext::Get(WorkerContextItem::MODULE_LOADER)); + auto moduleLoader = reinterpret_cast(zone::WorkerContext::Get(zone::WorkerContextItem::MODULE_LOADER)); JS_ENSURE(isolate, moduleLoader != nullptr, "Module loader is not initialized"); v8::String::Utf8Value path(args[0]); @@ -197,7 +200,7 @@ void ModuleLoader::ModuleLoaderImpl::ResolveCallback(const v8::FunctionCallbackI CHECK_ARG(isolate, args.Length() == 1 && args[0]->IsString(), "Invalid arguments"); - auto moduleLoader = reinterpret_cast(WorkerContext::Get(WorkerContextItem::MODULE_LOADER)); + auto moduleLoader = reinterpret_cast(zone::WorkerContext::Get(zone::WorkerContextItem::MODULE_LOADER)); JS_ENSURE(isolate, moduleLoader != nullptr, "Module loader is not initialized"); v8::String::Utf8Value path(args[0]); @@ -213,7 +216,7 @@ void ModuleLoader::ModuleLoaderImpl::BindingCallback(const v8::FunctionCallbackI CHECK_ARG(isolate, args.Length() == 1 && args[0]->IsString(), "Invalid arguments"); - auto moduleLoader = reinterpret_cast(WorkerContext::Get(WorkerContextItem::MODULE_LOADER)); + auto moduleLoader = 
reinterpret_cast(zone::WorkerContext::Get(zone::WorkerContextItem::MODULE_LOADER)); JS_ENSURE(isolate, moduleLoader != nullptr, "Module loader is not initialized"); v8::String::Utf8Value name(args[0]); diff --git a/src/module/module-loader.h b/src/module/loader/module-loader.h similarity index 100% rename from src/module/module-loader.h rename to src/module/loader/module-loader.h diff --git a/src/module/module-resolver.cpp b/src/module/loader/module-resolver.cpp similarity index 99% rename from src/module/module-resolver.cpp rename to src/module/loader/module-resolver.cpp index 0638d84..0c7fa58 100644 --- a/src/module/module-resolver.cpp +++ b/src/module/loader/module-resolver.cpp @@ -1,6 +1,6 @@ #include "module-resolver.h" -#include +#include #include #include diff --git a/src/module/module-resolver.h b/src/module/loader/module-resolver.h similarity index 100% rename from src/module/module-resolver.h rename to src/module/loader/module-resolver.h diff --git a/src/module/module.cpp b/src/module/module.cpp new file mode 100644 index 0000000..9112fae --- /dev/null +++ b/src/module/module.cpp @@ -0,0 +1,47 @@ +#include + +#include + +using namespace napa; + +/// It sets the persistent constructor at the current V8 isolate. +/// Unique constructor name. It's recommended to use the same name as module. +/// V8 persistent function to constructor V8 object. 
+void napa::module::SetPersistentConstructor(const char* name, + v8::Local constructor) { + auto isolate = v8::Isolate::GetCurrent(); + v8::HandleScope scope(isolate); + + auto constructorInfo = + static_cast(zone::WorkerContext::Get(zone::WorkerContextItem::CONSTRUCTOR)); + if (constructorInfo == nullptr) { + constructorInfo = new ConstructorInfo(); + zone::WorkerContext::Set(zone::WorkerContextItem::CONSTRUCTOR, constructorInfo); + } + + constructorInfo->constructorMap.emplace(std::piecewise_construct, + std::forward_as_tuple(name), + std::forward_as_tuple(isolate, constructor)); +} + +/// It gets the given persistent constructor from the current V8 isolate. +/// Unique constructor name given at SetPersistentConstructor() call. +/// V8 local function object. +v8::Local napa::module::GetPersistentConstructor(const char* name) { + auto isolate = v8::Isolate::GetCurrent(); + v8::EscapableHandleScope scope(isolate); + + auto constructorInfo = + static_cast(zone::WorkerContext::Get(zone::WorkerContextItem::CONSTRUCTOR)); + if (constructorInfo == nullptr) { + return scope.Escape(v8::Local()); + } + + auto iter = constructorInfo->constructorMap.find(name); + if (iter != constructorInfo->constructorMap.end()) { + auto constructor = v8::Local::New(isolate, iter->second); + return scope.Escape(constructor); + } else { + return scope.Escape(v8::Local()); + } +} diff --git a/inc/napa/module/platform.h b/src/platform/platform.h similarity index 96% rename from inc/napa/module/platform.h rename to src/platform/platform.h index 2f6155a..9a0e41b 100644 --- a/inc/napa/module/platform.h +++ b/src/platform/platform.h @@ -3,7 +3,6 @@ #include namespace napa { -namespace module { namespace platform { /// Global variable to indicate the number of process arguments. @@ -44,5 +43,4 @@ namespace platform { int32_t Isatty(int32_t fd); } // End of namespce platform. -} // End of namespce module } // End of namespce napa. 
diff --git a/inc/napa/module/thread-local-storage.h b/src/platform/thread-local-storage.h similarity index 95% rename from inc/napa/module/thread-local-storage.h rename to src/platform/thread-local-storage.h index bdfc9cc..5ca3733 100644 --- a/inc/napa/module/thread-local-storage.h +++ b/src/platform/thread-local-storage.h @@ -3,7 +3,7 @@ #include namespace napa { -namespace module { +namespace platform { namespace tls { /// Allocate a thread local storage index. diff --git a/src/platform/win/platform.cpp b/src/platform/win/platform.cpp index aa0a20a..6fe2bb7 100644 --- a/src/platform/win/platform.cpp +++ b/src/platform/win/platform.cpp @@ -1,4 +1,4 @@ -#include +#include #include @@ -8,7 +8,7 @@ #include #include -using namespace napa::module; +using namespace napa; int platform::argc = __argc; char** platform::argv = __argv; diff --git a/src/platform/win/thread-local-storage.cpp b/src/platform/win/thread-local-storage.cpp index 5ad9a74..7c3f1a1 100644 --- a/src/platform/win/thread-local-storage.cpp +++ b/src/platform/win/thread-local-storage.cpp @@ -1,7 +1,7 @@ -#include +#include #include -using namespace napa::module; +using namespace napa::platform; uint32_t tls::Alloc() { return TlsAlloc(); diff --git a/src/providers/providers.cpp b/src/providers/providers.cpp index ce93b75..2885602 100644 --- a/src/providers/providers.cpp +++ b/src/providers/providers.cpp @@ -4,7 +4,7 @@ #include "nop-logging-provider.h" #include "nop-metric-provider.h" -#include "module/module-resolver.h" +#include #include @@ -13,7 +13,7 @@ #include - +using namespace napa; using namespace napa::providers; // Forward declarations. 
@@ -25,7 +25,7 @@ static LoggingProvider* _loggingProvider = LoadLoggingProvider(""); static MetricProvider* _metricProvider = LoadMetricProvider(""); -bool napa::providers::Initialize(const napa::PlatformSettings& settings) { +bool napa::providers::Initialize(const settings::PlatformSettings& settings) { _loggingProvider = LoadLoggingProvider(settings.loggingProvider); _metricProvider = LoadMetricProvider(settings.metricProvider); diff --git a/src/providers/providers.h b/src/providers/providers.h index d277f50..42c04ed 100644 --- a/src/providers/providers.h +++ b/src/providers/providers.h @@ -10,7 +10,7 @@ namespace napa { namespace providers { /// Initializes and loads all providers based on the provided settings. - bool Initialize(const napa::PlatformSettings& settings); + bool Initialize(const settings::PlatformSettings& settings); /// Clean up and destroy all loaded providers. void Shutdown(); diff --git a/src/scheduler/execute-task.cpp b/src/scheduler/execute-task.cpp deleted file mode 100644 index 64692b9..0000000 --- a/src/scheduler/execute-task.cpp +++ /dev/null @@ -1,90 +0,0 @@ -#include "execute-task.h" - -#include -#include - -#include - -using namespace napa::scheduler; -using namespace napa::v8_helpers; - -napa::scheduler::ExecuteTask::ExecuteTask(const ExecuteRequest& request, ExecuteCallback callback) : - _module(NAPA_STRING_REF_TO_STD_STRING(request.module)), - _func(NAPA_STRING_REF_TO_STD_STRING(request.function)), - _callback(std::move(callback)) { - - _args.reserve(request.arguments.size()); - for (auto& arg : request.arguments) { - _args.emplace_back(NAPA_STRING_REF_TO_STD_STRING(arg)); - } - _options = request.options; - - // Pass ownership of the transport context to the execute task. 
- _transportContext = std::move(request.transportContext); -} - -void ExecuteTask::Execute() { - auto isolate = v8::Isolate::GetCurrent(); - auto context = isolate->GetCurrentContext(); - v8::HandleScope scope(isolate); - - // Get the module based main function from global scope. - auto executeFunction = context->Global()->Get(MakeExternalV8String(isolate, "__zone_execute__")); - NAPA_ASSERT(executeFunction->IsFunction(), "__zone_execute__ function must exist in global scope"); - - // Prepare function args - auto args = v8::Array::New(isolate, static_cast(_args.size())); - for (size_t i = 0; i < _args.size(); ++i) { - (void)args->CreateDataProperty(context, static_cast(i), MakeExternalV8String(isolate, _args[i])); - } - - // Prepare execute options. - // NOTE: export necessary fields from _options to options object here. Now it's empty. - auto options = v8::ObjectTemplate::New(isolate)->NewInstance(); - - v8::Local argv[] = { - MakeExternalV8String(isolate, _module), - MakeExternalV8String(isolate, _func), - args, - PtrToV8Uint32Array(isolate, _transportContext.get()), - options - }; - - // Execute the function. - v8::TryCatch tryCatch(isolate); - auto res = v8::Local::Cast(executeFunction)->Call(context->Global(), 5, argv); - - // Terminating an isolate may occur from a different thread, i.e. from timeout service. - // If the function call already finished successfully when the isolate is terminated it may lead - // to one the following: - // 1. Terminate was called before tryCatch.HasTerminated(), the user gets an error code. - // 2. Terminate was called after tryCatch.HasTerminated(), the user gets a success code. - // - // In both cases the isolate is being restored since this happens before each task executes. 
- if (tryCatch.HasTerminated()) { - if (_terminationReason == TerminationReason::TIMEOUT) { - LOG_ERROR("Execute", "Task was terminated due to timeout"); - _callback({ NAPA_RESPONSE_TIMEOUT, "Execute exceeded timeout", "", std::move(_transportContext) }); - } else { - LOG_ERROR("Execute", "Task was terminated for unknown reason"); - _callback({ NAPA_RESPONSE_INTERNAL_ERROR, "Execute task terminated", "", std::move(_transportContext) }); - } - - return; - } - - if (tryCatch.HasCaught()) { - auto exception = tryCatch.Exception(); - v8::String::Utf8Value exceptionStr(exception); - auto stackTrace = tryCatch.StackTrace(); - v8::String::Utf8Value stackTraceStr(stackTrace); - - LOG_ERROR("Execute", "JS exception thrown: %s - %s", *exceptionStr, *stackTraceStr); - - _callback({ NAPA_RESPONSE_EXECUTE_FUNC_ERROR, *exceptionStr, "", std::move(_transportContext) }); - return; - } - - v8::String::Utf8Value val(res); - _callback({ NAPA_RESPONSE_SUCCESS, "", std::string(*val, val.length()), std::move(_transportContext) }); -} diff --git a/src/scheduler/execute-task.h b/src/scheduler/execute-task.h deleted file mode 100644 index b1dd0d8..0000000 --- a/src/scheduler/execute-task.h +++ /dev/null @@ -1,35 +0,0 @@ -#pragma once - -#include "terminable-task.h" - -#include - -#include -#include -#include - -namespace napa { -namespace scheduler { - - /// A task for executing pre-loaded javascript functions. - class ExecuteTask : public TerminableTask { - public: - - /// Constructor. - /// The execution request. - /// A callback that is triggered when the execute task is completed. - ExecuteTask(const ExecuteRequest& request, ExecuteCallback callback); - - /// Overrides Task.Execute to define execution logic. 
- virtual void Execute() override; - - private: - std::string _module; - std::string _func; - std::vector _args; - ExecuteOptions _options; - ExecuteCallback _callback; - std::unique_ptr _transportContext; - }; -} -} diff --git a/src/settings/settings-parser.cpp b/src/settings/settings-parser.cpp index f734a2e..2a00332 100644 --- a/src/settings/settings-parser.cpp +++ b/src/settings/settings-parser.cpp @@ -6,6 +6,7 @@ using namespace boost::program_options; using namespace napa; +using namespace napa::settings; static void AddZoneOptions(options_description& desc, ZoneSettings& settings) { // Zone parsing options should be added here. @@ -26,7 +27,7 @@ static void AddPlatformOptions(options_description& desc, PlatformSettings& sett ("initV8", value(&settings.initV8), "specify whether v8 should be initialized"); } -bool settings_parser::Parse(const std::vector& args, ZoneSettings& settings) { +bool settings::Parse(const std::vector& args, ZoneSettings& settings) { options_description desc; AddZoneOptions(desc, settings); @@ -45,7 +46,7 @@ bool settings_parser::Parse(const std::vector& args, ZoneSettings& return true; } -bool settings_parser::Parse(const std::vector& args, PlatformSettings& settings) { +bool settings::Parse(const std::vector& args, PlatformSettings& settings) { options_description desc; AddZoneOptions(desc, settings); AddPlatformOptions(desc, settings); diff --git a/src/settings/settings-parser.h b/src/settings/settings-parser.h index f84d787..7e04bc7 100644 --- a/src/settings/settings-parser.h +++ b/src/settings/settings-parser.h @@ -9,7 +9,7 @@ #include namespace napa { -namespace settings_parser { +namespace settings { /// Parses napa settings from a vector of arguments. /// The arguments holding the settings. diff --git a/src/settings/settings.h b/src/settings/settings.h index 2aeba62..1bdf636 100644 --- a/src/settings/settings.h +++ b/src/settings/settings.h @@ -7,6 +7,7 @@ namespace napa { +namespace settings { /// Zone specific settings. 
struct ZoneSettings { @@ -45,4 +46,5 @@ namespace napa { /// A flag to specify whether v8 should be initialized. bool initV8 = true; }; +} } \ No newline at end of file diff --git a/src/scheduler/async-complete-task.cpp b/src/zone/async-complete-task.cpp similarity index 92% rename from src/scheduler/async-complete-task.cpp rename to src/zone/async-complete-task.cpp index 0fa42d3..df200ee 100644 --- a/src/scheduler/async-complete-task.cpp +++ b/src/zone/async-complete-task.cpp @@ -2,8 +2,7 @@ #include -using namespace napa; -using namespace napa::module; +using namespace napa::zone; AsyncCompleteTask::AsyncCompleteTask(std::shared_ptr context) : _context(std::move(context)) {} diff --git a/src/scheduler/async-complete-task.h b/src/zone/async-complete-task.h similarity index 85% rename from src/scheduler/async-complete-task.h rename to src/zone/async-complete-task.h index da09793..e7fcc1b 100644 --- a/src/scheduler/async-complete-task.h +++ b/src/zone/async-complete-task.h @@ -2,15 +2,15 @@ #include "async-context.h" -#include +#include #include namespace napa { -namespace module { +namespace zone { /// A task to run Javascript callback after asynchronous callback completes. - class AsyncCompleteTask : public scheduler::Task { + class AsyncCompleteTask : public Task { public: /// Constructor. diff --git a/src/scheduler/async-context.h b/src/zone/async-context.h similarity index 78% rename from src/scheduler/async-context.h rename to src/zone/async-context.h index bc029e6..0592e6f 100644 --- a/src/scheduler/async-context.h +++ b/src/zone/async-context.h @@ -1,26 +1,27 @@ #pragma once -#include -#include -#include +#include + +#include +#include #include #include namespace napa { -namespace module { +namespace zone { /// Class holding asynchonous callbacks. struct AsyncContext { /// Zone instance issueing asynchronous work. - ZoneImpl* zone = nullptr; + NapaZone* zone = nullptr; /// Keep scheduler instance referenced until async work completes. 
- std::shared_ptr scheduler; + std::shared_ptr scheduler; /// Worker Id issueing asynchronous work. - scheduler::WorkerId workerId; + zone::WorkerId workerId; /// Future to wait async callback. std::future future; @@ -38,5 +39,5 @@ namespace module { AsyncCompleteCallback asyncCompleteCallback; }; -} // End of namespace scheduler. +} // End of namespace zone. } // End of namespace napa. diff --git a/src/module/async-runner.cpp b/src/zone/async-runner.cpp similarity index 80% rename from src/module/async-runner.cpp rename to src/zone/async-runner.cpp index 0776e59..0f88da7 100644 --- a/src/module/async-runner.cpp +++ b/src/zone/async-runner.cpp @@ -1,10 +1,11 @@ -#include -#include -#include -#include +#include + +#include +#include +#include using namespace napa; -using namespace napa::module; +using namespace napa::zone; namespace { @@ -23,9 +24,9 @@ namespace { /// Javascript callback. /// Function to run asynchronously in separate thread. /// Callback running in V8 isolate after asynchronous callback completes. -void napa::module::PostAsyncWork(v8::Local jsCallback, - AsyncWork asyncWork, - AsyncCompleteCallback asyncCompleteCallback) { +void napa::zone::PostAsyncWork(v8::Local jsCallback, + AsyncWork asyncWork, + AsyncCompleteCallback asyncCompleteCallback) { auto context = PrepareAsyncWork(jsCallback, std::move(asyncWork), std::move(asyncCompleteCallback)); if (context == nullptr) { return; @@ -43,9 +44,9 @@ void napa::module::PostAsyncWork(v8::Local jsCallback, /// Javascript callback. /// Function to wrap async-supporting function. /// Callback running in V8 isolate after asynchronous function completes. 
-void napa::module::DoAsyncWork(v8::Local jsCallback, - const CompletionWork& asyncWork, - AsyncCompleteCallback asyncCompleteCallback) { +void napa::zone::DoAsyncWork(v8::Local jsCallback, + const CompletionWork& asyncWork, + AsyncCompleteCallback asyncCompleteCallback) { auto context = PrepareAsyncWork(jsCallback, nullptr, std::move(asyncCompleteCallback)); if (context == nullptr) { return; @@ -67,16 +68,16 @@ namespace { auto isolate = v8::Isolate::GetCurrent(); v8::HandleScope scope(isolate); - auto context = std::make_shared(); + auto context = std::make_shared(); - context->zone = reinterpret_cast(WorkerContext::Get(WorkerContextItem::ZONE)); + context->zone = reinterpret_cast(WorkerContext::Get(WorkerContextItem::ZONE)); if (context->zone == nullptr) { return nullptr; } context->scheduler = context->zone->GetScheduler(); - context->workerId = static_cast( - reinterpret_cast(module::WorkerContext::Get(WorkerContextItem::WORKER_ID))); + context->workerId = static_cast( + reinterpret_cast(WorkerContext::Get(WorkerContextItem::WORKER_ID))); context->jsCallback.Reset(isolate, jsCallback); context->asyncWork = std::move(asyncWork); diff --git a/src/zone/call-context.cpp b/src/zone/call-context.cpp new file mode 100644 index 0000000..0c4d38d --- /dev/null +++ b/src/zone/call-context.cpp @@ -0,0 +1,87 @@ +// See: https://groups.google.com/forum/#!topic/nodejs/onA0S01INtw +#ifdef BUILDING_NODE_EXTENSION +#include +#endif + +#include "call-context.h" + +//#include +#include +#include + +#include + +using namespace napa::zone; + +CallContext::CallContext(const napa::FunctionSpec& spec, napa::ExecuteCallback callback) : + _module(NAPA_STRING_REF_TO_STD_STRING(spec.module)), + _function(NAPA_STRING_REF_TO_STD_STRING(spec.function)), + _callback(callback), + _finished(false) { + + // Audit start time. 
+ _startTime = std::chrono::high_resolution_clock::now(); + + _arguments.reserve(spec.arguments.size()); + for (auto& arg : spec.arguments) { + _arguments.emplace_back(NAPA_STRING_REF_TO_STD_STRING(arg)); + } + _options = spec.options; + + // Pass ownership of the transport context. + _transportContext = std::move(spec.transportContext); +} + +bool CallContext::Resolve(std::string marshalledResult) { + auto expected = false; + if (!_finished.compare_exchange_strong(expected, true)) { + return false; + } + _callback({ + NAPA_RESULT_SUCCESS, + "", + std::move(marshalledResult), + std::move(_transportContext) + }); + return true; +} + +bool CallContext::Reject(napa::ResultCode code, std::string reason) { + auto expected = false; + if (!_finished.compare_exchange_strong(expected, true)) { + return false; + } + + LOG_ERROR("Execute", "Call was rejected: %s", reason.c_str()); + + _callback({ code, reason, "", std::move(_transportContext) }); + return true; +} + +bool CallContext::IsFinished() const { + return _finished; +} + +const std::string& CallContext::GetModule() const { + return _module; +} + +const std::string& CallContext::GetFunction() const { + return _function; +} + +const std::vector& CallContext::GetArguments() const { + return _arguments; +} + +napa::transport::TransportContext& CallContext::GetTransportContext() { + return *_transportContext.get(); +} + +const napa::CallOptions& CallContext::GetOptions() const { + return _options; +} + +std::chrono::nanoseconds CallContext::GetElapse() const { + return std::chrono::high_resolution_clock::now() - _startTime; +} \ No newline at end of file diff --git a/src/zone/call-context.h b/src/zone/call-context.h new file mode 100644 index 0000000..ea676bc --- /dev/null +++ b/src/zone/call-context.h @@ -0,0 +1,80 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include + +namespace napa { +namespace zone { + + /// Context of calling a JavaScript function. 
+ class CallContext { + + public: + /// Construct spec from external FunctionSpec. + explicit CallContext(const napa::FunctionSpec& spec, napa::ExecuteCallback callback); + + /// Resolve current spec. + /// marshalled return value. + /// True if operation is successful, otherwise if task is already finished before. + //bool Resolve(v8::Local result); + bool Resolve(std::string result); + + /// Reject current spec. + /// Response code to return to user. + /// Reason of cancellation. + /// True if operation is successful, otherwise if task is already finished before. + bool Reject(napa::ResultCode code, std::string reason); + + /// Returns whether current job is completed or cancelled. + bool IsFinished() const; + + /// Get module name to load function. + const std::string& GetModule() const; + + /// Get function name to execute. + const std::string& GetFunction() const; + + /// Get marshalled arguments. + const std::vector& GetArguments() const; + + /// Get transport context. + napa::transport::TransportContext& GetTransportContext(); + + /// Get options. + const napa::CallOptions& GetOptions() const; + + /// Get elapse since task start in nano-second. + std::chrono::nanoseconds GetElapse() const; + + private: + /// Module name. + std::string _module; + + /// Function name. + std::string _function; + + /// Arguments. + std::vector _arguments; + + /// Execute options. + napa::CallOptions _options; + + /// Transport context. + std::unique_ptr _transportContext; + + /// Callback when task completes. + napa::ExecuteCallback _callback; + + /// Whether this task is finished. + std::atomic _finished; + + /// Call start time. 
+ std::chrono::high_resolution_clock::time_point _startTime; + }; +} +} \ No newline at end of file diff --git a/src/zone/call-task.cpp b/src/zone/call-task.cpp new file mode 100644 index 0000000..e2cd06d --- /dev/null +++ b/src/zone/call-task.cpp @@ -0,0 +1,51 @@ +// See: https://groups.google.com/forum/#!topic/nodejs/onA0S01INtw +#ifdef BUILDING_NODE_EXTENSION +#include +#endif + +#include "call-task.h" + +#include + +using namespace napa::zone; +using namespace napa::v8_helpers; + +napa::zone::CallTask::CallTask(std::shared_ptr context) : + _context(std::move(context)) { +} + +void CallTask::Execute() { + auto isolate = v8::Isolate::GetCurrent(); + v8::HandleScope scope(isolate); + auto context = isolate->GetCurrentContext(); + + // Get the module based main function from global scope. + auto executeFunction = context->Global()->Get(MakeExternalV8String(isolate, "__napa_zone_call__")); + NAPA_ASSERT(executeFunction->IsFunction(), "__napa_zone_call__ function must exist in global scope"); + + // Create task wrap. + auto contextWrap = napa::module::CallContextWrap::NewInstance(_context); + v8::Local argv[] = { contextWrap }; + + // Execute the function. + v8::TryCatch tryCatch(isolate); + auto res = v8::Local::Cast(executeFunction)->Call(context->Global(), 1, argv); + + // Terminating an isolate may occur from a different thread, i.e. from timeout service. + // If the function call already finished successfully when the isolate is terminated it may lead + // to one the following: + // 1. Terminate was called before tryCatch.HasTerminated(), the user gets an error code. + // 2. Terminate was called after tryCatch.HasTerminated(), the user gets a success code. + // + // In both cases the isolate is being restored since this happens before each task executes. 
+ if (tryCatch.HasTerminated()) { + if (_terminationReason == TerminationReason::TIMEOUT) { + (void)_context->Reject(NAPA_RESULT_TIMEOUT, "Terminated due to timeout"); + } else { + (void)_context->Reject(NAPA_RESULT_INTERNAL_ERROR, "Terminated with unknown reason"); + } + return; + } + + NAPA_ASSERT(!tryCatch.HasCaught(), "__napa_zone_call__ should catch all user exceptions and reject task."); +} diff --git a/src/zone/call-task.h b/src/zone/call-task.h new file mode 100644 index 0000000..7a8e7b8 --- /dev/null +++ b/src/zone/call-task.h @@ -0,0 +1,26 @@ +#pragma once + +#include "call-context.h" +#include "terminable-task.h" + +#include + +namespace napa { +namespace zone { + + /// A task for executing pre-loaded javascript functions. + class CallTask : public TerminableTask { + public: + /// Constructor. + /// Call context. + CallTask(std::shared_ptr context); + + /// Overrides Task.Execute to define execution logic. + virtual void Execute() override; + + private: + /// Call context. + std::shared_ptr _context; + }; +} +} diff --git a/src/scheduler/broadcast-task.cpp b/src/zone/eval-task.cpp similarity index 79% rename from src/scheduler/broadcast-task.cpp rename to src/zone/eval-task.cpp index d17c1c7..9cddff6 100644 --- a/src/scheduler/broadcast-task.cpp +++ b/src/zone/eval-task.cpp @@ -1,19 +1,26 @@ -#include "broadcast-task.h" +// See: https://groups.google.com/forum/#!topic/nodejs/onA0S01INtw +#ifdef BUILDING_NODE_EXTENSION +#include +#endif + +#include "eval-task.h" #include #include #include + #include -using namespace napa::scheduler; +using namespace napa; +using namespace napa::zone; -BroadcastTask::BroadcastTask(std::string source, std::string sourceOrigin, BroadcastTaskCallback callback) : +EvalTask::EvalTask(std::string source, std::string sourceOrigin, BroadcastCallback callback) : _source(std::move(source)), _sourceOrigin(std::move(sourceOrigin)), _callback(std::move(callback)) {} -void BroadcastTask::Execute() { +void EvalTask::Execute() { auto 
isolate = v8::Isolate::GetCurrent(); v8::HandleScope scope(isolate); auto context = isolate->GetCurrentContext(); @@ -35,7 +42,7 @@ void BroadcastTask::Execute() { auto compileResult = v8::Script::Compile(context, source, &sourceOrigin); if (compileResult.IsEmpty()) { LOG_ERROR("Broadcast", "Failed while compiling the provided source code. %s", _source.c_str()); - _callback(NAPA_RESPONSE_BROADCAST_SCRIPT_ERROR); + _callback(NAPA_RESULT_BROADCAST_SCRIPT_ERROR); return; } auto script = compileResult.ToLocalChecked(); @@ -50,9 +57,9 @@ void BroadcastTask::Execute() { v8::String::Utf8Value stackTraceStr(stackTrace); LOG_ERROR("Broadcast", "JS exception thrown: %s - %s", *exceptionStr, *stackTraceStr); - _callback(NAPA_RESPONSE_BROADCAST_SCRIPT_ERROR); + _callback(NAPA_RESULT_BROADCAST_SCRIPT_ERROR); return; } - _callback(NAPA_RESPONSE_SUCCESS); + _callback(NAPA_RESULT_SUCCESS); } diff --git a/src/scheduler/broadcast-task.h b/src/zone/eval-task.h similarity index 62% rename from src/scheduler/broadcast-task.h rename to src/zone/eval-task.h index 2441d52..768f09e 100644 --- a/src/scheduler/broadcast-task.h +++ b/src/zone/eval-task.h @@ -8,22 +8,18 @@ #include namespace napa { -namespace scheduler { +namespace zone { - /// A task for loading javascript source code. - class BroadcastTask : public Task { + /// A task for evaluating javascript source code. + class EvalTask : public Task { public: - - /// Signature of the callback function. - typedef std::function BroadcastTaskCallback; - /// Constructor. /// The JS source code to load on the isolate the runs this task. /// The origin of the source code. /// A callback that is triggered when the task execution completed. - BroadcastTask(std::string source, + EvalTask(std::string source, std::string sourceOrigin = "", - BroadcastTaskCallback callback = [](ResponseCode) {}); + BroadcastCallback callback = [](ResultCode) {}); /// Overrides Task.Execute to define loading execution logic. 
virtual void Execute() override; @@ -31,7 +27,7 @@ namespace scheduler { private: std::string _source; std::string _sourceOrigin; - BroadcastTaskCallback _callback; + BroadcastCallback _callback; }; } } diff --git a/src/zone/napa-zone.cpp b/src/zone/napa-zone.cpp new file mode 100644 index 0000000..e4f80a2 --- /dev/null +++ b/src/zone/napa-zone.cpp @@ -0,0 +1,120 @@ +#include "napa-zone.h" + +#include "zone/eval-task.h" +#include "zone/call-task.h" +#include "zone/call-context.h" +#include "zone/task-decorators.h" + +#include + +#include +#include +#include + +using namespace napa; +using namespace napa::zone; + +// Static members initialization +std::mutex NapaZone::_mutex; +std::unordered_map> NapaZone::_zones; + +std::shared_ptr NapaZone::Create(const settings::ZoneSettings& settings) { + std::lock_guard lock(_mutex); + + auto iter = _zones.find(settings.id); + if (iter != _zones.end() && !iter->second.expired()) { + LOG_ERROR("Zone", "Failed to create zone '%s': a zone with this name already exists.", settings.id.c_str()); + return nullptr; + } + + // An helper class to enable make_shared of NapaZone + struct MakeSharedEnabler : public NapaZone { + MakeSharedEnabler(const settings::ZoneSettings& settings) : NapaZone(settings) {} + }; + + std::shared_ptr zone = std::make_shared(settings); + _zones[settings.id] = zone; + try { + zone->Init(); + } catch (const std::exception& ex) { + LOG_ERROR("Zone", "Failed to initialize zone '%s': %s", settings.id.c_str(), ex.what()); + return nullptr; + } + return zone; +} + +std::shared_ptr NapaZone::Get(const std::string& id) { + std::lock_guard lock(_mutex); + + auto iter = _zones.find(id); + if (iter == _zones.end()) { + return nullptr; + } + + auto zone = iter->second.lock(); + if (zone == nullptr) { + LOG_WARNING("Zone", "Zone '%s' was already deleted.", id.c_str()); + + // Use this chance to clean up the map + _zones.erase(id); + } + + return zone; +} + +NapaZone::NapaZone(const settings::ZoneSettings& settings) : 
_settings(settings) { +} + +/// Load 'napajs' module during bootstrap. We use relative path to decouple from how module will be published. +static const std::string NAPAJS_MODULE_PATH = boost::dll::this_line_location().parent_path().parent_path().string(); +static const std::string BOOTSTRAP_SOURCE = "require('" + boost::replace_all_copy(NAPAJS_MODULE_PATH, "\\", "\\\\") + "');"; + +void NapaZone::Init() { + // Create the zone's scheduler. + _scheduler = std::make_unique(_settings); + + // Bootstrap after zone is created. + Broadcast(BOOTSTRAP_SOURCE, [](ResultCode code){ + NAPA_ASSERT(code == NAPA_RESULT_SUCCESS, "Bootstrap Napa zone failed."); + }); +} + +const std::string& NapaZone::GetId() const { + return _settings.id; +} + +void NapaZone::Broadcast(const std::string& source, BroadcastCallback callback) { + // Makes sure the callback is only called once, after all workers finished running the broadcast task. + auto counter = std::make_shared>(_settings.workers); + auto callOnce = [callback = std::move(callback), counter](napa_result_code code) { + if (--(*counter) == 0) { + callback(code); + } + }; + + auto broadcastTask = std::make_shared(source, "", std::move(callOnce)); + + _scheduler->ScheduleOnAllWorkers(std::move(broadcastTask)); +} + +void NapaZone::Execute(const FunctionSpec& spec, ExecuteCallback callback) { + std::shared_ptr task; + + if (spec.options.timeout > 0) { + task = std::make_shared>( + std::chrono::milliseconds(spec.options.timeout), + std::make_shared(spec, std::move(callback))); + } else { + task = std::make_shared(std::make_shared(spec, std::move(callback))); + } + + _scheduler->Schedule(std::move(task)); +} + +const settings::ZoneSettings& NapaZone::GetSettings() const { + return _settings; +} + +std::shared_ptr NapaZone::GetScheduler() { + return _scheduler; +} diff --git a/src/zone/zone-impl.h b/src/zone/napa-zone.h similarity index 54% rename from src/zone/zone-impl.h rename to src/zone/napa-zone.h index 5bb34d4..c74b1db 100644 --- 
a/src/zone/zone-impl.h +++ b/src/zone/napa-zone.h @@ -2,7 +2,7 @@ #include "zone.h" -#include "scheduler/scheduler.h" +#include "zone/scheduler.h" #include "settings/settings.h" #include @@ -11,16 +11,17 @@ namespace napa { +namespace zone { - /// Concrete implementation of a zone. - class ZoneImpl : public internal::Zone { + /// Concrete implementation of a Napa zone. + class NapaZone : public Zone { public: /// Creates a new zone with the provided id and settings. - static std::shared_ptr Create(const ZoneSettings& settings); + static std::shared_ptr Create(const settings::ZoneSettings& settings); /// Retrieves an existing zone by id. - static std::shared_ptr Get(const std::string& id); + static std::shared_ptr Get(const std::string& id); /// virtual const std::string& GetId() const override; @@ -29,24 +30,24 @@ namespace napa { virtual void Broadcast(const std::string& source, BroadcastCallback callback) override; /// - virtual void Execute(const ExecuteRequest& request, ExecuteCallback callback) override; + virtual void Execute(const FunctionSpec& spec, ExecuteCallback callback) override; /// Retrieves the zone settings. - const ZoneSettings& GetSettings() const; + const settings::ZoneSettings& GetSettings() const; /// Retrieves the zone scheduler. /// Asynchronous works keep the reference on scheduler, so they can finish up safely. 
- std::shared_ptr GetScheduler(); + std::shared_ptr GetScheduler(); private: - explicit ZoneImpl(const ZoneSettings& settings); + explicit NapaZone(const settings::ZoneSettings& settings); void Init(); - ZoneSettings _settings; - std::shared_ptr _scheduler; + settings::ZoneSettings _settings; + std::shared_ptr _scheduler; static std::mutex _mutex; - static std::unordered_map> _zones; + static std::unordered_map> _zones; }; - +} } \ No newline at end of file diff --git a/src/zone/node-zone.cpp b/src/zone/node-zone.cpp new file mode 100644 index 0000000..a139e83 --- /dev/null +++ b/src/zone/node-zone.cpp @@ -0,0 +1,43 @@ +#include "node-zone.h" + +#include "worker-context.h" + +#include + +using namespace napa; +using namespace napa::zone; + +std::shared_ptr NodeZone::_instance; + +void NodeZone::Init(BroadcastDelegate broadcast, ExecuteDelegate execute) { + _instance.reset(new NodeZone(broadcast, execute)); +} + +NodeZone::NodeZone(BroadcastDelegate broadcast, ExecuteDelegate execute): + _broadcast(std::move(broadcast)), _execute(std::move(execute)), _id("node") { + + NAPA_ASSERT(_broadcast, "Broadcast delegate must be a valid function."); + NAPA_ASSERT(_execute, "Execute delegate must be a valid function."); + + // Zone instance into TLS. + WorkerContext::Set(WorkerContextItem::ZONE, reinterpret_cast(this)); + + // Worker Id into TLS. 
+ WorkerContext::Set(WorkerContextItem::WORKER_ID, reinterpret_cast(static_cast(0))); +} + +std::shared_ptr NodeZone::Get() { + return _instance; +} + +const std::string& NodeZone::GetId() const { + return _id; +} + +void NodeZone::Broadcast(const std::string& source, BroadcastCallback callback) { + _broadcast(source, callback); +} + +void NodeZone::Execute(const FunctionSpec& spec, ExecuteCallback callback) { + _execute(spec, callback); +} diff --git a/src/zone/node-zone.h b/src/zone/node-zone.h new file mode 100644 index 0000000..9cc11be --- /dev/null +++ b/src/zone/node-zone.h @@ -0,0 +1,53 @@ +#pragma once + +#include "zone.h" +#include + +namespace napa { +namespace zone { + + /// Delegate for Broadcast on Node zone. + using BroadcastDelegate = std::function; + + /// Delegate for Execute on Node zone. + using ExecuteDelegate = std::function; + + /// Concrete implementation of a Node zone. + class NodeZone : public Zone { + public: + /// Set delegate function for Broadcast and Execute on node zone. This is intended to be called from napa-binding.node. + static NAPA_API void Init(BroadcastDelegate broadcast, ExecuteDelegate execute); + + /// + /// Retrieves an existing zone. + /// If Node is not applicable (like in embed mode), a nullptr will be returned. + /// + static std::shared_ptr Get(); + + /// + virtual const std::string& GetId() const override; + + /// + virtual void Broadcast(const std::string& source, BroadcastCallback callback) override; + + /// + virtual void Execute(const FunctionSpec& spec, ExecuteCallback callback) override; + + private: + /// Constructor. + NodeZone(BroadcastDelegate broadcast, ExecuteDelegate execute); + + /// Broadcast delegate for node zone. + BroadcastDelegate _broadcast; + + /// Execute delegate for node zone. + ExecuteDelegate _execute; + + /// Node zone id. + std::string _id; + + /// Node zone instance. 
+ static std::shared_ptr _instance; + }; +} +} \ No newline at end of file diff --git a/src/scheduler/scheduler.h b/src/zone/scheduler.h similarity index 97% rename from src/scheduler/scheduler.h rename to src/zone/scheduler.h index 2ab294e..0e3b2dc 100644 --- a/src/scheduler/scheduler.h +++ b/src/zone/scheduler.h @@ -1,12 +1,12 @@ #pragma once #include "worker.h" +#include "worker-context.h" #include "settings/settings.h" #include "simple-thread-pool.h" #include "task.h" #include -#include #include #include @@ -16,7 +16,7 @@ #include namespace napa { -namespace scheduler { +namespace zone { /// The scheduler is responsible for assigning tasks to workers. template @@ -25,7 +25,7 @@ namespace scheduler { /// Constructor. /// A settings object. - explicit SchedulerImpl(const ZoneSettings& settings); + explicit SchedulerImpl(const settings::ZoneSettings& settings); /// Destructor. Waits for all tasks to finish. ~SchedulerImpl(); @@ -78,7 +78,7 @@ namespace scheduler { typedef SchedulerImpl Scheduler; template - SchedulerImpl::SchedulerImpl(const ZoneSettings& settings) : + SchedulerImpl::SchedulerImpl(const settings::ZoneSettings& settings) : _idleWorkersFlags(settings.workers), _synchronizer(std::make_unique(1)), _shouldStop(false) { diff --git a/src/scheduler/simple-thread-pool.cpp b/src/zone/simple-thread-pool.cpp similarity index 97% rename from src/scheduler/simple-thread-pool.cpp rename to src/zone/simple-thread-pool.cpp index 4f1add8..51708d8 100644 --- a/src/scheduler/simple-thread-pool.cpp +++ b/src/zone/simple-thread-pool.cpp @@ -1,6 +1,6 @@ #include "simple-thread-pool.h" -using namespace napa::scheduler; +using namespace napa::zone; SimpleThreadPool::Worker::Worker(SimpleThreadPool& pool) : _pool(pool) {} diff --git a/src/scheduler/simple-thread-pool.h b/src/zone/simple-thread-pool.h similarity index 98% rename from src/scheduler/simple-thread-pool.h rename to src/zone/simple-thread-pool.h index 6b6c6af..a320bc1 100644 --- 
a/src/scheduler/simple-thread-pool.h +++ b/src/zone/simple-thread-pool.h @@ -10,7 +10,7 @@ #include namespace napa { -namespace scheduler { +namespace zone { /// Simple thread pool. class SimpleThreadPool { diff --git a/src/scheduler/task-decorators.h b/src/zone/task-decorators.h similarity index 98% rename from src/scheduler/task-decorators.h rename to src/zone/task-decorators.h index 2745ac2..18f9bd1 100644 --- a/src/scheduler/task-decorators.h +++ b/src/zone/task-decorators.h @@ -11,7 +11,7 @@ #include namespace napa { -namespace scheduler { +namespace zone { template class TaskDecorator : public Task { diff --git a/src/scheduler/task.h b/src/zone/task.h similarity index 93% rename from src/scheduler/task.h rename to src/zone/task.h index ed599d5..dce269c 100644 --- a/src/scheduler/task.h +++ b/src/zone/task.h @@ -2,7 +2,7 @@ namespace napa { -namespace scheduler { +namespace zone { /// Represents an execution logic that can be scheduled using the Napa scheduler. class Task { diff --git a/src/scheduler/terminable-task.cpp b/src/zone/terminable-task.cpp similarity index 56% rename from src/scheduler/terminable-task.cpp rename to src/zone/terminable-task.cpp index faf7ffc..2eb1a9b 100644 --- a/src/scheduler/terminable-task.cpp +++ b/src/zone/terminable-task.cpp @@ -1,8 +1,13 @@ +// See: https://groups.google.com/forum/#!topic/nodejs/onA0S01INtw +#ifdef BUILDING_NODE_EXTENSION +#include +#endif + #include "terminable-task.h" #include -using namespace napa::scheduler; +using namespace napa::zone; void TerminableTask::Terminate(TerminationReason reason, v8::Isolate* isolate) { _terminationReason = reason; diff --git a/src/scheduler/terminable-task.h b/src/zone/terminable-task.h similarity index 97% rename from src/scheduler/terminable-task.h rename to src/zone/terminable-task.h index 14ea817..16648ca 100644 --- a/src/scheduler/terminable-task.h +++ b/src/zone/terminable-task.h @@ -7,7 +7,7 @@ namespace v8 { } namespace napa { -namespace scheduler { +namespace zone { 
/// Specifies the possible reasons for termination. enum class TerminationReason { diff --git a/src/scheduler/timeout-service.cpp b/src/zone/timeout-service.cpp similarity index 97% rename from src/scheduler/timeout-service.cpp rename to src/zone/timeout-service.cpp index 5e12b5d..0a7ea29 100644 --- a/src/scheduler/timeout-service.cpp +++ b/src/zone/timeout-service.cpp @@ -6,7 +6,7 @@ #include -using namespace napa::scheduler; +using namespace napa::zone; class TokenImpl : public TimeoutService::Token { public: diff --git a/src/scheduler/timeout-service.h b/src/zone/timeout-service.h similarity index 98% rename from src/scheduler/timeout-service.h rename to src/zone/timeout-service.h index 7a9775e..7281115 100644 --- a/src/scheduler/timeout-service.h +++ b/src/zone/timeout-service.h @@ -7,7 +7,7 @@ #include namespace napa { -namespace scheduler { +namespace zone { /// Enables registering callbacks that will be triggered after some defined time. class TimeoutService { diff --git a/src/module/worker-context.cpp b/src/zone/worker-context.cpp similarity index 88% rename from src/module/worker-context.cpp rename to src/zone/worker-context.cpp index 123d981..c4521cf 100644 --- a/src/module/worker-context.cpp +++ b/src/zone/worker-context.cpp @@ -1,10 +1,10 @@ -#include -#include +#include "worker-context.h" #include +#include -using namespace napa; -using namespace napa::module; +using namespace napa::platform; +using namespace napa::zone; // Global instance of WorkerContext. WorkerContext& WorkerContext::GetInstance() { diff --git a/inc/napa/module/worker-context.h b/src/zone/worker-context.h similarity index 93% rename from inc/napa/module/worker-context.h rename to src/zone/worker-context.h index 86f0d23..78ad84e 100644 --- a/inc/napa/module/worker-context.h +++ b/src/zone/worker-context.h @@ -5,7 +5,7 @@ #include namespace napa { -namespace module { +namespace zone { /// Worker context item to store Napa specific data for a module to be able to access. 
enum class WorkerContextItem : uint32_t { @@ -63,7 +63,7 @@ namespace module { std::array(WorkerContextItem::END_OF_WORKER_CONTEXT_ITEM)> _tlsIndexes; }; - #define INIT_WORKER_CONTEXT napa::module::WorkerContext::Init + #define INIT_WORKER_CONTEXT napa::zone::WorkerContext::Init -} // End of namespace module. +} // End of namespace zone. } // End of namespace napa. \ No newline at end of file diff --git a/src/scheduler/worker.cpp b/src/zone/worker.cpp similarity index 68% rename from src/scheduler/worker.cpp rename to src/zone/worker.cpp index 5e67615..84e6da2 100644 --- a/src/scheduler/worker.cpp +++ b/src/zone/worker.cpp @@ -1,29 +1,26 @@ #include "worker.h" -#include "module/module-loader.h" +#include "worker-context.h" +#include "module/loader/module-loader.h" #include "v8/array-buffer-allocator.h" -#include "zone/zone-impl.h" +#include "zone/napa-zone.h" #include -#include - -// Disable third party library warnnings -#pragma warning(push) -#pragma warning(disable: 4127 4458 4068) -#include -#pragma warning(pop) #include +#include #include +#include +#include #include using namespace napa; -using namespace napa::scheduler; +using namespace napa::zone; // Forward declaration -static v8::Isolate* CreateIsolate(const ZoneSettings& settings); -static void ConfigureIsolate(v8::Isolate* isolate, const ZoneSettings& settings); +static v8::Isolate* CreateIsolate(const settings::ZoneSettings& settings); +static void ConfigureIsolate(v8::Isolate* isolate, const settings::ZoneSettings& settings); struct Worker::Impl { @@ -34,7 +31,13 @@ struct Worker::Impl { std::thread workerThread; /// Queue for tasks scheduled on this worker. - moodycamel::BlockingConcurrentQueue> tasks; + std::queue> tasks; + + /// Condition variable to indicate if there are more tasks to consume. + std::condition_variable hasTaskEvent; + + /// Lock for task queue. + std::mutex queueLock; /// V8 isolate associated with this worker. 
v8::Isolate* isolate; @@ -44,7 +47,7 @@ struct Worker::Impl { }; Worker::Worker(WorkerId id, - const ZoneSettings& settings, + const settings::ZoneSettings& settings, std::function idleNotificationCallback) : _impl(std::make_unique()) { @@ -55,7 +58,7 @@ Worker::Worker(WorkerId id, Worker::~Worker() { // Signal the thread loop that it should stop processing tasks. - _impl->tasks.enqueue(nullptr); + Enqueue(nullptr); _impl->workerThread.join(); @@ -68,18 +71,25 @@ Worker::Worker(Worker&&) = default; Worker& Worker::operator=(Worker&&) = default; void Worker::Schedule(std::shared_ptr task) { - NAPA_ASSERT(task, "task was null"); - - _impl->tasks.enqueue(std::move(task)); + NAPA_ASSERT(task != nullptr, "Task should not be null"); + Enqueue(task); } -void Worker::WorkerThreadFunc(const ZoneSettings& settings) { +void Worker::Enqueue(std::shared_ptr task) { + { + std::unique_lock lock(_impl->queueLock); + _impl->tasks.emplace(std::move(task)); + } + _impl->hasTaskEvent.notify_one(); +} + +void Worker::WorkerThreadFunc(const settings::ZoneSettings& settings) { // Zone instance into TLS. - module::WorkerContext::Set(module::WorkerContextItem::ZONE, - reinterpret_cast(ZoneImpl::Get(settings.id).get())); + WorkerContext::Set(WorkerContextItem::ZONE, + reinterpret_cast(NapaZone::Get(settings.id).get())); // Worker Id into TLS. - module::WorkerContext::Set(module::WorkerContextItem::WORKER_ID, + WorkerContext::Set(WorkerContextItem::WORKER_ID, reinterpret_cast(static_cast(_impl->id))); _impl->isolate = CreateIsolate(settings); @@ -103,11 +113,18 @@ void Worker::WorkerThreadFunc(const ZoneSettings& settings) { while (true) { std::shared_ptr task; - - // Retrieve a task to execute. Wait if non exists. - if (!_impl->tasks.try_dequeue(task)) { - _impl->idleNotificationCallback(_impl->id); - _impl->tasks.wait_dequeue(task); + + { + std::unique_lock lock(_impl->queueLock); + if (_impl->tasks.empty()) { + _impl->idleNotificationCallback(_impl->id); + + // Wait until new tasks come. 
+ _impl->hasTaskEvent.wait(lock, [this]() { return !_impl->tasks.empty(); }); + } + + task = _impl->tasks.front(); + _impl->tasks.pop(); } // A null task means that the worker needs to shutdown. @@ -122,7 +139,7 @@ void Worker::WorkerThreadFunc(const ZoneSettings& settings) { } } -static v8::Isolate* CreateIsolate(const ZoneSettings& settings) { +static v8::Isolate* CreateIsolate(const settings::ZoneSettings& settings) { // The allocator is a global V8 setting. static napa::v8_extensions::ArrayBufferAllocator commonAllocator; @@ -137,7 +154,7 @@ static v8::Isolate* CreateIsolate(const ZoneSettings& settings) { return v8::Isolate::New(createParams); } -static void ConfigureIsolate(v8::Isolate* isolate, const ZoneSettings& settings) { +static void ConfigureIsolate(v8::Isolate* isolate, const settings::ZoneSettings& settings) { isolate->SetFatalErrorHandler([](const char* location, const char* message) { LOG_ERROR("V8", "V8 Fatal error at %s. Error: %s", location, message); }); diff --git a/src/scheduler/worker.h b/src/zone/worker.h similarity index 85% rename from src/scheduler/worker.h rename to src/zone/worker.h index 54a899e..23c97b9 100644 --- a/src/scheduler/worker.h +++ b/src/zone/worker.h @@ -8,7 +8,7 @@ namespace napa { -namespace scheduler { +namespace zone { // Represent the worker id type. using WorkerId = uint32_t; @@ -22,7 +22,7 @@ namespace scheduler { /// A settings object. /// Triggers when the worker becomes idle. Worker(WorkerId id, - const ZoneSettings &settings, + const settings::ZoneSettings &settings, std::function idleNotificationCallback); /// Destructor. @@ -45,7 +45,10 @@ namespace scheduler { private: /// The worker thread logic. - void WorkerThreadFunc(const ZoneSettings& settings); + void WorkerThreadFunc(const settings::ZoneSettings& settings); + + /// Enqueue a task. 
+ void Enqueue(std::shared_ptr task); struct Impl; std::unique_ptr _impl; diff --git a/src/zone/zone-impl.cpp b/src/zone/zone-impl.cpp deleted file mode 100644 index c01626c..0000000 --- a/src/zone/zone-impl.cpp +++ /dev/null @@ -1,147 +0,0 @@ -#include "zone-impl.h" - -#include "scheduler/broadcast-task.h" -#include "scheduler/execute-task.h" -#include "scheduler/task-decorators.h" - -#include - -#include -#include - -#include - -using namespace napa; -using namespace napa::scheduler; - -// Forward declarations -static void BroadcastFromFile(const std::string& file, Scheduler& scheduler); - -// Static members initialization -std::mutex ZoneImpl::_mutex; -std::unordered_map> ZoneImpl::_zones; - -// The path to the file containing the execute main function -static const std::string ZONE_MAIN_FILE = (boost::dll::this_line_location().parent_path().parent_path() / - "lib\\zone\\zone-main.js").string(); - -std::shared_ptr ZoneImpl::Create(const ZoneSettings& settings) { - std::lock_guard lock(_mutex); - - auto iter = _zones.find(settings.id); - if (iter != _zones.end() && !iter->second.expired()) { - LOG_ERROR("Zone", "Failed to create zone '%s': a zone with this name already exists.", settings.id.c_str()); - return nullptr; - } - - // An helper class to enable make_shared of ZoneImpl - struct MakeSharedEnabler : public ZoneImpl { - MakeSharedEnabler(const ZoneSettings& settings) : ZoneImpl(settings) {} - }; - - std::shared_ptr zone = std::make_shared(settings); - _zones[settings.id] = zone; - try { - zone->Init(); - } catch (const std::exception& ex) { - LOG_ERROR("Zone", "Failed to initialize zone '%s': %s", settings.id.c_str(), ex.what()); - return nullptr; - } - return zone; -} - -std::shared_ptr ZoneImpl::Get(const std::string& id) { - std::lock_guard lock(_mutex); - - auto iter = _zones.find(id); - if (iter == _zones.end()) { - return nullptr; - } - - auto zone = iter->second.lock(); - if (zone == nullptr) { - LOG_WARNING("Zone", "Zone '%s' was already deleted.", 
id.c_str()); - - // Use this chance to clean up the map - _zones.erase(id); - } - - return zone; -} - -ZoneImpl::ZoneImpl(const ZoneSettings& settings) : _settings(settings) { -} - -void ZoneImpl::Init() { - // Create the zone's scheduler. - _scheduler = std::make_unique(_settings); - - // Read zone main file content and broadcast it on all workers. - BroadcastFromFile(ZONE_MAIN_FILE, *_scheduler); -} - -const std::string& ZoneImpl::GetId() const { - return _settings.id; -} - -void ZoneImpl::Broadcast(const std::string& source, BroadcastCallback callback) { - // Makes sure the callback is only called once, after all workers finished running the broadcast task. - auto counter = std::make_shared>(_settings.workers); - auto callOnce = [callback = std::move(callback), counter](napa_response_code code) { - if (--(*counter) == 0) { - callback(code); - } - }; - - auto broadcastTask = std::make_shared(source, "", std::move(callOnce)); - - _scheduler->ScheduleOnAllWorkers(std::move(broadcastTask)); -} - -void ZoneImpl::Execute(const ExecuteRequest& request, ExecuteCallback callback) { - std::shared_ptr task; - - if (request.options.timeout > 0) { - task = std::make_shared>( - std::chrono::milliseconds(request.options.timeout), - request, - std::move(callback)); - } else { - task = std::make_shared(request, std::move(callback)); - } - - _scheduler->Schedule(std::move(task)); -} - -const ZoneSettings& ZoneImpl::GetSettings() const { - return _settings; -} - -std::shared_ptr ZoneImpl::GetScheduler() { - return _scheduler; -} - -static void BroadcastFromFile(const std::string& file, Scheduler& scheduler) { - auto filePath = boost::filesystem::path(file); - if (filePath.is_relative()) { - filePath = (boost::filesystem::current_path() / filePath).normalize().make_preferred(); - } - - auto filePathString = filePath.string(); - std::ifstream ifs; - ifs.open(filePathString); - - if (!ifs.is_open()) { - throw std::runtime_error("Failed to open file: " + filePathString); - } - - 
std::stringstream buffer; - buffer << ifs.rdbuf(); - - auto fileContent = buffer.str(); - if (fileContent.empty()) { - throw std::runtime_error("File content was empty: " + filePathString); - } - - scheduler.ScheduleOnAllWorkers(std::make_shared(std::move(fileContent))); -} diff --git a/src/zone/zone.h b/src/zone/zone.h index 839b81b..b7a13a4 100644 --- a/src/zone/zone.h +++ b/src/zone/zone.h @@ -3,7 +3,7 @@ #include namespace napa { -namespace internal { +namespace zone { /// Interface for Zone. struct Zone { @@ -17,9 +17,9 @@ namespace internal { virtual void Broadcast(const std::string& source, BroadcastCallback callback) = 0; /// Executes a pre-loaded JS function asynchronously. - /// The execution request. + /// The function spec. /// A callback that is triggered when execution is done. - virtual void Execute(const ExecuteRequest& request, ExecuteCallback callback) = 0; + virtual void Execute(const FunctionSpec& spec, ExecuteCallback callback) = 0; /// Virtual destructor. virtual ~Zone() {} diff --git a/test/napa-zone/test.ts b/test/napa-zone/test.ts index f0acf53..e3e765a 100644 --- a/test/napa-zone/test.ts +++ b/test/napa-zone/test.ts @@ -17,52 +17,55 @@ export function getCurrentZone(): napa.zone.Zone { return napa.zone.current; } -export function broadcast(id: string, code: string): void { +export function broadcast(id: string, code: string): Promise { let zone = napa.zone.get(id); - // TODO: replace with broadcast when TODO:#3 is done. - zone.broadcastSync(code); + return zone.broadcast(code); } -export function broadcastTestFunction(id: string): void { - // TODO: replace with broadcast when TODO:#3 is done. 
- napa.zone.get(id).broadcastSync((input: string) => { - console.log(input); - }, ["hello world"]); +export function broadcastTestFunction(id: string): Promise { + return napa.zone.get(id).broadcast((input: string) => { + console.log(input); + }, ["hello world"]); } -export function broadcastTransportable(id: string): void { - // TODO: replace with broadcast when TODO:#3 is done. - napa.zone.get(id).broadcastSync((input: any) => { - console.log(input); - }, [napa.memory.crtAllocator]); +export function broadcastTransportable(id: string): Promise { + return napa.zone.get(id).broadcast((input: any) => { + console.log(input); + }, [napa.memory.crtAllocator]); } -export function broadcastClosure(id: string): void { +export function broadcastClosure(id: string): Promise { let zone = napa.zone.get(id); - // TODO: replace with broadcast when TODO:#3 is done. - zone.broadcastSync(() => { - console.log(zone); - }, []); + return zone.broadcast(() => { + console.log(zone); + }, []); } -export function execute(id: string, moduleName: string, functionName: string, args?: any[]): any { +export function execute(id: string, moduleName: string, functionName: string, args?: any[]): Promise { let zone = napa.zone.get(id); - // TODO: replace with execute when TODO:#3 is done. - return zone.executeSync(moduleName, functionName, args).value; + return new Promise((resolve, reject) => { + zone.execute(moduleName, functionName, args) + .then(result => resolve(result.value)) + .catch(error => reject(error)); + }); } -export function executeTestFunction(id: string): any { +export function executeTestFunction(id: string): Promise { let zone = napa.zone.get(id); - // TODO: replace with execute when TODO:#3 is done. 
- return zone.executeSync((input: string) => { - return input; - }, ['hello world']).value; + return new Promise((resolve, reject) => { + zone.execute((input: string) => { return input; }, ['hello world']) + .then(result => resolve(result.value)) + .catch(error => reject(error)); + }); } -export function executeTestFunctionWithClosure(id: string): any { +export function executeTestFunctionWithClosure(id: string): Promise { let zone = napa.zone.get(id); - // TODO: replace with execute when TODO:#3 is done. - return zone.executeSync(() => { return zone; }, []).value; + return new Promise((resolve, reject) => { + zone.execute(() => { return zone; }, []) + .then(result => resolve(result.value)) + .catch(error => reject(error)); + }); } /// Memory test helpers. diff --git a/test/zone-test.ts b/test/zone-test.ts index 908ab19..98ad638 100644 --- a/test/zone-test.ts +++ b/test/zone-test.ts @@ -101,8 +101,7 @@ describe('napajs/zone', function () { }); describe('broadcast', () => { - // TODO #1: implement NodeZone. - it.skip('@node: -> node zone with JavaScript code', () => { + it('@node: -> node zone with JavaScript code', () => { return napa.zone.current.broadcast("var state = 0;"); }); @@ -119,8 +118,7 @@ describe('napajs/zone', function () { return napaZone1.execute(napaZoneTestModule, "broadcast", ["napa-zone1", "var state = 0;"]); }); - // Blocked by TODO #1. - it.skip('@napa: -> node zone with JavaScript code', () => { + it('@napa: -> node zone with JavaScript code', () => { return napaZone1.execute(napaZoneTestModule, "broadcast", ["node", "var state = 0;"]); }); @@ -136,34 +134,31 @@ describe('napajs/zone', function () { }); }); - // Blocked by TODO #1. 
- it.skip('@node: -> node zone throw runtime error', () => { + it('@node: -> node zone throw runtime error', () => { return shouldFail(() => { return napa.zone.current.broadcast("throw new Error();"); }); }); - + it('@node: -> napa zone throw runtime error', () => { return shouldFail(() => { return napaZone1.broadcast("throw new Error();"); }); }); - + it('@napa: -> napa zone throw runtime error', () => { return shouldFail(() => { return napaZone1.execute(napaZoneTestModule, "broadcast", ["napa-zone2", "throw new Error();"]); }); }); - - // Blocked by TODO #1. - it.skip('@napa: -> node zone throw runtime error', () => { + + it('@napa: -> node zone throw runtime error', () => { return shouldFail(() => { return napaZone1.execute(napaZoneTestModule, "broadcast", ["node", "throw new Error();"]); }); }); - - // Blocked by TODO #1. - it.skip('@node: -> node zone with anonymous function', () => { + + it('@node: -> node zone with anonymous function', () => { return napa.zone.current.broadcast((input: string) => { console.log(input); }, ['hello world']); @@ -179,13 +174,11 @@ describe('napajs/zone', function () { return napaZone1.execute(napaZoneTestModule, "broadcastTestFunction", ['napa-zone2']); }); - // Blocked by TODO #1. - it.skip('@napa: -> node zone with anonymous function', () => { + it('@napa: -> node zone with anonymous function', () => { return napaZone1.execute(napaZoneTestModule, "broadcastTestFunction", ['node']); }); // TODO #4: support transportable args in broadcast. - // Also blocked by TODO #1. it.skip('@node: -> node zone with transportable args', () => { return napa.zone.current.broadcast((allocator: any) => { console.log(allocator); @@ -205,13 +198,11 @@ describe('napajs/zone', function () { }); // Blocked by TODO #4. - // Also blocked by TODO #1. it.skip('@napa: -> node zone with transportable args', () => { return napa.zone.current.execute(napaZoneTestModule, "broadcastTransportable", []); }); - // Blocked by TODO #1. 
- it.skip('@node: -> node zone with anonymous function having closure (should fail)', () => { + it('@node: -> node zone with anonymous function having closure (should fail)', () => { return shouldFail(() => { return napa.zone.current.broadcast(() => { console.log(napaZone1.id); @@ -233,137 +224,68 @@ describe('napajs/zone', function () { }); }); - /// Blocked by TODO #1. - it.skip('@napa: -> node zone with anonymous function having closure (should fail)', () => { + it('@napa: -> node zone with anonymous function having closure (should fail)', () => { return shouldFail(() => { return napaZone1.execute(napaZoneTestModule, "broadcastClosure", ['node']); }); }); }); - describe("broadcastSync", () => { - /// Blocked by TODO #1. - it.skip('@node: -> node zone with JavaScript code', () => { - napa.zone.current.broadcastSync("var state = 0;"); - }); - - it('@node: -> napa zone with JavaScript code', () => { - napaZone1.broadcastSync("var state = 0;"); - }); - - // Duplicated with async broadcast version for now. - it.skip('@napa: -> napa zone with JavaScript code', () => { - }); - - // Blocked by TODO #1. - // Duplicated with async broadcast version for now. - it.skip('@napa: -> node zone with JavaScript code', () => { - }); - - // Blocked by TODO #1. - it.skip('@node: -> node zone with anonymous function', () => { - napa.zone.current.broadcastSync((input: string) => { - console.log(input); - }, ['hello world']); - }); - - it('@node: -> napa zone with anonymous function', () => { - napaZone1.broadcastSync((input: string) => { - console.log(input); - }, ['hello world']); - }); - - // Duplicated with async broadcast version for now. 
- it.skip('@napa: -> napa zone with anonymous function', () => { - }); - - // Blocked by TODO #1 - // Duplicated with async broadcast version for now - it.skip('@napa: -> node zone with anonymous function', () => { - }); - - // Blocked by TODO #1 - it.skip('@node: -> node zone with runtime error', () => { - assert.throws(() => { - napa.zone.current.broadcastSync(() => { - throw new Error(); - }, ['hello world']); - }); - }); - - it('@node: -> napa zone with runtime error', () => { - assert.throws(() => { - napaZone1.broadcastSync(() => { - throw new Error(); - }, ['hello world']); - }); - }); - - // Duplicated with async broadcast version for now. - it('@napa: -> napa zone with runtime error', () => { - }); - - // Duplicated with async broadcast version for now. - it('@napa: -> node zone with runtime error', () => { - }); - }); - describe('execute', () => { - napaZone1.broadcastSync('function foo(input) { return input; }'); - napaZone1.broadcastSync(` - var ns1; - (function (ns1) { - var ns2; - (function (ns2) { - function foo(input) { - return input; + let fooDef = 'function foo(input) { return input; }'; + let nestedFunctionDef = ` + var ns1 = { + ns2: { + foo: function (input) { + return input; } - ns2.foo = foo; - })(ns2 = ns1.ns2 || (ns1.ns2 = {})); - })(ns1 = exports.ns1 || (exports.ns1 = {})); - `); + } + }; + `; - napaZone2.broadcastSync('function foo(input) { return input; }'); + napaZone1.broadcast(fooDef); + napaZone1.broadcast(nestedFunctionDef); + napaZone2.broadcast(fooDef); + + napa.zone.node.broadcast(fooDef); + napa.zone.node.broadcast(nestedFunctionDef); - // Blocked by TODO #1. 
- it.skip('@node: -> node zone with global function name', () => { + it('@node: -> node zone with global function name', () => { return napa.zone.current.execute("", "foo", ['hello world']) - .then((result: napa.zone.ExecuteResult) => { + .then((result: napa.zone.Result) => { assert.equal(result.value, 'hello world'); }); }); it('@node: -> napa zone with global function name', () => { return napaZone1.execute("", "foo", ['hello world']) - .then((result: napa.zone.ExecuteResult) => { + .then((result: napa.zone.Result) => { assert.equal(result.value, 'hello world'); }); }); it('@napa: -> napa zone with global function name', () => { return napaZone1.execute(napaZoneTestModule, 'execute', ["napa-zone2", "", "foo", ['hello world']]) - .then((result: napa.zone.ExecuteResult) => { + .then((result: napa.zone.Result) => { assert.equal(result.value, 'hello world'); }); }); - // Blocked by TODO #1. - it.skip('@napa: -> node zone with global function name', () => { + it('@napa: -> node zone with global function name', () => { return napaZone1.execute(napaZoneTestModule, 'execute', ["node", "", "foo", ['hello world']]) - .then((result: napa.zone.ExecuteResult) => { + .then((result: napa.zone.Result) => { assert.equal(result.value, 'hello world'); }); }); it('@node: -> napa zone with global function name: function with namespaces', () => { return napaZone1.execute("", "ns1.ns2.foo", ['hello world']) - .then((result: napa.zone.ExecuteResult) => { + .then((result: napa.zone.Result) => { assert.equal(result.value, 'hello world'); }); }); - // Blocked by TODO #1. - it.skip('@node: -> node zone with global function name not exists', () => { + it('@node: -> node zone with global function name not exists', () => { return shouldFail(() => { return napa.zone.current.execute("", "foo1", ['hello world']); }); @@ -381,59 +303,55 @@ describe('napajs/zone', function () { }); }); - // Blocked by TODO #1. 
- it.skip('@napa: -> node zone with global function name not exists', () => { + it('@napa: -> node zone with global function name not exists', () => { return shouldFail(() => { return napaZone1.execute(napaZoneTestModule, 'execute', ["node", "", "foo1", []]); }); }); - // Blocked by TODO #1. - it.skip('@node: -> node zone with module function name', () => { + it('@node: -> node zone with module function name', () => { return napa.zone.current.execute(napaZoneTestModule, "bar", ['hello world']) - .then((result: napa.zone.ExecuteResult) => { + .then((result: napa.zone.Result) => { assert.equal(result.value, 'hello world'); }); }); it('@node: -> napa zone with module function name', () => { return napaZone1.execute(napaZoneTestModule, "bar", ['hello world']) - .then((result: napa.zone.ExecuteResult) => { + .then((result: napa.zone.Result) => { assert.equal(result.value, 'hello world'); }); }); it('@napa: -> napa zone with module function name', () => { return napaZone1.execute(napaZoneTestModule, 'execute', ["napa-zone2", napaZoneTestModule, "bar", ['hello world']]) - .then((result: napa.zone.ExecuteResult) => { + .then((result: napa.zone.Result) => { assert.equal(result.value, 'hello world'); }); }); - // Blocked by TODO #1. 
- it.skip('@napa: -> node zone with module function name', () => { + it('@napa: -> node zone with module function name', () => { return napaZone1.execute(napaZoneTestModule, 'execute', ["node", napaZoneTestModule, "bar", ['hello world']]) - .then((result: napa.zone.ExecuteResult) => { + .then((result: napa.zone.Result) => { assert.equal(result.value, 'hello world'); }); }); it('@node: -> napa zone with module function name: function with namespaces', () => { return napaZone1.execute(napaZoneTestModule, "ns1.ns2.foo", ['hello world']) - .then((result: napa.zone.ExecuteResult) => { + .then((result: napa.zone.Result) => { assert.equal(result.value, 'hello world'); }); }); it('@node: -> napa zone with module function name: module is a function', () => { return napaZone1.execute(path.resolve(__dirname, "./napa-zone/function-as-module"), "", ['hello world']) - .then((result: napa.zone.ExecuteResult) => { + .then((result: napa.zone.Result) => { assert.equal(result.value, 'hello world'); }); }); - // Blocked by TODO #1. - it.skip('@node: -> node zone with module not exists', () => { + it('@node: -> node zone with module not exists', () => { return shouldFail(() => { return napa.zone.current.execute("abc", "foo1", ['hello world']); }); @@ -451,15 +369,13 @@ describe('napajs/zone', function () { }); }); - // Blocked by TODO #1. - it.skip('@napa: -> node zone with module not exists', () => { + it('@napa: -> node zone with module not exists', () => { return shouldFail(() => { return napaZone1.execute(napaZoneTestModule, 'execute', ["node", "abc", "foo.", []]); }); }); - // Blocked by TODO #1. - it.skip('@node: -> node zone with module function not exists', () => { + it('@node: -> node zone with module function not exists', () => { return shouldFail(() => { return napa.zone.current.execute(napaZoneTestModule, "foo1", ['hello world']); }); @@ -477,19 +393,17 @@ describe('napajs/zone', function () { }); }); - // Blocked by TODO #1. 
- it.skip('@napa: -> node zone with module function not exists', () => { + it('@napa: -> node zone with module function not exists', () => { return shouldFail(() => { return napaZone1.execute(napaZoneTestModule, 'execute', ["node", napaZoneTestModule, "foo1", []]); }); }); - // Blocked by TODO #1. - it.skip('@node: -> node zone with anonymous function', () => { + it('@node: -> node zone with anonymous function', () => { return napa.zone.current.execute((input: string) => { return input; }, ['hello world']) - .then((result: napa.zone.ExecuteResult) => { + .then((result: napa.zone.Result) => { assert.equal(result.value, 'hello world'); }); }); @@ -498,33 +412,32 @@ describe('napajs/zone', function () { return napaZone1.execute((input: string) => { return input; }, ['hello world']) - .then((result: napa.zone.ExecuteResult) => { + .then((result: napa.zone.Result) => { assert.equal(result.value, 'hello world'); }); }); it('@napa: -> napa zone with anonymous function', () => { return napaZone1.execute(napaZoneTestModule, 'executeTestFunction', ["napa-zone2"]) - .then((result: napa.zone.ExecuteResult) => { + .then((result: napa.zone.Result) => { assert.equal(result.value, 'hello world'); }); }); - // Blocked by TODO #1. - it.skip('@napa: -> node zone with anonymous function', () => { + it('@napa: -> node zone with anonymous function', () => { return napaZone1.execute(napaZoneTestModule, 'executeTestFunction', ["node"]) - .then((result: napa.zone.ExecuteResult) => { + .then((result: napa.zone.Result) => { assert.equal(result.value, 'hello world'); }); }); - // Blocked by TODO #1. 
- it.skip('@node: -> node zone with anonymous function having closure (should fail)', () => { + it('@node: -> node zone with anonymous function having closure (should success)', () => { + return napa.zone.current.execute(() => { return napaZone1; }, []); }); it('@node: -> napa zone with anonymous function having closure (should fail)', () => { return shouldFail(() => { - return napaZone1.execute(() => { return napaZone1; }, ['hello world']) + return napaZone1.execute(() => { return napaZone1; }, []); }); }); @@ -534,14 +447,12 @@ describe('napajs/zone', function () { }); }); - // Blocked by TODO #1. - it.skip('@napa: -> node zone with anonymous function having closure (should fail)', () => { + it('@napa: -> node zone with anonymous function having closure (should fail)', () => { return shouldFail(() => { return napaZone1.execute(napaZoneTestModule, 'executeTestFunctionWithClosure', ["node"]); }); }); - // Blocked by TODO #1. it.skip('@node: -> node zone with transportable args', () => { }); @@ -551,11 +462,9 @@ describe('napajs/zone', function () { it.skip('@napa: -> napa zone with transportable args', () => { }); - // Blocked by TODO #1. it.skip('@napa: -> node zone with transportable args', () => { }); - // Blocked by TODO #1. it.skip('@node: -> node zone with transportable returns', () => { }); @@ -565,7 +474,6 @@ describe('napajs/zone', function () { it.skip('@napa: -> napa zone with transportable returns', () => { }); - // Blocked by TODO #1. it.skip('@napa: -> node zone with transportable returns', () => { }); @@ -595,46 +503,4 @@ describe('napajs/zone', function () { it.skip('@napa: -> napa zone with timed out in multiple hops', () => { }); }); - - describe.skip("executeSync", () => { - // Blocked by TODO #1. - it('@node: -> node zone succeed', () => { - }); - - it('@node: -> napa zone succeed', () => { - }); - - it('@napa: -> napa zone succeed', () => { - }); - - // Blocked by TODO #1. 
- it('@napa: -> node zone succeed', () => { - }); - - it('@node: -> node zone with runtime error', () => { - }); - - it('@node: -> napa zone with runtime error', () => { - }); - - it('@napa: -> napa zone with runtime error', () => { - }); - - it('@napa: -> node zone with runtime error', () => { - }); - - // Blocked by TODO #1. - it('@node: -> node zone with runtime error', () => { - }); - - it('@node: -> napa zone with timed out', () => { - }); - - it('@napa: -> napa zone with timed out', () => { - }); - - // Blocked by TODO #1. - it('@napa: -> node zone with timed out', () => { - }); - }); }); \ No newline at end of file diff --git a/third-party/moodycamel/blockingconcurrentqueue.h b/third-party/moodycamel/blockingconcurrentqueue.h deleted file mode 100644 index 996c556..0000000 --- a/third-party/moodycamel/blockingconcurrentqueue.h +++ /dev/null @@ -1,760 +0,0 @@ -// Provides an efficient blocking version of moodycamel::ConcurrentQueue. -// ©2015 Cameron Desrochers. Distributed under the terms of the simplified -// BSD license, available at the top of concurrentqueue.h. -// Uses Jeff Preshing's semaphore implementation (under the terms of its -// separate zlib license, embedded below). - -#pragma once - -#include "concurrentqueue.h" -#include -#include - -#if defined(_WIN32) -// Avoid including windows.h in a header; we only need a handful of -// items, so we'll redeclare them here (this is relatively safe since -// the API generally has to remain stable between Windows versions). -// I know this is an ugly hack but it still beats polluting the global -// namespace with thousands of generic names or adding a .cpp for nothing. 
-extern "C" { - struct _SECURITY_ATTRIBUTES; - __declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes, long lInitialCount, long lMaximumCount, const wchar_t* lpName); - __declspec(dllimport) int __stdcall CloseHandle(void* hObject); - __declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle, unsigned long dwMilliseconds); - __declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount, long* lpPreviousCount); -} -#elif defined(__MACH__) -#include -#elif defined(__unix__) -#include -#endif - -namespace moodycamel -{ -namespace details -{ - // Code in the mpmc_sema namespace below is an adaptation of Jeff Preshing's - // portable + lightweight semaphore implementations, originally from - // https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h - // LICENSE: - // Copyright (c) 2015 Jeff Preshing - // - // This software is provided 'as-is', without any express or implied - // warranty. In no event will the authors be held liable for any damages - // arising from the use of this software. - // - // Permission is granted to anyone to use this software for any purpose, - // including commercial applications, and to alter it and redistribute it - // freely, subject to the following restrictions: - // - // 1. The origin of this software must not be misrepresented; you must not - // claim that you wrote the original software. If you use this software - // in a product, an acknowledgement in the product documentation would be - // appreciated but is not required. - // 2. Altered source versions must be plainly marked as such, and must not be - // misrepresented as being the original software. - // 3. This notice may not be removed or altered from any source distribution. 
- namespace mpmc_sema - { -#if defined(_WIN32) - class Semaphore - { - private: - void* m_hSema; - - Semaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; - Semaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; - - public: - Semaphore(int initialCount = 0) - { - assert(initialCount >= 0); - const long maxLong = 0x7fffffff; - m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr); - } - - ~Semaphore() - { - CloseHandle(m_hSema); - } - - void wait() - { - const unsigned long infinite = 0xffffffff; - WaitForSingleObject(m_hSema, infinite); - } - - void signal(int count = 1) - { - ReleaseSemaphore(m_hSema, count, nullptr); - } - }; -#elif defined(__MACH__) - //--------------------------------------------------------- - // Semaphore (Apple iOS and OSX) - // Can't use POSIX semaphores due to http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html - //--------------------------------------------------------- - class Semaphore - { - private: - semaphore_t m_sema; - - Semaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; - Semaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; - - public: - Semaphore(int initialCount = 0) - { - assert(initialCount >= 0); - semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount); - } - - ~Semaphore() - { - semaphore_destroy(mach_task_self(), m_sema); - } - - void wait() - { - semaphore_wait(m_sema); - } - - void signal() - { - semaphore_signal(m_sema); - } - - void signal(int count) - { - while (count-- > 0) - { - semaphore_signal(m_sema); - } - } - }; -#elif defined(__unix__) - //--------------------------------------------------------- - // Semaphore (POSIX, Linux) - //--------------------------------------------------------- - class Semaphore - { - private: - sem_t m_sema; - - Semaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; - Semaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; - - public: - Semaphore(int 
initialCount = 0) - { - assert(initialCount >= 0); - sem_init(&m_sema, 0, initialCount); - } - - ~Semaphore() - { - sem_destroy(&m_sema); - } - - void wait() - { - // http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error - int rc; - do - { - rc = sem_wait(&m_sema); - } - while (rc == -1 && errno == EINTR); - } - - void signal() - { - sem_post(&m_sema); - } - - void signal(int count) - { - while (count-- > 0) - { - sem_post(&m_sema); - } - } - }; -#else -#error Unsupported platform! (No semaphore wrapper available) -#endif - - //--------------------------------------------------------- - // LightweightSemaphore - //--------------------------------------------------------- - class LightweightSemaphore - { - public: - typedef std::make_signed::type ssize_t; - - private: - std::atomic m_count; - Semaphore m_sema; - - void waitWithPartialSpinning() - { - ssize_t oldCount; - // Is there a better way to set the initial spin count? - // If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows PC, - // as threads start hitting the kernel semaphore. - int spin = 10000; - while (--spin >= 0) - { - oldCount = m_count.load(std::memory_order_relaxed); - if ((oldCount > 0) && m_count.compare_exchange_strong(oldCount, oldCount - 1, std::memory_order_acquire, std::memory_order_relaxed)) - return; - std::atomic_signal_fence(std::memory_order_acquire); // Prevent the compiler from collapsing the loop. - } - oldCount = m_count.fetch_sub(1, std::memory_order_acquire); - if (oldCount <= 0) - { - m_sema.wait(); - } - } - - ssize_t waitManyWithPartialSpinning(ssize_t max) - { - assert(max > 0); - ssize_t oldCount; - int spin = 10000; - while (--spin >= 0) - { - oldCount = m_count.load(std::memory_order_relaxed); - if (oldCount > 0) - { - ssize_t newCount = oldCount > max ? 
oldCount - max : 0; - if (m_count.compare_exchange_strong(oldCount, newCount, std::memory_order_acquire, std::memory_order_relaxed)) - return oldCount - newCount; - } - std::atomic_signal_fence(std::memory_order_acquire); - } - oldCount = m_count.fetch_sub(1, std::memory_order_acquire); - if (oldCount <= 0) - m_sema.wait(); - if (max > 1) - return 1 + tryWaitMany(max - 1); - return 1; - } - - public: - LightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount) - { - assert(initialCount >= 0); - } - - bool tryWait() - { - ssize_t oldCount = m_count.load(std::memory_order_relaxed); - while (oldCount > 0) - { - if (m_count.compare_exchange_weak(oldCount, oldCount - 1, std::memory_order_acquire, std::memory_order_relaxed)) - return true; - } - return false; - } - - void wait() - { - if (!tryWait()) - waitWithPartialSpinning(); - } - - // Acquires between 0 and (greedily) max, inclusive - ssize_t tryWaitMany(ssize_t max) - { - assert(max >= 0); - ssize_t oldCount = m_count.load(std::memory_order_relaxed); - while (oldCount > 0) - { - ssize_t newCount = oldCount > max ? oldCount - max : 0; - if (m_count.compare_exchange_weak(oldCount, newCount, std::memory_order_acquire, std::memory_order_relaxed)) - return oldCount - newCount; - } - return 0; - } - - // Acquires at least one, and (greedily) at most max - ssize_t waitMany(ssize_t max) - { - assert(max >= 0); - ssize_t result = tryWaitMany(max); - if (result == 0 && max > 0) - result = waitManyWithPartialSpinning(max); - return result; - } - - void signal(ssize_t count = 1) - { - assert(count >= 0); - ssize_t oldCount = m_count.fetch_add(count, std::memory_order_release); - ssize_t toRelease = -oldCount < count ? -oldCount : count; - if (toRelease > 0) - { - m_sema.signal((int)toRelease); - } - } - - ssize_t availableApprox() const - { - ssize_t count = m_count.load(std::memory_order_relaxed); - return count > 0 ? 
count : 0; - } - }; - } // end namespace mpmc_sema -} // end namespace details - - -// This is a blocking version of the queue. It has an almost identical interface to -// the normal non-blocking version, with the addition of various wait_dequeue() methods -// and the removal of producer-specific dequeue methods. -template -class BlockingConcurrentQueue -{ -private: - typedef ::moodycamel::ConcurrentQueue ConcurrentQueue; - typedef details::mpmc_sema::LightweightSemaphore LightweightSemaphore; - -public: - typedef typename ConcurrentQueue::producer_token_t producer_token_t; - typedef typename ConcurrentQueue::consumer_token_t consumer_token_t; - - typedef typename ConcurrentQueue::index_t index_t; - typedef typename ConcurrentQueue::size_t size_t; - typedef typename std::make_signed::type ssize_t; - - static const size_t BLOCK_SIZE = ConcurrentQueue::BLOCK_SIZE; - static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = ConcurrentQueue::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD; - static const size_t EXPLICIT_INITIAL_INDEX_SIZE = ConcurrentQueue::EXPLICIT_INITIAL_INDEX_SIZE; - static const size_t IMPLICIT_INITIAL_INDEX_SIZE = ConcurrentQueue::IMPLICIT_INITIAL_INDEX_SIZE; - static const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = ConcurrentQueue::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE; - static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = ConcurrentQueue::EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE; - static const size_t MAX_SUBQUEUE_SIZE = ConcurrentQueue::MAX_SUBQUEUE_SIZE; - -public: - // Creates a queue with at least `capacity` element slots; note that the - // actual number of elements that can be inserted without additional memory - // allocation depends on the number of producers and the block size (e.g. 
if - // the block size is equal to `capacity`, only a single block will be allocated - // up-front, which means only a single producer will be able to enqueue elements - // without an extra allocation -- blocks aren't shared between producers). - // This method is not thread safe -- it is up to the user to ensure that the - // queue is fully constructed before it starts being used by other threads (this - // includes making the memory effects of construction visible, possibly with a - // memory barrier). - explicit BlockingConcurrentQueue(size_t capacity = 6 * BLOCK_SIZE) - : inner(capacity), sema(create(), &BlockingConcurrentQueue::template destroy) - { - assert(reinterpret_cast((BlockingConcurrentQueue*)1) == &((BlockingConcurrentQueue*)1)->inner && "BlockingConcurrentQueue must have ConcurrentQueue as its first member"); - if (!sema) { - MOODYCAMEL_THROW(std::bad_alloc()); - } - } - - BlockingConcurrentQueue(size_t minCapacity, size_t maxExplicitProducers, size_t maxImplicitProducers) - : inner(minCapacity, maxExplicitProducers, maxImplicitProducers), sema(create(), &BlockingConcurrentQueue::template destroy) - { - assert(reinterpret_cast((BlockingConcurrentQueue*)1) == &((BlockingConcurrentQueue*)1)->inner && "BlockingConcurrentQueue must have ConcurrentQueue as its first member"); - if (!sema) { - MOODYCAMEL_THROW(std::bad_alloc()); - } - } - - // Disable copying and copy assignment - BlockingConcurrentQueue(BlockingConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION; - BlockingConcurrentQueue& operator=(BlockingConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION; - - // Moving is supported, but note that it is *not* a thread-safe operation. - // Nobody can use the queue while it's being moved, and the memory effects - // of that move must be propagated to other threads before they can use it. - // Note: When a queue is moved, its tokens are still valid but can only be - // used with the destination queue (i.e. 
semantically they are moved along - // with the queue itself). - BlockingConcurrentQueue(BlockingConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT - : inner(std::move(other.inner)), sema(std::move(other.sema)) - { } - - inline BlockingConcurrentQueue& operator=(BlockingConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT - { - return swap_internal(other); - } - - // Swaps this queue's state with the other's. Not thread-safe. - // Swapping two queues does not invalidate their tokens, however - // the tokens that were created for one queue must be used with - // only the swapped queue (i.e. the tokens are tied to the - // queue's movable state, not the object itself). - inline void swap(BlockingConcurrentQueue& other) MOODYCAMEL_NOEXCEPT - { - swap_internal(other); - } - -private: - BlockingConcurrentQueue& swap_internal(BlockingConcurrentQueue& other) - { - if (this == &other) { - return *this; - } - - inner.swap(other.inner); - sema.swap(other.sema); - return *this; - } - -public: - // Enqueues a single item (by copying it). - // Allocates memory if required. Only fails if memory allocation fails (or implicit - // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, - // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. - inline bool enqueue(T const& item) - { - if (details::likely(inner.enqueue(item))) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues a single item (by moving it, if possible). - // Allocates memory if required. Only fails if memory allocation fails (or implicit - // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, - // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. - inline bool enqueue(T&& item) - { - if (details::likely(inner.enqueue(std::move(item)))) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues a single item (by copying it) using an explicit producer token. 
- // Allocates memory if required. Only fails if memory allocation fails (or - // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. - inline bool enqueue(producer_token_t const& token, T const& item) - { - if (details::likely(inner.enqueue(token, item))) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues a single item (by moving it, if possible) using an explicit producer token. - // Allocates memory if required. Only fails if memory allocation fails (or - // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. - inline bool enqueue(producer_token_t const& token, T&& item) - { - if (details::likely(inner.enqueue(token, std::move(item)))) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues several items. - // Allocates memory if required. Only fails if memory allocation fails (or - // implicit production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE - // is 0, or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Note: Use std::make_move_iterator if the elements should be moved instead of copied. - // Thread-safe. - template - inline bool enqueue_bulk(It itemFirst, size_t count) - { - if (details::likely(inner.enqueue_bulk(std::forward(itemFirst), count))) { - sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); - return true; - } - return false; - } - - // Enqueues several items using an explicit producer token. - // Allocates memory if required. Only fails if memory allocation fails - // (or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Note: Use std::make_move_iterator if the elements should be moved - // instead of copied. - // Thread-safe. 
- template - inline bool enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) - { - if (details::likely(inner.enqueue_bulk(token, std::forward(itemFirst), count))) { - sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); - return true; - } - return false; - } - - // Enqueues a single item (by copying it). - // Does not allocate memory. Fails if not enough room to enqueue (or implicit - // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE - // is 0). - // Thread-safe. - inline bool try_enqueue(T const& item) - { - if (inner.try_enqueue(item)) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues a single item (by moving it, if possible). - // Does not allocate memory (except for one-time implicit producer). - // Fails if not enough room to enqueue (or implicit production is - // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). - // Thread-safe. - inline bool try_enqueue(T&& item) - { - if (inner.try_enqueue(std::move(item))) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues a single item (by copying it) using an explicit producer token. - // Does not allocate memory. Fails if not enough room to enqueue. - // Thread-safe. - inline bool try_enqueue(producer_token_t const& token, T const& item) - { - if (inner.try_enqueue(token, item)) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues a single item (by moving it, if possible) using an explicit producer token. - // Does not allocate memory. Fails if not enough room to enqueue. - // Thread-safe. - inline bool try_enqueue(producer_token_t const& token, T&& item) - { - if (inner.try_enqueue(token, std::move(item))) { - sema->signal(); - return true; - } - return false; - } - - // Enqueues several items. - // Does not allocate memory (except for one-time implicit producer). 
- // Fails if not enough room to enqueue (or implicit production is - // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). - // Note: Use std::make_move_iterator if the elements should be moved - // instead of copied. - // Thread-safe. - template - inline bool try_enqueue_bulk(It itemFirst, size_t count) - { - if (inner.try_enqueue_bulk(std::forward(itemFirst), count)) { - sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); - return true; - } - return false; - } - - // Enqueues several items using an explicit producer token. - // Does not allocate memory. Fails if not enough room to enqueue. - // Note: Use std::make_move_iterator if the elements should be moved - // instead of copied. - // Thread-safe. - template - inline bool try_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) - { - if (inner.try_enqueue_bulk(token, std::forward(itemFirst), count)) { - sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); - return true; - } - return false; - } - - - // Attempts to dequeue from the queue. - // Returns false if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template - inline bool try_dequeue(U& item) - { - if (sema->tryWait()) { - while (!inner.try_dequeue(item)) { - continue; - } - return true; - } - return false; - } - - // Attempts to dequeue from the queue using an explicit consumer token. - // Returns false if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template - inline bool try_dequeue(consumer_token_t& token, U& item) - { - if (sema->tryWait()) { - while (!inner.try_dequeue(token, item)) { - continue; - } - return true; - } - return false; - } - - // Attempts to dequeue several elements from the queue. - // Returns the number of items actually dequeued. 
- // Returns 0 if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template - inline size_t try_dequeue_bulk(It itemFirst, size_t max) - { - size_t count = 0; - max = (size_t)sema->tryWaitMany((LightweightSemaphore::ssize_t)(ssize_t)max); - while (count != max) { - count += inner.template try_dequeue_bulk(itemFirst, max - count); - } - return count; - } - - // Attempts to dequeue several elements from the queue using an explicit consumer token. - // Returns the number of items actually dequeued. - // Returns 0 if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template - inline size_t try_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max) - { - size_t count = 0; - max = (size_t)sema->tryWaitMany((LightweightSemaphore::ssize_t)(ssize_t)max); - while (count != max) { - count += inner.template try_dequeue_bulk(token, itemFirst, max - count); - } - return count; - } - - - - // Blocks the current thread until there's something to dequeue, then - // dequeues it. - // Never allocates. Thread-safe. - template - inline void wait_dequeue(U& item) - { - sema->wait(); - while (!inner.try_dequeue(item)) { - continue; - } - } - - // Blocks the current thread until there's something to dequeue, then - // dequeues it using an explicit consumer token. - // Never allocates. Thread-safe. - template - inline void wait_dequeue(consumer_token_t& token, U& item) - { - sema->wait(); - while (!inner.try_dequeue(token, item)) { - continue; - } - } - - // Attempts to dequeue several elements from the queue. - // Returns the number of items actually dequeued, which will - // always be at least one (this method blocks until the queue - // is non-empty) and at most max. - // Never allocates. Thread-safe. 
- template - inline size_t wait_dequeue_bulk(It itemFirst, size_t max) - { - size_t count = 0; - max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max); - while (count != max) { - count += inner.template try_dequeue_bulk(itemFirst, max - count); - } - return count; - } - - // Attempts to dequeue several elements from the queue using an explicit consumer token. - // Returns the number of items actually dequeued, which will - // always be at least one (this method blocks until the queue - // is non-empty) and at most max. - // Never allocates. Thread-safe. - template - inline size_t wait_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max) - { - size_t count = 0; - max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max); - while (count != max) { - count += inner.template try_dequeue_bulk(token, itemFirst, max - count); - } - return count; - } - - - // Returns an estimate of the total number of elements currently in the queue. This - // estimate is only accurate if the queue has completely stabilized before it is called - // (i.e. all enqueue and dequeue operations have completed and their memory effects are - // visible on the calling thread, and no further operations start while this method is - // being called). - // Thread-safe. - inline size_t size_approx() const - { - return (size_t)sema->availableApprox(); - } - - - // Returns true if the underlying atomic variables used by - // the queue are lock-free (they should be on most platforms). - // Thread-safe. - static bool is_lock_free() - { - return ConcurrentQueue::is_lock_free(); - } - - -private: - template - static inline U* create() - { - auto p = Traits::malloc(sizeof(U)); - return p != nullptr ? new (p) U : nullptr; - } - - template - static inline U* create(A1&& a1) - { - auto p = Traits::malloc(sizeof(U)); - return p != nullptr ? 
new (p) U(std::forward(a1)) : nullptr; - } - - template - static inline void destroy(U* p) - { - if (p != nullptr) { - p->~U(); - } - Traits::free(p); - } - -private: - ConcurrentQueue inner; - std::unique_ptr sema; -}; - - -template -inline void swap(BlockingConcurrentQueue& a, BlockingConcurrentQueue& b) MOODYCAMEL_NOEXCEPT -{ - a.swap(b); -} - -} // end namespace moodycamel diff --git a/third-party/moodycamel/concurrentqueue.h b/third-party/moodycamel/concurrentqueue.h deleted file mode 100644 index 15cf314..0000000 --- a/third-party/moodycamel/concurrentqueue.h +++ /dev/null @@ -1,3565 +0,0 @@ -// Provides a C++11 implementation of a multi-producer, multi-consumer lock-free queue. -// An overview, including benchmark results, is provided here: -// http://moodycamel.com/blog/2014/a-fast-general-purpose-lock-free-queue-for-c++ -// The full design is also described in excruciating detail at: -// http://moodycamel.com/blog/2014/detailed-design-of-a-lock-free-queue - - -// Simplified BSD license: -// Copyright (c) 2013-2015, Cameron Desrochers. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// - Redistributions of source code must retain the above copyright notice, this list of -// conditions and the following disclaimer. -// - Redistributions in binary form must reproduce the above copyright notice, this list of -// conditions and the following disclaimer in the documentation and/or other materials -// provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL -// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -// OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -// TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, -// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -#pragma once - -#if defined(__GNUC__) -// Disable -Wconversion warnings (spuriously triggered when Traits::size_t and -// Traits::index_t are set to < 32 bits, causing integer promotion, causing warnings -// upon assigning any computed values) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" - -#ifdef MCDBGQ_USE_RELACY -#pragma GCC diagnostic ignored "-Wint-to-pointer-cast" -#endif -#endif - -#ifdef MCDBGQ_USE_RELACY -#include "relacy/relacy_std.hpp" -#include "relacy_shims.h" -// We only use malloc/free anyway, and the delete macro messes up `= delete` method declarations. -// We'll override the default trait malloc ourselves without a macro. -#undef new -#undef delete -#undef malloc -#undef free -#else -#include // Requires C++11. Sorry VS2010. 
-#include -#endif -#include -#include -#include -#include -#include -#include -#include // for CHAR_BIT -#include -#include // for __WINPTHREADS_VERSION if on MinGW-w64 w/ POSIX threading - -// Platform-specific definitions of a numeric thread ID type and an invalid value -#if defined(MCDBGQ_USE_RELACY) -namespace moodycamel { - namespace details { - typedef std::uint32_t thread_id_t; - static const thread_id_t invalid_thread_id = 0xFFFFFFFFU; - static const thread_id_t invalid_thread_id2 = 0xFFFFFFFEU; - static inline thread_id_t thread_id() { return rl::thread_index(); } - } -} -#elif defined(_WIN32) || defined(__WINDOWS__) || defined(__WIN32__) -// No sense pulling in windows.h in a header, we'll manually declare the function -// we use and rely on backwards-compatibility for this not to break -extern "C" __declspec(dllimport) unsigned long __stdcall GetCurrentThreadId(void); -namespace moodycamel { - namespace details { - static_assert(sizeof(unsigned long) == sizeof(std::uint32_t), "Expected size of unsigned long to be 32 bits on Windows"); - typedef std::uint32_t thread_id_t; - static const thread_id_t invalid_thread_id = 0; // See http://blogs.msdn.com/b/oldnewthing/archive/2004/02/23/78395.aspx - static const thread_id_t invalid_thread_id2 = 0xFFFFFFFFU; // Not technically guaranteed to be invalid, but is never used in practice. Note that all Win32 thread IDs are presently multiples of 4. 
- static inline thread_id_t thread_id() { return static_cast(::GetCurrentThreadId()); } - } -} -#else -// Use a nice trick from this answer: http://stackoverflow.com/a/8438730/21475 -// In order to get a numeric thread ID in a platform-independent way, we use a thread-local -// static variable's address as a thread identifier :-) -#if defined(__GNUC__) || defined(__INTEL_COMPILER) -#define MOODYCAMEL_THREADLOCAL __thread -#elif defined(_MSC_VER) -#define MOODYCAMEL_THREADLOCAL __declspec(thread) -#else -// Assume C++11 compliant compiler -#define MOODYCAMEL_THREADLOCAL thread_local -#endif -namespace moodycamel { - namespace details { - typedef std::uintptr_t thread_id_t; - static const thread_id_t invalid_thread_id = 0; // Address can't be nullptr - static const thread_id_t invalid_thread_id2 = 1; // Member accesses off a null pointer are also generally invalid. Plus it's not aligned. - static inline thread_id_t thread_id() { static MOODYCAMEL_THREADLOCAL int x; return reinterpret_cast(&x); } - } -} -#endif - -// Exceptions -#ifndef MOODYCAMEL_EXCEPTIONS_ENABLED -#if (defined(_MSC_VER) && defined(_CPPUNWIND)) || (defined(__GNUC__) && defined(__EXCEPTIONS)) || (!defined(_MSC_VER) && !defined(__GNUC__)) -#define MOODYCAMEL_EXCEPTIONS_ENABLED -#define MOODYCAMEL_TRY try -#define MOODYCAMEL_CATCH(...) catch(__VA_ARGS__) -#define MOODYCAMEL_RETHROW throw -#define MOODYCAMEL_THROW(expr) throw (expr) -#else -#define MOODYCAMEL_TRY if (true) -#define MOODYCAMEL_CATCH(...) 
else if (false) -#define MOODYCAMEL_RETHROW -#define MOODYCAMEL_THROW(expr) -#endif -#endif - -#ifndef MOODYCAMEL_NOEXCEPT -#if !defined(MOODYCAMEL_EXCEPTIONS_ENABLED) -#define MOODYCAMEL_NOEXCEPT -#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) true -#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) true -#elif defined(_MSC_VER) && defined(_NOEXCEPT) && _MSC_VER < 1800 -// VS2012's std::is_nothrow_[move_]constructible is broken and returns true when it shouldn't :-( -// We have to assume *all* non-trivial constructors may throw on VS2012! -#define MOODYCAMEL_NOEXCEPT _NOEXCEPT -#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) (std::is_rvalue_reference::value && std::is_move_constructible::value ? std::is_trivially_move_constructible::value : std::is_trivially_copy_constructible::value) -#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) ((std::is_rvalue_reference::value && std::is_move_assignable::value ? std::is_trivially_move_assignable::value || std::is_nothrow_move_assignable::value : std::is_trivially_copy_assignable::value || std::is_nothrow_copy_assignable::value) && MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr)) -#elif defined(_MSC_VER) && defined(_NOEXCEPT) && _MSC_VER < 1900 -#define MOODYCAMEL_NOEXCEPT _NOEXCEPT -#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) (std::is_rvalue_reference::value && std::is_move_constructible::value ? std::is_trivially_move_constructible::value || std::is_nothrow_move_constructible::value : std::is_trivially_copy_constructible::value || std::is_nothrow_copy_constructible::value) -#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) ((std::is_rvalue_reference::value && std::is_move_assignable::value ? 
std::is_trivially_move_assignable::value || std::is_nothrow_move_assignable::value : std::is_trivially_copy_assignable::value || std::is_nothrow_copy_assignable::value) && MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr)) -#else -#define MOODYCAMEL_NOEXCEPT noexcept -#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) noexcept(expr) -#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) noexcept(expr) -#endif -#endif - -#ifndef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED -#ifdef MCDBGQ_USE_RELACY -#define MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED -#else -//// VS2013 doesn't support `thread_local`, and MinGW-w64 w/ POSIX threading has a crippling bug: http://sourceforge.net/p/mingw-w64/bugs/445 -//// g++ <=4.7 doesn't support thread_local either -//#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && (!defined(__MINGW32__) && !defined(__MINGW64__) || !defined(__WINPTHREADS_VERSION)) && (!defined(__GNUC__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) -//// Assume `thread_local` is fully supported in all other C++11 compilers/runtimes -//#define MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED -//#endif -#endif -#endif - -// VS2012 doesn't support deleted functions. -// In this case, we declare the function normally but don't define it. A link error will be generated if the function is called. 
-#ifndef MOODYCAMEL_DELETE_FUNCTION -#if defined(_MSC_VER) && _MSC_VER < 1800 -#define MOODYCAMEL_DELETE_FUNCTION -#else -#define MOODYCAMEL_DELETE_FUNCTION = delete -#endif -#endif - -// Compiler-specific likely/unlikely hints -namespace moodycamel { - namespace details { -#if defined(__GNUC__) - inline bool likely(bool x) { return __builtin_expect((x), true); } - inline bool unlikely(bool x) { return __builtin_expect((x), false); } -#else - inline bool likely(bool x) { return x; } - inline bool unlikely(bool x) { return x; } -#endif - } -} - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG -#include "internal/concurrentqueue_internal_debug.h" -#endif - -namespace moodycamel { - namespace details { - template - struct const_numeric_max { - static_assert(std::is_integral::value, "const_numeric_max can only be used with integers"); - static const T value = std::numeric_limits::is_signed - ? (static_cast(1) << (sizeof(T) * CHAR_BIT - 1)) - static_cast(1) - : static_cast(-1); - }; - } - - // Default traits for the ConcurrentQueue. To change some of the - // traits without re-implementing all of them, inherit from this - // struct and shadow the declarations you wish to be different; - // since the traits are used as a template type parameter, the - // shadowed declarations will be used where defined, and the defaults - // otherwise. - struct ConcurrentQueueDefaultTraits - { - // General-purpose size type. std::size_t is strongly recommended. - typedef std::size_t size_t; - - // The type used for the enqueue and dequeue indices. Must be at least as - // large as size_t. Should be significantly larger than the number of elements - // you expect to hold at once, especially if you have a high turnover rate; - // for example, on 32-bit x86, if you expect to have over a hundred million - // elements or pump several million elements through your queue in a very - // short space of time, using a 32-bit type *may* trigger a race condition. 
- // A 64-bit int type is recommended in that case, and in practice will - // prevent a race condition no matter the usage of the queue. Note that - // whether the queue is lock-free with a 64-int type depends on the whether - // std::atomic is lock-free, which is platform-specific. - typedef std::size_t index_t; - - // Internally, all elements are enqueued and dequeued from multi-element - // blocks; this is the smallest controllable unit. If you expect few elements - // but many producers, a smaller block size should be favoured. For few producers - // and/or many elements, a larger block size is preferred. A sane default - // is provided. Must be a power of 2. - static const size_t BLOCK_SIZE = 32; - - // For explicit producers (i.e. when using a producer token), the block is - // checked for being empty by iterating through a list of flags, one per element. - // For large block sizes, this is too inefficient, and switching to an atomic - // counter-based approach is faster. The switch is made for block sizes strictly - // larger than this threshold. - static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = 32; - - // How many full blocks can be expected for a single explicit producer? This should - // reflect that number's maximum for optimal performance. Must be a power of 2. - static const size_t EXPLICIT_INITIAL_INDEX_SIZE = 32; - - // How many full blocks can be expected for a single implicit producer? This should - // reflect that number's maximum for optimal performance. Must be a power of 2. - static const size_t IMPLICIT_INITIAL_INDEX_SIZE = 32; - - // The initial size of the hash table mapping thread IDs to implicit producers. - // Note that the hash is resized every time it becomes half full. - // Must be a power of two, and either 0 or at least 1. If 0, implicit production - // (using the enqueue methods without an explicit producer token) is disabled. 
- static const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = 32; - - // Controls the number of items that an explicit consumer (i.e. one with a token) - // must consume before it causes all consumers to rotate and move on to the next - // internal queue. - static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = 256; - - // The maximum number of elements (inclusive) that can be enqueued to a sub-queue. - // Enqueue operations that would cause this limit to be surpassed will fail. Note - // that this limit is enforced at the block level (for performance reasons), i.e. - // it's rounded up to the nearest block size. - static const size_t MAX_SUBQUEUE_SIZE = details::const_numeric_max::value; - - -#ifndef MCDBGQ_USE_RELACY - // Memory allocation can be customized if needed. - // malloc should return nullptr on failure, and handle alignment like std::malloc. - static inline void* malloc(size_t size) { return std::malloc(size); } - static inline void free(void* ptr) { return std::free(ptr); } -#else - // Debug versions when running under the Relacy race detector (ignore - // these in user code) - static inline void* malloc(size_t size) { return rl::rl_malloc(size, $); } - static inline void free(void* ptr) { return rl::rl_free(ptr, $); } -#endif - }; - - - // When producing or consuming many elements, the most efficient way is to: - // 1) Use one of the bulk-operation methods of the queue with a token - // 2) Failing that, use the bulk-operation methods without a token - // 3) Failing that, create a token and use that with the single-item methods - // 4) Failing that, use the single-parameter methods of the queue - // Having said that, don't create tokens willy-nilly -- ideally there should be - // a maximum of one token per thread (of each kind). 
- struct ProducerToken; - struct ConsumerToken; - - template class ConcurrentQueue; - template class BlockingConcurrentQueue; - class ConcurrentQueueTests; - - - namespace details - { - struct ConcurrentQueueProducerTypelessBase - { - ConcurrentQueueProducerTypelessBase* next; - std::atomic inactive; - ProducerToken* token; - - ConcurrentQueueProducerTypelessBase() - : inactive(false), token(nullptr) - { - } - }; - - template struct _hash_32_or_64 { - static inline std::uint32_t hash(std::uint32_t h) - { - // MurmurHash3 finalizer -- see https://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp - // Since the thread ID is already unique, all we really want to do is propagate that - // uniqueness evenly across all the bits, so that we can use a subset of the bits while - // reducing collisions significantly - h ^= h >> 16; - h *= 0x85ebca6b; - h ^= h >> 13; - h *= 0xc2b2ae35; - return h ^ (h >> 16); - } - }; - template<> struct _hash_32_or_64<1> { - static inline std::uint64_t hash(std::uint64_t h) - { - h ^= h >> 33; - h *= 0xff51afd7ed558ccd; - h ^= h >> 33; - h *= 0xc4ceb9fe1a85ec53; - return h ^ (h >> 33); - } - }; - template struct hash_32_or_64 : public _hash_32_or_64<(size > 4)> { }; - - static inline size_t hash_thread_id(thread_id_t id) - { - static_assert(sizeof(thread_id_t) <= 8, "Expected a platform where thread IDs are at most 64-bit values"); - return static_cast(hash_32_or_64::hash(id)); - } - - template - static inline bool circular_less_than(T a, T b) - { -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable: 4554) -#endif - static_assert(std::is_integral::value && !std::numeric_limits::is_signed, "circular_less_than is intended to be used only with unsigned integer types"); - return static_cast(a - b) > static_cast(static_cast(1) << static_cast(sizeof(T) * CHAR_BIT - 1)); -#ifdef _MSC_VER -#pragma warning(pop) -#endif - } - - template - static inline char* align_for(char* ptr) - { - const std::size_t alignment = 
std::alignment_of::value; - return ptr + (alignment - (reinterpret_cast(ptr) % alignment)) % alignment; - } - - template - static inline T ceil_to_pow_2(T x) - { - static_assert(std::is_integral::value && !std::numeric_limits::is_signed, "ceil_to_pow_2 is intended to be used only with unsigned integer types"); - - // Adapted from http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 - --x; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - for (std::size_t i = 1; i < sizeof(T); i <<= 1) { - x |= x >> (i << 3); - } - ++x; - return x; - } - - template - static inline void swap_relaxed(std::atomic& left, std::atomic& right) - { - T temp = std::move(left.load(std::memory_order_relaxed)); - left.store(std::move(right.load(std::memory_order_relaxed)), std::memory_order_relaxed); - right.store(std::move(temp), std::memory_order_relaxed); - } - - template - static inline T const& nomove(T const& x) - { - return x; - } - - template - struct nomove_if - { - template - static inline T const& eval(T const& x) - { - return x; - } - }; - - template<> - struct nomove_if - { - template - static inline auto eval(U&& x) - -> decltype(std::forward(x)) - { - return std::forward(x); - } - }; - - template - static inline auto deref_noexcept(It& it) MOODYCAMEL_NOEXCEPT -> decltype(*it) - { - return *it; - } - -#if defined(__APPLE__) || defined(__clang__) || !defined(__GNUC__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - template struct is_trivially_destructible : std::is_trivially_destructible { }; -#else - template struct is_trivially_destructible : std::has_trivial_destructor { }; -#endif - -#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED -#ifdef MCDBGQ_USE_RELACY - typedef RelacyThreadExitListener ThreadExitListener; - typedef RelacyThreadExitNotifier ThreadExitNotifier; -#else - struct ThreadExitListener - { - typedef void(*callback_t)(void*); - callback_t callback; - void* userData; - - ThreadExitListener* next; // reserved for use by the ThreadExitNotifier - 
}; - - - class ThreadExitNotifier - { - public: - static void subscribe(ThreadExitListener* listener) - { - auto& tlsInst = instance(); - listener->next = tlsInst.tail; - tlsInst.tail = listener; - } - - static void unsubscribe(ThreadExitListener* listener) - { - auto& tlsInst = instance(); - ThreadExitListener** prev = &tlsInst.tail; - for (auto ptr = tlsInst.tail; ptr != nullptr; ptr = ptr->next) { - if (ptr == listener) { - *prev = ptr->next; - break; - } - prev = &ptr->next; - } - } - - private: - ThreadExitNotifier() : tail(nullptr) { } - ThreadExitNotifier(ThreadExitNotifier const&) MOODYCAMEL_DELETE_FUNCTION; - ThreadExitNotifier& operator=(ThreadExitNotifier const&)MOODYCAMEL_DELETE_FUNCTION; - - ~ThreadExitNotifier() - { - // This thread is about to exit, let everyone know! - assert(this == &instance() && "If this assert fails, you likely have a buggy compiler! Change the preprocessor conditions such that MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED is no longer defined."); - for (auto ptr = tail; ptr != nullptr; ptr = ptr->next) { - ptr->callback(ptr->userData); - } - } - - // Thread-local - static inline ThreadExitNotifier& instance() - { - static thread_local ThreadExitNotifier notifier; - return notifier; - } - - private: - ThreadExitListener* tail; - }; -#endif -#endif - - template struct static_is_lock_free_num { enum { value = 0 }; }; - template<> struct static_is_lock_free_num { enum { value = ATOMIC_CHAR_LOCK_FREE }; }; - template<> struct static_is_lock_free_num { enum { value = ATOMIC_SHORT_LOCK_FREE }; }; - template<> struct static_is_lock_free_num { enum { value = ATOMIC_INT_LOCK_FREE }; }; - template<> struct static_is_lock_free_num { enum { value = ATOMIC_LONG_LOCK_FREE }; }; - template<> struct static_is_lock_free_num { enum { value = ATOMIC_LLONG_LOCK_FREE }; }; - template struct static_is_lock_free : static_is_lock_free_num::type> { }; - template<> struct static_is_lock_free { enum { value = ATOMIC_BOOL_LOCK_FREE }; }; - template struct 
static_is_lock_free { enum { value = ATOMIC_POINTER_LOCK_FREE }; }; - } - - - struct ProducerToken - { - template - explicit ProducerToken(ConcurrentQueue& queue); - - template - explicit ProducerToken(BlockingConcurrentQueue& queue); - - explicit ProducerToken(ProducerToken&& other) MOODYCAMEL_NOEXCEPT - : producer(other.producer) - { - other.producer = nullptr; - if (producer != nullptr) { - producer->token = this; - } - } - - inline ProducerToken& operator=(ProducerToken&& other) MOODYCAMEL_NOEXCEPT - { - swap(other); - return *this; - } - - void swap(ProducerToken& other) MOODYCAMEL_NOEXCEPT - { - std::swap(producer, other.producer); - if (producer != nullptr) { - producer->token = this; - } - if (other.producer != nullptr) { - other.producer->token = &other; - } - } - - // A token is always valid unless: - // 1) Memory allocation failed during construction - // 2) It was moved via the move constructor - // (Note: assignment does a swap, leaving both potentially valid) - // 3) The associated queue was destroyed - // Note that if valid() returns true, that only indicates - // that the token is valid for use with a specific queue, - // but not which one; that's up to the user to track. 
- inline bool valid() const { return producer != nullptr; } - - ~ProducerToken() - { - if (producer != nullptr) { - producer->token = nullptr; - producer->inactive.store(true, std::memory_order_release); - } - } - - // Disable copying and assignment - ProducerToken(ProducerToken const&) MOODYCAMEL_DELETE_FUNCTION; - ProducerToken& operator=(ProducerToken const&)MOODYCAMEL_DELETE_FUNCTION; - - private: - template friend class ConcurrentQueue; - friend class ConcurrentQueueTests; - - protected: - details::ConcurrentQueueProducerTypelessBase* producer; - }; - - - struct ConsumerToken - { - template - explicit ConsumerToken(ConcurrentQueue& q); - - template - explicit ConsumerToken(BlockingConcurrentQueue& q); - - explicit ConsumerToken(ConsumerToken&& other) MOODYCAMEL_NOEXCEPT - : initialOffset(other.initialOffset), lastKnownGlobalOffset(other.lastKnownGlobalOffset), itemsConsumedFromCurrent(other.itemsConsumedFromCurrent), currentProducer(other.currentProducer), desiredProducer(other.desiredProducer) - { - } - - inline ConsumerToken& operator=(ConsumerToken&& other) MOODYCAMEL_NOEXCEPT - { - swap(other); - return *this; - } - - void swap(ConsumerToken& other) MOODYCAMEL_NOEXCEPT - { - std::swap(initialOffset, other.initialOffset); - std::swap(lastKnownGlobalOffset, other.lastKnownGlobalOffset); - std::swap(itemsConsumedFromCurrent, other.itemsConsumedFromCurrent); - std::swap(currentProducer, other.currentProducer); - std::swap(desiredProducer, other.desiredProducer); - } - - // Disable copying and assignment - ConsumerToken(ConsumerToken const&) MOODYCAMEL_DELETE_FUNCTION; - ConsumerToken& operator=(ConsumerToken const&)MOODYCAMEL_DELETE_FUNCTION; - - private: - template friend class ConcurrentQueue; - friend class ConcurrentQueueTests; - - private: // but shared with ConcurrentQueue - std::uint32_t initialOffset; - std::uint32_t lastKnownGlobalOffset; - std::uint32_t itemsConsumedFromCurrent; - details::ConcurrentQueueProducerTypelessBase* currentProducer; - 
details::ConcurrentQueueProducerTypelessBase* desiredProducer; - }; - - // Need to forward-declare this swap because it's in a namespace. - // See http://stackoverflow.com/questions/4492062/why-does-a-c-friend-class-need-a-forward-declaration-only-in-other-namespaces - template - inline void swap(typename ConcurrentQueue::ImplicitProducerKVP& a, typename ConcurrentQueue::ImplicitProducerKVP& b) MOODYCAMEL_NOEXCEPT; - - - template - class ConcurrentQueue - { - public: - typedef ::moodycamel::ProducerToken producer_token_t; - typedef ::moodycamel::ConsumerToken consumer_token_t; - - typedef typename Traits::index_t index_t; - typedef typename Traits::size_t size_t; - - static const size_t BLOCK_SIZE = static_cast(Traits::BLOCK_SIZE); - static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = static_cast(Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD); - static const size_t EXPLICIT_INITIAL_INDEX_SIZE = static_cast(Traits::EXPLICIT_INITIAL_INDEX_SIZE); - static const size_t IMPLICIT_INITIAL_INDEX_SIZE = static_cast(Traits::IMPLICIT_INITIAL_INDEX_SIZE); - static const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = static_cast(Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE); - static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = static_cast(Traits::EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE); -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable: 4307) // + integral constant overflow (that's what the ternary expression is for!) -#pragma warning(disable: 4309) // static_cast: Truncation of constant value -#endif - static const size_t MAX_SUBQUEUE_SIZE = (details::const_numeric_max::value - static_cast(Traits::MAX_SUBQUEUE_SIZE) < BLOCK_SIZE) ? 
details::const_numeric_max::value : ((static_cast(Traits::MAX_SUBQUEUE_SIZE) + (BLOCK_SIZE - 1)) / BLOCK_SIZE * BLOCK_SIZE); -#ifdef _MSC_VER -#pragma warning(pop) -#endif - - static_assert(!std::numeric_limits::is_signed && std::is_integral::value, "Traits::size_t must be an unsigned integral type"); - static_assert(!std::numeric_limits::is_signed && std::is_integral::value, "Traits::index_t must be an unsigned integral type"); - static_assert(sizeof(index_t) >= sizeof(size_t), "Traits::index_t must be at least as wide as Traits::size_t"); - static_assert((BLOCK_SIZE > 1) && !(BLOCK_SIZE & (BLOCK_SIZE - 1)), "Traits::BLOCK_SIZE must be a power of 2 (and at least 2)"); - static_assert((EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD > 1) && !(EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD & (EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD - 1)), "Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD must be a power of 2 (and greater than 1)"); - static_assert((EXPLICIT_INITIAL_INDEX_SIZE > 1) && !(EXPLICIT_INITIAL_INDEX_SIZE & (EXPLICIT_INITIAL_INDEX_SIZE - 1)), "Traits::EXPLICIT_INITIAL_INDEX_SIZE must be a power of 2 (and greater than 1)"); - static_assert((IMPLICIT_INITIAL_INDEX_SIZE > 1) && !(IMPLICIT_INITIAL_INDEX_SIZE & (IMPLICIT_INITIAL_INDEX_SIZE - 1)), "Traits::IMPLICIT_INITIAL_INDEX_SIZE must be a power of 2 (and greater than 1)"); - static_assert((INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) || !(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE & (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE - 1)), "Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE must be a power of 2"); - static_assert(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0 || INITIAL_IMPLICIT_PRODUCER_HASH_SIZE >= 1, "Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE must be at least 1 (or 0 to disable implicit enqueueing)"); - - public: - // Creates a queue with at least `capacity` element slots; note that the - // actual number of elements that can be inserted without additional memory - // allocation depends on the number of producers and the block size (e.g. 
if - // the block size is equal to `capacity`, only a single block will be allocated - // up-front, which means only a single producer will be able to enqueue elements - // without an extra allocation -- blocks aren't shared between producers). - // This method is not thread safe -- it is up to the user to ensure that the - // queue is fully constructed before it starts being used by other threads (this - // includes making the memory effects of construction visible, possibly with a - // memory barrier). - explicit ConcurrentQueue(size_t capacity = 6 * BLOCK_SIZE) - : producerListTail(nullptr), - producerCount(0), - initialBlockPoolIndex(0), - nextExplicitConsumerId(0), - globalExplicitConsumerOffset(0) - { - implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); - populate_initial_implicit_producer_hash(); - populate_initial_block_list(capacity / BLOCK_SIZE + ((capacity & (BLOCK_SIZE - 1)) == 0 ? 0 : 1)); - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - // Track all the producers using a fully-resolved typed list for - // each kind; this makes it possible to debug them starting from - // the root queue object (otherwise wacky casts are needed that - // don't compile in the debugger's expression evaluator). - explicitProducers.store(nullptr, std::memory_order_relaxed); - implicitProducers.store(nullptr, std::memory_order_relaxed); -#endif - } - - // Computes the correct amount of pre-allocated blocks for you based - // on the minimum number of elements you want available at any given - // time, and the maximum concurrent number of each type of producer. 
- ConcurrentQueue(size_t minCapacity, size_t maxExplicitProducers, size_t maxImplicitProducers) - : producerListTail(nullptr), - producerCount(0), - initialBlockPoolIndex(0), - nextExplicitConsumerId(0), - globalExplicitConsumerOffset(0) - { - implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); - populate_initial_implicit_producer_hash(); - size_t blocks = ((((minCapacity + BLOCK_SIZE - 1) / BLOCK_SIZE) - 1) * (maxExplicitProducers + 1) + 2 * (maxExplicitProducers + maxImplicitProducers)) * BLOCK_SIZE; - populate_initial_block_list(blocks); - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - explicitProducers.store(nullptr, std::memory_order_relaxed); - implicitProducers.store(nullptr, std::memory_order_relaxed); -#endif - } - - // Note: The queue should not be accessed concurrently while it's - // being deleted. It's up to the user to synchronize this. - // This method is not thread safe. - ~ConcurrentQueue() - { - // Destroy producers - auto ptr = producerListTail.load(std::memory_order_relaxed); - while (ptr != nullptr) { - auto next = ptr->next_prod(); - if (ptr->token != nullptr) { - ptr->token->producer = nullptr; - } - destroy(ptr); - ptr = next; - } - - // Destroy implicit producer hash tables - if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE != 0) { - auto hash = implicitProducerHash.load(std::memory_order_relaxed); - while (hash != nullptr) { - auto prev = hash->prev; - if (prev != nullptr) { // The last hash is part of this object and was not allocated dynamically - for (size_t i = 0; i != hash->capacity; ++i) { - hash->entries[i].~ImplicitProducerKVP(); - } - hash->~ImplicitProducerHash(); - Traits::free(hash); - } - hash = prev; - } - } - - // Destroy global free list - auto block = freeList.head_unsafe(); - while (block != nullptr) { - auto next = block->freeListNext.load(std::memory_order_relaxed); - if (block->dynamicallyAllocated) { - destroy(block); - } - block = next; - } - - // Destroy initial free list - destroy_array(initialBlockPool, 
initialBlockPoolSize); - } - - // Disable copying and copy assignment - ConcurrentQueue(ConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION; - ConcurrentQueue& operator=(ConcurrentQueue const&)MOODYCAMEL_DELETE_FUNCTION; - - // Moving is supported, but note that it is *not* a thread-safe operation. - // Nobody can use the queue while it's being moved, and the memory effects - // of that move must be propagated to other threads before they can use it. - // Note: When a queue is moved, its tokens are still valid but can only be - // used with the destination queue (i.e. semantically they are moved along - // with the queue itself). - ConcurrentQueue(ConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT - : producerListTail(other.producerListTail.load(std::memory_order_relaxed)), - producerCount(other.producerCount.load(std::memory_order_relaxed)), - initialBlockPoolIndex(other.initialBlockPoolIndex.load(std::memory_order_relaxed)), - initialBlockPool(other.initialBlockPool), - initialBlockPoolSize(other.initialBlockPoolSize), - freeList(std::move(other.freeList)), - nextExplicitConsumerId(other.nextExplicitConsumerId.load(std::memory_order_relaxed)), - globalExplicitConsumerOffset(other.globalExplicitConsumerOffset.load(std::memory_order_relaxed)) - { - // Move the other one into this, and leave the other one as an empty queue - implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); - populate_initial_implicit_producer_hash(); - swap_implicit_producer_hashes(other); - - other.producerListTail.store(nullptr, std::memory_order_relaxed); - other.producerCount.store(0, std::memory_order_relaxed); - other.nextExplicitConsumerId.store(0, std::memory_order_relaxed); - other.globalExplicitConsumerOffset.store(0, std::memory_order_relaxed); - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - explicitProducers.store(other.explicitProducers.load(std::memory_order_relaxed), std::memory_order_relaxed); - other.explicitProducers.store(nullptr, std::memory_order_relaxed); - 
implicitProducers.store(other.implicitProducers.load(std::memory_order_relaxed), std::memory_order_relaxed); - other.implicitProducers.store(nullptr, std::memory_order_relaxed); -#endif - - other.initialBlockPoolIndex.store(0, std::memory_order_relaxed); - other.initialBlockPoolSize = 0; - other.initialBlockPool = nullptr; - - reown_producers(); - } - - inline ConcurrentQueue& operator=(ConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT - { - return swap_internal(other); - } - - // Swaps this queue's state with the other's. Not thread-safe. - // Swapping two queues does not invalidate their tokens, however - // the tokens that were created for one queue must be used with - // only the swapped queue (i.e. the tokens are tied to the - // queue's movable state, not the object itself). - inline void swap(ConcurrentQueue& other) MOODYCAMEL_NOEXCEPT - { - swap_internal(other); - } - - private: - ConcurrentQueue& swap_internal(ConcurrentQueue& other) - { - if (this == &other) { - return *this; - } - - details::swap_relaxed(producerListTail, other.producerListTail); - details::swap_relaxed(producerCount, other.producerCount); - details::swap_relaxed(initialBlockPoolIndex, other.initialBlockPoolIndex); - std::swap(initialBlockPool, other.initialBlockPool); - std::swap(initialBlockPoolSize, other.initialBlockPoolSize); - freeList.swap(other.freeList); - details::swap_relaxed(nextExplicitConsumerId, other.nextExplicitConsumerId); - details::swap_relaxed(globalExplicitConsumerOffset, other.globalExplicitConsumerOffset); - - swap_implicit_producer_hashes(other); - - reown_producers(); - other.reown_producers(); - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - details::swap_relaxed(explicitProducers, other.explicitProducers); - details::swap_relaxed(implicitProducers, other.implicitProducers); -#endif - - return *this; - } - - public: - // Enqueues a single item (by copying it). - // Allocates memory if required. 
Only fails if memory allocation fails (or implicit - // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, - // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. - inline bool enqueue(T const& item) - { - if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; - return inner_enqueue(item); - } - - // Enqueues a single item (by moving it, if possible). - // Allocates memory if required. Only fails if memory allocation fails (or implicit - // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, - // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. - inline bool enqueue(T&& item) - { - if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; - return inner_enqueue(std::move(item)); - } - - // Enqueues a single item (by copying it) using an explicit producer token. - // Allocates memory if required. Only fails if memory allocation fails (or - // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. - inline bool enqueue(producer_token_t const& token, T const& item) - { - return inner_enqueue(token, item); - } - - // Enqueues a single item (by moving it, if possible) using an explicit producer token. - // Allocates memory if required. Only fails if memory allocation fails (or - // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Thread-safe. - inline bool enqueue(producer_token_t const& token, T&& item) - { - return inner_enqueue(token, std::move(item)); - } - - // Enqueues several items. - // Allocates memory if required. Only fails if memory allocation fails (or - // implicit production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE - // is 0, or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Note: Use std::make_move_iterator if the elements should be moved instead of copied. - // Thread-safe. 
- template - bool enqueue_bulk(It itemFirst, size_t count) - { - if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; - return inner_enqueue_bulk(std::forward(itemFirst), count); - } - - // Enqueues several items using an explicit producer token. - // Allocates memory if required. Only fails if memory allocation fails - // (or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). - // Note: Use std::make_move_iterator if the elements should be moved - // instead of copied. - // Thread-safe. - template - bool enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) - { - return inner_enqueue_bulk(token, std::forward(itemFirst), count); - } - - // Enqueues a single item (by copying it). - // Does not allocate memory. Fails if not enough room to enqueue (or implicit - // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE - // is 0). - // Thread-safe. - inline bool try_enqueue(T const& item) - { - if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; - return inner_enqueue(item); - } - - // Enqueues a single item (by moving it, if possible). - // Does not allocate memory (except for one-time implicit producer). - // Fails if not enough room to enqueue (or implicit production is - // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). - // Thread-safe. - inline bool try_enqueue(T&& item) - { - if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; - return inner_enqueue(std::move(item)); - } - - // Enqueues a single item (by copying it) using an explicit producer token. - // Does not allocate memory. Fails if not enough room to enqueue. - // Thread-safe. - inline bool try_enqueue(producer_token_t const& token, T const& item) - { - return inner_enqueue(token, item); - } - - // Enqueues a single item (by moving it, if possible) using an explicit producer token. - // Does not allocate memory. Fails if not enough room to enqueue. - // Thread-safe. 
- inline bool try_enqueue(producer_token_t const& token, T&& item) - { - return inner_enqueue(token, std::move(item)); - } - - // Enqueues several items. - // Does not allocate memory (except for one-time implicit producer). - // Fails if not enough room to enqueue (or implicit production is - // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). - // Note: Use std::make_move_iterator if the elements should be moved - // instead of copied. - // Thread-safe. - template - bool try_enqueue_bulk(It itemFirst, size_t count) - { - if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; - return inner_enqueue_bulk(std::forward(itemFirst), count); - } - - // Enqueues several items using an explicit producer token. - // Does not allocate memory. Fails if not enough room to enqueue. - // Note: Use std::make_move_iterator if the elements should be moved - // instead of copied. - // Thread-safe. - template - bool try_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) - { - return inner_enqueue_bulk(token, std::forward(itemFirst), count); - } - - - - // Attempts to dequeue from the queue. - // Returns false if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template - bool try_dequeue(U& item) - { - // Instead of simply trying each producer in turn (which could cause needless contention on the first - // producer), we score them heuristically. 
- size_t nonEmptyCount = 0; - ProducerBase* best = nullptr; - size_t bestSize = 0; - for (auto ptr = producerListTail.load(std::memory_order_acquire); nonEmptyCount < 3 && ptr != nullptr; ptr = ptr->next_prod()) { - auto size = ptr->size_approx(); - if (size > 0) { - if (size > bestSize) { - bestSize = size; - best = ptr; - } - ++nonEmptyCount; - } - } - - // If there was at least one non-empty queue but it appears empty at the time - // we try to dequeue from it, we need to make sure every queue's been tried - if (nonEmptyCount > 0) { - if (details::likely(best->dequeue(item))) { - return true; - } - for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { - if (ptr != best && ptr->dequeue(item)) { - return true; - } - } - } - return false; - } - - // Attempts to dequeue from the queue. - // Returns false if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // This differs from the try_dequeue(item) method in that this one does - // not attempt to reduce contention by interleaving the order that producer - // streams are dequeued from. So, using this method can reduce overall throughput - // under contention, but will give more predictable results in single-threaded - // consumer scenarios. This is mostly only useful for internal unit tests. - // Never allocates. Thread-safe. - template - bool try_dequeue_non_interleaved(U& item) - { - for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { - if (ptr->dequeue(item)) { - return true; - } - } - return false; - } - - // Attempts to dequeue from the queue using an explicit consumer token. - // Returns false if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. 
- template - bool try_dequeue(consumer_token_t& token, U& item) - { - // The idea is roughly as follows: - // Every 256 items from one producer, make everyone rotate (increase the global offset) -> this means the highest efficiency consumer dictates the rotation speed of everyone else, more or less - // If you see that the global offset has changed, you must reset your consumption counter and move to your designated place - // If there's no items where you're supposed to be, keep moving until you find a producer with some items - // If the global offset has not changed but you've run out of items to consume, move over from your current position until you find an producer with something in it - - if (token.desiredProducer == nullptr || token.lastKnownGlobalOffset != globalExplicitConsumerOffset.load(std::memory_order_relaxed)) { - if (!update_current_producer_after_rotation(token)) { - return false; - } - } - - // If there was at least one non-empty queue but it appears empty at the time - // we try to dequeue from it, we need to make sure every queue's been tried - if (static_cast(token.currentProducer)->dequeue(item)) { - if (++token.itemsConsumedFromCurrent == EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE) { - globalExplicitConsumerOffset.fetch_add(1, std::memory_order_relaxed); - } - return true; - } - - auto tail = producerListTail.load(std::memory_order_acquire); - auto ptr = static_cast(token.currentProducer)->next_prod(); - if (ptr == nullptr) { - ptr = tail; - } - while (ptr != static_cast(token.currentProducer)) { - if (ptr->dequeue(item)) { - token.currentProducer = ptr; - token.itemsConsumedFromCurrent = 1; - return true; - } - ptr = ptr->next_prod(); - if (ptr == nullptr) { - ptr = tail; - } - } - return false; - } - - // Attempts to dequeue several elements from the queue. - // Returns the number of items actually dequeued. 
- // Returns 0 if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template - size_t try_dequeue_bulk(It itemFirst, size_t max) - { - size_t count = 0; - for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { - count += ptr->dequeue_bulk(itemFirst, max - count); - if (count == max) { - break; - } - } - return count; - } - - // Attempts to dequeue several elements from the queue using an explicit consumer token. - // Returns the number of items actually dequeued. - // Returns 0 if all producer streams appeared empty at the time they - // were checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template - size_t try_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max) - { - if (token.desiredProducer == nullptr || token.lastKnownGlobalOffset != globalExplicitConsumerOffset.load(std::memory_order_relaxed)) { - if (!update_current_producer_after_rotation(token)) { - return false; - } - } - - size_t count = static_cast(token.currentProducer)->dequeue_bulk(itemFirst, max); - if (count == max) { - if ((token.itemsConsumedFromCurrent += static_cast(max)) >= EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE) { - globalExplicitConsumerOffset.fetch_add(1, std::memory_order_relaxed); - } - return max; - } - token.itemsConsumedFromCurrent += static_cast(count); - max -= count; - - auto tail = producerListTail.load(std::memory_order_acquire); - auto ptr = static_cast(token.currentProducer)->next_prod(); - if (ptr == nullptr) { - ptr = tail; - } - while (ptr != static_cast(token.currentProducer)) { - auto dequeued = ptr->dequeue_bulk(itemFirst, max); - count += dequeued; - if (dequeued != 0) { - token.currentProducer = ptr; - token.itemsConsumedFromCurrent = static_cast(dequeued); - } - if (dequeued == max) { - break; - } - max -= dequeued; - ptr = 
ptr->next_prod(); - if (ptr == nullptr) { - ptr = tail; - } - } - return count; - } - - - - // Attempts to dequeue from a specific producer's inner queue. - // If you happen to know which producer you want to dequeue from, this - // is significantly faster than using the general-case try_dequeue methods. - // Returns false if the producer's queue appeared empty at the time it - // was checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template - inline bool try_dequeue_from_producer(producer_token_t const& producer, U& item) - { - return static_cast(producer.producer)->dequeue(item); - } - - // Attempts to dequeue several elements from a specific producer's inner queue. - // Returns the number of items actually dequeued. - // If you happen to know which producer you want to dequeue from, this - // is significantly faster than using the general-case try_dequeue methods. - // Returns 0 if the producer's queue appeared empty at the time it - // was checked (so, the queue is likely but not guaranteed to be empty). - // Never allocates. Thread-safe. - template - inline size_t try_dequeue_bulk_from_producer(producer_token_t const& producer, It itemFirst, size_t max) - { - return static_cast(producer.producer)->dequeue_bulk(itemFirst, max); - } - - - // Returns an estimate of the total number of elements currently in the queue. This - // estimate is only accurate if the queue has completely stabilized before it is called - // (i.e. all enqueue and dequeue operations have completed and their memory effects are - // visible on the calling thread, and no further operations start while this method is - // being called). - // Thread-safe. 
- size_t size_approx() const - { - size_t size = 0; - for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { - size += ptr->size_approx(); - } - return size; - } - - - // Returns true if the underlying atomic variables used by - // the queue are lock-free (they should be on most platforms). - // Thread-safe. - static bool is_lock_free() - { - return - details::static_is_lock_free::value == 2 && - details::static_is_lock_free::value == 2 && - details::static_is_lock_free::value == 2 && - details::static_is_lock_free::value == 2 && - details::static_is_lock_free::value == 2 && - details::static_is_lock_free::value == 2; - } - - - private: - friend struct ProducerToken; - friend struct ConsumerToken; - friend struct ExplicitProducer; - friend class ConcurrentQueueTests; - - enum AllocationMode { CanAlloc, CannotAlloc }; - - - /////////////////////////////// - // Queue methods - /////////////////////////////// - - template - inline bool inner_enqueue(producer_token_t const& token, U&& element) - { - return static_cast(token.producer)->ConcurrentQueue::ExplicitProducer::template enqueue(std::forward(element)); - } - - template - inline bool inner_enqueue(U&& element) - { - auto producer = get_or_add_implicit_producer(); - return producer == nullptr ? false : producer->ConcurrentQueue::ImplicitProducer::template enqueue(std::forward(element)); - } - - template - inline bool inner_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) - { - return static_cast(token.producer)->ConcurrentQueue::ExplicitProducer::template enqueue_bulk(std::forward(itemFirst), count); - } - - template - inline bool inner_enqueue_bulk(It itemFirst, size_t count) - { - auto producer = get_or_add_implicit_producer(); - return producer == nullptr ? 
false : producer->ConcurrentQueue::ImplicitProducer::template enqueue_bulk(std::forward(itemFirst), count); - } - - inline bool update_current_producer_after_rotation(consumer_token_t& token) - { - // Ah, there's been a rotation, figure out where we should be! - auto tail = producerListTail.load(std::memory_order_acquire); - if (token.desiredProducer == nullptr && tail == nullptr) { - return false; - } - auto prodCount = producerCount.load(std::memory_order_relaxed); - auto globalOffset = globalExplicitConsumerOffset.load(std::memory_order_relaxed); - if (details::unlikely(token.desiredProducer == nullptr)) { - // Aha, first time we're dequeueing anything. - // Figure out our local position - // Note: offset is from start, not end, but we're traversing from end -- subtract from count first - std::uint32_t offset = prodCount - 1 - (token.initialOffset % prodCount); - token.desiredProducer = tail; - for (std::uint32_t i = 0; i != offset; ++i) { - token.desiredProducer = static_cast(token.desiredProducer)->next_prod(); - if (token.desiredProducer == nullptr) { - token.desiredProducer = tail; - } - } - } - - std::uint32_t delta = globalOffset - token.lastKnownGlobalOffset; - if (delta >= prodCount) { - delta = delta % prodCount; - } - for (std::uint32_t i = 0; i != delta; ++i) { - token.desiredProducer = static_cast(token.desiredProducer)->next_prod(); - if (token.desiredProducer == nullptr) { - token.desiredProducer = tail; - } - } - - token.lastKnownGlobalOffset = globalOffset; - token.currentProducer = token.desiredProducer; - token.itemsConsumedFromCurrent = 0; - return true; - } - - - /////////////////////////// - // Free list - /////////////////////////// - - template - struct FreeListNode - { - FreeListNode() : freeListRefs(0), freeListNext(nullptr) { } - - std::atomic freeListRefs; - std::atomic freeListNext; - }; - - // A simple CAS-based lock-free free list. 
Not the fastest thing in the world under heavy contention, but - // simple and correct (assuming nodes are never freed until after the free list is destroyed), and fairly - // speedy under low contention. - template // N must inherit FreeListNode or have the same fields (and initialization of them) - struct FreeList - { - FreeList() : freeListHead(nullptr) { } - FreeList(FreeList&& other) : freeListHead(other.freeListHead.load(std::memory_order_relaxed)) { other.freeListHead.store(nullptr, std::memory_order_relaxed); } - void swap(FreeList& other) { details::swap_relaxed(freeListHead, other.freeListHead); } - - FreeList(FreeList const&) MOODYCAMEL_DELETE_FUNCTION; - FreeList& operator=(FreeList const&)MOODYCAMEL_DELETE_FUNCTION; - - inline void add(N* node) - { -#if MCDBGQ_NOLOCKFREE_FREELIST - debug::DebugLock lock(mutex); -#endif - // We know that the should-be-on-freelist bit is 0 at this point, so it's safe to - // set it using a fetch_add - if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST, std::memory_order_acq_rel) == 0) { - // Oh look! We were the last ones referencing this node, and we know - // we want to add it to the free list, so let's do it! 
- add_knowing_refcount_is_zero(node); - } - } - - inline N* try_get() - { -#if MCDBGQ_NOLOCKFREE_FREELIST - debug::DebugLock lock(mutex); -#endif - auto head = freeListHead.load(std::memory_order_acquire); - while (head != nullptr) { - auto prevHead = head; - auto refs = head->freeListRefs.load(std::memory_order_relaxed); - if ((refs & REFS_MASK) == 0 || !head->freeListRefs.compare_exchange_strong(refs, refs + 1, std::memory_order_acquire, std::memory_order_relaxed)) { - head = freeListHead.load(std::memory_order_acquire); - continue; - } - - // Good, reference count has been incremented (it wasn't at zero), which means we can read the - // next and not worry about it changing between now and the time we do the CAS - auto next = head->freeListNext.load(std::memory_order_relaxed); - if (freeListHead.compare_exchange_strong(head, next, std::memory_order_acquire, std::memory_order_relaxed)) { - // Yay, got the node. This means it was on the list, which means shouldBeOnFreeList must be false no - // matter the refcount (because nobody else knows it's been taken off yet, it can't have been put back on). - assert((head->freeListRefs.load(std::memory_order_relaxed) & SHOULD_BE_ON_FREELIST) == 0); - - // Decrease refcount twice, once for our ref, and once for the list's ref - head->freeListRefs.fetch_add(static_castfreeListRefs.load())>(-2), std::memory_order_release); - return head; - } - - // OK, the head must have changed on us, but we still need to decrease the refcount we increased. - // Note that we don't need to release any memory effects, but we do need to ensure that the reference - // count decrement happens-after the CAS on the head. - refs = prevHead->freeListRefs.fetch_add(static_castfreeListRefs.load())>(-1), std::memory_order_acq_rel); - if (refs == SHOULD_BE_ON_FREELIST + 1) { - add_knowing_refcount_is_zero(prevHead); - } - } - - return nullptr; - } - - // Useful for traversing the list when there's no contention (e.g. 
to destroy remaining nodes) - N* head_unsafe() const { return freeListHead.load(std::memory_order_relaxed); } - - private: - inline void add_knowing_refcount_is_zero(N* node) - { - // Since the refcount is zero, and nobody can increase it once it's zero (except us, and we run - // only one copy of this method per node at a time, i.e. the single thread case), then we know - // we can safely change the next pointer of the node; however, once the refcount is back above - // zero, then other threads could increase it (happens under heavy contention, when the refcount - // goes to zero in between a load and a refcount increment of a node in try_get, then back up to - // something non-zero, then the refcount increment is done by the other thread) -- so, if the CAS - // to add the node to the actual list fails, decrease the refcount and leave the add operation to - // the next thread who puts the refcount back at zero (which could be us, hence the loop). - auto head = freeListHead.load(std::memory_order_relaxed); - while (true) { - node->freeListNext.store(head, std::memory_order_relaxed); - node->freeListRefs.store(1, std::memory_order_release); - if (!freeListHead.compare_exchange_strong(head, node, std::memory_order_release, std::memory_order_relaxed)) { - // Hmm, the add failed, but we can only try again when the refcount goes back to zero - if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST - 1, std::memory_order_release) == 1) { - continue; - } - } - return; - } - } - - private: - // Implemented like a stack, but where node order doesn't matter (nodes are inserted out of order under contention) - std::atomic freeListHead; - - static const std::uint32_t REFS_MASK = 0x7FFFFFFF; - static const std::uint32_t SHOULD_BE_ON_FREELIST = 0x80000000; - -#if MCDBGQ_NOLOCKFREE_FREELIST - debug::DebugMutex mutex; -#endif - }; - - - /////////////////////////// - // Block - /////////////////////////// - - enum InnerQueueContext { implicit_context = 0, explicit_context = 1 }; - 
- struct Block - { - Block() - : elementsCompletelyDequeued(0), freeListRefs(0), freeListNext(nullptr), shouldBeOnFreeList(false), dynamicallyAllocated(true) - { -#if MCDBGQ_TRACKMEM - owner = nullptr; -#endif - } - - template - inline bool is_empty() const - { - if (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) { - // Check flags - for (size_t i = 0; i < BLOCK_SIZE; ++i) { - if (!emptyFlags[i].load(std::memory_order_relaxed)) { - return false; - } - } - - // Aha, empty; make sure we have all other memory effects that happened before the empty flags were set - std::atomic_thread_fence(std::memory_order_acquire); - return true; - } - else { - // Check counter - if (elementsCompletelyDequeued.load(std::memory_order_relaxed) == BLOCK_SIZE) { - std::atomic_thread_fence(std::memory_order_acquire); - return true; - } - assert(elementsCompletelyDequeued.load(std::memory_order_relaxed) <= BLOCK_SIZE); - return false; - } - } - - // Returns true if the block is now empty (does not apply in explicit context) - template - inline bool set_empty(index_t i) - { - if (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) { - // Set flag - assert(!emptyFlags[BLOCK_SIZE - 1 - static_cast(i & static_cast(BLOCK_SIZE - 1))].load(std::memory_order_relaxed)); - emptyFlags[BLOCK_SIZE - 1 - static_cast(i & static_cast(BLOCK_SIZE - 1))].store(true, std::memory_order_release); - return false; - } - else { - // Increment counter - auto prevVal = elementsCompletelyDequeued.fetch_add(1, std::memory_order_release); - assert(prevVal < BLOCK_SIZE); - return prevVal == BLOCK_SIZE - 1; - } - } - - // Sets multiple contiguous item statuses to 'empty' (assumes no wrapping and count > 0). - // Returns true if the block is now empty (does not apply in explicit context). 
- template - inline bool set_many_empty(index_t i, size_t count) - { - if (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) { - // Set flags - std::atomic_thread_fence(std::memory_order_release); - i = BLOCK_SIZE - 1 - static_cast(i & static_cast(BLOCK_SIZE - 1)) - count + 1; - for (size_t j = 0; j != count; ++j) { - assert(!emptyFlags[i + j].load(std::memory_order_relaxed)); - emptyFlags[i + j].store(true, std::memory_order_relaxed); - } - return false; - } - else { - // Increment counter - auto prevVal = elementsCompletelyDequeued.fetch_add(count, std::memory_order_release); - assert(prevVal + count <= BLOCK_SIZE); - return prevVal + count == BLOCK_SIZE; - } - } - - template - inline void set_all_empty() - { - if (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) { - // Set all flags - for (size_t i = 0; i != BLOCK_SIZE; ++i) { - emptyFlags[i].store(true, std::memory_order_relaxed); - } - } - else { - // Reset counter - elementsCompletelyDequeued.store(BLOCK_SIZE, std::memory_order_relaxed); - } - } - - template - inline void reset_empty() - { - if (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) { - // Reset flags - for (size_t i = 0; i != BLOCK_SIZE; ++i) { - emptyFlags[i].store(false, std::memory_order_relaxed); - } - } - else { - // Reset counter - elementsCompletelyDequeued.store(0, std::memory_order_relaxed); - } - } - - inline T* operator[](index_t idx) MOODYCAMEL_NOEXCEPT{ return reinterpret_cast(elements)+static_cast(idx & static_cast(BLOCK_SIZE - 1)); } - inline T const* operator[](index_t idx) const MOODYCAMEL_NOEXCEPT{ return reinterpret_cast(elements)+static_cast(idx & static_cast(BLOCK_SIZE - 1)); } - - public: - Block* next; - std::atomic elementsCompletelyDequeued; - std::atomic emptyFlags[BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD ? 
BLOCK_SIZE : 1]; - private: - char elements[sizeof(T) * BLOCK_SIZE]; - public: - std::atomic freeListRefs; - std::atomic freeListNext; - std::atomic shouldBeOnFreeList; - bool dynamicallyAllocated; // Perhaps a better name for this would be 'isNotPartOfInitialBlockPool' - -#if MCDBGQ_TRACKMEM - void* owner; -#endif - }; - - -#if MCDBGQ_TRACKMEM - public: - struct MemStats; - private: -#endif - - /////////////////////////// - // Producer base - /////////////////////////// - - struct ProducerBase : public details::ConcurrentQueueProducerTypelessBase - { - ProducerBase(ConcurrentQueue* parent, bool isExplicit) : - tailIndex(0), - headIndex(0), - dequeueOptimisticCount(0), - dequeueOvercommit(0), - tailBlock(nullptr), - isExplicit(isExplicit), - parent(parent) - { - } - - virtual ~ProducerBase() { }; - - template - inline bool dequeue(U& element) - { - if (isExplicit) { - return static_cast(this)->dequeue(element); - } - else { - return static_cast(this)->dequeue(element); - } - } - - template - inline size_t dequeue_bulk(It& itemFirst, size_t max) - { - if (isExplicit) { - return static_cast(this)->dequeue_bulk(itemFirst, max); - } - else { - return static_cast(this)->dequeue_bulk(itemFirst, max); - } - } - - inline ProducerBase* next_prod() const { return static_cast(next); } - - inline size_t size_approx() const - { - auto tail = tailIndex.load(std::memory_order_relaxed); - auto head = headIndex.load(std::memory_order_relaxed); - return details::circular_less_than(head, tail) ? 
static_cast(tail - head) : 0; - } - - inline index_t getTail() const { return tailIndex.load(std::memory_order_relaxed); } - protected: - std::atomic tailIndex; // Where to enqueue to next - std::atomic headIndex; // Where to dequeue from next - - std::atomic dequeueOptimisticCount; - std::atomic dequeueOvercommit; - - Block* tailBlock; - - public: - bool isExplicit; - ConcurrentQueue* parent; - - protected: -#if MCDBGQ_TRACKMEM - friend struct MemStats; -#endif - }; - - - /////////////////////////// - // Explicit queue - /////////////////////////// - - struct ExplicitProducer : public ProducerBase - { - explicit ExplicitProducer(ConcurrentQueue* parent) : - ProducerBase(parent, true), - blockIndex(nullptr), - pr_blockIndexSlotsUsed(0), - pr_blockIndexSize(EXPLICIT_INITIAL_INDEX_SIZE >> 1), - pr_blockIndexFront(0), - pr_blockIndexEntries(nullptr), - pr_blockIndexRaw(nullptr) - { - size_t poolBasedIndexSize = details::ceil_to_pow_2(parent->initialBlockPoolSize) >> 1; - if (poolBasedIndexSize > pr_blockIndexSize) { - pr_blockIndexSize = poolBasedIndexSize; - } - - new_block_index(0); // This creates an index with double the number of current entries, i.e. EXPLICIT_INITIAL_INDEX_SIZE - } - - ~ExplicitProducer() - { - // Destruct any elements not yet dequeued. - // Since we're in the destructor, we can assume all elements - // are either completely dequeued or completely not (no halfways). 
- if (this->tailBlock != nullptr) { // Note this means there must be a block index too - // First find the block that's partially dequeued, if any - Block* halfDequeuedBlock = nullptr; - if ((this->headIndex.load(std::memory_order_relaxed) & static_cast(BLOCK_SIZE - 1)) != 0) { - // The head's not on a block boundary, meaning a block somewhere is partially dequeued - // (or the head block is the tail block and was fully dequeued, but the head/tail are still not on a boundary) - size_t i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & (pr_blockIndexSize - 1); - while (details::circular_less_than(pr_blockIndexEntries[i].base + BLOCK_SIZE, this->headIndex.load(std::memory_order_relaxed))) { - i = (i + 1) & (pr_blockIndexSize - 1); - } - assert(details::circular_less_than(pr_blockIndexEntries[i].base, this->headIndex.load(std::memory_order_relaxed))); - halfDequeuedBlock = pr_blockIndexEntries[i].block; - } - - // Start at the head block (note the first line in the loop gives us the head from the tail on the first iteration) - auto block = this->tailBlock; - do { - block = block->next; - if (block->ConcurrentQueue::Block::template is_empty()) { - continue; - } - - size_t i = 0; // Offset into block - if (block == halfDequeuedBlock) { - i = static_cast(this->headIndex.load(std::memory_order_relaxed) & static_cast(BLOCK_SIZE - 1)); - } - - // Walk through all the items in the block; if this is the tail block, we need to stop when we reach the tail index - auto lastValidIndex = (this->tailIndex.load(std::memory_order_relaxed) & static_cast(BLOCK_SIZE - 1)) == 0 ? 
BLOCK_SIZE : static_cast(this->tailIndex.load(std::memory_order_relaxed) & static_cast(BLOCK_SIZE - 1)); - while (i != BLOCK_SIZE && (block != this->tailBlock || i != lastValidIndex)) { - (*block)[i++]->~T(); - } - } while (block != this->tailBlock); - } - - // Destroy all blocks that we own - if (this->tailBlock != nullptr) { - auto block = this->tailBlock; - do { - auto next = block->next; - if (block->dynamicallyAllocated) { - destroy(block); - } - block = next; - } while (block != this->tailBlock); - } - - // Destroy the block indices - auto header = static_cast(pr_blockIndexRaw); - while (header != nullptr) { - auto prev = static_cast(header->prev); - header->~BlockIndexHeader(); - Traits::free(header); - header = prev; - } - } - - template - inline bool enqueue(U&& element) - { - index_t currentTailIndex = this->tailIndex.load(std::memory_order_relaxed); - index_t newTailIndex = 1 + currentTailIndex; - if ((currentTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { - // We reached the end of a block, start a new one - auto startBlock = this->tailBlock; - auto originalBlockIndexSlotsUsed = pr_blockIndexSlotsUsed; - if (this->tailBlock != nullptr && this->tailBlock->next->ConcurrentQueue::Block::template is_empty()) { - // We can re-use the block ahead of us, it's empty! - this->tailBlock = this->tailBlock->next; - this->tailBlock->ConcurrentQueue::Block::template reset_empty(); - - // We'll put the block on the block index (guaranteed to be room since we're conceptually removing the - // last block from it first -- except instead of removing then adding, we can just overwrite). - // Note that there must be a valid block index here, since even if allocation failed in the ctor, - // it would have been re-attempted when adding the first block to the queue; since there is such - // a block, a block index must have been successfully allocated. 
- } - else { - // Whatever head value we see here is >= the last value we saw here (relatively), - // and <= its current value. Since we have the most recent tail, the head must be - // <= to it. - auto head = this->headIndex.load(std::memory_order_relaxed); - assert(!details::circular_less_than(currentTailIndex, head)); - if (!details::circular_less_than(head, currentTailIndex + BLOCK_SIZE) - || (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head))) { - // We can't enqueue in another block because there's not enough leeway -- the - // tail could surpass the head by the time the block fills up! (Or we'll exceed - // the size limit, if the second part of the condition was true.) - return false; - } - // We're going to need a new block; check that the block index has room - if (pr_blockIndexRaw == nullptr || pr_blockIndexSlotsUsed == pr_blockIndexSize) { - // Hmm, the circular block index is already full -- we'll need - // to allocate a new index. Note pr_blockIndexRaw can only be nullptr if - // the initial allocation failed in the constructor. - - if (allocMode == CannotAlloc || !new_block_index(pr_blockIndexSlotsUsed)) { - return false; - } - } - - // Insert a new block in the circular linked list - auto newBlock = this->parent->ConcurrentQueue::template requisition_block(); - if (newBlock == nullptr) { - return false; - } -#if MCDBGQ_TRACKMEM - newBlock->owner = this; -#endif - newBlock->ConcurrentQueue::Block::template reset_empty(); - if (this->tailBlock == nullptr) { - newBlock->next = newBlock; - } - else { - newBlock->next = this->tailBlock->next; - this->tailBlock->next = newBlock; - } - this->tailBlock = newBlock; - ++pr_blockIndexSlotsUsed; - } - - if (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (nullptr) T(std::forward(element)))) { - // The constructor may throw. 
We want the element not to appear in the queue in - // that case (without corrupting the queue): - MOODYCAMEL_TRY{ - new ((*this->tailBlock)[currentTailIndex]) T(std::forward(element)); - } - MOODYCAMEL_CATCH(...) { - // Revert change to the current block, but leave the new block available - // for next time - pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; - this->tailBlock = startBlock == nullptr ? this->tailBlock : startBlock; - MOODYCAMEL_RETHROW; - } - } - else { - (void)startBlock; - (void)originalBlockIndexSlotsUsed; - } - - // Add block to block index - auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront]; - entry.base = currentTailIndex; - entry.block = this->tailBlock; - blockIndex.load(std::memory_order_relaxed)->front.store(pr_blockIndexFront, std::memory_order_release); - pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1); - - if (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (nullptr) T(std::forward(element)))) { - this->tailIndex.store(newTailIndex, std::memory_order_release); - return true; - } - } - - // Enqueue - new ((*this->tailBlock)[currentTailIndex]) T(std::forward(element)); - - this->tailIndex.store(newTailIndex, std::memory_order_release); - return true; - } - - template - bool dequeue(U& element) - { - auto tail = this->tailIndex.load(std::memory_order_relaxed); - auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); - if (details::circular_less_than(this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit, tail)) { - // Might be something to dequeue, let's give it a try - - // Note that this if is purely for performance purposes in the common case when the queue is - // empty and the values are eventually consistent -- we may enter here spuriously. 
- - // Note that whatever the values of overcommit and tail are, they are not going to change (unless we - // change them) and must be the same value at this point (inside the if) as when the if condition was - // evaluated. - - // We insert an acquire fence here to synchronize-with the release upon incrementing dequeueOvercommit below. - // This ensures that whatever the value we got loaded into overcommit, the load of dequeueOptisticCount in - // the fetch_add below will result in a value at least as recent as that (and therefore at least as large). - // Note that I believe a compiler (signal) fence here would be sufficient due to the nature of fetch_add (all - // read-modify-write operations are guaranteed to work on the latest value in the modification order), but - // unfortunately that can't be shown to be correct using only the C++11 standard. - // See http://stackoverflow.com/questions/18223161/what-are-the-c11-memory-ordering-guarantees-in-this-corner-case - std::atomic_thread_fence(std::memory_order_acquire); - - // Increment optimistic counter, then check if it went over the boundary - auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(1, std::memory_order_relaxed); - - // Note that since dequeueOvercommit must be <= dequeueOptimisticCount (because dequeueOvercommit is only ever - // incremented after dequeueOptimisticCount -- this is enforced in the `else` block below), and since we now - // have a version of dequeueOptimisticCount that is at least as recent as overcommit (due to the release upon - // incrementing dequeueOvercommit and the acquire above that synchronizes with it), overcommit <= myDequeueCount. - assert(overcommit <= myDequeueCount); - - // Note that we reload tail here in case it changed; it will be the same value as before or greater, since - // this load is sequenced after (happens after) the earlier load above. 
This is supported by read-read - // coherency (as defined in the standard), explained here: http://en.cppreference.com/w/cpp/atomic/memory_order - tail = this->tailIndex.load(std::memory_order_acquire); - if (details::likely(details::circular_less_than(myDequeueCount - overcommit, tail))) { - // Guaranteed to be at least one element to dequeue! - - // Get the index. Note that since there's guaranteed to be at least one element, this - // will never exceed tail. We need to do an acquire-release fence here since it's possible - // that whatever condition got us to this point was for an earlier enqueued element (that - // we already see the memory effects for), but that by the time we increment somebody else - // has incremented it, and we need to see the memory effects for *that* element, which is - // in such a case is necessarily visible on the thread that incremented it in the first - // place with the more current condition (they must have acquired a tail that is at least - // as recent). - auto index = this->headIndex.fetch_add(1, std::memory_order_acq_rel); - - - // Determine which block the element is in - - auto localBlockIndex = blockIndex.load(std::memory_order_acquire); - auto localBlockIndexHead = localBlockIndex->front.load(std::memory_order_acquire); - - // We need to be careful here about subtracting and dividing because of index wrap-around. 
- // When an index wraps, we need to preserve the sign of the offset when dividing it by the - // block size (in order to get a correct signed block count offset in all cases): - auto headBase = localBlockIndex->entries[localBlockIndexHead].base; - auto blockBaseIndex = index & ~static_cast(BLOCK_SIZE - 1); - auto offset = static_cast(static_cast::type>(blockBaseIndex - headBase) / BLOCK_SIZE); - auto block = localBlockIndex->entries[(localBlockIndexHead + offset) & (localBlockIndex->size - 1)].block; - - // Dequeue - auto& el = *((*block)[index]); - if (!MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, element = std::move(el))) { - // Make sure the element is still fully dequeued and destroyed even if the assignment - // throws - #ifdef _MSC_VER - #pragma warning( push ) - #pragma warning( disable : 4189) - #endif - struct Guard { - Block* block; - index_t index; - - ~Guard() - { - (*block)[index]->~T(); - block->ConcurrentQueue::Block::template set_empty(index); - } - } guard = { block, index }; - #ifdef _MSC_VER - #pragma pop - #endif - element = std::move(el); - } - else { - element = std::move(el); - el.~T(); - block->ConcurrentQueue::Block::template set_empty(index); - } - - return true; - } - else { - // Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent - this->dequeueOvercommit.fetch_add(1, std::memory_order_release); // Release so that the fetch_add on dequeueOptimisticCount is guaranteed to happen before this write - } - } - - return false; - } - - template - bool enqueue_bulk(It itemFirst, size_t count) - { - // First, we need to make sure we have enough room to enqueue all of the elements; - // this means pre-allocating blocks and putting them in the block index (but only if - // all the allocations succeeded). 
- index_t startTailIndex = this->tailIndex.load(std::memory_order_relaxed); - auto startBlock = this->tailBlock; - auto originalBlockIndexFront = pr_blockIndexFront; - auto originalBlockIndexSlotsUsed = pr_blockIndexSlotsUsed; - - Block* firstAllocatedBlock = nullptr; - - // Figure out how many blocks we'll need to allocate, and do so - size_t blockBaseDiff = ((startTailIndex + count - 1) & ~static_cast(BLOCK_SIZE - 1)) - ((startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1)); - index_t currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); - if (blockBaseDiff > 0) { - // Allocate as many blocks as possible from ahead - while (blockBaseDiff > 0 && this->tailBlock != nullptr && this->tailBlock->next != firstAllocatedBlock && this->tailBlock->next->ConcurrentQueue::Block::template is_empty()) { - blockBaseDiff -= static_cast(BLOCK_SIZE); - currentTailIndex += static_cast(BLOCK_SIZE); - - this->tailBlock = this->tailBlock->next; - firstAllocatedBlock = firstAllocatedBlock == nullptr ? 
this->tailBlock : firstAllocatedBlock; - - auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront]; - entry.base = currentTailIndex; - entry.block = this->tailBlock; - pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1); - } - - // Now allocate as many blocks as necessary from the block pool - while (blockBaseDiff > 0) { - blockBaseDiff -= static_cast(BLOCK_SIZE); - currentTailIndex += static_cast(BLOCK_SIZE); - - auto head = this->headIndex.load(std::memory_order_relaxed); - assert(!details::circular_less_than(currentTailIndex, head)); - bool full = !details::circular_less_than(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head)); - if (pr_blockIndexRaw == nullptr || pr_blockIndexSlotsUsed == pr_blockIndexSize || full) { - if (allocMode == CannotAlloc || full || !new_block_index(originalBlockIndexSlotsUsed)) { - // Failed to allocate, undo changes (but keep injected blocks) - pr_blockIndexFront = originalBlockIndexFront; - pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; - this->tailBlock = startBlock == nullptr ? firstAllocatedBlock : startBlock; - return false; - } - - // pr_blockIndexFront is updated inside new_block_index, so we need to - // update our fallback value too (since we keep the new index even if we - // later fail) - originalBlockIndexFront = originalBlockIndexSlotsUsed; - } - - // Insert a new block in the circular linked list - auto newBlock = this->parent->ConcurrentQueue::template requisition_block(); - if (newBlock == nullptr) { - pr_blockIndexFront = originalBlockIndexFront; - pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; - this->tailBlock = startBlock == nullptr ? 
firstAllocatedBlock : startBlock; - return false; - } - -#if MCDBGQ_TRACKMEM - newBlock->owner = this; -#endif - newBlock->ConcurrentQueue::Block::template set_all_empty(); - if (this->tailBlock == nullptr) { - newBlock->next = newBlock; - } - else { - newBlock->next = this->tailBlock->next; - this->tailBlock->next = newBlock; - } - this->tailBlock = newBlock; - firstAllocatedBlock = firstAllocatedBlock == nullptr ? this->tailBlock : firstAllocatedBlock; - - ++pr_blockIndexSlotsUsed; - - auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront]; - entry.base = currentTailIndex; - entry.block = this->tailBlock; - pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1); - } - - // Excellent, all allocations succeeded. Reset each block's emptiness before we fill them up, and - // publish the new block index front - auto block = firstAllocatedBlock; - while (true) { - block->ConcurrentQueue::Block::template reset_empty(); - if (block == this->tailBlock) { - break; - } - block = block->next; - } - - if (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst)))) { - blockIndex.load(std::memory_order_relaxed)->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release); - } - } - - // Enqueue, one block at a time - index_t newTailIndex = startTailIndex + static_cast(count); - currentTailIndex = startTailIndex; - auto endBlock = this->tailBlock; - this->tailBlock = startBlock; - assert((startTailIndex & static_cast(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr || count == 0); - if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0 && firstAllocatedBlock != nullptr) { - this->tailBlock = firstAllocatedBlock; - } - while (true) { - auto stopIndex = (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); - if (details::circular_less_than(newTailIndex, stopIndex)) { - stopIndex = newTailIndex; - } - if 
(MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst)))) { - while (currentTailIndex != stopIndex) { - new ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++); - } - } - else { - MOODYCAMEL_TRY{ - while (currentTailIndex != stopIndex) { - // Must use copy constructor even if move constructor is available - // because we may have to revert if there's an exception. - // Sorry about the horrible templated next line, but it was the only way - // to disable moving *at compile time*, which is important because a type - // may only define a (noexcept) move constructor, and so calls to the - // cctor will not compile, even if they are in an if branch that will never - // be executed - new ((*this->tailBlock)[currentTailIndex]) T(details::nomove_if<(bool)!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst)))>::eval(*itemFirst)); - ++currentTailIndex; - ++itemFirst; - } - } - MOODYCAMEL_CATCH(...) { - // Oh dear, an exception's been thrown -- destroy the elements that - // were enqueued so far and revert the entire bulk operation (we'll keep - // any allocated blocks in our linked list for later, though). - auto constructedStopIndex = currentTailIndex; - auto lastBlockEnqueued = this->tailBlock; - - pr_blockIndexFront = originalBlockIndexFront; - pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; - this->tailBlock = startBlock == nullptr ? 
firstAllocatedBlock : startBlock; - - if (!details::is_trivially_destructible::value) { - auto block = startBlock; - if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { - block = firstAllocatedBlock; - } - currentTailIndex = startTailIndex; - while (true) { - auto stopIndex = (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); - if (details::circular_less_than(constructedStopIndex, stopIndex)) { - stopIndex = constructedStopIndex; - } - while (currentTailIndex != stopIndex) { - (*block)[currentTailIndex++]->~T(); - } - if (block == lastBlockEnqueued) { - break; - } - block = block->next; - } - } - MOODYCAMEL_RETHROW; - } - } - - if (this->tailBlock == endBlock) { - assert(currentTailIndex == newTailIndex); - break; - } - this->tailBlock = this->tailBlock->next; - } - - if (!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst))) && firstAllocatedBlock != nullptr) { - blockIndex.load(std::memory_order_relaxed)->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release); - } - - this->tailIndex.store(newTailIndex, std::memory_order_release); - return true; - } - - template - size_t dequeue_bulk(It& itemFirst, size_t max) - { - auto tail = this->tailIndex.load(std::memory_order_relaxed); - auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); - auto desiredCount = static_cast(tail - (this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit)); - if (details::circular_less_than(0, desiredCount)) { - desiredCount = desiredCount < max ? 
desiredCount : max; - std::atomic_thread_fence(std::memory_order_acquire); - - auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(desiredCount, std::memory_order_relaxed); - assert(overcommit <= myDequeueCount); - - tail = this->tailIndex.load(std::memory_order_acquire); - auto actualCount = static_cast(tail - (myDequeueCount - overcommit)); - if (details::circular_less_than(0, actualCount)) { - actualCount = desiredCount < actualCount ? desiredCount : actualCount; - if (actualCount < desiredCount) { - this->dequeueOvercommit.fetch_add(desiredCount - actualCount, std::memory_order_release); - } - - // Get the first index. Note that since there's guaranteed to be at least actualCount elements, this - // will never exceed tail. - auto firstIndex = this->headIndex.fetch_add(actualCount, std::memory_order_acq_rel); - - // Determine which block the first element is in - auto localBlockIndex = blockIndex.load(std::memory_order_acquire); - auto localBlockIndexHead = localBlockIndex->front.load(std::memory_order_acquire); - - auto headBase = localBlockIndex->entries[localBlockIndexHead].base; - auto firstBlockBaseIndex = firstIndex & ~static_cast(BLOCK_SIZE - 1); - auto offset = static_cast(static_cast::type>(firstBlockBaseIndex - headBase) / BLOCK_SIZE); - auto indexIndex = (localBlockIndexHead + offset) & (localBlockIndex->size - 1); - - // Iterate the blocks and dequeue - auto index = firstIndex; - do { - auto firstIndexInBlock = index; - auto endIndex = (index & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); - endIndex = details::circular_less_than(firstIndex + static_cast(actualCount), endIndex) ? 
firstIndex + static_cast(actualCount) : endIndex; - auto block = localBlockIndex->entries[indexIndex].block; - if (MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, details::deref_noexcept(itemFirst) = std::move((*(*block)[index])))) { - while (index != endIndex) { - auto& el = *((*block)[index]); - *itemFirst++ = std::move(el); - el.~T(); - ++index; - } - } - else { - MOODYCAMEL_TRY{ - while (index != endIndex) { - auto& el = *((*block)[index]); - *itemFirst = std::move(el); - ++itemFirst; - el.~T(); - ++index; - } - } - MOODYCAMEL_CATCH(...) { - // It's too late to revert the dequeue, but we can make sure that all - // the dequeued objects are properly destroyed and the block index - // (and empty count) are properly updated before we propagate the exception - do { - block = localBlockIndex->entries[indexIndex].block; - while (index != endIndex) { - (*block)[index++]->~T(); - } - block->ConcurrentQueue::Block::template set_many_empty(firstIndexInBlock, static_cast(endIndex - firstIndexInBlock)); - indexIndex = (indexIndex + 1) & (localBlockIndex->size - 1); - - firstIndexInBlock = index; - endIndex = (index & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); - endIndex = details::circular_less_than(firstIndex + static_cast(actualCount), endIndex) ? 
firstIndex + static_cast(actualCount) : endIndex; - } while (index != firstIndex + actualCount); - - MOODYCAMEL_RETHROW; - } - } - block->ConcurrentQueue::Block::template set_many_empty(firstIndexInBlock, static_cast(endIndex - firstIndexInBlock)); - indexIndex = (indexIndex + 1) & (localBlockIndex->size - 1); - } while (index != firstIndex + actualCount); - - return actualCount; - } - else { - // Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent - this->dequeueOvercommit.fetch_add(desiredCount, std::memory_order_release); - } - } - - return 0; - } - - private: - struct BlockIndexEntry - { - index_t base; - Block* block; - }; - - struct BlockIndexHeader - { - size_t size; - std::atomic front; // Current slot (not next, like pr_blockIndexFront) - BlockIndexEntry* entries; - void* prev; - }; - - - bool new_block_index(size_t numberOfFilledSlotsToExpose) - { - auto prevBlockSizeMask = pr_blockIndexSize - 1; - - // Create the new block - pr_blockIndexSize <<= 1; - auto newRawPtr = static_cast(Traits::malloc(sizeof(BlockIndexHeader) + std::alignment_of::value - 1 + sizeof(BlockIndexEntry) * pr_blockIndexSize)); - if (newRawPtr == nullptr) { - pr_blockIndexSize >>= 1; // Reset to allow graceful retry - return false; - } - - auto newBlockIndexEntries = reinterpret_cast(details::align_for(newRawPtr + sizeof(BlockIndexHeader))); - - // Copy in all the old indices, if any - size_t j = 0; - if (pr_blockIndexSlotsUsed != 0) { - auto i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & prevBlockSizeMask; - do { - newBlockIndexEntries[j++] = pr_blockIndexEntries[i]; - i = (i + 1) & prevBlockSizeMask; - } while (i != pr_blockIndexFront); - } - - // Update everything - auto header = new (newRawPtr)BlockIndexHeader; - header->size = pr_blockIndexSize; - header->front.store(numberOfFilledSlotsToExpose - 1, std::memory_order_relaxed); - header->entries = newBlockIndexEntries; - header->prev = pr_blockIndexRaw; // we link the new block to the 
old one so we can free it later - - pr_blockIndexFront = j; - pr_blockIndexEntries = newBlockIndexEntries; - pr_blockIndexRaw = newRawPtr; - blockIndex.store(header, std::memory_order_release); - - return true; - } - - private: - std::atomic blockIndex; - - // To be used by producer only -- consumer must use the ones in referenced by blockIndex - size_t pr_blockIndexSlotsUsed; - size_t pr_blockIndexSize; - size_t pr_blockIndexFront; // Next slot (not current) - BlockIndexEntry* pr_blockIndexEntries; - void* pr_blockIndexRaw; - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - public: - ExplicitProducer* nextExplicitProducer; - private: -#endif - -#if MCDBGQ_TRACKMEM - friend struct MemStats; -#endif - }; - - - ////////////////////////////////// - // Implicit queue - ////////////////////////////////// - - struct ImplicitProducer : public ProducerBase - { - ImplicitProducer(ConcurrentQueue* parent) : - ProducerBase(parent, false), - nextBlockIndexCapacity(IMPLICIT_INITIAL_INDEX_SIZE), - blockIndex(nullptr) - { - new_block_index(); - } - - ~ImplicitProducer() - { - // Note that since we're in the destructor we can assume that all enqueue/dequeue operations - // completed already; this means that all undequeued elements are placed contiguously across - // contiguous blocks, and that only the first and last remaining blocks can be only partially - // empty (all other remaining blocks must be completely full). - -#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED - // Unregister ourselves for thread termination notification - if (!this->inactive.load(std::memory_order_relaxed)) { - details::ThreadExitNotifier::unsubscribe(&threadExitListener); - } -#endif - - // Destroy all remaining elements! 
- auto tail = this->tailIndex.load(std::memory_order_relaxed); - auto index = this->headIndex.load(std::memory_order_relaxed); - Block* block = nullptr; - assert(index == tail || details::circular_less_than(index, tail)); - bool forceFreeLastBlock = index != tail; // If we enter the loop, then the last (tail) block will not be freed - while (index != tail) { - if ((index & static_cast(BLOCK_SIZE - 1)) == 0 || block == nullptr) { - if (block != nullptr && block->dynamicallyAllocated) { - // Free the old block - this->parent->destroy(block); - } - - block = get_block_index_entry_for_index(index)->value.load(std::memory_order_relaxed); - } - - ((*block)[index])->~T(); - ++index; - } - // Even if the queue is empty, there's still one block that's not on the free list - // (unless the head index reached the end of it, in which case the tail will be poised - // to create a new block). - if (this->tailBlock != nullptr && (forceFreeLastBlock || (tail & static_cast(BLOCK_SIZE - 1)) != 0) && this->tailBlock->dynamicallyAllocated) { - this->parent->destroy(this->tailBlock); - } - - // Destroy block index - auto localBlockIndex = blockIndex.load(std::memory_order_relaxed); - if (localBlockIndex != nullptr) { - for (size_t i = 0; i != localBlockIndex->capacity; ++i) { - localBlockIndex->index[i]->~BlockIndexEntry(); - } - do { - auto prev = localBlockIndex->prev; - localBlockIndex->~BlockIndexHeader(); - Traits::free(localBlockIndex); - localBlockIndex = prev; - } while (localBlockIndex != nullptr); - } - } - - template - inline bool enqueue(U&& element) - { - index_t currentTailIndex = this->tailIndex.load(std::memory_order_relaxed); - index_t newTailIndex = 1 + currentTailIndex; - if ((currentTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { - // We reached the end of a block, start a new one - auto head = this->headIndex.load(std::memory_order_relaxed); - assert(!details::circular_less_than(currentTailIndex, head)); - if (!details::circular_less_than(head, currentTailIndex + 
BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head))) { - return false; - } -#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - debug::DebugLock lock(mutex); -#endif - // Find out where we'll be inserting this block in the block index - BlockIndexEntry* idxEntry; - if (!insert_block_index_entry(idxEntry, currentTailIndex)) { - return false; - } - - // Get ahold of a new block - auto newBlock = this->parent->ConcurrentQueue::template requisition_block(); - if (newBlock == nullptr) { - rewind_block_index_tail(); - idxEntry->value.store(nullptr, std::memory_order_relaxed); - return false; - } -#if MCDBGQ_TRACKMEM - newBlock->owner = this; -#endif - newBlock->ConcurrentQueue::Block::template reset_empty(); - - if (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (nullptr) T(std::forward(element)))) { - // May throw, try to insert now before we publish the fact that we have this new block - MOODYCAMEL_TRY{ - new ((*newBlock)[currentTailIndex]) T(std::forward(element)); - } - MOODYCAMEL_CATCH(...) 
{ - rewind_block_index_tail(); - idxEntry->value.store(nullptr, std::memory_order_relaxed); - this->parent->add_block_to_free_list(newBlock); - MOODYCAMEL_RETHROW; - } - } - - // Insert the new block into the index - idxEntry->value.store(newBlock, std::memory_order_relaxed); - - this->tailBlock = newBlock; - - if (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (nullptr) T(std::forward(element)))) { - this->tailIndex.store(newTailIndex, std::memory_order_release); - return true; - } - } - - // Enqueue - new ((*this->tailBlock)[currentTailIndex]) T(std::forward(element)); - - this->tailIndex.store(newTailIndex, std::memory_order_release); - return true; - } - - template - bool dequeue(U& element) - { - // See ExplicitProducer::dequeue for rationale and explanation - index_t tail = this->tailIndex.load(std::memory_order_relaxed); - index_t overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); - if (details::circular_less_than(this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit, tail)) { - std::atomic_thread_fence(std::memory_order_acquire); - - index_t myDequeueCount = this->dequeueOptimisticCount.fetch_add(1, std::memory_order_relaxed); - assert(overcommit <= myDequeueCount); - tail = this->tailIndex.load(std::memory_order_acquire); - if (details::likely(details::circular_less_than(myDequeueCount - overcommit, tail))) { - index_t index = this->headIndex.fetch_add(1, std::memory_order_acq_rel); - - // Determine which block the element is in - auto entry = get_block_index_entry_for_index(index); - - // Dequeue - auto block = entry->value.load(std::memory_order_relaxed); - auto& el = *((*block)[index]); - - if (!MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, element = std::move(el))) { -#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - // Note: Acquiring the mutex with every dequeue instead of only when a block - // is released is very sub-optimal, but it is, after all, purely debug code. 
- debug::DebugLock lock(producer->mutex); -#endif - #ifdef _MSC_VER - #pragma warning( push ) - #pragma warning( disable : 4189) - #endif - struct Guard { - Block* block; - index_t index; - BlockIndexEntry* entry; - ConcurrentQueue* parent; - - ~Guard() - { - (*block)[index]->~T(); - if (block->ConcurrentQueue::Block::template set_empty(index)) { - entry->value.store(nullptr, std::memory_order_relaxed); - parent->add_block_to_free_list(block); - } - } - } guard = { block, index, entry, this->parent }; - #ifdef _MSC_VER - #pragma pop - #endif - element = std::move(el); - } - else { - element = std::move(el); - el.~T(); - - if (block->ConcurrentQueue::Block::template set_empty(index)) { - { -#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - debug::DebugLock lock(mutex); -#endif - // Add the block back into the global free pool (and remove from block index) - entry->value.store(nullptr, std::memory_order_relaxed); - } - this->parent->add_block_to_free_list(block); // releases the above store - } - } - - return true; - } - else { - this->dequeueOvercommit.fetch_add(1, std::memory_order_release); - } - } - - return false; - } - - template - bool enqueue_bulk(It itemFirst, size_t count) - { - // First, we need to make sure we have enough room to enqueue all of the elements; - // this means pre-allocating blocks and putting them in the block index (but only if - // all the allocations succeeded). - - // Note that the tailBlock we start off with may not be owned by us any more; - // this happens if it was filled up exactly to the top (setting tailIndex to - // the first index of the next block which is not yet allocated), then dequeued - // completely (putting it on the free list) before we enqueue again. 
- - index_t startTailIndex = this->tailIndex.load(std::memory_order_relaxed); - auto startBlock = this->tailBlock; - Block* firstAllocatedBlock = nullptr; - auto endBlock = this->tailBlock; - - // Figure out how many blocks we'll need to allocate, and do so - size_t blockBaseDiff = ((startTailIndex + count - 1) & ~static_cast(BLOCK_SIZE - 1)) - ((startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1)); - index_t currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); - if (blockBaseDiff > 0) { -#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - debug::DebugLock lock(mutex); -#endif - do { - blockBaseDiff -= static_cast(BLOCK_SIZE); - currentTailIndex += static_cast(BLOCK_SIZE); - - // Find out where we'll be inserting this block in the block index - BlockIndexEntry* idxEntry; - Block* newBlock; - bool indexInserted = false; - auto head = this->headIndex.load(std::memory_order_relaxed); - assert(!details::circular_less_than(currentTailIndex, head)); - bool full = !details::circular_less_than(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head)); - if (full || !(indexInserted = insert_block_index_entry(idxEntry, currentTailIndex)) || (newBlock = this->parent->ConcurrentQueue::template requisition_block()) == nullptr) { - // Index allocation or block allocation failed; revert any other allocations - // and index insertions done so far for this operation - if (indexInserted) { - rewind_block_index_tail(); - idxEntry->value.store(nullptr, std::memory_order_relaxed); - } - currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); - for (auto block = firstAllocatedBlock; block != nullptr; block = block->next) { - currentTailIndex += static_cast(BLOCK_SIZE); - idxEntry = get_block_index_entry_for_index(currentTailIndex); - idxEntry->value.store(nullptr, std::memory_order_relaxed); - rewind_block_index_tail(); - } - 
this->parent->add_blocks_to_free_list(firstAllocatedBlock); - this->tailBlock = startBlock; - - return false; - } - -#if MCDBGQ_TRACKMEM - newBlock->owner = this; -#endif - newBlock->ConcurrentQueue::Block::template reset_empty(); - newBlock->next = nullptr; - - // Insert the new block into the index - idxEntry->value.store(newBlock, std::memory_order_relaxed); - - // Store the chain of blocks so that we can undo if later allocations fail, - // and so that we can find the blocks when we do the actual enqueueing - if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr) { - assert(this->tailBlock != nullptr); - this->tailBlock->next = newBlock; - } - this->tailBlock = newBlock; - endBlock = newBlock; - firstAllocatedBlock = firstAllocatedBlock == nullptr ? newBlock : firstAllocatedBlock; - } while (blockBaseDiff > 0); - } - - // Enqueue, one block at a time - index_t newTailIndex = startTailIndex + static_cast(count); - currentTailIndex = startTailIndex; - this->tailBlock = startBlock; - assert((startTailIndex & static_cast(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr || count == 0); - if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0 && firstAllocatedBlock != nullptr) { - this->tailBlock = firstAllocatedBlock; - } - while (true) { - auto stopIndex = (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); - if (details::circular_less_than(newTailIndex, stopIndex)) { - stopIndex = newTailIndex; - } - if (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst)))) { - while (currentTailIndex != stopIndex) { - new ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++); - } - } - else { - MOODYCAMEL_TRY{ - while (currentTailIndex != stopIndex) { - new ((*this->tailBlock)[currentTailIndex]) T(details::nomove_if<(bool)!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst)))>::eval(*itemFirst)); - 
++currentTailIndex; - ++itemFirst; - } - } - MOODYCAMEL_CATCH(...) { - auto constructedStopIndex = currentTailIndex; - auto lastBlockEnqueued = this->tailBlock; - - if (!details::is_trivially_destructible::value) { - auto block = startBlock; - if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { - block = firstAllocatedBlock; - } - currentTailIndex = startTailIndex; - while (true) { - auto stopIndex = (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); - if (details::circular_less_than(constructedStopIndex, stopIndex)) { - stopIndex = constructedStopIndex; - } - while (currentTailIndex != stopIndex) { - (*block)[currentTailIndex++]->~T(); - } - if (block == lastBlockEnqueued) { - break; - } - block = block->next; - } - } - - currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); - for (auto block = firstAllocatedBlock; block != nullptr; block = block->next) { - currentTailIndex += static_cast(BLOCK_SIZE); - auto idxEntry = get_block_index_entry_for_index(currentTailIndex); - idxEntry->value.store(nullptr, std::memory_order_relaxed); - rewind_block_index_tail(); - } - this->parent->add_blocks_to_free_list(firstAllocatedBlock); - this->tailBlock = startBlock; - MOODYCAMEL_RETHROW; - } - } - - if (this->tailBlock == endBlock) { - assert(currentTailIndex == newTailIndex); - break; - } - this->tailBlock = this->tailBlock->next; - } - this->tailIndex.store(newTailIndex, std::memory_order_release); - return true; - } - - template - size_t dequeue_bulk(It& itemFirst, size_t max) - { - auto tail = this->tailIndex.load(std::memory_order_relaxed); - auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); - auto desiredCount = static_cast(tail - (this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit)); - if (details::circular_less_than(0, desiredCount)) { - desiredCount = desiredCount < max ? 
desiredCount : max; - std::atomic_thread_fence(std::memory_order_acquire); - - auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(desiredCount, std::memory_order_relaxed); - assert(overcommit <= myDequeueCount); - - tail = this->tailIndex.load(std::memory_order_acquire); - auto actualCount = static_cast(tail - (myDequeueCount - overcommit)); - if (details::circular_less_than(0, actualCount)) { - actualCount = desiredCount < actualCount ? desiredCount : actualCount; - if (actualCount < desiredCount) { - this->dequeueOvercommit.fetch_add(desiredCount - actualCount, std::memory_order_release); - } - - // Get the first index. Note that since there's guaranteed to be at least actualCount elements, this - // will never exceed tail. - auto firstIndex = this->headIndex.fetch_add(actualCount, std::memory_order_acq_rel); - - // Iterate the blocks and dequeue - auto index = firstIndex; - BlockIndexHeader* localBlockIndex; - auto indexIndex = get_block_index_index_for_index(index, localBlockIndex); - do { - auto blockStartIndex = index; - auto endIndex = (index & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); - endIndex = details::circular_less_than(firstIndex + static_cast(actualCount), endIndex) ? firstIndex + static_cast(actualCount) : endIndex; - - auto entry = localBlockIndex->index[indexIndex]; - auto block = entry->value.load(std::memory_order_relaxed); - if (MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, details::deref_noexcept(itemFirst) = std::move((*(*block)[index])))) { - while (index != endIndex) { - auto& el = *((*block)[index]); - *itemFirst++ = std::move(el); - el.~T(); - ++index; - } - } - else { - MOODYCAMEL_TRY{ - while (index != endIndex) { - auto& el = *((*block)[index]); - *itemFirst = std::move(el); - ++itemFirst; - el.~T(); - ++index; - } - } - MOODYCAMEL_CATCH(...) 
{ - do { - entry = localBlockIndex->index[indexIndex]; - block = entry->value.load(std::memory_order_relaxed); - while (index != endIndex) { - (*block)[index++]->~T(); - } - - if (block->ConcurrentQueue::Block::template set_many_empty(blockStartIndex, static_cast(endIndex - blockStartIndex))) { -#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - debug::DebugLock lock(mutex); -#endif - entry->value.store(nullptr, std::memory_order_relaxed); - this->parent->add_block_to_free_list(block); - } - indexIndex = (indexIndex + 1) & (localBlockIndex->capacity - 1); - - blockStartIndex = index; - endIndex = (index & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); - endIndex = details::circular_less_than(firstIndex + static_cast(actualCount), endIndex) ? firstIndex + static_cast(actualCount) : endIndex; - } while (index != firstIndex + actualCount); - - MOODYCAMEL_RETHROW; - } - } - if (block->ConcurrentQueue::Block::template set_many_empty(blockStartIndex, static_cast(endIndex - blockStartIndex))) { - { -#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - debug::DebugLock lock(mutex); -#endif - // Note that the set_many_empty above did a release, meaning that anybody who acquires the block - // we're about to free can use it safely since our writes (and reads!) will have happened-before then. 
- entry->value.store(nullptr, std::memory_order_relaxed); - } - this->parent->add_block_to_free_list(block); // releases the above store - } - indexIndex = (indexIndex + 1) & (localBlockIndex->capacity - 1); - } while (index != firstIndex + actualCount); - - return actualCount; - } - else { - this->dequeueOvercommit.fetch_add(desiredCount, std::memory_order_release); - } - } - - return 0; - } - - private: - // The block size must be > 1, so any number with the low bit set is an invalid block base index - static const index_t INVALID_BLOCK_BASE = 1; - - struct BlockIndexEntry - { - std::atomic key; - std::atomic value; - }; - - struct BlockIndexHeader - { - size_t capacity; - std::atomic tail; - BlockIndexEntry* entries; - BlockIndexEntry** index; - BlockIndexHeader* prev; - }; - - template - inline bool insert_block_index_entry(BlockIndexEntry*& idxEntry, index_t blockStartIndex) - { - auto localBlockIndex = blockIndex.load(std::memory_order_relaxed); // We're the only writer thread, relaxed is OK - auto newTail = (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1); - idxEntry = localBlockIndex->index[newTail]; - if (idxEntry->key.load(std::memory_order_relaxed) == INVALID_BLOCK_BASE || - idxEntry->value.load(std::memory_order_relaxed) == nullptr) { - - idxEntry->key.store(blockStartIndex, std::memory_order_relaxed); - localBlockIndex->tail.store(newTail, std::memory_order_release); - return true; - } - - // No room in the old block index, try to allocate another one! 
- if (allocMode == CannotAlloc || !new_block_index()) { - return false; - } - localBlockIndex = blockIndex.load(std::memory_order_relaxed); - newTail = (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1); - idxEntry = localBlockIndex->index[newTail]; - assert(idxEntry->key.load(std::memory_order_relaxed) == INVALID_BLOCK_BASE); - idxEntry->key.store(blockStartIndex, std::memory_order_relaxed); - localBlockIndex->tail.store(newTail, std::memory_order_release); - return true; - } - - inline void rewind_block_index_tail() - { - auto localBlockIndex = blockIndex.load(std::memory_order_relaxed); - localBlockIndex->tail.store((localBlockIndex->tail.load(std::memory_order_relaxed) - 1) & (localBlockIndex->capacity - 1), std::memory_order_relaxed); - } - - inline BlockIndexEntry* get_block_index_entry_for_index(index_t index) const - { - BlockIndexHeader* localBlockIndex; - auto idx = get_block_index_index_for_index(index, localBlockIndex); - return localBlockIndex->index[idx]; - } - - inline size_t get_block_index_index_for_index(index_t index, BlockIndexHeader*& localBlockIndex) const - { -#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - debug::DebugLock lock(mutex); -#endif - index &= ~static_cast(BLOCK_SIZE - 1); - localBlockIndex = blockIndex.load(std::memory_order_acquire); - auto tail = localBlockIndex->tail.load(std::memory_order_acquire); - auto tailBase = localBlockIndex->index[tail]->key.load(std::memory_order_relaxed); - assert(tailBase != INVALID_BLOCK_BASE); - // Note: Must use division instead of shift because the index may wrap around, causing a negative - // offset, whose negativity we want to preserve - auto offset = static_cast(static_cast::type>(index - tailBase) / BLOCK_SIZE); - size_t idx = (tail + offset) & (localBlockIndex->capacity - 1); - assert(localBlockIndex->index[idx]->key.load(std::memory_order_relaxed) == index && localBlockIndex->index[idx]->value.load(std::memory_order_relaxed) != nullptr); - return 
idx; - } - - bool new_block_index() - { - auto prev = blockIndex.load(std::memory_order_relaxed); - size_t prevCapacity = prev == nullptr ? 0 : prev->capacity; - auto entryCount = prev == nullptr ? nextBlockIndexCapacity : prevCapacity; - auto raw = static_cast(Traits::malloc( - sizeof(BlockIndexHeader) + - std::alignment_of::value - 1 + sizeof(BlockIndexEntry) * entryCount + - std::alignment_of::value - 1 + sizeof(BlockIndexEntry*) * nextBlockIndexCapacity)); - if (raw == nullptr) { - return false; - } - - auto header = new (raw)BlockIndexHeader; - auto entries = reinterpret_cast(details::align_for(raw + sizeof(BlockIndexHeader))); - auto index = reinterpret_cast(details::align_for(reinterpret_cast(entries)+sizeof(BlockIndexEntry) * entryCount)); - if (prev != nullptr) { - auto prevTail = prev->tail.load(std::memory_order_relaxed); - auto prevPos = prevTail; - size_t i = 0; - do { - prevPos = (prevPos + 1) & (prev->capacity - 1); - index[i++] = prev->index[prevPos]; - } while (prevPos != prevTail); - assert(i == prevCapacity); - } - for (size_t i = 0; i != entryCount; ++i) { - new (entries + i) BlockIndexEntry; - entries[i].key.store(INVALID_BLOCK_BASE, std::memory_order_relaxed); - index[prevCapacity + i] = entries + i; - } - header->prev = prev; - header->entries = entries; - header->index = index; - header->capacity = nextBlockIndexCapacity; - header->tail.store((prevCapacity - 1) & (nextBlockIndexCapacity - 1), std::memory_order_relaxed); - - blockIndex.store(header, std::memory_order_release); - - nextBlockIndexCapacity <<= 1; - - return true; - } - - private: - size_t nextBlockIndexCapacity; - std::atomic blockIndex; - -#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED - public: - details::ThreadExitListener threadExitListener; - private: -#endif - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - public: - ImplicitProducer* nextImplicitProducer; - private: -#endif - -#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX - mutable debug::DebugMutex mutex; -#endif -#if 
MCDBGQ_TRACKMEM - friend struct MemStats; -#endif - }; - - - ////////////////////////////////// - // Block pool manipulation - ////////////////////////////////// - - void populate_initial_block_list(size_t blockCount) - { - initialBlockPoolSize = blockCount; - if (initialBlockPoolSize == 0) { - initialBlockPool = nullptr; - return; - } - - initialBlockPool = create_array(blockCount); - if (initialBlockPool == nullptr) { - initialBlockPoolSize = 0; - } - for (size_t i = 0; i < initialBlockPoolSize; ++i) { - initialBlockPool[i].dynamicallyAllocated = false; - } - } - - inline Block* try_get_block_from_initial_pool() - { - if (initialBlockPoolIndex.load(std::memory_order_relaxed) >= initialBlockPoolSize) { - return nullptr; - } - - auto index = initialBlockPoolIndex.fetch_add(1, std::memory_order_relaxed); - - return index < initialBlockPoolSize ? (initialBlockPool + index) : nullptr; - } - - inline void add_block_to_free_list(Block* block) - { -#if MCDBGQ_TRACKMEM - block->owner = nullptr; -#endif - freeList.add(block); - } - - inline void add_blocks_to_free_list(Block* block) - { - while (block != nullptr) { - auto next = block->next; - add_block_to_free_list(block); - block = next; - } - } - - inline Block* try_get_block_from_free_list() - { - return freeList.try_get(); - } - - // Gets a free block from one of the memory pools, or allocates a new one (if applicable) - template - Block* requisition_block() - { - auto block = try_get_block_from_initial_pool(); - if (block != nullptr) { - return block; - } - - block = try_get_block_from_free_list(); - if (block != nullptr) { - return block; - } - - if (canAlloc == CanAlloc) { - return create(); - } - - return nullptr; - } - - -#if MCDBGQ_TRACKMEM - public: - struct MemStats { - size_t allocatedBlocks; - size_t usedBlocks; - size_t freeBlocks; - size_t ownedBlocksExplicit; - size_t ownedBlocksImplicit; - size_t implicitProducers; - size_t explicitProducers; - size_t elementsEnqueued; - size_t blockClassBytes; - size_t 
queueClassBytes; - size_t implicitBlockIndexBytes; - size_t explicitBlockIndexBytes; - - friend class ConcurrentQueue; - - private: - static MemStats getFor(ConcurrentQueue* q) - { - MemStats stats = { 0 }; - - stats.elementsEnqueued = q->size_approx(); - - auto block = q->freeList.head_unsafe(); - while (block != nullptr) { - ++stats.allocatedBlocks; - ++stats.freeBlocks; - block = block->freeListNext.load(std::memory_order_relaxed); - } - - for (auto ptr = q->producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { - bool implicit = dynamic_cast(ptr) != nullptr; - stats.implicitProducers += implicit ? 1 : 0; - stats.explicitProducers += implicit ? 0 : 1; - - if (implicit) { - auto prod = static_cast(ptr); - stats.queueClassBytes += sizeof(ImplicitProducer); - auto head = prod->headIndex.load(std::memory_order_relaxed); - auto tail = prod->tailIndex.load(std::memory_order_relaxed); - auto hash = prod->blockIndex.load(std::memory_order_relaxed); - if (hash != nullptr) { - for (size_t i = 0; i != hash->capacity; ++i) { - if (hash->index[i]->key.load(std::memory_order_relaxed) != ImplicitProducer::INVALID_BLOCK_BASE && hash->index[i]->value.load(std::memory_order_relaxed) != nullptr) { - ++stats.allocatedBlocks; - ++stats.ownedBlocksImplicit; - } - } - stats.implicitBlockIndexBytes += hash->capacity * sizeof(typename ImplicitProducer::BlockIndexEntry); - for (; hash != nullptr; hash = hash->prev) { - stats.implicitBlockIndexBytes += sizeof(typename ImplicitProducer::BlockIndexHeader) + hash->capacity * sizeof(typename ImplicitProducer::BlockIndexEntry*); - } - } - for (; details::circular_less_than(head, tail); head += BLOCK_SIZE) { - //auto block = prod->get_block_index_entry_for_index(head); - ++stats.usedBlocks; - } - } - else { - auto prod = static_cast(ptr); - stats.queueClassBytes += sizeof(ExplicitProducer); - auto tailBlock = prod->tailBlock; - bool wasNonEmpty = false; - if (tailBlock != nullptr) { - auto block = tailBlock; 
- do { - ++stats.allocatedBlocks; - if (!block->ConcurrentQueue::Block::template is_empty() || wasNonEmpty) { - ++stats.usedBlocks; - wasNonEmpty = wasNonEmpty || block != tailBlock; - } - ++stats.ownedBlocksExplicit; - block = block->next; - } while (block != tailBlock); - } - auto index = prod->blockIndex.load(std::memory_order_relaxed); - while (index != nullptr) { - stats.explicitBlockIndexBytes += sizeof(typename ExplicitProducer::BlockIndexHeader) + index->size * sizeof(typename ExplicitProducer::BlockIndexEntry); - index = static_cast(index->prev); - } - } - } - - auto freeOnInitialPool = q->initialBlockPoolIndex.load(std::memory_order_relaxed) >= q->initialBlockPoolSize ? 0 : q->initialBlockPoolSize - q->initialBlockPoolIndex.load(std::memory_order_relaxed); - stats.allocatedBlocks += freeOnInitialPool; - stats.freeBlocks += freeOnInitialPool; - - stats.blockClassBytes = sizeof(Block) * stats.allocatedBlocks; - stats.queueClassBytes += sizeof(ConcurrentQueue); - - return stats; - } - }; - - // For debugging only. Not thread-safe. - MemStats getMemStats() - { - return MemStats::getFor(this); - } - private: - friend struct MemStats; -#endif - - - ////////////////////////////////// - // Producer list manipulation - ////////////////////////////////// - - ProducerBase* recycle_or_create_producer(bool isExplicit) - { - bool recycled; - return recycle_or_create_producer(isExplicit, recycled); - } - - ProducerBase* recycle_or_create_producer(bool isExplicit, bool& recycled) - { -#if MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH - debug::DebugLock lock(implicitProdMutex); -#endif - // Try to re-use one first - for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { - if (ptr->inactive.load(std::memory_order_relaxed) && ptr->isExplicit == isExplicit) { - bool expected = true; - if (ptr->inactive.compare_exchange_strong(expected, /* desired */ false, std::memory_order_acquire, std::memory_order_relaxed)) { - // We caught one! 
It's been marked as activated, the caller can have it - recycled = true; - return ptr; - } - } - } - - recycled = false; - return add_producer(isExplicit ? static_cast(create(this)) : create(this)); - } - - ProducerBase* add_producer(ProducerBase* producer) - { - // Handle failed memory allocation - if (producer == nullptr) { - return nullptr; - } - - producerCount.fetch_add(1, std::memory_order_relaxed); - - // Add it to the lock-free list - auto prevTail = producerListTail.load(std::memory_order_relaxed); - do { - producer->next = prevTail; - } while (!producerListTail.compare_exchange_weak(prevTail, producer, std::memory_order_release, std::memory_order_relaxed)); - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - if (producer->isExplicit) { - auto prevTailExplicit = explicitProducers.load(std::memory_order_relaxed); - do { - static_cast(producer)->nextExplicitProducer = prevTailExplicit; - } while (!explicitProducers.compare_exchange_weak(prevTailExplicit, static_cast(producer), std::memory_order_release, std::memory_order_relaxed)); - } - else { - auto prevTailImplicit = implicitProducers.load(std::memory_order_relaxed); - do { - static_cast(producer)->nextImplicitProducer = prevTailImplicit; - } while (!implicitProducers.compare_exchange_weak(prevTailImplicit, static_cast(producer), std::memory_order_release, std::memory_order_relaxed)); - } -#endif - - return producer; - } - - void reown_producers() - { - // After another instance is moved-into/swapped-with this one, all the - // producers we stole still think their parents are the other queue. - // So fix them up! 
- for (auto ptr = producerListTail.load(std::memory_order_relaxed); ptr != nullptr; ptr = ptr->next_prod()) { - ptr->parent = this; - } - } - - - ////////////////////////////////// - // Implicit producer hash - ////////////////////////////////// - - struct ImplicitProducerKVP - { - std::atomic key; - ImplicitProducer* value; // No need for atomicity since it's only read by the thread that sets it in the first place - - ImplicitProducerKVP() { } - - ImplicitProducerKVP(ImplicitProducerKVP&& other) MOODYCAMEL_NOEXCEPT - { - key.store(other.key.load(std::memory_order_relaxed), std::memory_order_relaxed); - value = other.value; - } - - inline ImplicitProducerKVP& operator=(ImplicitProducerKVP&& other) MOODYCAMEL_NOEXCEPT - { - swap(other); - return *this; - } - - inline void swap(ImplicitProducerKVP& other) MOODYCAMEL_NOEXCEPT - { - if (this != &other) { - details::swap_relaxed(key, other.key); - std::swap(value, other.value); - } - } - }; - - template - friend void moodycamel::swap(typename ConcurrentQueue::ImplicitProducerKVP&, typename ConcurrentQueue::ImplicitProducerKVP&) MOODYCAMEL_NOEXCEPT; - - struct ImplicitProducerHash - { - size_t capacity; - ImplicitProducerKVP* entries; - ImplicitProducerHash* prev; - }; - - inline void populate_initial_implicit_producer_hash() - { - if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return; - - implicitProducerHashCount.store(0, std::memory_order_relaxed); - auto hash = &initialImplicitProducerHash; - hash->capacity = INITIAL_IMPLICIT_PRODUCER_HASH_SIZE; - hash->entries = &initialImplicitProducerHashEntries[0]; - for (size_t i = 0; i != INITIAL_IMPLICIT_PRODUCER_HASH_SIZE; ++i) { - initialImplicitProducerHashEntries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed); - } - hash->prev = nullptr; - implicitProducerHash.store(hash, std::memory_order_relaxed); - } - - void swap_implicit_producer_hashes(ConcurrentQueue& other) - { - if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return; - - // Swap (assumes our 
implicit producer hash is initialized) - initialImplicitProducerHashEntries.swap(other.initialImplicitProducerHashEntries); - initialImplicitProducerHash.entries = &initialImplicitProducerHashEntries[0]; - other.initialImplicitProducerHash.entries = &other.initialImplicitProducerHashEntries[0]; - - details::swap_relaxed(implicitProducerHashCount, other.implicitProducerHashCount); - - details::swap_relaxed(implicitProducerHash, other.implicitProducerHash); - if (implicitProducerHash.load(std::memory_order_relaxed) == &other.initialImplicitProducerHash) { - implicitProducerHash.store(&initialImplicitProducerHash, std::memory_order_relaxed); - } - else { - ImplicitProducerHash* hash; - for (hash = implicitProducerHash.load(std::memory_order_relaxed); hash->prev != &other.initialImplicitProducerHash; hash = hash->prev) { - continue; - } - hash->prev = &initialImplicitProducerHash; - } - if (other.implicitProducerHash.load(std::memory_order_relaxed) == &initialImplicitProducerHash) { - other.implicitProducerHash.store(&other.initialImplicitProducerHash, std::memory_order_relaxed); - } - else { - ImplicitProducerHash* hash; - for (hash = other.implicitProducerHash.load(std::memory_order_relaxed); hash->prev != &initialImplicitProducerHash; hash = hash->prev) { - continue; - } - hash->prev = &other.initialImplicitProducerHash; - } - } - - // Only fails (returns nullptr) if memory allocation fails - ImplicitProducer* get_or_add_implicit_producer() - { - // Note that since the data is essentially thread-local (key is thread ID), - // there's a reduced need for fences (memory ordering is already consistent - // for any individual thread), except for the current table itself. - - // Start by looking for the thread ID in the current and all previous hash tables. - // If it's not found, it must not be in there yet, since this same thread would - // have added it previously to one of the tables that we traversed. 
- - // Code and algorithm adapted from http://preshing.com/20130605/the-worlds-simplest-lock-free-hash-table - -#if MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH - debug::DebugLock lock(implicitProdMutex); -#endif - - auto id = details::thread_id(); - auto hashedId = details::hash_thread_id(id); - - auto mainHash = implicitProducerHash.load(std::memory_order_acquire); - for (auto hash = mainHash; hash != nullptr; hash = hash->prev) { - // Look for the id in this hash - auto index = hashedId; - while (true) { // Not an infinite loop because at least one slot is free in the hash table - index &= hash->capacity - 1; - - auto probedKey = hash->entries[index].key.load(std::memory_order_relaxed); - if (probedKey == id) { - // Found it! If we had to search several hashes deep, though, we should lazily add it - // to the current main hash table to avoid the extended search next time. - // Note there's guaranteed to be room in the current hash table since every subsequent - // table implicitly reserves space for all previous tables (there's only one - // implicitProducerHashCount). 
- auto value = hash->entries[index].value; - if (hash != mainHash) { - index = hashedId; - while (true) { - index &= mainHash->capacity - 1; - probedKey = mainHash->entries[index].key.load(std::memory_order_relaxed); - auto empty = details::invalid_thread_id; -#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED - auto reusable = details::invalid_thread_id2; - if ((probedKey == empty && mainHash->entries[index].key.compare_exchange_strong(empty, id, std::memory_order_relaxed)) || - (probedKey == reusable && mainHash->entries[index].key.compare_exchange_strong(reusable, id, std::memory_order_acquire))) { -#else - if ((probedKey == empty && mainHash->entries[index].key.compare_exchange_strong(empty, id, std::memory_order_relaxed))) { -#endif - mainHash->entries[index].value = value; - break; - } - ++index; - } - } - - return value; - } - if (probedKey == details::invalid_thread_id) { - break; // Not in this hash table - } - ++index; - } - } - - // Insert! - auto newCount = 1 + implicitProducerHashCount.fetch_add(1, std::memory_order_relaxed); - while (true) { - if (newCount >= (mainHash->capacity >> 1) && !implicitProducerHashResizeInProgress.test_and_set(std::memory_order_acquire)) { - // We've acquired the resize lock, try to allocate a bigger hash table. - // Note the acquire fence synchronizes with the release fence at the end of this block, and hence when - // we reload implicitProducerHash it must be the most recent version (it only gets changed within this - // locked block). 
- mainHash = implicitProducerHash.load(std::memory_order_acquire); - if (newCount >= (mainHash->capacity >> 1)) { - auto newCapacity = mainHash->capacity << 1; - while (newCount >= (newCapacity >> 1)) { - newCapacity <<= 1; - } - auto raw = static_cast(Traits::malloc(sizeof(ImplicitProducerHash) + std::alignment_of::value - 1 + sizeof(ImplicitProducerKVP) * newCapacity)); - if (raw == nullptr) { - // Allocation failed - auto maxValue = std::numeric_limits::max(); - implicitProducerHashCount.fetch_add(maxValue, std::memory_order_relaxed); - implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); - return nullptr; - } - - auto newHash = new (raw)ImplicitProducerHash; - newHash->capacity = newCapacity; - newHash->entries = reinterpret_cast(details::align_for(raw + sizeof(ImplicitProducerHash))); - for (size_t i = 0; i != newCapacity; ++i) { - new (newHash->entries + i) ImplicitProducerKVP; - newHash->entries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed); - } - newHash->prev = mainHash; - implicitProducerHash.store(newHash, std::memory_order_release); - implicitProducerHashResizeInProgress.clear(std::memory_order_release); - mainHash = newHash; - } - else { - implicitProducerHashResizeInProgress.clear(std::memory_order_release); - } - } - - // If it's < three-quarters full, add to the old one anyway so that we don't have to wait for the next table - // to finish being allocated by another thread (and if we just finished allocating above, the condition will - // always be true) - if (newCount < (mainHash->capacity >> 1) + (mainHash->capacity >> 2)) { - bool recycled; - auto producer = static_cast(recycle_or_create_producer(false, recycled)); - if (producer == nullptr) { - auto maxValue = std::numeric_limits::max(); - implicitProducerHashCount.fetch_add(maxValue, std::memory_order_relaxed); - return nullptr; - } - if (recycled) { - auto maxValue = std::numeric_limits::max(); - implicitProducerHashCount.fetch_add(maxValue, 
std::memory_order_relaxed); - } - -#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED - producer->threadExitListener.callback = &ConcurrentQueue::implicit_producer_thread_exited_callback; - producer->threadExitListener.userData = producer; - details::ThreadExitNotifier::subscribe(&producer->threadExitListener); -#endif - - auto index = hashedId; - while (true) { - index &= mainHash->capacity - 1; - auto probedKey = mainHash->entries[index].key.load(std::memory_order_relaxed); - - auto empty = details::invalid_thread_id; -#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED - auto reusable = details::invalid_thread_id2; - if ((probedKey == empty && mainHash->entries[index].key.compare_exchange_strong(empty, id, std::memory_order_relaxed)) || - (probedKey == reusable && mainHash->entries[index].key.compare_exchange_strong(reusable, id, std::memory_order_acquire))) { -#else - if ((probedKey == empty && mainHash->entries[index].key.compare_exchange_strong(empty, id, std::memory_order_relaxed))) { -#endif - mainHash->entries[index].value = producer; - break; - } - ++index; - } - return producer; - } - - // Hmm, the old hash is quite full and somebody else is busy allocating a new one. - // We need to wait for the allocating thread to finish (if it succeeds, we add, if not, - // we try to allocate ourselves). 
- mainHash = implicitProducerHash.load(std::memory_order_acquire); - } - } - -#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED - void implicit_producer_thread_exited(ImplicitProducer* producer) - { - // Remove from thread exit listeners - details::ThreadExitNotifier::unsubscribe(&producer->threadExitListener); - - // Remove from hash -#if MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH - debug::DebugLock lock(implicitProdMutex); -#endif - auto hash = implicitProducerHash.load(std::memory_order_acquire); - assert(hash != nullptr); // The thread exit listener is only registered if we were added to a hash in the first place - auto id = details::thread_id(); - auto hashedId = details::hash_thread_id(id); - details::thread_id_t probedKey; - - // We need to traverse all the hashes just in case other threads aren't on the current one yet and are - // trying to add an entry thinking there's a free slot (because they reused a producer) - for (; hash != nullptr; hash = hash->prev) { - auto index = hashedId; - do { - index &= hash->capacity - 1; - probedKey = hash->entries[index].key.load(std::memory_order_relaxed); - if (probedKey == id) { - hash->entries[index].key.store(details::invalid_thread_id2, std::memory_order_release); - break; - } - ++index; - } while (probedKey != details::invalid_thread_id); // Can happen if the hash has changed but we weren't put back in it yet, or if we weren't added to this hash in the first place - } - - // Mark the queue as being recyclable - producer->inactive.store(true, std::memory_order_release); - } - - static void implicit_producer_thread_exited_callback(void* userData) - { - auto producer = static_cast(userData); - auto queue = producer->parent; - queue->implicit_producer_thread_exited(producer); - } -#endif - - ////////////////////////////////// - // Utility functions - ////////////////////////////////// - - template - static inline U* create_array(size_t count) - { - assert(count > 0); - auto p = static_cast(Traits::malloc(sizeof(U) * count)); - 
if (p == nullptr) { - return nullptr; - } - - for (size_t i = 0; i != count; ++i) { - new (p + i) U(); - } - return p; - } - - template - static inline void destroy_array(U* p, size_t count) - { - if (p != nullptr) { - assert(count > 0); - for (size_t i = count; i != 0;) { - (p + --i)->~U(); - } - Traits::free(p); - } - } - - template - static inline U* create() - { - auto p = Traits::malloc(sizeof(U)); - return p != nullptr ? new (p)U : nullptr; - } - - template - static inline U* create(A1&& a1) - { - auto p = Traits::malloc(sizeof(U)); - return p != nullptr ? new (p)U(std::forward(a1)) : nullptr; - } - - template - static inline void destroy(U* p) - { - if (p != nullptr) { - p->~U(); - } - Traits::free(p); - } - - private: - std::atomic producerListTail; - std::atomic producerCount; - - std::atomic initialBlockPoolIndex; - Block* initialBlockPool; - size_t initialBlockPoolSize; - -#if !MCDBGQ_USEDEBUGFREELIST - FreeList freeList; -#else - debug::DebugFreeList freeList; -#endif - - std::atomic implicitProducerHash; - std::atomic implicitProducerHashCount; // Number of slots logically used - ImplicitProducerHash initialImplicitProducerHash; - std::array initialImplicitProducerHashEntries; - std::atomic_flag implicitProducerHashResizeInProgress; - - std::atomic nextExplicitConsumerId; - std::atomic globalExplicitConsumerOffset; - -#if MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH - debug::DebugMutex implicitProdMutex; -#endif - -#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG - std::atomic explicitProducers; - std::atomic implicitProducers; -#endif - }; - - - template - ProducerToken::ProducerToken(ConcurrentQueue& queue) - : producer(queue.recycle_or_create_producer(true)) - { - if (producer != nullptr) { - producer->token = this; - } - } - - template - ProducerToken::ProducerToken(BlockingConcurrentQueue& queue) - : producer(reinterpret_cast*>(&queue)->recycle_or_create_producer(true)) - { - if (producer != nullptr) { - producer->token = this; - } - } - - template - 
ConsumerToken::ConsumerToken(ConcurrentQueue& queue) - : itemsConsumedFromCurrent(0), currentProducer(nullptr), desiredProducer(nullptr) - { - initialOffset = queue.nextExplicitConsumerId.fetch_add(1, std::memory_order_release); - lastKnownGlobalOffset = -1; - } - - template - ConsumerToken::ConsumerToken(BlockingConcurrentQueue& queue) - : itemsConsumedFromCurrent(0), currentProducer(nullptr), desiredProducer(nullptr) - { - initialOffset = reinterpret_cast*>(&queue)->nextExplicitConsumerId.fetch_add(1, std::memory_order_release); - lastKnownGlobalOffset = -1; - } - - template - inline void swap(ConcurrentQueue& a, ConcurrentQueue& b) MOODYCAMEL_NOEXCEPT - { - a.swap(b); - } - - inline void swap(ProducerToken& a, ProducerToken& b) MOODYCAMEL_NOEXCEPT - { - a.swap(b); - } - - inline void swap(ConsumerToken& a, ConsumerToken& b) MOODYCAMEL_NOEXCEPT - { - a.swap(b); - } - - template - inline void swap(typename ConcurrentQueue::ImplicitProducerKVP& a, typename ConcurrentQueue::ImplicitProducerKVP& b) MOODYCAMEL_NOEXCEPT - { - a.swap(b); - } - - } - -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif \ No newline at end of file