Committing oneTBB 2021.1-beta09 source code

tbbdev 2020-09-24 16:00:30 +03:00
Parent 05b1dd0789
Commit 8632c27846
122 changed files with 7001 additions and 681 deletions

2
.gitignore (vendored)
View File

@ -53,6 +53,8 @@ __pycache__/
# -------- IDE --------
.vscode/*
.vs/*
out/*
CMakeSettings.json
# -------- CTags --------
.tags

View File

@ -30,6 +30,16 @@ if (TBB_WINDOWS_DRIVER AND (NOT ("${CMAKE_MSVC_RUNTIME_LIBRARY}" STREQUAL MultiT
message(FATAL_ERROR "Enabled TBB_WINDOWS_DRIVER requires CMAKE_MSVC_RUNTIME_LIBRARY to be set to MultiThreaded or MultiThreadedDebug.")
endif()
# Enable support of minimum supported macOS version flag
if (APPLE)
if (NOT CMAKE_CXX_OSX_DEPLOYMENT_TARGET_FLAG)
set(CMAKE_CXX_OSX_DEPLOYMENT_TARGET_FLAG "-mmacosx-version-min=" CACHE STRING "Minimum macOS version flag")
endif()
if (NOT CMAKE_C_OSX_DEPLOYMENT_TARGET_FLAG)
set(CMAKE_C_OSX_DEPLOYMENT_TARGET_FLAG "-mmacosx-version-min=" CACHE STRING "Minimum macOS version flag")
endif()
endif()
# Until CMake 3.4.0 FindThreads.cmake requires C language enabled.
# Enable C language before CXX to avoid possible override of CMAKE_SIZEOF_VOID_P.
if (CMAKE_VERSION VERSION_LESS 3.4)
@ -41,7 +51,9 @@ string(REGEX REPLACE ".*#define TBB_VERSION_MAJOR ([0-9]+).*" "\\1" _tbb_ver_maj
string(REGEX REPLACE ".*#define TBB_VERSION_MINOR ([0-9]+).*" "\\1" _tbb_ver_minor "${_tbb_version_info}")
string(REGEX REPLACE ".*#define TBB_INTERFACE_VERSION ([0-9]+).*" "\\1" TBB_INTERFACE_VERSION "${_tbb_version_info}")
string(REGEX REPLACE ".*#define __TBB_BINARY_VERSION ([0-9]+).*" "\\1" TBB_BINARY_VERSION "${_tbb_version_info}")
set(TBB_BINARY_MINOR_VERSION 0)
set(TBBMALLOC_BINARY_VERSION 2)
set(TBBBIND_BINARY_VERSION 3)
project(TBB VERSION ${_tbb_ver_major}.${_tbb_ver_minor} LANGUAGES CXX)
unset(_tbb_ver_major)
@ -176,7 +188,11 @@ else()
add_subdirectory(src/tbbmalloc)
add_subdirectory(src/tbbmalloc_proxy)
if (TBB_NUMA_SUPPORT)
add_subdirectory(src/tbbbind)
if (APPLE)
message(WARNING "TBBBind build target is disabled due to unsupported environment")
else()
add_subdirectory(src/tbbbind)
endif()
endif()
endif()

View File

@ -15,8 +15,6 @@ TBB_CPF:BOOL - Enable preview features of the library (OFF by default)
TBB_INSTALL_VARS:BOOL - Enable auto-generated vars installation (packages generated by `cpack` and `make install` will also include the vars script) (OFF by default)
```
# Getting Started
## Configure, build and test
### Prerequisites
@ -25,7 +23,7 @@ TBB_INSTALL_VARS:BOOL - Enable auto-generated vars installation(packages generat
### Preparation
In order to perform out-of-source build you have to create a build directory somewhere and go there:
In order to perform an out-of-source build, you have to create a build directory and go there:
```bash
mkdir /tmp/my-build
@ -148,6 +146,50 @@ cmake <options> ..
cpack
```
## TBBConfig - integration of binary packages
TBBConfig is a configuration module used to integrate prebuilt oneTBB. It consists of two files (TBBConfig.cmake and TBBConfigVersion.cmake) and can be used via the [find_package](https://cmake.org/cmake/help/latest/command/find_package.html) command.
How to use this module in your CMake project:
1. Let CMake know where to search for TBBConfig, e.g. specify location of TBBConfig.cmake in `TBB_DIR` (for more details about search paths see [find_package](https://cmake.org/cmake/help/latest/command/find_package.html)).
2. Use [find_package](https://cmake.org/cmake/help/latest/command/find_package.html) to find oneTBB.
3. Use provided variables and/or imported targets (described below) to work with the found oneTBB.
Example:
```cmake
add_executable(foo foo.cpp)
find_package(TBB)
target_link_libraries(foo TBB::tbb)
```
oneTBB components can be passed to [find_package](https://cmake.org/cmake/help/latest/command/find_package.html) after the ``COMPONENTS`` or ``REQUIRED`` keyword.
Use the basic names of components (`tbb`, `tbbmalloc`, etc.).
If no components are specified, the default set is used: `tbb`, `tbbmalloc` and ``tbbmalloc_proxy``.
If `tbbmalloc_proxy` is requested, the `tbbmalloc` component is also added and set as a dependency of `tbbmalloc_proxy`.
TBBConfig creates [imported targets](https://cmake.org/cmake/help/latest/manual/cmake-buildsystem.7.html#imported-targets) as
shared libraries using the following format: `TBB::<component>` (for example, `TBB::tbb`, `TBB::tbbmalloc`).
Set `TBB_FIND_RELEASE_ONLY` to `TRUE` before calling `find_package` in order to search only for the release oneTBB version. This variable helps avoid simultaneous linkage of release and debug oneTBB versions when the CMake configuration is `Debug` but a third-party component depends on the release oneTBB version.
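A minimal sketch combining both mechanisms (the executable name `foo` is illustrative, as in the example above):
```cmake
# Search only for release oneTBB binaries, even in a Debug build configuration.
set(TBB_FIND_RELEASE_ONLY TRUE)

# Request only the components that are actually needed.
find_package(TBB REQUIRED COMPONENTS tbb tbbmalloc)

add_executable(foo foo.cpp)
target_link_libraries(foo TBB::tbb TBB::tbbmalloc)
```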
Variables set during TBB configuration:
Variable | Description
--- | ---
`TBB_FOUND` | oneTBB is found
`TBB_<component>_FOUND` | Specific oneTBB component is found
`TBB_VERSION` | oneTBB version (format: `<major>.<minor>.<patch>.<tweak>`)
`TBB_IMPORTED_TARGETS` | All created oneTBB imported targets (not supported for builds from source code)
Starting from the [oneTBB 2021.1-beta08](https://github.com/oneapi-src/oneTBB/releases/tag/v2021.1-beta08) GitHub release, TBBConfig files in the binary packages are located under `<tbb-root>/lib[/<intel64|ia32>]/cmake/TBB`.
For example, for 64-bit Linux, `TBB_DIR` should be set to `<tbb-root>/lib/intel64/cmake/TBB`.
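A configuration sketch for that layout (the `<tbb-root>` placeholder stands for the unpacked package location):
```bash
cmake -DTBB_DIR=<tbb-root>/lib/intel64/cmake/TBB ..
```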
TBBConfig files are automatically created during the build from source code and can be installed together with the library.
oneTBB also provides a helper function that creates TBBConfig files from predefined templates: see `tbb_generate_config` in `cmake/config_generation.cmake`.
## oneTBB Python Module support
The `TBB4PY_BUILD` CMake option enables building the Python module for oneTBB.
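A minimal configuration sketch, assuming the same out-of-source build directory as in the sections above and an available Python development environment:
```bash
cmake -DTBB4PY_BUILD=ON <options> ..
cmake --build .
```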

View File

@ -0,0 +1,98 @@
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Save current location,
# see for details: https://cmake.org/cmake/help/latest/variable/CMAKE_CURRENT_LIST_DIR.html
set(_tbb_gen_cfg_path ${CMAKE_CURRENT_LIST_DIR})
include(CMakeParseArguments)
function(tbb_generate_config)
set(options HANDLE_SUBDIRS)
set(oneValueArgs INSTALL_DIR
SYSTEM_NAME
SIZEOF_VOID_P # 4 for 32 bit, 8 for 64 bit.
LIB_REL_PATH INC_REL_PATH DLL_REL_PATH
VERSION
TBB_BINARY_VERSION
TBBMALLOC_BINARY_VERSION
TBBMALLOC_PROXY_BINARY_VERSION
TBBBIND_BINARY_VERSION)
cmake_parse_arguments(tbb_gen_cfg "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
get_filename_component(config_install_dir ${tbb_gen_cfg_INSTALL_DIR} ABSOLUTE)
file(MAKE_DIRECTORY ${config_install_dir})
file(TO_CMAKE_PATH "${tbb_gen_cfg_LIB_REL_PATH}" TBB_LIB_REL_PATH)
file(TO_CMAKE_PATH "${tbb_gen_cfg_DLL_REL_PATH}" TBB_DLL_REL_PATH)
file(TO_CMAKE_PATH "${tbb_gen_cfg_INC_REL_PATH}" TBB_INC_REL_PATH)
set(TBB_SIZEOF_VOID_P "${tbb_gen_cfg_SIZEOF_VOID_P}")
set(TBB_VERSION ${tbb_gen_cfg_VERSION})
set(TBB_COMPONENTS_BIN_VERSION "
set(_tbb_bin_version ${tbb_gen_cfg_TBB_BINARY_VERSION})
set(_tbbmalloc_bin_version ${tbb_gen_cfg_TBBMALLOC_BINARY_VERSION})
set(_tbbmalloc_proxy_bin_version ${tbb_gen_cfg_TBBMALLOC_PROXY_BINARY_VERSION})
set(_tbbbind_bin_version ${tbb_gen_cfg_TBBBIND_BINARY_VERSION})
")
if (tbb_gen_cfg_SYSTEM_NAME STREQUAL "Linux")
set(TBB_LIB_PREFIX "lib")
set(TBB_LIB_EXT "so.\${_\${_tbb_component}_bin_version}")
set(TBB_IMPLIB_RELEASE "")
set(TBB_IMPLIB_DEBUG "")
if (tbb_gen_cfg_HANDLE_SUBDIRS)
set(TBB_HANDLE_SUBDIRS "
set(_tbb_subdir gcc4.8)
")
endif()
elseif (tbb_gen_cfg_SYSTEM_NAME STREQUAL "Darwin")
set(TBB_LIB_PREFIX "lib")
set(TBB_LIB_EXT "\${_\${_tbb_component}_bin_version}.dylib")
set(TBB_IMPLIB_RELEASE "")
set(TBB_IMPLIB_DEBUG "")
elseif (tbb_gen_cfg_SYSTEM_NAME STREQUAL "Windows")
set(TBB_LIB_PREFIX "")
set(TBB_LIB_EXT "dll")
set(TBB_COMPILE_DEFINITIONS "
INTERFACE_COMPILE_DEFINITIONS \"__TBB_NO_IMPLICIT_LINKAGE=1\"")
# .lib files installed to TBB_LIB_REL_PATH (e.g. <prefix>/lib);
# .dll files installed to TBB_DLL_REL_PATH (e.g. <prefix>/bin);
# Expand TBB_LIB_REL_PATH here in IMPORTED_IMPLIB property and
# redefine it with TBB_DLL_REL_PATH value to properly fill IMPORTED_LOCATION property in TBBConfig.cmake.in template.
set(TBB_IMPLIB_RELEASE "
IMPORTED_IMPLIB_RELEASE \"\${CMAKE_CURRENT_LIST_DIR}/${TBB_LIB_REL_PATH}/\${_tbb_subdir}/\${_tbb_component}.lib\"")
set(TBB_IMPLIB_DEBUG "
IMPORTED_IMPLIB_DEBUG \"\${CMAKE_CURRENT_LIST_DIR}/${TBB_LIB_REL_PATH}/\${_tbb_subdir}/\${_tbb_component}_debug.lib\"")
set(TBB_LIB_REL_PATH ${TBB_DLL_REL_PATH})
if (tbb_gen_cfg_HANDLE_SUBDIRS)
set(TBB_HANDLE_SUBDIRS "
set(_tbb_subdir vc14)
if (WINDOWS_STORE)
set(_tbb_subdir \${_tbb_subdir}_uwp)
endif()
")
endif()
else()
message(FATAL_ERROR "Unsupported OS name: ${tbb_system_name}")
endif()
configure_file(${_tbb_gen_cfg_path}/templates/TBBConfig.cmake.in ${config_install_dir}/TBBConfig.cmake @ONLY)
configure_file(${_tbb_gen_cfg_path}/templates/TBBConfigVersion.cmake.in ${config_install_dir}/TBBConfigVersion.cmake @ONLY)
endfunction()

View File

@ -12,24 +12,82 @@
# See the License for the specific language governing permissions and
# limitations under the License.
if (NOT HWLOC_FOUND)
find_path(HWLOC_INCLUDE_DIRS
NAMES hwloc.h
HINTS $ENV{INCLUDE} $ENV{CPATH} $ENV{C_INCLUDE_PATH} $ENV{INCLUDE_PATH}
PATH_SUFFIXES "hwloc")
include(FindPackageHandleStandardArgs)
if (UNIX)
set(HWLOC_LIB_NAME hwloc)
elseif(WIN32)
set(HWLOC_LIB_NAME libhwloc)
# Firstly search for HWLOC in config mode (i.e. search for HWLOCConfig.cmake).
find_package(HWLOC QUIET CONFIG)
if (HWLOC_FOUND)
find_package_handle_standard_args(HWLOC CONFIG_MODE)
return()
endif()
find_program(_hwloc_info_exe
NAMES hwloc-info
PATHS ENV HWLOC_ROOT ENV PATH
PATH_SUFFIXES bin
)
if (_hwloc_info_exe)
execute_process(
COMMAND ${_hwloc_info_exe} "--version"
OUTPUT_VARIABLE _hwloc_info_output
OUTPUT_STRIP_TRAILING_WHITESPACE
)
string(REGEX MATCH "([0-9]+.[0-9]+.[0-9]+)$" HWLOC_VERSION "${_hwloc_info_output}")
if ("${HWLOC_VERSION}" STREQUAL "")
unset(HWLOC_VERSION)
endif()
find_library(HWLOC_LIBRARIES
NAMES ${HWLOC_LIB_NAME}
HINTS $ENV{LIBRARY_PATH} $ENV{LD_LIBRARY_PATH} $ENV{DYLD_LIBRARY_PATH})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(HWLOC DEFAULT_MSG HWLOC_LIBRARIES HWLOC_INCLUDE_DIRS)
mark_as_advanced(HWLOC_LIB_NAME)
unset(_hwloc_info_output)
unset(_hwloc_info_exe)
endif()
if (WIN32)
list(APPEND _additional_lib_dirs ENV PATH ENV LIB)
list(APPEND _additional_include_dirs ENV INCLUDE ENV CPATH)
set(_hwloc_lib_name libhwloc)
else()
list(APPEND _additional_lib_dirs ENV LIBRARY_PATH ENV LD_LIBRARY_PATH ENV DYLD_LIBRARY_PATH)
list(APPEND _additional_include_dirs ENV CPATH ENV C_INCLUDE_PATH ENV CPLUS_INCLUDE_PATH ENV INCLUDE_PATH)
set(_hwloc_lib_name hwloc)
endif()
if (NOT TARGET HWLOC::hwloc)
find_path(_hwloc_include_dirs
NAMES hwloc.h
PATHS ${_additional_include_dirs}
PATH_SUFFIXES "hwloc")
if (_hwloc_include_dirs)
add_library(HWLOC::hwloc SHARED IMPORTED)
set_property(TARGET HWLOC::hwloc APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${_hwloc_include_dirs})
find_library(_hwloc_lib_dirs ${_hwloc_lib_name} PATHS ${_additional_lib_dirs})
if (_hwloc_lib_dirs)
if (WIN32)
set_target_properties(HWLOC::hwloc PROPERTIES
IMPORTED_LOCATION "${_hwloc_lib_dirs}"
IMPORTED_IMPLIB "${_hwloc_lib_dirs}")
else()
set_target_properties(HWLOC::hwloc PROPERTIES
IMPORTED_LOCATION "${_hwloc_lib_dirs}")
endif()
set(HWLOC_FOUND 1)
endif()
endif()
endif()
unset(_additional_include_dirs CACHE)
unset(_additional_lib_dirs CACHE)
unset(_hwloc_lib_name CACHE)
find_package_handle_standard_args(
HWLOC
REQUIRED_VARS _hwloc_include_dirs _hwloc_lib_dirs
VERSION_VAR HWLOC_VERSION
)
unset(_hwloc_include_dirs CACHE)
unset(_hwloc_lib_dirs CACHE)
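# Example consumer usage (an illustrative sketch, not part of this module; `my_app` is a
# hypothetical target): after find_package(HWLOC) succeeds, link against the imported target,
# e.g. target_link_libraries(my_app PRIVATE HWLOC::hwloc).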

View File

@ -0,0 +1,57 @@
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
include(${CMAKE_CURRENT_LIST_DIR}/../config_generation.cmake)
# TBBConfig files in TBB-provided packages are expected to be placed into: <tbb-root>/lib[/<intel64|ia32>]/cmake/TBB
set(WIN_LIN_INC_REL_PATH "../../../../include")
set(DARWIN_INC_REL_PATH "../../../include")
set(LIB_REL_PATH "../..")
set(DLL_REL_PATH "../../../../redist") # ia32/intel64 subdir is appended depending on configuration.
# Parse version info
file(READ ${CMAKE_CURRENT_LIST_DIR}/../../include/tbb/version.h _tbb_version_info)
string(REGEX REPLACE ".*#define TBB_VERSION_MAJOR ([0-9]+).*" "\\1" _tbb_ver_major "${_tbb_version_info}")
string(REGEX REPLACE ".*#define TBB_VERSION_MINOR ([0-9]+).*" "\\1" _tbb_ver_minor "${_tbb_version_info}")
string(REGEX REPLACE ".*#define TBB_INTERFACE_VERSION ([0-9]+).*" "\\1" _tbb_interface_ver "${_tbb_version_info}")
string(REGEX REPLACE ".*#define __TBB_BINARY_VERSION ([0-9]+).*" "\\1" TBB_BINARY_VERSION "${_tbb_version_info}")
file(READ ${CMAKE_CURRENT_LIST_DIR}/../../CMakeLists.txt _tbb_cmakelist)
string(REGEX REPLACE ".*TBBMALLOC_BINARY_VERSION ([0-9]+).*" "\\1" TBBMALLOC_BINARY_VERSION "${_tbb_cmakelist}")
set(TBBMALLOC_PROXY_BINARY_VERSION ${TBBMALLOC_BINARY_VERSION})
string(REGEX REPLACE ".*TBBBIND_BINARY_VERSION ([0-9]+).*" "\\1" TBBBIND_BINARY_VERSION "${_tbb_cmakelist}")
# Parse patch and tweak versions from the interface version, e.g. 12014 --> patch version 1, tweak version 4.
math(EXPR _tbb_ver_patch "${_tbb_interface_ver} % 1000 / 10")
math(EXPR _tbb_ver_tweak "${_tbb_interface_ver} % 10")
# Applicable for beta releases.
if (_tbb_ver_patch EQUAL 0)
math(EXPR _tbb_ver_tweak "${_tbb_ver_tweak} + 6")
endif()
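# Worked example using this release's numbers: TBB_INTERFACE_VERSION 12003 gives
# patch = 12003 % 1000 / 10 = 0 and tweak = 12003 % 10 = 3; since the patch is 0 (beta),
# the tweak becomes 3 + 6 = 9, so the generated config version is <major>.<minor>.0.9.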
set(COMMON_ARGS
LIB_REL_PATH ${LIB_REL_PATH}
VERSION ${_tbb_ver_major}.${_tbb_ver_minor}.${_tbb_ver_patch}.${_tbb_ver_tweak}
TBB_BINARY_VERSION ${TBB_BINARY_VERSION}
TBBMALLOC_BINARY_VERSION ${TBBMALLOC_BINARY_VERSION}
TBBMALLOC_PROXY_BINARY_VERSION ${TBBMALLOC_PROXY_BINARY_VERSION}
TBBBIND_BINARY_VERSION ${TBBBIND_BINARY_VERSION}
)
tbb_generate_config(INSTALL_DIR ${INSTALL_DIR}/linux-32 SYSTEM_NAME Linux INC_REL_PATH ${WIN_LIN_INC_REL_PATH} SIZEOF_VOID_P 4 HANDLE_SUBDIRS ${COMMON_ARGS})
tbb_generate_config(INSTALL_DIR ${INSTALL_DIR}/linux-64 SYSTEM_NAME Linux INC_REL_PATH ${WIN_LIN_INC_REL_PATH} SIZEOF_VOID_P 8 HANDLE_SUBDIRS ${COMMON_ARGS})
tbb_generate_config(INSTALL_DIR ${INSTALL_DIR}/windows-32 SYSTEM_NAME Windows INC_REL_PATH ${WIN_LIN_INC_REL_PATH} SIZEOF_VOID_P 4 HANDLE_SUBDIRS DLL_REL_PATH ${DLL_REL_PATH}/ia32 ${COMMON_ARGS})
tbb_generate_config(INSTALL_DIR ${INSTALL_DIR}/windows-64 SYSTEM_NAME Windows INC_REL_PATH ${WIN_LIN_INC_REL_PATH} SIZEOF_VOID_P 8 HANDLE_SUBDIRS DLL_REL_PATH ${DLL_REL_PATH}/intel64 ${COMMON_ARGS})
tbb_generate_config(INSTALL_DIR ${INSTALL_DIR}/darwin SYSTEM_NAME Darwin INC_REL_PATH ${DARWIN_INC_REL_PATH} SIZEOF_VOID_P 8 ${COMMON_ARGS})
message(STATUS "TBBConfig files were created in ${INSTALL_DIR}")

View File

@ -0,0 +1,95 @@
# Copyright (c) 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# It defines the following variables:
# TBB_<component>_FOUND
# TBB_IMPORTED_TARGETS
#
# TBBConfigVersion.cmake defines TBB_VERSION
#
# Initialize to default values
if (NOT TBB_IMPORTED_TARGETS)
set(TBB_IMPORTED_TARGETS "")
endif()
if (NOT TBB_FIND_COMPONENTS)
set(TBB_FIND_COMPONENTS "tbb;tbbmalloc;tbbmalloc_proxy")
foreach (_tbb_component ${TBB_FIND_COMPONENTS})
set(TBB_FIND_REQUIRED_${_tbb_component} 1)
endforeach()
endif()
set(TBB_INTERFACE_VERSION @TBB_INTERFACE_VERSION@)
@TBB_COMPONENTS_BIN_VERSION@
# Add components with internal dependencies: tbbmalloc_proxy -> tbbmalloc
list(FIND TBB_FIND_COMPONENTS tbbmalloc_proxy _tbbmalloc_proxy_ix)
if (NOT _tbbmalloc_proxy_ix EQUAL -1)
list(APPEND TBB_FIND_COMPONENTS tbbmalloc)
list(REMOVE_DUPLICATES TBB_FIND_COMPONENTS)
set(TBB_FIND_REQUIRED_tbbmalloc ${TBB_FIND_REQUIRED_tbbmalloc_proxy})
endif()
unset(_tbbmalloc_proxy_ix)
@TBB_HANDLE_SUBDIRS@
foreach (_tbb_component ${TBB_FIND_COMPONENTS})
set(TBB_${_tbb_component}_FOUND 0)
get_filename_component(_tbb_release_lib "${CMAKE_CURRENT_LIST_DIR}/@TBB_LIB_REL_PATH@/${_tbb_subdir}/@TBB_LIB_PREFIX@${_tbb_component}.@TBB_LIB_EXT@" ABSOLUTE)
if (NOT TBB_FIND_RELEASE_ONLY)
get_filename_component(_tbb_debug_lib "${CMAKE_CURRENT_LIST_DIR}/@TBB_LIB_REL_PATH@/${_tbb_subdir}/@TBB_LIB_PREFIX@${_tbb_component}_debug.@TBB_LIB_EXT@" ABSOLUTE)
endif()
if (EXISTS "${_tbb_release_lib}" OR EXISTS "${_tbb_debug_lib}")
if (NOT TARGET TBB::${_tbb_component})
add_library(TBB::${_tbb_component} SHARED IMPORTED)
get_filename_component(_tbb_current_realpath "${CMAKE_CURRENT_LIST_DIR}" REALPATH)
get_filename_component(_tbb_include_dir "${_tbb_current_realpath}/@TBB_INC_REL_PATH@" ABSOLUTE)
set_target_properties(TBB::${_tbb_component} PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES "${_tbb_include_dir}"@TBB_COMPILE_DEFINITIONS@)
unset(_tbb_current_realpath)
unset(_tbb_include_dir)
if (EXISTS "${_tbb_release_lib}")
set_target_properties(TBB::${_tbb_component} PROPERTIES
IMPORTED_LOCATION_RELEASE "${_tbb_release_lib}"@TBB_IMPLIB_RELEASE@)
set_property(TARGET TBB::${_tbb_component} APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
endif()
if (EXISTS "${_tbb_debug_lib}")
set_target_properties(TBB::${_tbb_component} PROPERTIES
IMPORTED_LOCATION_DEBUG "${_tbb_debug_lib}"@TBB_IMPLIB_DEBUG@)
set_property(TARGET TBB::${_tbb_component} APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG)
endif()
# Add internal dependencies for imported targets: TBB::tbbmalloc_proxy -> TBB::tbbmalloc
if (_tbb_component STREQUAL tbbmalloc_proxy)
set_target_properties(TBB::tbbmalloc_proxy PROPERTIES INTERFACE_LINK_LIBRARIES TBB::tbbmalloc)
endif()
endif()
list(APPEND TBB_IMPORTED_TARGETS TBB::${_tbb_component})
set(TBB_${_tbb_component}_FOUND 1)
elseif (TBB_FIND_REQUIRED AND TBB_FIND_REQUIRED_${_tbb_component})
message(STATUS "Missed required oneTBB component: ${_tbb_component}")
if (TBB_FIND_RELEASE_ONLY)
message(STATUS " ${_tbb_release_lib} must exist.")
else()
message(STATUS " one or both of:\n ${_tbb_release_lib}\n ${_tbb_debug_lib}\n files must exist.")
endif()
set(TBB_FOUND FALSE)
endif()
endforeach()
list(REMOVE_DUPLICATES TBB_IMPORTED_TARGETS)
unset(_tbb_release_lib)
unset(_tbb_debug_lib)

View File

@ -0,0 +1,34 @@
# Copyright (c) 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set(PACKAGE_VERSION @TBB_VERSION@)
if ("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}")
set(PACKAGE_VERSION_COMPATIBLE FALSE)
else()
set(PACKAGE_VERSION_COMPATIBLE TRUE)
if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}")
set(PACKAGE_VERSION_EXACT TRUE)
endif()
endif()
if ("${CMAKE_SIZEOF_VOID_P}" STREQUAL "" OR "@TBB_SIZEOF_VOID_P@" STREQUAL "")
return()
endif()
if (NOT CMAKE_SIZEOF_VOID_P STREQUAL "@TBB_SIZEOF_VOID_P@")
math(EXPR installedBits "8 * @TBB_SIZEOF_VOID_P@")
set(PACKAGE_VERSION "${PACKAGE_VERSION} (${installedBits}bit)")
set(PACKAGE_VERSION_UNSUITABLE TRUE)
endif()

View File

@ -167,19 +167,21 @@ public:
my_table[k].store(nullptr, std::memory_order_relaxed);
});
__TBB_ASSERT(ptr, nullptr);
init_buckets(ptr, sz, is_initial);
my_table[k].store(ptr, std::memory_order_release);
sz <<= 1;// double it to get entire capacity of the container
} else { // the first block
__TBB_ASSERT( k == embedded_block, "Wrong segment index" );
sz = segment_size(first_block);
segment_ptr_type ptr;
segment_ptr_type ptr = nullptr;
try_call( [&] {
ptr = bucket_allocator_traits::allocate(my_allocator, sz - embedded_buckets);
} ).on_exception( [&] {
my_table[k].store(nullptr, std::memory_order_relaxed);
});
__TBB_ASSERT(ptr, nullptr);
init_buckets(ptr, sz - embedded_buckets, is_initial);
ptr -= segment_base(embedded_block);
for(segment_index_type i = embedded_block; i < first_block; i++) // calc the offsets

View File

@ -251,6 +251,7 @@ private:
}; // class functor
void handle_operations( cpq_operation* op_list ) {
call_itt_notify(acquired, this);
cpq_operation* tmp, *pop_list = nullptr;
__TBB_ASSERT(mark == data.size(), NULL);
@ -336,6 +337,7 @@ private:
// batch of operations
if (mark < data.size()) heapify();
__TBB_ASSERT(mark == data.size(), NULL);
call_itt_notify(releasing, this);
}
// Merge unsorted elements into heap

View File

@ -542,6 +542,11 @@ private:
spin_wait_while_eq(embedded_table[i], segment_type(nullptr));
}
// It is possible that the table was extended by a thread allocating first_block; we need to check this.
if (this->get_table() != embedded_table) {
return nullptr;
}
// Allocate long segment table and fill with null pointers
segment_table_type new_segment_table = segment_table_allocator_traits::allocate(base_type::get_allocator(), this->pointers_per_long_table);
// Copy segment pointers from the embedded table
@ -553,7 +558,6 @@ private:
segment_table_allocator_traits::construct(base_type::get_allocator(), &new_segment_table[segment_index], nullptr);
}
return new_segment_table;
}
@ -589,6 +593,11 @@ private:
for (size_type i = 1; i < first_block; ++i) {
table[i].store(new_segment, std::memory_order_release);
}
// Other threads can wait on a snapshot of an embedded table, need to fill it.
for (size_type i = 1; i < first_block && i < this->pointers_per_embedded_table; ++i) {
this->my_embedded_table[i].store(new_segment, std::memory_order_release);
}
} else if (new_segment != this->segment_allocation_failure_tag) {
// Deallocate the memory
segment_element_allocator_traits::deallocate(segment_allocator, new_segment, first_block_size);
@ -910,7 +919,7 @@ private:
this->my_size.store(0, std::memory_order_relaxed);
}
inline static bool incompact_predicate( size_type size ) {
static bool incompact_predicate( size_type size ) {
// memory page size
const size_type page_size = 4096;
return size < page_size || ((size - 1) % page_size < page_size / 2 && size < page_size * 128);

View File

@ -36,6 +36,16 @@
#define __TBB_EXPORTED_METHOD
#endif
#if defined(_MSVC_LANG)
#define __TBB_LANG _MSVC_LANG
#else
#define __TBB_LANG __cplusplus
#endif // _MSVC_LANG
#define __TBB_CPP14_PRESENT (__TBB_LANG >= 201402L)
#define __TBB_CPP17_PRESENT (__TBB_LANG >= 201703L)
#define __TBB_CPP20_PRESENT (__TBB_LANG >= 202002L)
#if __INTEL_COMPILER || _MSC_VER
#define __TBB_NOINLINE(decl) __declspec(noinline) decl
#elif __GNUC__
@ -123,7 +133,11 @@
#endif // TBB_USE_ASSERT
#ifndef TBB_USE_PROFILING_TOOLS
#define TBB_USE_PROFILING_TOOLS TBB_USE_DEBUG
#if TBB_USE_DEBUG
#define TBB_USE_PROFILING_TOOLS 2
#else // TBB_USE_DEBUG
#define TBB_USE_PROFILING_TOOLS 0
#endif // TBB_USE_DEBUG
#endif // TBB_USE_PROFILING_TOOLS
// Exceptions support cases
@ -201,24 +215,24 @@
/** Library features presence macros **/
#define __TBB_CPP14_INTEGER_SEQUENCE_PRESENT (__cplusplus >= 201402L)
#define __TBB_CPP17_INVOKE_RESULT_PRESENT (__cplusplus >= 201703L)
#define __TBB_CPP14_INTEGER_SEQUENCE_PRESENT (__TBB_LANG >= 201402L)
#define __TBB_CPP17_INVOKE_RESULT_PRESENT (__TBB_LANG >= 201703L)
#if __INTEL_COMPILER && (!_MSC_VER || __INTEL_CXX11_MOVE__)
#define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__cplusplus >= 201402L)
#define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__INTEL_COMPILER > 2021 && __cplusplus >= 201703L)
#define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__TBB_LANG >= 201402L)
#define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__INTEL_COMPILER > 2021 && __TBB_LANG >= 201703L)
#elif __clang__
#define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__has_feature(cxx_variable_templates))
#define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__has_feature(__cpp_deduction_guides))
#elif __GNUC__
#define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__cplusplus >= 201402L && __TBB_GCC_VERSION >= 50000)
#define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__TBB_LANG >= 201402L && __TBB_GCC_VERSION >= 50000)
#define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__cpp_deduction_guides >= 201606L)
#elif _MSC_VER
#define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (_MSC_FULL_VER >= 190023918 && (!__INTEL_COMPILER || __INTEL_COMPILER >= 1700))
#define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (_MSVC_LANG >= 201703L && _MSC_VER >= 1914)
#define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__TBB_LANG >= 201703L && _MSC_VER >= 1914)
#else
#define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__cplusplus >= 201402L)
#define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__cplusplus >= 201703L)
#define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__TBB_LANG >= 201402L)
#define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__TBB_LANG >= 201703L)
#endif
// GCC4.8 on RHEL7 does not support std::get_new_handler
@ -226,12 +240,12 @@
// GCC4.8 on RHEL7 does not support std::is_trivially_copyable
#define __TBB_CPP11_TYPE_PROPERTIES_PRESENT (_LIBCPP_VERSION || _MSC_VER >= 1700 || (__TBB_GLIBCXX_VERSION >= 50000 && __GXX_EXPERIMENTAL_CXX0X__))
#define __TBB_CPP17_MEMORY_RESOURCE_PRESENT (_MSC_VER >= 1913 && (_MSVC_LANG > 201402L || __cplusplus > 201402L) || \
__GLIBCXX__ && __GNUC__ >= 9 && __cplusplus >= 201703L)
#define __TBB_CPP17_MEMORY_RESOURCE_PRESENT (_MSC_VER >= 1913 && (__TBB_LANG > 201402L) || \
__TBB_GLIBCXX_VERSION >= 90000 && __TBB_LANG >= 201703L)
#define __TBB_CPP17_HW_INTERFERENCE_SIZE_PRESENT (_MSC_VER >= 1911)
#define __TBB_CPP17_LOGICAL_OPERATIONS_PRESENT (__cplusplus >= 201703L)
#define __TBB_CPP17_ALLOCATOR_IS_ALWAYS_EQUAL_PRESENT (__cplusplus >= 201703L)
#define __TBB_CPP17_IS_SWAPPABLE_PRESENT (__cplusplus >= 201703L)
#define __TBB_CPP17_LOGICAL_OPERATIONS_PRESENT (__TBB_LANG >= 201703L)
#define __TBB_CPP17_ALLOCATOR_IS_ALWAYS_EQUAL_PRESENT (__TBB_LANG >= 201703L)
#define __TBB_CPP17_IS_SWAPPABLE_PRESENT (__TBB_LANG >= 201703L)
#define __TBB_RESUMABLE_TASKS (!__TBB_WIN8UI_SUPPORT && !__ANDROID__)
@ -265,7 +279,7 @@
#define __TBB_GCC_WARNING_IGNORED_ATTRIBUTES_PRESENT (__TBB_GCC_VERSION >= 60100)
#endif
#define __TBB_CPP17_FALLTHROUGH_PRESENT (__cplusplus >= 201703L)
#define __TBB_CPP17_FALLTHROUGH_PRESENT (__TBB_LANG >= 201703L)
#define __TBB_FALLTHROUGH_PRESENT (__TBB_GCC_VERSION >= 70000 && !__INTEL_COMPILER)
#if __TBB_CPP17_FALLTHROUGH_PRESENT
@ -292,32 +306,34 @@
#define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1
#endif
#ifndef __TBB_TASK_GROUP_CONTEXT
#define __TBB_TASK_GROUP_CONTEXT 1
#endif /* __TBB_TASK_GROUP_CONTEXT */
#if (__TBB_BUILD || __TBBMALLOC_BUILD || __TBBMALLOCPROXY_BUILD || __TBBBIND_BUILD) && !defined(__TBB_NO_IMPLICIT_LINKAGE)
#define __TBB_NO_IMPLICIT_LINKAGE 1
#endif
#if _MSC_VER
#if !__TBB_NO_IMPLICIT_LINKAGE
#ifdef _DEBUG
#pragma comment(lib, "tbb_debug.lib")
#else
#pragma comment(lib, "tbb.lib")
#endif
#endif
#endif
#ifndef __TBB_SCHEDULER_OBSERVER
#define __TBB_SCHEDULER_OBSERVER 1
#endif /* __TBB_SCHEDULER_OBSERVER */
#ifndef __TBB_FP_CONTEXT
#define __TBB_FP_CONTEXT __TBB_TASK_GROUP_CONTEXT
#define __TBB_FP_CONTEXT 1
#endif /* __TBB_FP_CONTEXT */
#if __TBB_FP_CONTEXT && !__TBB_TASK_GROUP_CONTEXT
#error __TBB_FP_CONTEXT requires __TBB_TASK_GROUP_CONTEXT to be enabled
#endif
#define __TBB_RECYCLE_TO_ENQUEUE __TBB_BUILD // keep non-official
#ifndef __TBB_ARENA_OBSERVER
#define __TBB_ARENA_OBSERVER __TBB_SCHEDULER_OBSERVER
#endif /* __TBB_ARENA_OBSERVER */
#if TBB_USE_EXCEPTIONS && !__TBB_TASK_GROUP_CONTEXT
#error TBB_USE_EXCEPTIONS requires __TBB_TASK_GROUP_CONTEXT to be enabled
#endif
#if TBB_PREVIEW_NUMA_SUPPORT || __TBB_BUILD
#define __TBB_NUMA_SUPPORT 1
#endif
@ -351,7 +367,7 @@
// instantiation site, which is too late for suppression of the corresponding messages for internal
// stuff.
#if !defined(__INTEL_COMPILER) && (!defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) || (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0))
#if (__cplusplus >= 201402L)
#if (__TBB_LANG >= 201402L)
#define __TBB_DEPRECATED [[deprecated]]
#define __TBB_DEPRECATED_MSG(msg) [[deprecated(msg)]]
#elif _MSC_VER
@ -379,7 +395,7 @@
#define __TBB_DEPRECATED_VERBOSE_MSG(msg)
#endif // (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0)
#if (!defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) || (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0)) && !(__cplusplus >= 201103L || _MSC_VER >= 1900)
#if (!defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) || (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0)) && !(__TBB_LANG >= 201103L || _MSC_VER >= 1900)
#pragma message("TBB Warning: Support for C++98/03 is deprecated. Please use the compiler that supports C++11 features at least.")
#endif
@ -402,7 +418,7 @@
**/
// Some STL containers not support allocator traits in old GCC versions
#if __GXX_EXPERIMENTAL_CXX0X__ && __TBB_GLIBCXX_VERSION <= 50000
#if __GXX_EXPERIMENTAL_CXX0X__ && __TBB_GLIBCXX_VERSION <= 50301
#define TBB_ALLOCATOR_TRAITS_BROKEN 1
#endif

View File

@ -21,6 +21,7 @@
#include <new>
#include <exception>
#include <stdexcept>
namespace tbb {
namespace detail {
@ -36,6 +37,7 @@ enum class exception_id {
invalid_load_factor,
invalid_key,
bad_tagged_msg_cast,
unsafe_wait,
last_entry
};
} // namespace d0
@ -59,6 +61,14 @@ public:
const char* __TBB_EXPORTED_METHOD what() const noexcept(true) override;
};
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
//! Exception for impossible finalization of task_scheduler_handle
class unsafe_wait : public std::runtime_error {
public:
unsafe_wait(const char* msg) : std::runtime_error(msg) {}
};
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
//! Gathers all throw operators in one place.
/** Its purpose is to minimize code bloat that can be caused by throw operators
scattered in multiple places, especially in templates. **/

View File

@ -270,37 +270,22 @@ public:
void wait_for_all() {
cancelled = false;
caught_exception = false;
#if TBB_USE_EXCEPTIONS
// TODO revamp: reuse try_call instead
try {
#endif
my_task_arena->execute(
[this]() {
wait(my_wait_context, *my_context);
}
);
#if __TBB_TASK_GROUP_CONTEXT
cancelled = my_context->is_group_execution_cancelled();
#endif
#if TBB_USE_EXCEPTIONS
}
catch (...) {
my_context->reset();
caught_exception = true;
cancelled = true;
throw;
}
#endif
#if __TBB_TASK_GROUP_CONTEXT
// TODO: the "if" condition below is just a work-around to support the concurrent wait
// mode. The cancellation and exception mechanisms are still broken in this mode.
// Consider using task group not to re-implement the same functionality.
if (!(my_context->traits() & task_group_context::concurrent_wait)) {
my_context->reset(); // consistent with behavior in catch()
#endif
#if __TBB_TASK_GROUP_CONTEXT
}
#endif
try_call([this] {
my_task_arena->execute([this] {
wait(my_wait_context, *my_context);
});
cancelled = my_context->is_group_execution_cancelled();
}).on_exception([this] {
my_context->reset();
caught_exception = true;
cancelled = true;
});
// TODO: the "if" condition below is just a work-around to support the concurrent wait
// mode. The cancellation and exception mechanisms are still broken in this mode.
// Consider using task group not to re-implement the same functionality.
if (!(my_context->traits() & task_group_context::concurrent_wait)) {
my_context->reset(); // consistent with behavior in catch()
}
}
#if TODO_REVAMP
@ -346,9 +331,7 @@ public:
private:
wait_context my_wait_context;
#if __TBB_TASK_GROUP_CONTEXT
task_group_context *my_context;
#endif
bool own_context;
bool cancelled;
bool caught_exception;

View File

@ -178,6 +178,7 @@ public:
self()->deallocate_segment(new_segment, seg_index);
}
}
segment = table[seg_index].load(std::memory_order_acquire);
__TBB_ASSERT(segment != nullptr, "If create_segment returned nullptr, the element should be stored in the table");
}
@ -277,7 +278,14 @@ public:
if (start_index <= embedded_table_size) {
try_call([&] {
table = self()->allocate_long_table(my_embedded_table, start_index);
my_segment_table.store(table, std::memory_order_release);
// It is possible that the table was extended by the thread that allocated first_block.
// In this case it is necessary to re-read the current table.
if (table) {
my_segment_table.store(table, std::memory_order_release);
} else {
table = my_segment_table.load(std::memory_order_acquire);
}
}).on_exception([&] {
my_segment_table_allocation_failed.store(true, std::memory_order_relaxed);
});
@ -516,7 +524,7 @@ protected:
return segment[index];
}
inline void assign_first_block_if_necessary(segment_index_type index) {
void assign_first_block_if_necessary(segment_index_type index) {
size_type zero = 0;
if (this->my_first_block.load(std::memory_order_relaxed) == zero) {
this->my_first_block.compare_exchange_strong(zero, index);

View File

@ -20,6 +20,7 @@
#include "_config.h"
#include "_assert.h"
#include "../profiling.h"
#include <cstddef>
#include <cstdint>
#include <atomic>
@ -83,12 +84,16 @@ public:
template <typename Type>
void deallocate(Type* ptr, const execution_data& ed) {
call_itt_task_notify(destroy, ptr);
__TBB_ASSERT(m_pool != nullptr, "Pool must be valid for deallocate call");
r1::deallocate(*m_pool, ptr, sizeof(Type), ed);
}
template <typename Type>
void deallocate(Type* ptr) {
call_itt_task_notify(destroy, ptr);
__TBB_ASSERT(m_pool != nullptr, "Pool must be valid for deallocate call");
r1::deallocate(*m_pool, ptr, sizeof(Type));
}

View File

@ -22,6 +22,8 @@
#include "_template_helpers.h"
#include "_small_object_pool.h"
#include "../profiling.h"
#include <cstddef>
#include <cstdint>
#include <climits>
@ -104,6 +106,7 @@ class wait_context {
}
void add_reference(std::int64_t delta) {
call_itt_task_notify(releasing, this);
std::uint64_t r = m_ref_count.fetch_add(delta) + delta;
__TBB_ASSERT_EX((r & overflow_mask) == 0, "Overflow is detected");
if (r == abandon_wait_flag) {
@ -178,9 +181,28 @@ inline bool is_stolen(const execution_data& ed) {
return original_slot(ed) != execution_slot(ed);
}
using r1::spawn;
using r1::execute_and_wait;
using r1::wait;
inline void spawn(task& t, task_group_context& ctx) {
call_itt_task_notify(releasing, &t);
r1::spawn(t, ctx);
}
inline void spawn(task& t, task_group_context& ctx, slot_id id) {
call_itt_task_notify(releasing, &t);
r1::spawn(t, ctx, id);
}
inline void execute_and_wait(task& t, task_group_context& t_ctx, wait_context& wait_ctx, task_group_context& w_ctx) {
r1::execute_and_wait(t, t_ctx, wait_ctx, w_ctx);
call_itt_task_notify(acquired, &wait_ctx);
call_itt_task_notify(destroy, &wait_ctx);
}
inline void wait(wait_context& wait_ctx, task_group_context& ctx) {
r1::wait(wait_ctx, ctx);
call_itt_task_notify(acquired, &wait_ctx);
call_itt_task_notify(destroy, &wait_ctx);
}
using r1::current_context;
class task_traits {
@ -205,7 +227,7 @@ public:
static task_group_context* current_execute_data() { return current_context(); }
private:
std::uint64_t m_reserved[5];
std::uint64_t m_reserved[5]{};
// Reserve one pointer-sized object in derived class
// static_assert(sizeof(task) == 64 - 8);

View File

@ -340,13 +340,9 @@ class enumerable_thread_specific_iterator
typename Container::size_type my_index;
mutable Value *my_value;
template<typename C, typename T>
friend enumerable_thread_specific_iterator<C,T>
operator+( std::ptrdiff_t offset, const enumerable_thread_specific_iterator<C,T>& v );
template<typename C, typename T, typename U>
friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i,
const enumerable_thread_specific_iterator<C,U>& j );
friend bool operator==( const enumerable_thread_specific_iterator<C, T>& i,
const enumerable_thread_specific_iterator<C, U>& j );
template<typename C, typename T, typename U>
friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i,
@ -381,6 +377,10 @@ public:
return enumerable_thread_specific_iterator(*my_container, my_index + offset);
}
friend enumerable_thread_specific_iterator operator+( std::ptrdiff_t offset, enumerable_thread_specific_iterator v ) {
return enumerable_thread_specific_iterator(*v.my_container, v.my_index + offset);
}
enumerable_thread_specific_iterator &operator+=( std::ptrdiff_t offset ) {
my_index += offset;
my_value = nullptr;
@ -407,7 +407,7 @@ public:
}
Value& operator[]( std::ptrdiff_t k ) const {
return (*my_container)[my_index + k].value;
return *(*my_container)[my_index + k].value();
}
Value* operator->() const {return &operator*();}
@ -441,16 +441,10 @@ public:
}
};
template<typename Container, typename T>
enumerable_thread_specific_iterator<Container,T>
operator+( std::ptrdiff_t offset, const enumerable_thread_specific_iterator<Container,T>& v ) {
return enumerable_thread_specific_iterator<Container,T>( v.my_container, v.my_index + offset );
}
template<typename Container, typename T, typename U>
bool operator==( const enumerable_thread_specific_iterator<Container,T>& i,
const enumerable_thread_specific_iterator<Container,U>& j ) {
return i.my_index==j.my_index && i.my_container == j.my_container;
bool operator==( const enumerable_thread_specific_iterator<Container, T>& i,
const enumerable_thread_specific_iterator<Container, U>& j ) {
return i.my_index == j.my_index && i.my_container == j.my_container;
}
template<typename Container, typename T, typename U>

View File

@ -18,16 +18,23 @@
#define __TBB_global_control_H
#include "detail/_config.h"
#include "detail/_assert.h"
#include "detail/_template_helpers.h"
#include "detail/_exception.h"
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
#include <new> // std::nothrow_t
#endif
#include <cstddef>
namespace tbb {
namespace detail {
namespace d1 {
class global_control;
class global_control;
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
class task_scheduler_handle;
#endif
}
namespace r1 {
@ -35,6 +42,13 @@ void __TBB_EXPORTED_FUNC create(d1::global_control&);
void __TBB_EXPORTED_FUNC destroy(d1::global_control&);
std::size_t __TBB_EXPORTED_FUNC global_control_active_value(int);
struct global_control_impl;
struct control_storage_comparator;
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
void release_impl(d1::task_scheduler_handle& handle);
bool finalize_impl(d1::task_scheduler_handle& handle);
void __TBB_EXPORTED_FUNC get(d1::task_scheduler_handle&);
bool __TBB_EXPORTED_FUNC finalize(d1::task_scheduler_handle&, std::intptr_t mode);
#endif
}
namespace d1 {
@ -44,11 +58,18 @@ public:
enum parameter {
max_allowed_parallelism,
thread_stack_size,
terminate_on_exception,
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
scheduler_handle, // not a public parameter
#else
reserved1, // not a public parameter
#endif
parameter_max // insert new parameters above this point
};
global_control(parameter p, std::size_t value) :
my_value(value), my_next(NULL), my_param(p) {
my_value(value), my_reserved(), my_param(p) {
suppress_unused_warning(my_reserved);
__TBB_ASSERT(my_param < parameter_max, "Invalid parameter");
#if __TBB_WIN8UI_SUPPORT && (_WIN32_WINNT < 0x0A00)
// For Windows 8 Store* apps it's impossible to set stack size
@ -64,7 +85,7 @@ public:
}
~global_control() {
__TBB_ASSERT(my_param < parameter_max, "Invalid parameter. Probably the object was corrupted.");
__TBB_ASSERT(my_param < parameter_max, "Invalid parameter");
#if __TBB_WIN8UI_SUPPORT && (_WIN32_WINNT < 0x0A00)
// For Windows 8 Store* apps it's impossible to set stack size
if (my_param==thread_stack_size)
@ -77,19 +98,88 @@ public:
__TBB_ASSERT(p < parameter_max, "Invalid parameter");
return r1::global_control_active_value((int)p);
}
private:
std::size_t my_value;
global_control *my_next;
std::size_t my_value;
std::intptr_t my_reserved; // TODO: substitution of global_control* not to break backward compatibility
parameter my_param;
friend struct r1::global_control_impl;
friend struct r1::control_storage_comparator;
};
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
//! Finalization options.
//! Outside of the class to avoid extensive friendship.
static constexpr std::intptr_t release_nothrowing = 0;
static constexpr std::intptr_t finalize_nothrowing = 1;
static constexpr std::intptr_t finalize_throwing = 2;
//! User side wrapper for a task scheduler lifetime control object
class task_scheduler_handle {
public:
task_scheduler_handle() = default;
~task_scheduler_handle() {
release(*this);
}
//! No copy
task_scheduler_handle(const task_scheduler_handle& other) = delete;
task_scheduler_handle& operator=(const task_scheduler_handle& other) = delete;
//! Move only
task_scheduler_handle(task_scheduler_handle&& other) noexcept : m_ctl{nullptr} {
std::swap(m_ctl, other.m_ctl);
}
task_scheduler_handle& operator=(task_scheduler_handle&& other) noexcept {
std::swap(m_ctl, other.m_ctl);
return *this;
};
//! Get an active instance of task_scheduler_handle
static task_scheduler_handle get() {
task_scheduler_handle handle;
r1::get(handle);
return handle;
}
//! Release the reference and deactivate handle
static void release(task_scheduler_handle& handle) {
if (handle.m_ctl != nullptr) {
r1::finalize(handle, release_nothrowing);
}
}
private:
friend void r1::release_impl(task_scheduler_handle& handle);
friend bool r1::finalize_impl(task_scheduler_handle& handle);
friend void __TBB_EXPORTED_FUNC r1::get(task_scheduler_handle&);
global_control* m_ctl{nullptr};
};
#if TBB_USE_EXCEPTIONS
//! Waits for worker threads termination. Throws exception on error.
inline void finalize(task_scheduler_handle& handle) {
r1::finalize(handle, finalize_throwing);
}
#endif
//! Waits for worker threads termination. Returns false on error.
inline bool finalize(task_scheduler_handle& handle, const std::nothrow_t&) noexcept {
return r1::finalize(handle, finalize_nothrowing);
}
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
} // namespace d1
} // namespace detail
inline namespace v1 {
using detail::d1::global_control;
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
using detail::d1::finalize;
using detail::d1::task_scheduler_handle;
using detail::r1::unsafe_wait;
#endif
} // namespace v1
} // namespace tbb
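// Usage sketch (illustrative only; assumes __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE is enabled
// and this header is included via its public path):
//
//   tbb::task_scheduler_handle handle = tbb::task_scheduler_handle::get();
//   // ... run parallel work ...
//   if (!tbb::finalize(handle, std::nothrow)) {
//       // worker threads could not be waited for; handle the error
//   }
//
// With TBB_USE_EXCEPTIONS, the throwing overload tbb::finalize(handle) is expected to report
// the same failure by throwing tbb::unsafe_wait.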

View File

@ -44,6 +44,7 @@ struct function_invoker : public task {
task* execute(execution_data& ed) override {
my_function();
parent_wait_ctx.release(ed);
call_itt_task_notify(destroy, this);
return nullptr;
}
@ -94,7 +95,9 @@ struct invoke_subroot_task : public task {
void release(const execution_data& ed) {
__TBB_ASSERT(ref_count > 0, nullptr);
call_itt_task_notify(releasing, this);
if( --ref_count == 0 ) {
call_itt_task_notify(acquired, this);
finalize(ed);
}
}

View File

@ -85,6 +85,7 @@ public:
}
private:
sum_node_type* release_parent() {
call_itt_task_notify(releasing, m_parent);
if (m_parent) {
auto parent = m_parent;
m_parent = nullptr;
@ -176,6 +177,7 @@ public:
}
private:
sum_node* release_parent() {
call_itt_task_notify(releasing, m_parent);
if (m_parent) {
auto parent = m_parent;
m_parent = nullptr;
@ -291,6 +293,7 @@ public:
}
private:
finish_scan* release_parent() {
call_itt_task_notify(releasing, m_parent);
if (m_parent) {
auto parent = m_parent;
m_parent = nullptr;
@ -331,6 +334,7 @@ private:
wait_context& m_wait_context;
finish_pass1_type* release_parent() {
call_itt_task_notify(releasing, m_parent);
if (m_parent) {
auto parent = m_parent;
m_parent = nullptr;

View File

@ -161,6 +161,7 @@ template<typename TreeNodeType>
void fold_tree(node* n, const execution_data& ed) {
for (;;) {
__TBB_ASSERT(n->m_ref_count.load(std::memory_order_relaxed) > 0, "The refcount must be positive.");
call_itt_task_notify(releasing, n);
if (--n->m_ref_count > 0) {
return;
}
@ -169,6 +170,7 @@ void fold_tree(node* n, const execution_data& ed) {
break;
};
call_itt_task_notify(acquired, n);
TreeNodeType* self = static_cast<TreeNodeType*>(n);
self->join(ed.context);
self->m_allocator.delete_object(self, ed);

View File

@ -65,7 +65,7 @@ inline namespace d0 {
namespace tbb {
namespace detail {
namespace d1 {
enum notify_type {prepare=0, cancel, acquired, releasing};
enum notify_type {prepare=0, cancel, acquired, releasing, destroy};
enum itt_domain_enum { ITT_DOMAIN_FLOW=0, ITT_DOMAIN_MAIN=1, ITT_DOMAIN_ALGO=2, ITT_NUM_DOMAINS };
} // namespace d1
@ -119,6 +119,15 @@ namespace d1 {
#endif // WIN
}
// Distinguish notifications on tasks to reduce overheads
#if TBB_USE_PROFILING_TOOLS == 2
inline void call_itt_task_notify(d1::notify_type t, void *ptr) {
r1::call_itt_notify((int)t, ptr);
}
#else
inline void call_itt_task_notify(d1::notify_type, void *) {}
#endif // TBB_USE_PROFILING_TOOLS
inline void call_itt_notify(d1::notify_type t, void *ptr) {
r1::call_itt_notify((int)t, ptr);
}
@ -179,8 +188,11 @@ namespace d1 {
}
#else
inline void create_itt_sync(void* /*ptr*/, const char* /*objtype*/, const char* /*objname*/) {}
inline void call_itt_notify(notify_type /*t*/, void* /*ptr*/) {}
inline void call_itt_task_notify(notify_type /*t*/, void* /*ptr*/) {}
inline void itt_make_task_group( itt_domain_enum /*domain*/, void* /*group*/, unsigned long long /*group_extra*/,
void* /*parent*/, unsigned long long /*parent_extra*/, string_resource_index /*name_index*/ ) {}
@ -206,13 +218,13 @@ namespace d1 {
template <typename T>
inline void store_with_release_itt(std::atomic<T>& dst, T src) {
call_itt_notify(releasing, &dst);
call_itt_task_notify(releasing, &dst);
dst.store(src, std::memory_order_release);
}
template <typename T>
inline T load_with_acquire_itt(const std::atomic<T>& src) {
call_itt_notify(acquired, &src);
call_itt_task_notify(acquired, &src);
return src.load(std::memory_order_acquire);
}
} // namespace d1

View File

@ -210,8 +210,8 @@ class task_arena : public task_arena_base {
finalize(ed);
return nullptr;
}
task* cancel(execution_data& ed) override {
finalize(ed);
task* cancel(execution_data&) override {
__TBB_ASSERT_RELEASE(false, "Unhandled exception from enqueue task is caught");
return nullptr;
}
public:
@ -397,6 +397,7 @@ public:
friend void submit(task& t, task_arena& ta, task_group_context& ctx, bool as_critical) {
__TBB_ASSERT(ta.is_active(), nullptr);
call_itt_task_notify(releasing, &t);
r1::submit(t, ctx, ta.my_arena, as_critical ? 1 : 0);
}
};

View File

@ -61,7 +61,6 @@ void __TBB_EXPORTED_FUNC destroy(d1::task_group_context&);
void __TBB_EXPORTED_FUNC reset(d1::task_group_context&);
bool __TBB_EXPORTED_FUNC cancel_group_execution(d1::task_group_context&);
bool __TBB_EXPORTED_FUNC is_group_execution_cancelled(d1::task_group_context&);
void __TBB_EXPORTED_FUNC register_pending_exception(d1::task_group_context&);
void __TBB_EXPORTED_FUNC capture_fp_settings(d1::task_group_context&);
struct task_group_context_impl;
@ -249,17 +248,6 @@ public:
return r1::is_group_execution_cancelled(*this);
}
//! Records the pending exception, and cancels the task group.
/** May be called only from inside a catch-block. If the context is already
cancelled, does nothing.
The method brings the task group associated with this context exactly into
the state it would be in, if one of its tasks threw the currently pending
exception during its execution. In other words, it emulates the actions
of the scheduler's dispatch loop exception handler. **/
void register_pending_exception() {
r1::register_pending_exception(*this);
}
#if __TBB_FP_CONTEXT
//! Captures the current FPU control settings to the context.
/** Because the method assumes that all the tasks that used to be associated with

View File

@ -26,12 +26,12 @@
// "Patch" version for custom releases
#define TBB_VERSION_PATCH 0
// Suffix string
#define __TBB_VERSION_SUFFIX "-beta08"
#define __TBB_VERSION_SUFFIX "-beta09"
// Full official version string
#define TBB_VERSION_STRING __TBB_STRING(TBB_VERSION_MAJOR) "." __TBB_STRING(TBB_VERSION_MINOR) __TBB_VERSION_SUFFIX
// Full interface version
#define TBB_INTERFACE_VERSION 12002
#define TBB_INTERFACE_VERSION 12003
// Major interface version
#define TBB_INTERFACE_VERSION_MAJOR (TBB_INTERFACE_VERSION/1000)
// Minor interface version

View File

@ -94,6 +94,13 @@ using tbb::internal::rml::get_shared_name;
namespace tbb {
namespace detail {
namespace r1 {
bool terminate_on_exception() {
return false;
}
}
namespace rml {
typedef ipc_thread_monitor::handle_type thread_handle;

View File

@ -72,7 +72,8 @@ target_compile_options(tbb
set_target_properties(tbb PROPERTIES
LINK_FLAGS ${TBB_LINK_DEF_FILE_FLAG}${CMAKE_CURRENT_SOURCE_DIR}/def/${TBB_DEF_FILE_PREFIX}-tbb.def
DEFINE_SYMBOL ""
VERSION ${TBB_BINARY_VERSION}
VERSION ${TBB_BINARY_VERSION}.${TBB_BINARY_MINOR_VERSION}
SOVERSION ${TBB_BINARY_VERSION}
)
# Prefer using target_link_options instead of target_link_libraries to specify link options because

View File

@ -99,7 +99,6 @@ std::size_t arena::occupy_free_slot(thread_data& tls) {
return out_of_arena;
}
ITT_NOTIFY(sync_acquired, my_slots + index);
atomic_update( my_limit, (unsigned)(index + 1), std::less<unsigned>() );
return index;
}
@ -218,19 +217,15 @@ arena::arena ( market& m, unsigned num_slots, unsigned num_reserved_slots, unsig
// __TBB_ASSERT( !my_slots[i].my_scheduler && !my_slots[i].task_pool, NULL );
__TBB_ASSERT( !my_slots[i].task_pool_ptr, NULL );
__TBB_ASSERT( !my_slots[i].my_task_pool_size, NULL );
// ITT_SYNC_CREATE(my_slots + i, SyncType_Scheduler, SyncObj_WorkerTaskPool);
mailbox(i).construct();
// ITT_SYNC_CREATE(&mailbox(i+1), SyncType_Scheduler, SyncObj_Mailbox);
my_slots[i].init_task_streams(i);
my_slots[i].my_default_task_dispatcher = new(base_td_pointer + i) task_dispatcher(this);
my_slots[i].my_is_occupied.store(false, std::memory_order_relaxed);
}
my_fifo_task_stream.initialize(my_num_slots);
my_resume_task_stream.initialize(my_num_slots);
// ITT_SYNC_CREATE(&my_task_stream, SyncType_Scheduler, SyncObj_TaskStream);
#if __TBB_PREVIEW_CRITICAL_TASKS
my_critical_task_stream.initialize(my_num_slots);
// ITT_SYNC_CREATE(&my_critical_task_stream, SyncType_Scheduler, SyncObj_CriticataskStream);
#endif
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
my_local_concurrency_mode = false;
@ -272,14 +267,14 @@ void arena::free_arena () {
drained += mailbox(i).drain();
my_slots[i].my_default_task_dispatcher->~task_dispatcher();
}
__TBB_ASSERT(my_fifo_task_stream.drain() == 0, "Not all enqueued tasks were executed");
__TBB_ASSERT(my_resume_task_stream.drain() == 0, "Not all enqueued tasks were executed");
__TBB_ASSERT(my_fifo_task_stream.empty(), "Not all enqueued tasks were executed");
__TBB_ASSERT(my_resume_task_stream.empty(), "Not all enqueued tasks were executed");
// Cleanup coroutines/schedulers cache
my_co_cache.cleanup();
my_default_ctx->~task_group_context();
cache_aligned_deallocate(my_default_ctx);
#if __TBB_PREVIEW_CRITICAL_TASKS
__TBB_ASSERT( my_critical_task_stream.drain()==0, "Not all critical tasks were executed");
__TBB_ASSERT( my_critical_task_stream.empty(), "Not all critical tasks were executed");
#endif
// remove an internal reference
my_market->release( /*is_public=*/false, /*blocking_terminate=*/false );
@ -690,15 +685,14 @@ void task_arena_impl::execute(d1::task_arena_base& ta, d1::delegate_base& d) {
context_guard.set_ctx(ta.my_arena->my_default_ctx);
nested_arena_context scope(*td, *ta.my_arena, index1);
#if _WIN64
try {
try_call([&] {
#endif
d();
__TBB_ASSERT(same_arena || governor::is_thread_data_set(td), nullptr);
#if _WIN64
} catch (...) {
}).on_exception([&] {
context_guard.restore_default();
throw;
}
});
#endif
}

View File

@ -445,7 +445,6 @@ void arena::advertise_new_work() {
atomic_fence(std::memory_order_seq_cst);
}
else if( work_type == wakeup ) {
__TBB_ASSERT(my_max_num_workers!=0, "Unexpected worker wakeup request");
atomic_fence(std::memory_order_seq_cst);
}
// Double-check idiom that, in case of spawning, is deliberately sloppy about memory fences.

View File

@ -258,7 +258,6 @@ private:
void commit_spawned_tasks(std::size_t new_tail) {
__TBB_ASSERT (new_tail <= my_task_pool_size, "task deque end was overwritten");
// emit "task was released" signal
// ITT_NOTIFY(sync_releasing, (void*)((std::uintptr_t)my_arena_slot+sizeof(std::uintptr_t)));
// Release fence is necessary to make sure that previously stored task pointers
// are visible to thieves.
tail.store(new_tail, std::memory_order_release);
@ -271,7 +270,6 @@ private:
__TBB_ASSERT ( head.load(std::memory_order_relaxed) < tail.load(std::memory_order_relaxed),
"entering arena without tasks to share" );
// Release signal on behalf of previously spawned tasks (when this thread was not in arena yet)
// ITT_NOTIFY(sync_releasing, my_arena_slot);
task_pool.store(task_pool_ptr, std::memory_order_release );
}
@ -296,11 +294,9 @@ private:
if( task_pool.load(std::memory_order_relaxed) != LockedTaskPool &&
task_pool.compare_exchange_strong(expected, LockedTaskPool ) ) {
// We acquired our own slot
// ITT_NOTIFY(sync_acquired, my_arena_slot);
break;
} else if( !sync_prepare_done ) {
// Start waiting
// ITT_NOTIFY(sync_prepare, my_arena_slot);
sync_prepare_done = true;
}
// Someone else acquired a lock, so pause and do exponential backoff.
@ -315,7 +311,6 @@ private:
if ( !(task_pool.load(std::memory_order_relaxed) != EmptyTaskPool) )
return; // we are not in arena - nothing to unlock
__TBB_ASSERT( task_pool.load(std::memory_order_relaxed) == LockedTaskPool, "arena slot is not locked" );
// ITT_NOTIFY(sync_releasing, my_arena_slot);
task_pool.store( task_pool_ptr, std::memory_order_release );
}
@ -323,7 +318,6 @@ private:
/** Garbles victim_arena_slot->task_pool for the duration of the lock. **/
d1::task** lock_task_pool() {
d1::task** victim_task_pool;
bool sync_prepare_done = false;
for ( atomic_backoff backoff;; /*backoff pause embedded in the loop*/) {
victim_task_pool = task_pool.load(std::memory_order_relaxed);
// Microbenchmarks demonstrated that aborting stealing attempt when the
@ -332,21 +326,12 @@ private:
// the presence of work in the victim's task pool, as they may give
// incorrect indication because of task pool relocations and resizes.
if (victim_task_pool == EmptyTaskPool) {
// The victim thread emptied its task pool - nothing to lock
if( sync_prepare_done ) {
// ITT_NOTIFY(sync_cancel, victim_arena_slot);
}
break;
}
d1::task** expected = victim_task_pool;
if (victim_task_pool != LockedTaskPool && task_pool.compare_exchange_strong(expected, LockedTaskPool) ) {
// We've locked victim's task pool
// ITT_NOTIFY(sync_acquired, victim_arena_slot);
break;
} else if ( !sync_prepare_done ) {
// Start waiting
// ITT_NOTIFY(sync_prepare, victim_arena_slot);
sync_prepare_done = true;
}
// Someone else acquired a lock, so pause and do exponential backoff.
backoff.pause();
@ -362,7 +347,6 @@ private:
void unlock_task_pool(d1::task** victim_task_pool) {
__TBB_ASSERT(task_pool.load(std::memory_order_relaxed) == LockedTaskPool, "victim arena slot is not locked");
__TBB_ASSERT(victim_task_pool != LockedTaskPool, NULL);
// ITT_NOTIFY(sync_releasing, victim_arena_slot);
task_pool.store(victim_task_pool, std::memory_order_release);
}
@ -388,7 +372,6 @@ private:
// Do not reset my_arena_index. It will be used to (attempt to) re-acquire the slot next time
__TBB_ASSERT(task_pool.load(std::memory_order_relaxed) == LockedTaskPool, "Task pool must be locked when leaving arena");
__TBB_ASSERT(is_quiescent_local_task_pool_empty(), "Cannot leave arena when the task pool is not empty");
// ITT_NOTIFY(sync_releasing, &my_arena->my_slots[my_arena_index]);
// No release fence is necessary here as this assignment precludes external
// accesses to the local task pool when becomes visible. Thus it is harmless
// if it gets hoisted above preceding local bookkeeping manipulations.

View File

@ -119,9 +119,9 @@ public:
};
#if _WIN32 || _WIN64
/* [[noreturn]] */ void __stdcall co_local_wait_for_all(void* arg);
/* [[noreturn]] */ void __stdcall co_local_wait_for_all(void* arg) noexcept;
#else
/* [[noreturn]] */ void co_local_wait_for_all(void* arg);
/* [[noreturn]] */ void co_local_wait_for_all(void* arg) noexcept;
#endif
#if _WIN32 || _WIN64


@ -82,29 +82,6 @@ void concurrent_monitor::notify_one_relaxed() {
to_thread_context(n)->semaphore().V();
}
void concurrent_monitor::notify_all_relaxed() {
if( waitset_ec.empty() )
return;
waitset_t temp;
const waitset_node_t* end;
{
tbb::spin_mutex::scoped_lock l( mutex_ec );
epoch.store( epoch.load( std::memory_order_relaxed ) + 1, std::memory_order_relaxed );
waitset_ec.flush_to( temp );
end = temp.end();
for( waitset_node_t* n=temp.front(); n!=end; n=n->next )
to_thread_context(n)->in_waitset = false;
}
waitset_node_t* nxt;
for( waitset_node_t* n=temp.front(); n!=end; n=nxt ) {
nxt = n->next;
to_thread_context(n)->semaphore().V();
}
#if TBB_USE_ASSERT
temp.clear();
#endif
}
void concurrent_monitor::abort_all_relaxed() {
if( waitset_ec.empty() )
return;


@ -147,25 +147,16 @@ public:
}
return do_it;
}
//! Cancel the wait. Removes the thread from the wait queue if not removed yet.
void cancel_wait( thread_context& thr );
//! Wait for a condition to be satisfied with waiting-on context
template<typename WaitUntil, typename Context>
void wait( WaitUntil until, Context on );
//! Notify one thread about the event
void notify_one() {atomic_fence( std::memory_order_seq_cst ); notify_one_relaxed();}
//! Notify one thread about the event. Relaxed version.
void notify_one_relaxed();
//! Notify all waiting threads of the event
void notify_all() {atomic_fence( std::memory_order_seq_cst ); notify_all_relaxed();}
//! Notify all waiting threads of the event; Relaxed version
void notify_all_relaxed();
//! Notify waiting threads of the event that satisfies the given predicate
template<typename P> void notify( const P& predicate ) {
atomic_fence( std::memory_order_seq_cst );
@ -188,22 +179,6 @@ private:
thread_context* to_thread_context( waitset_node_t* n ) { return static_cast<thread_context*>(n); }
};
template<typename WaitUntil, typename Context>
void concurrent_monitor::wait( WaitUntil until, Context on )
{
bool slept = false;
thread_context thr_ctx;
prepare_wait( thr_ctx, on() );
while( !until() ) {
if( (slept = commit_wait( thr_ctx ) )==true )
if( until() ) break;
slept = false;
prepare_wait( thr_ctx, on() );
}
if( !slept )
cancel_wait( thr_ctx );
}
template<typename P>
void concurrent_monitor::notify_relaxed( const P& predicate ) {
if( waitset_ec.empty() )
@ -235,6 +210,53 @@ void concurrent_monitor::notify_relaxed( const P& predicate ) {
#endif
}
// Additional possible methods that are not required right now
// //! Notify all waiting threads of the event
// void notify_all() {atomic_fence( std::memory_order_seq_cst ); notify_all_relaxed();}
// Additional possible methods that are not required right now
//! Notify all waiting threads of the event; Relaxed version
// void concurrent_monitor::notify_all_relaxed() {
// if( waitset_ec.empty() )
// return;
// waitset_t temp;
// const waitset_node_t* end;
// {
// tbb::spin_mutex::scoped_lock l( mutex_ec );
// epoch.store( epoch.load( std::memory_order_relaxed ) + 1, std::memory_order_relaxed );
// waitset_ec.flush_to( temp );
// end = temp.end();
// for( waitset_node_t* n=temp.front(); n!=end; n=n->next )
// to_thread_context(n)->in_waitset = false;
// }
// waitset_node_t* nxt;
// for( waitset_node_t* n=temp.front(); n!=end; n=nxt ) {
// nxt = n->next;
// to_thread_context(n)->semaphore().V();
// }
// #if TBB_USE_ASSERT
// temp.clear();
// #endif
// }
// Additional possible methods that are not required right now
//! Wait for a condition to be satisfied with waiting-on context
// template<typename WaitUntil, typename Context>
// void concurrent_monitor::wait( WaitUntil until, Context on )
// {
// bool slept = false;
// thread_context thr_ctx;
// prepare_wait( thr_ctx, on() );
// while( !until() ) {
// if( (slept = commit_wait( thr_ctx ) )==true )
// if( until() ) break;
// slept = false;
// prepare_wait( thr_ctx, on() );
// }
// if( !slept )
// cancel_wait( thr_ctx );
// }
} // namespace r1
} // namespace detail
} // namespace tbb
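The wait()/notify_all() machinery removed above (and preserved as comments further down) encodes the usual monitor protocol: register in the wait set, re-check the condition, sleep, and cancel the registration if the condition turned true before sleeping. A rough standalone analogue built on std::condition_variable, where the lock plus predicate re-check plays the role of the prepare/commit/cancel bookkeeping (the Event class here is purely illustrative and has no epoch or wait-set tracking):

```cpp
#include <condition_variable>
#include <mutex>

class Event {
    std::mutex m;
    std::condition_variable cv;
    bool signaled = false;
public:
    // Counterpart of wait(until, on): block until the condition holds,
    // re-checking it after every wake-up exactly as the removed loop does.
    void wait() {
        std::unique_lock<std::mutex> lock(m);
        cv.wait(lock, [this] { return signaled; });
    }
    // Counterpart of notify_all(): make the condition true and wake everyone.
    void notify_all() {
        { std::lock_guard<std::mutex> lock(m); signaled = true; }
        cv.notify_all();
    }
};

int main() {
    Event e;
    e.notify_all();
    e.wait();   // returns immediately because the event is already signaled
}
```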


@ -55,6 +55,8 @@ _ZTIN3tbb6detail2r112missing_waitE;
_ZTVN3tbb6detail2r112missing_waitE;
_ZTIN3tbb6detail2r110user_abortE;
_ZTVN3tbb6detail2r110user_abortE;
_ZTIN3tbb6detail2r111unsafe_waitE;
_ZTVN3tbb6detail2r111unsafe_waitE;
/* RTM Mutex (rtm_mutex.cpp) */
_ZN3tbb6detail2r17acquireERNS0_2d19rtm_mutexERNS3_11scoped_lockEb;
@ -122,6 +124,8 @@ _ZN3tbb6detail2r19constructERNS0_2d116queuing_rw_mutexE;
_ZN3tbb6detail2r16createERNS0_2d114global_controlE;
_ZN3tbb6detail2r17destroyERNS0_2d114global_controlE;
_ZN3tbb6detail2r127global_control_active_valueEi;
_ZN3tbb6detail2r18finalizeERNS0_2d121task_scheduler_handleEi;
_ZN3tbb6detail2r13getERNS0_2d121task_scheduler_handleE;
/* Parallel pipeline (parallel_pipeline.cpp) */
_ZN3tbb6detail2r117parallel_pipelineERNS0_2d118task_group_contextEjRKNS2_11filter_nodeE;


@ -55,6 +55,8 @@ _ZTIN3tbb6detail2r112missing_waitE;
_ZTVN3tbb6detail2r112missing_waitE;
_ZTIN3tbb6detail2r110user_abortE;
_ZTVN3tbb6detail2r110user_abortE;
_ZTIN3tbb6detail2r111unsafe_waitE;
_ZTVN3tbb6detail2r111unsafe_waitE;
/* RTM Mutex (rtm_mutex.cpp) */
_ZN3tbb6detail2r17acquireERNS0_2d19rtm_mutexERNS3_11scoped_lockEb;
@ -122,6 +124,8 @@ _ZN3tbb6detail2r19constructERNS0_2d116queuing_rw_mutexE;
_ZN3tbb6detail2r16createERNS0_2d114global_controlE;
_ZN3tbb6detail2r17destroyERNS0_2d114global_controlE;
_ZN3tbb6detail2r127global_control_active_valueEi;
_ZN3tbb6detail2r18finalizeERNS0_2d121task_scheduler_handleEl;
_ZN3tbb6detail2r13getERNS0_2d121task_scheduler_handleE;
/* Parallel pipeline (parallel_pipeline.cpp) */
_ZN3tbb6detail2r117parallel_pipelineERNS0_2d118task_group_contextEmRKNS2_11filter_nodeE;


@ -57,6 +57,8 @@ __ZTIN3tbb6detail2r112missing_waitE
__ZTVN3tbb6detail2r112missing_waitE
__ZTIN3tbb6detail2r110user_abortE
__ZTVN3tbb6detail2r110user_abortE
__ZTIN3tbb6detail2r111unsafe_waitE
__ZTVN3tbb6detail2r111unsafe_waitE
# RTM Mutex (rtm_mutex.cpp)
__ZN3tbb6detail2r17acquireERNS0_2d19rtm_mutexERNS3_11scoped_lockEb
@ -124,6 +126,8 @@ __ZN3tbb6detail2r19constructERNS0_2d116queuing_rw_mutexE
__ZN3tbb6detail2r16createERNS0_2d114global_controlE
__ZN3tbb6detail2r17destroyERNS0_2d114global_controlE
__ZN3tbb6detail2r127global_control_active_valueEi
__ZN3tbb6detail2r18finalizeERNS0_2d121task_scheduler_handleEl
__ZN3tbb6detail2r13getERNS0_2d121task_scheduler_handleE
# Parallel pipeline (parallel_pipeline.cpp)
__ZN3tbb6detail2r117parallel_pipelineERNS0_2d118task_group_contextEmRKNS2_11filter_nodeE


@ -118,6 +118,8 @@ EXPORTS
?create@r1@detail@tbb@@YAXAAVglobal_control@d1@23@@Z
?destroy@r1@detail@tbb@@YAXAAVglobal_control@d1@23@@Z
?global_control_active_value@r1@detail@tbb@@YAIH@Z
?get@r1@detail@tbb@@YAXAAVtask_scheduler_handle@d1@23@@Z
?finalize@r1@detail@tbb@@YA_NAAVtask_scheduler_handle@d1@23@H@Z
; Parallel pipeline (parallel_pipeline.cpp)
?parallel_pipeline@r1@detail@tbb@@YAXAAVtask_group_context@d1@23@IABVfilter_node@523@@Z


@ -118,6 +118,8 @@ EXPORTS
?global_control_active_value@r1@detail@tbb@@YA_KH@Z
?create@r1@detail@tbb@@YAXAEAVglobal_control@d1@23@@Z
?destroy@r1@detail@tbb@@YAXAEAVglobal_control@d1@23@@Z
?get@r1@detail@tbb@@YAXAEAVtask_scheduler_handle@d1@23@@Z
?finalize@r1@detail@tbb@@YA_NAEAVtask_scheduler_handle@d1@23@_J@Z
; Parallel pipeline (parallel_pipeline.cpp)
?set_end_of_input@r1@detail@tbb@@YAXAEAVbase_filter@d1@23@@Z


@ -376,75 +376,26 @@ namespace r1 {
#endif
}
#if !_WIN32
#if __TBB_DYNAMIC_LOAD_ENABLED
static dynamic_link_handle pin_symbols( dynamic_link_descriptor desc, const dynamic_link_descriptor* descriptors, std::size_t required ) {
// It is assumed that all symbols come from one and the same library.
// The library has been loaded by another module and contains at least one requested symbol.
// However, after we obtained the symbol, the library can be unloaded by another thread,
// invalidating our symbol. Therefore we need to pin the library in memory.
dynamic_link_handle library_handle = 0;
Dl_info info;
// Get library's name from earlier found symbol
if ( dladdr( reinterpret_cast<void*>(*desc.handler), &info ) ) {
// Pin the library
library_handle = dlopen( info.dli_fname, RTLD_LAZY );
if ( library_handle ) {
// If original library was unloaded before we pinned it
// and then another module loaded in its place, the earlier
// found symbol would become invalid. So revalidate them.
if ( !resolve_symbols( library_handle, descriptors, required ) ) {
// Wrong library.
dynamic_unlink(library_handle);
library_handle = 0;
}
} else {
char const * err = dlerror();
DYNAMIC_LINK_WARNING( dl_lib_not_found, info.dli_fname, err );
}
}
// else the library has been unloaded by another thread
return library_handle;
}
#endif /* __TBB_DYNAMIC_LOAD_ENABLED */
#endif /* !_WIN32 */
static dynamic_link_handle global_symbols_link( const char* library, const dynamic_link_descriptor descriptors[], std::size_t required ) {
::tbb::detail::suppress_unused_warning( library );
dynamic_link_handle library_handle;
dynamic_link_handle library_handle{};
#if _WIN32
if ( GetModuleHandleEx( 0, library, &library_handle ) ) {
if ( resolve_symbols( library_handle, descriptors, required ) )
return library_handle;
else
FreeLibrary( library_handle );
}
bool res = GetModuleHandleEx(0, library, &library_handle);
__TBB_ASSERT_EX(res && library_handle || !res && !library_handle, nullptr);
#else /* _WIN32 */
#if !__TBB_DYNAMIC_LOAD_ENABLED /* only __TBB_WEAK_SYMBOLS_PRESENT is defined */
if ( !dlopen ) return 0;
#endif /* !__TBB_DYNAMIC_LOAD_ENABLED */
library_handle = dlopen( NULL, RTLD_LAZY );
#if !__ANDROID__
// On Android dlopen( NULL ) returns NULL if it is called during dynamic module initialization.
__TBB_ASSERT_EX( library_handle, "The handle for the main program is NULL" );
#endif
#if __TBB_DYNAMIC_LOAD_ENABLED
// Check existence of the first symbol only, then use it to find the library and load all necessary symbols.
pointer_to_handler handler;
dynamic_link_descriptor desc;
desc.name = descriptors[0].name;
desc.handler = &handler;
if ( resolve_symbols( library_handle, &desc, 1 ) ) {
dynamic_unlink( library_handle );
return pin_symbols( desc, descriptors, required );
}
#else /* only __TBB_WEAK_SYMBOLS_PRESENT is defined */
if ( resolve_symbols( library_handle, descriptors, required ) )
return library_handle;
#endif
dynamic_unlink( library_handle );
// RTLD_GLOBAL - to guarantee that old TBB will find the loaded library
// RTLD_NOLOAD - not to load the library without the full path
library_handle = dlopen(library, RTLD_LAZY | RTLD_GLOBAL | RTLD_NOLOAD);
#endif /* _WIN32 */
return 0;
if (library_handle) {
if (!resolve_symbols(library_handle, descriptors, required)) {
dynamic_unlink(library_handle);
library_handle = nullptr;
}
}
return library_handle;
}
static void save_library_handle( dynamic_link_handle src, dynamic_link_handle *dst ) {
@ -470,7 +421,7 @@ namespace r1 {
// (e.g. because of MS runtime problems - one of those crazy manifest related ones)
UINT prev_mode = SetErrorMode (SEM_FAILCRITICALERRORS);
#endif /* _WIN32 */
dynamic_link_handle library_handle = dlopen( path, RTLD_LAZY );
dynamic_link_handle library_handle = dlopen( path, RTLD_LAZY | RTLD_GLOBAL );
#if _WIN32
SetErrorMode (prev_mode);
#endif /* _WIN32 */
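The rewritten global_symbols_link relies on dlopen's RTLD_NOLOAD flag: the call returns a handle only if the library is already mapped into the process, which both pins it and lets the caller resolve symbols from it. A small standalone probe of that behaviour (the library name is just an example; link with -ldl where required):

```cpp
// Probe whether a shared library is already loaded, without loading it, and
// resolve a symbol from it if so. POSIX dlopen/dlsym only.
#include <dlfcn.h>
#include <cstdio>

int main() {
    const char* lib = "libm.so.6";   // example name, adjust for the target system
    // RTLD_NOLOAD: succeed only if the library is already mapped into the process.
    // RTLD_GLOBAL: make its symbols available for subsequent lookups.
    void* handle = dlopen(lib, RTLD_LAZY | RTLD_GLOBAL | RTLD_NOLOAD);
    if (!handle) {
        std::printf("%s is not loaded in this process\n", lib);
        return 0;
    }
    if (void* sym = dlsym(handle, "cos"))
        std::printf("resolved cos at %p from the already loaded %s\n", sym, lib);
    dlclose(handle);   // drop the extra reference taken by dlopen
    return 0;
}
```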


@ -41,7 +41,26 @@ const char* user_abort::what() const noexcept(true) { return "User-initiated abo
const char* missing_wait::what() const noexcept(true) { return "wait() was not called on the structured_task_group"; }
#if TBB_USE_EXCEPTIONS
#define DO_THROW(exc, init_args) throw exc init_args;
template <typename F>
/*[[noreturn]]*/ void do_throw_noexcept(F throw_func) noexcept {
throw_func();
}
/*[[noreturn]]*/ void do_throw_noexcept(void (*throw_func)()) noexcept {
throw_func();
}
bool terminate_on_exception(); // defined in global_control.cpp and ipc_server.cpp
template <typename F>
/*[[noreturn]]*/ void do_throw(F throw_func) {
if (terminate_on_exception()) {
do_throw_noexcept(throw_func);
}
throw_func();
}
#define DO_THROW(exc, init_args) do_throw( []{ throw exc init_args; } );
#else /* !TBB_USE_EXCEPTIONS */
#define PRINT_ERROR_AND_ABORT(exc_name, msg) \
std::fprintf (stderr, "Exception %s with message %s would have been thrown, " \
@ -53,18 +72,22 @@ const char* missing_wait::what() const noexcept(true) { return "wait() was not c
void throw_exception ( exception_id eid ) {
switch ( eid ) {
case exception_id::bad_alloc: DO_THROW(std::bad_alloc, () );
case exception_id::bad_last_alloc: DO_THROW(bad_last_alloc, ());
case exception_id::user_abort: DO_THROW( user_abort, () );
case exception_id::nonpositive_step: DO_THROW(std::invalid_argument, ("Step must be positive") );
case exception_id::out_of_range: DO_THROW(std::out_of_range, ("Index out of requested size range"));
case exception_id::reservation_length_error: DO_THROW(std::length_error, ("Attempt to exceed implementation defined length limits"));
case exception_id::missing_wait: DO_THROW(missing_wait, ());
case exception_id::invalid_load_factor: DO_THROW(std::out_of_range, ("Invalid hash load factor"));
case exception_id::invalid_key: DO_THROW(std::out_of_range, ("invalid key"));
case exception_id::bad_tagged_msg_cast: DO_THROW(std::runtime_error, ("Illegal tagged_msg cast"));
case exception_id::bad_alloc: DO_THROW(std::bad_alloc, ()); break;
case exception_id::bad_last_alloc: DO_THROW(bad_last_alloc, ()); break;
case exception_id::user_abort: DO_THROW( user_abort, () ); break;
case exception_id::nonpositive_step: DO_THROW(std::invalid_argument, ("Step must be positive") ); break;
case exception_id::out_of_range: DO_THROW(std::out_of_range, ("Index out of requested size range")); break;
case exception_id::reservation_length_error: DO_THROW(std::length_error, ("Attempt to exceed implementation defined length limits")); break;
case exception_id::missing_wait: DO_THROW(missing_wait, ()); break;
case exception_id::invalid_load_factor: DO_THROW(std::out_of_range, ("Invalid hash load factor")); break;
case exception_id::invalid_key: DO_THROW(std::out_of_range, ("invalid key")); break;
case exception_id::bad_tagged_msg_cast: DO_THROW(std::runtime_error, ("Illegal tagged_msg cast")); break;
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
case exception_id::unsafe_wait: DO_THROW(unsafe_wait, ("Unsafe to wait further")); break;
#endif
default: __TBB_ASSERT ( false, "Unknown exception ID" );
}
__TBB_ASSERT(false, "Unreachable code");
}
/* The "what" should be fairly short, not more than about 128 characters.
@ -75,17 +98,19 @@ void throw_exception ( exception_id eid ) {
Task.cpp because the throw generates a pathetic lot of code, and ADR wanted
this large chunk of code to be placed on a cold page. */
void handle_perror( int error_code, const char* what ) {
char buf[256];
#if defined(_MSC_VER) && _MSC_VER < 1500
#define snprintf _snprintf
#endif
int written = std::snprintf(buf, sizeof(buf), "%s: %s", what, std::strerror( error_code ));
// On overflow, the returned value exceeds sizeof(buf) (for GLIBC) or is negative (for MSVC).
__TBB_ASSERT_EX( written>0 && written<(int)sizeof(buf), "Error description is too long" );
// Ensure that buffer ends in terminator.
buf[sizeof(buf)-1] = 0;
const int BUF_SIZE = 255;
char buf[BUF_SIZE + 1] = { 0 };
std::strncat(buf, what, BUF_SIZE);
std::size_t buf_len = std::strlen(buf);
if (error_code) {
std::strncat(buf, ": ", BUF_SIZE - buf_len);
buf_len = std::strlen(buf);
std::strncat(buf, std::strerror(error_code), BUF_SIZE - buf_len);
buf_len = std::strlen(buf);
}
__TBB_ASSERT(buf_len <= BUF_SIZE && buf[buf_len] == 0, nullptr);
#if TBB_USE_EXCEPTIONS
throw std::runtime_error(buf);
do_throw([buf] { throw std::runtime_error(buf); });
#else
PRINT_ERROR_AND_ABORT( "runtime_error", buf);
#endif /* !TBB_USE_EXCEPTIONS */
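The new do_throw/do_throw_noexcept pair exploits a core language rule: an exception escaping a noexcept function calls std::terminate. Routing the throw through the noexcept overload therefore turns "propagate" into "terminate" whenever terminate_on_exception is enabled. A stripped-down sketch of that mechanism (the boolean flag here is a stand-in for the real query):

```cpp
// Demonstrates the "throw inside a noexcept wrapper to force std::terminate" trick.
#include <cstdio>
#include <stdexcept>

static bool terminate_on_exception_enabled = false;   // stand-in for the real query

template <typename F>
void do_throw_noexcept(F throw_func) noexcept {
    throw_func();   // the escaping exception triggers std::terminate
}

template <typename F>
void do_throw(F throw_func) {
    if (terminate_on_exception_enabled)
        do_throw_noexcept(throw_func);   // never returns normally
    throw_func();                        // regular propagation otherwise
}

int main() {
    try {
        do_throw([] { throw std::runtime_error("boom"); });
    } catch (const std::exception& e) {
        std::printf("caught: %s\n", e.what());   // reached only in the propagating mode
    }
}
```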


@ -18,6 +18,7 @@
#include "tbb/detail/_template_helpers.h"
#include "tbb/global_control.h"
#include "tbb/tbb_allocator.h"
#include "tbb/spin_mutex.h"
#include "governor.h"
@ -25,26 +26,35 @@
#include "misc.h"
#include <atomic>
#include <set>
namespace tbb {
namespace detail {
namespace r1 {
//! Comparator for a set of global_control objects
struct control_storage_comparator {
bool operator()(const global_control* lhs, const global_control* rhs) const;
};
class control_storage {
friend struct global_control_impl;
friend std::size_t global_control_active_value(int);
protected:
std::size_t my_active_value;
std::atomic<global_control*> my_head;
spin_mutex my_list_mutex;
std::size_t my_active_value{0};
std::set<global_control*, control_storage_comparator, tbb_allocator<global_control*>> my_list{};
spin_mutex my_list_mutex{};
public:
virtual std::size_t default_value() const = 0;
virtual void apply_active() const {}
virtual void apply_active(std::size_t new_active) {
my_active_value = new_active;
}
virtual bool is_first_arg_preferred(std::size_t a, std::size_t b) const {
return a>b; // prefer max by default
}
virtual std::size_t active_value() const {
return my_head.load(std::memory_order_acquire)? my_active_value : default_value();
virtual std::size_t active_value() {
spin_mutex::scoped_lock lock(my_list_mutex); // protect my_list.empty() call
return !my_list.empty() ? my_active_value : default_value();
}
};
@ -55,16 +65,15 @@ class alignas(max_nfs_size) allowed_parallelism_control : public control_storage
virtual bool is_first_arg_preferred(std::size_t a, std::size_t b) const override {
return a<b; // prefer min allowed parallelism
}
virtual void apply_active() const override {
virtual void apply_active(std::size_t new_active) override {
control_storage::apply_active(new_active);
__TBB_ASSERT( my_active_value>=1, NULL );
// -1 to take master into account
market::set_active_num_workers( my_active_value-1 );
}
virtual std::size_t active_value() const override {
/* Reading of my_active_value is not synchronized with a possible concurrent update
of my_head by another thread. That is OK: the value of my_active_value does not become
invalid, merely obsolete. */
if (!my_head.load(std::memory_order_acquire))
virtual std::size_t active_value() override {
spin_mutex::scoped_lock lock(my_list_mutex); // protect my_list.empty() call
if (my_list.empty())
return default_value();
// non-zero, if market is active
const std::size_t workers = market::max_num_workers();
@ -74,7 +83,7 @@ class alignas(max_nfs_size) allowed_parallelism_control : public control_storage
}
public:
std::size_t active_value_if_present() const {
return my_head.load(std::memory_order_acquire)? my_active_value : 0;
return !my_list.empty() ? my_active_value : 0;
}
};
@ -82,82 +91,162 @@ class alignas(max_nfs_size) stack_size_control : public control_storage {
virtual std::size_t default_value() const override {
return ThreadStackSize;
}
virtual void apply_active() const override {
virtual void apply_active(std::size_t new_active) override {
control_storage::apply_active(new_active);
#if __TBB_WIN8UI_SUPPORT && (_WIN32_WINNT < 0x0A00)
__TBB_ASSERT( false, "For Windows 8 Store* apps we must not set stack size" );
#endif
}
};
class alignas(max_nfs_size) terminate_on_exception_control : public control_storage {
virtual std::size_t default_value() const override {
return 0;
}
};
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
class alignas(max_nfs_size) lifetime_control : public control_storage {
virtual bool is_first_arg_preferred(std::size_t, std::size_t) const override {
return false; // not interested
}
virtual std::size_t default_value() const override {
return 0;
}
virtual void apply_active(std::size_t new_active) override {
if (new_active == 1) {
// reserve the market reference
market::global_market_mutex_type::scoped_lock lock( market::theMarketMutex );
if (market::theMarket) {
market::add_ref_unsafe(lock, /*is_public*/ true);
}
} else if (new_active == 0) { // new_active == 0
// release the market reference
market::global_market_mutex_type::scoped_lock lock( market::theMarketMutex );
if (market::theMarket != nullptr) {
lock.release();
market::theMarket->release(/*is_public*/ true, /*blocking_terminate*/ false);
}
}
control_storage::apply_active(new_active);
}
public:
bool is_empty() {
spin_mutex::scoped_lock lock(my_list_mutex);
return my_list.empty();
}
};
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
static allowed_parallelism_control allowed_parallelism_ctl;
static stack_size_control stack_size_ctl;
static terminate_on_exception_control terminate_on_exception_ctl;
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
static lifetime_control lifetime_ctl;
static control_storage *controls[] = {&allowed_parallelism_ctl, &stack_size_ctl, &terminate_on_exception_ctl, &lifetime_ctl};
#else
static control_storage *controls[] = {&allowed_parallelism_ctl, &stack_size_ctl, &terminate_on_exception_ctl};
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
static control_storage *controls[] = {&allowed_parallelism_ctl, &stack_size_ctl};
//! Comparator for a set of global_control objects
inline bool control_storage_comparator::operator()(const global_control* lhs, const global_control* rhs) const {
__TBB_ASSERT_RELEASE(lhs->my_param < global_control::parameter_max , NULL);
return lhs->my_value < rhs->my_value || (lhs->my_value == rhs->my_value && lhs < rhs);
}
unsigned market::app_parallelism_limit() {
return allowed_parallelism_ctl.active_value_if_present();
}
bool terminate_on_exception() {
return global_control::active_value(global_control::terminate_on_exception) == 1;
}
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
unsigned market::is_lifetime_control_present() {
return !lifetime_ctl.is_empty();
}
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
struct global_control_impl {
private:
static bool erase_if_present(control_storage* const c, d1::global_control& gc) {
auto it = c->my_list.find(&gc);
if (it != c->my_list.end()) {
c->my_list.erase(it);
return true;
}
return false;
}
public:
static void create(d1::global_control& gc) {
__TBB_ASSERT_RELEASE(gc.my_param < global_control::parameter_max, NULL);
control_storage* const c = controls[gc.my_param];
spin_mutex::scoped_lock lock(c->my_list_mutex);
if (!c->my_head.load(std::memory_order_acquire) || c->is_first_arg_preferred(gc.my_value, c->my_active_value)) {
c->my_active_value = gc.my_value;
if (c->my_list.empty() || c->is_first_arg_preferred(gc.my_value, c->my_active_value)) {
// to guarantee that apply_active() is called with current active value,
// calls it here and in internal_destroy() under my_list_mutex
c->apply_active();
c->apply_active(gc.my_value);
}
gc.my_next = c->my_head.load(std::memory_order_acquire);
// publish my_head, at this point my_active_value must be valid
c->my_head.store(&gc, std::memory_order_release);
c->my_list.insert(&gc);
}
static void destroy(d1::global_control& gc) {
global_control* prev = 0;
__TBB_ASSERT_RELEASE(gc.my_param < global_control::parameter_max, NULL);
control_storage* const c = controls[gc.my_param];
__TBB_ASSERT(c->my_head.load(std::memory_order_relaxed), NULL);
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
__TBB_ASSERT(gc.my_param == global_control::scheduler_handle || !c->my_list.empty(), NULL);
#else
__TBB_ASSERT(!c->my_list.empty(), NULL);
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
// Concurrent reading and changing of a global parameter is possible.
// In this case, my_active_value may not match the current state of the parameters.
// This is OK because:
// 1) my_active_value is either the current or the previous value
// 2) my_active_value is current when internal_destroy() returns
spin_mutex::scoped_lock lock(c->my_list_mutex);
std::size_t new_active = (std::size_t) - 1, old_active = c->my_active_value;
std::size_t new_active = (std::size_t)(-1), old_active = c->my_active_value;
if (c->my_head.load(std::memory_order_acquire) != &gc)
new_active = c->my_head.load(std::memory_order_acquire)->my_value;
else if (c->my_head.load(std::memory_order_acquire)->my_next)
new_active = c->my_head.load(std::memory_order_acquire)->my_next->my_value;
// if there is only one element, new_active will be set later
for (global_control* curr = c->my_head.load(std::memory_order_acquire); curr; prev = curr, curr = curr->my_next) {
if (curr == &gc) {
if (prev) {
prev->my_next = gc.my_next;
} else {
c->my_head.store(gc.my_next, std::memory_order_release);
}
} else {
if (c->is_first_arg_preferred(curr->my_value, new_active)) {
new_active = curr->my_value;
}
}
if (!erase_if_present(c, gc)) {
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
__TBB_ASSERT(gc.my_param == global_control::scheduler_handle , NULL);
return;
#else
__TBB_ASSERT(false, "Unreachable code");
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
}
if (!c->my_head.load(std::memory_order_acquire)) {
if (c->my_list.empty()) {
__TBB_ASSERT(new_active == (std::size_t) - 1, NULL);
new_active = c->default_value();
} else {
new_active = (*c->my_list.begin())->my_value;
}
if (new_active != old_active) {
c->my_active_value = new_active;
c->apply_active();
c->apply_active(new_active);
}
}
static bool remove_and_check_if_empty(d1::global_control& gc) {
__TBB_ASSERT_RELEASE(gc.my_param < global_control::parameter_max, NULL);
control_storage* const c = controls[gc.my_param];
__TBB_ASSERT(!c->my_list.empty(), NULL);
spin_mutex::scoped_lock lock(c->my_list_mutex);
erase_if_present(c, gc);
return c->my_list.empty();
}
static bool is_present(d1::global_control& gc) {
__TBB_ASSERT_RELEASE(gc.my_param < global_control::parameter_max, NULL);
control_storage* const c = controls[gc.my_param];
spin_mutex::scoped_lock lock(c->my_list_mutex);
auto it = c->my_list.find(&gc);
if (it != c->my_list.end()) {
return true;
}
return false;
}
};
void __TBB_EXPORTED_FUNC create(d1::global_control& gc) {
@ -167,6 +256,13 @@ void __TBB_EXPORTED_FUNC destroy(d1::global_control& gc) {
global_control_impl::destroy(gc);
}
bool remove_and_check_if_empty(d1::global_control& gc) {
return global_control_impl::remove_and_check_if_empty(gc);
}
bool is_present(d1::global_control& gc) {
return global_control_impl::is_present(gc);
}
std::size_t __TBB_EXPORTED_FUNC global_control_active_value(int param) {
__TBB_ASSERT_RELEASE(param < global_control::parameter_max, NULL);
return controls[param]->active_value();
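The reworked control_storage replaces the intrusive my_head list with a std::set ordered by the requested value, so the winning setting is simply an extremum of the set and removal needs no list traversal. A toy model of the same idea, using a multiset of plain values instead of a set of global_control pointers and assuming the default prefer-the-maximum policy:

```cpp
// Toy model of control_storage: the active value is the extremum of all live requests.
#include <cassert>
#include <cstddef>
#include <mutex>
#include <set>

class SettingStorage {
    std::mutex m;
    std::multiset<std::size_t> requests;   // one entry per live global_control-like object
    std::size_t default_value;
public:
    explicit SettingStorage(std::size_t def) : default_value(def) {}
    void insert(std::size_t v) { std::lock_guard<std::mutex> l(m); requests.insert(v); }
    void erase(std::size_t v) {
        std::lock_guard<std::mutex> l(m);
        auto it = requests.find(v);
        if (it != requests.end()) requests.erase(it);   // erase one instance only
    }
    // Mirrors active_value(): the set decides, falling back to the default when empty.
    std::size_t active_value() {
        std::lock_guard<std::mutex> l(m);
        return requests.empty() ? default_value : *requests.rbegin();   // prefer max
    }
};

int main() {
    SettingStorage stack_size(1 << 20);
    assert(stack_size.active_value() == (1u << 20));
    stack_size.insert(4u << 20);
    stack_size.insert(2u << 20);
    assert(stack_size.active_value() == (4u << 20));
    stack_size.erase(4u << 20);
    assert(stack_size.active_value() == (2u << 20));
}
```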


@ -22,6 +22,9 @@
#include "dynamic_link.h"
#include "tbb/task_group.h"
#include "tbb/global_control.h"
#include "tbb/tbb_allocator.h"
#include "task_dispatcher.h"
#include <cstdio>
@ -33,6 +36,12 @@ namespace tbb {
namespace detail {
namespace r1 {
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
//! global_control.cpp contains definition
bool remove_and_check_if_empty(d1::global_control& gc);
bool is_present(d1::global_control& gc);
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
namespace rml {
tbb_server* make_private_server( tbb_client& client );
} // namespace rml
@ -56,10 +65,9 @@ void governor::acquire_resources () {
void governor::release_resources () {
theRMLServerFactory.close();
destroy_process_mask();
#if TBB_USE_ASSERT
if( __TBB_InitOnce::initialization_done() && theTLS.get() )
runtime_warning( "TBB is unloaded while tbb::task_scheduler_init object is alive?" );
#endif
__TBB_ASSERT(!(__TBB_InitOnce::initialization_done() && theTLS.get()), "TBB is unloaded while thread data still alive?");
int status = theTLS.destroy();
if( status )
runtime_warning("failed to destroy task scheduler TLS: %s", std::strerror(status));
@ -209,6 +217,59 @@ void governor::initialize_rml_factory () {
UsePrivateRML = res != ::rml::factory::st_success;
}
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
void __TBB_EXPORTED_FUNC get(d1::task_scheduler_handle& handle) {
handle.m_ctl = new(allocate_memory(sizeof(global_control))) global_control(global_control::scheduler_handle, 1);
}
void release_impl(d1::task_scheduler_handle& handle) {
if (handle.m_ctl != nullptr) {
handle.m_ctl->~global_control();
deallocate_memory(handle.m_ctl);
handle.m_ctl = nullptr;
}
}
bool finalize_impl(d1::task_scheduler_handle& handle) {
market::global_market_mutex_type::scoped_lock lock( market::theMarketMutex );
bool ok = true; // ok if theMarket does not exist yet
market* m = market::theMarket; // read the state of theMarket
if (m != nullptr) {
lock.release();
__TBB_ASSERT(is_present(*handle.m_ctl), "finalize or release was already called on this object");
thread_data* td = governor::get_thread_data_if_initialized();
if (td) {
task_dispatcher* task_disp = td->my_task_dispatcher;
__TBB_ASSERT(task_disp, nullptr);
if (task_disp->m_properties.outermost && !td->my_is_worker) { // is not inside a tbb parallel region
governor::auto_terminate(td);
}
}
if (remove_and_check_if_empty(*handle.m_ctl)) {
ok = m->release(/*is_public*/ true, /*blocking_terminate*/ true);
} else {
ok = false;
}
}
return ok;
}
bool __TBB_EXPORTED_FUNC finalize(d1::task_scheduler_handle& handle, std::intptr_t mode) {
if (mode == d1::release_nothrowing) {
release_impl(handle);
return true;
} else {
bool ok = finalize_impl(handle);
// TODO: it is unsafe when finalize is called concurrently and further library unload
release_impl(handle);
if (mode == d1::finalize_throwing && !ok) {
throw_exception(exception_id::unsafe_wait);
}
return ok;
}
}
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
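Together with the new get()/finalize() exports, this backs a preview facility for waiting until worker threads leave the library before it is unloaded. A hypothetical user-side sketch is shown below; the preview macro name and the exact public spelling of task_scheduler_handle and finalize are assumptions and should be checked against the headers shipped with this release:

```cpp
// Hypothetical usage of the preview "waiting for workers" facility that the new
// get()/finalize() entry points back. Macro and API spelling are assumptions.
#define TBB_PREVIEW_WAITING_FOR_WORKERS 1
#include <tbb/global_control.h>
#include <tbb/parallel_for.h>
#include <new>   // std::nothrow

int main() {
    // Grab a handle early so the library lifetime can be awaited later.
    tbb::task_scheduler_handle handle = tbb::task_scheduler_handle::get();

    tbb::parallel_for(0, 1000, [](int) { /* some work */ });

    // Blocks until worker threads have left the library; returns false if that
    // cannot be guaranteed (for example, when called inside a parallel region).
    bool workers_left = tbb::finalize(handle, std::nothrow);
    return workers_left ? 0 : 1;
}
```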
#if __TBB_NUMA_SUPPORT
#if __TBB_WEAK_SYMBOLS_PRESENT


@ -76,11 +76,6 @@ private:
static rml::tbb_server* create_rml_server ( rml::tbb_client& );
//! The internal routine to undo automatic initialization.
/** The signature is written with void* so that the routine
can be the destructor argument to pthread_key_create. */
static void auto_terminate(void* tls);
public:
static unsigned default_num_threads () {
// No memory fence required, because at worst each invoking thread calls AvailableHwConcurrency once.
@ -97,6 +92,11 @@ public:
The auto_init argument specifies if the call is due to automatic initialization. **/
static void init_external_thread();
//! The routine to undo automatic initialization.
/** The signature is written with void* so that the routine
can be the destructor argument to pthread_key_create. */
static void auto_terminate(void* tls);
//! Obtain the thread-local instance of the thread data.
/** If the scheduler has not been initialized yet, initialization is done automatically.
Note that auto-initialized scheduler instance is destroyed only when its thread terminates. **/


@ -77,12 +77,18 @@ extern const tchar
#define ITT_FINI_ITTLIB() __itt_fini_ittlib()
#define ITT_SYNC_CREATE(obj, type, name) __itt_sync_create((void*)(obj), type, name, 2)
#define ITT_STACK_CREATE(obj) obj = __itt_stack_caller_create()
#define ITT_STACK(precond, name, obj) (precond) ? __itt_stack_##name(static_cast<__itt_caller>(obj)) : ((void)0);
#define ITT_STACK_DESTROY(obj) (obj!=nullptr) ? __itt_stack_caller_destroy(static_cast<__itt_caller>(obj)) : ((void)0)
#define ITT_CALLEE_ENTER(cond, t, obj) if(cond) {\
__itt_stack_callee_enter(static_cast<__itt_caller>(obj));\
__itt_sync_acquired(t);\
}
#define ITT_CALLEE_LEAVE(cond, obj) (cond) ? __itt_stack_callee_leave(static_cast<__itt_caller>(obj)) : ((void)0)
#define ITT_TASK_GROUP(obj,name,parent) r1::itt_make_task_group(d1::ITT_DOMAIN_MAIN,(void*)(obj),ALGORITHM,(void*)(parent),(parent!=nullptr) ? ALGORITHM : FLOW_NULL,name)
#define ITT_TASK_BEGIN(obj,name,id) r1::itt_task_begin(d1::ITT_DOMAIN_MAIN,(void*)(id),ALGORITHM,(void*)(obj),ALGORITHM,name)
#define ITT_TASK_END r1::itt_task_end(d1::ITT_DOMAIN_MAIN)
#else /* !__TBB_USE_ITT_NOTIFY */
#define ITT_NOTIFY(name,obj) ((void)0)
@ -90,8 +96,9 @@ extern const tchar
#define ITT_FINI_ITTLIB() ((void)0)
#define ITT_SYNC_CREATE(obj, type, name) ((void)0)
#define ITT_STACK_CREATE(obj) ((void)0)
#define ITT_STACK(precond, name, obj) ((void)0)
#define ITT_STACK_DESTROY(obj) ((void)0)
#define ITT_CALLEE_ENTER(cond, t, obj) ((void)0)
#define ITT_CALLEE_LEAVE(cond, obj) ((void)0)
#define ITT_TASK_GROUP(type,name,parent) ((void)0)
#define ITT_TASK_BEGIN(type,name,id) ((void)0)
#define ITT_TASK_END ((void)0)


@ -225,11 +225,6 @@ public:
bool is_idle_state ( bool value ) const {
return !my_putter || my_putter->my_is_idle == value;
}
#if __TBB_USE_ITT_NOTIFY
//! Get pointer to corresponding outbox used for ITT_NOTIFY calls.
void* outbox() const {return my_putter;}
#endif /* __TBB_USE_ITT_NOTIFY */
}; // class mail_inbox
} // namespace r1


@ -85,12 +85,11 @@ static unsigned calc_workers_soft_limit(unsigned workers_soft_limit, unsigned wo
return workers_soft_limit;
}
market& market::global_market ( bool is_public, unsigned workers_requested, std::size_t stack_size ) {
global_market_mutex_type::scoped_lock lock( theMarketMutex );
bool market::add_ref_unsafe( global_market_mutex_type::scoped_lock& lock, bool is_public, unsigned workers_requested, std::size_t stack_size ) {
market *m = theMarket;
if( m ) {
++m->my_ref_count;
const unsigned old_public_count = is_public? m->my_public_ref_count++ : /*any non-zero value*/1;
const unsigned old_public_count = is_public ? m->my_public_ref_count++ : /*any non-zero value*/1;
lock.release();
if( old_public_count==0 )
set_active_num_workers( calc_workers_soft_limit(workers_requested, m->my_num_workers_hard_limit) );
@ -114,10 +113,15 @@ market& market::global_market ( bool is_public, unsigned workers_requested, std:
}
if( m->my_stack_size < stack_size )
runtime_warning( "Thread stack size has been already set to %u. "
"The request for larger stack (%u) cannot be satisfied.\n",
m->my_stack_size, stack_size );
"The request for larger stack (%u) cannot be satisfied.\n", m->my_stack_size, stack_size );
return true;
}
else {
return false;
}
market& market::global_market(bool is_public, unsigned workers_requested, std::size_t stack_size) {
global_market_mutex_type::scoped_lock lock( theMarketMutex );
if( !market::add_ref_unsafe(lock, is_public, workers_requested, stack_size) ) {
// TODO: A lot is done under theMarketMutex locked. Can anything be moved out?
if( stack_size == 0 )
stack_size = global_control::active_value(global_control::thread_stack_size);
@ -140,16 +144,22 @@ market& market::global_market ( bool is_public, unsigned workers_requested, std:
void* storage = cache_aligned_allocate(size);
std::memset( storage, 0, size );
// Initialize and publish global market
m = new (storage) market( workers_soft_limit, workers_hard_limit, stack_size );
market* m = new (storage) market( workers_soft_limit, workers_hard_limit, stack_size );
if( is_public )
m->my_public_ref_count.store(1, std::memory_order_relaxed);
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
if (market::is_lifetime_control_present()) {
++m->my_public_ref_count;
++m->my_ref_count;
}
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
theMarket = m;
// This check relies on the fact that for shared RML default_concurrency==max_concurrency
if ( !governor::UsePrivateRML && m->my_server->default_concurrency() < workers_soft_limit )
runtime_warning( "RML might limit the number of workers to %u while %u is requested.\n"
, m->my_server->default_concurrency(), workers_soft_limit );
}
return *m;
return *theMarket;
}
void market::destroy () {
@ -214,7 +224,7 @@ int market::update_workers_request() {
my_num_workers_requested = 1;
}
#endif
update_allotment();
update_allotment(my_num_workers_requested);
return my_num_workers_requested - old_request;
}
@ -511,7 +521,12 @@ void market::adjust_demand ( arena& a, int delta ) {
my_total_demand += delta;
my_priority_level_demand[a.my_priority_level] += delta;
unsigned effective_soft_limit = my_num_workers_soft_limit.load(std::memory_order_relaxed);
update_allotment();
if (my_mandatory_num_requested > 0) {
__TBB_ASSERT(effective_soft_limit == 0, NULL);
effective_soft_limit = 1;
}
update_allotment(effective_soft_limit);
if ( delta > 0 ) {
// can't overflow soft_limit, but remember values request by arenas in
// my_total_demand to not prematurely release workers to RML


@ -34,6 +34,13 @@
namespace tbb {
namespace detail {
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
namespace d1 {
class task_scheduler_handle;
}
#endif
namespace r1 {
class task_arena_base;
@ -49,11 +56,19 @@ class market : no_copy, rml::tbb_client {
template<typename SchedulerTraits> friend class custom_scheduler;
friend class task_group_context;
friend class governor;
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
friend class lifetime_control;
#endif
public:
//! Keys for the arena map array. The lower the value the higher priority of the arena list.
static constexpr unsigned num_priority_levels = 3;
private:
friend void ITT_DoUnsafeOneTimeInitialization ();
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
friend bool finalize_impl(d1::task_scheduler_handle& handle);
#endif
typedef intrusive_list<arena> arena_list_type;
typedef intrusive_list<thread_data> thread_data_list_type;
@ -141,10 +156,10 @@ private:
//! Recalculates the number of workers assigned to each arena in the list.
/** The actual number of workers servicing a particular arena may temporarily
deviate from the calculated value. **/
void update_allotment () {
void update_allotment (unsigned effective_soft_limit) {
if ( my_total_demand )
update_allotment( my_arenas, my_total_demand,
(int)my_num_workers_soft_limit.load(std::memory_order_relaxed) );
(int)effective_soft_limit );
}
//! Returns next arena that needs more workers, or NULL.
@ -178,8 +193,6 @@ private:
std::size_t min_stack_size () const override { return worker_stack_size(); }
policy_type policy () const override { return throughput; }
job* create_one_job () override;
void cleanup( job& j ) override;
@ -190,7 +203,10 @@ private:
public:
//! Factory method creating new market object
static market& global_market(bool is_public, unsigned max_num_workers = 0, std::size_t stack_size = 0);
static market& global_market( bool is_public, unsigned max_num_workers = 0, std::size_t stack_size = 0 );
//! Add reference to market if theMarket exists
static bool add_ref_unsafe( global_market_mutex_type::scoped_lock& lock, bool is_public, unsigned max_num_workers = 0, std::size_t stack_size = 0 );
//! Creates an arena object
/** If necessary, also creates global market instance, and boosts its ref count.
@ -237,6 +253,11 @@ public:
//! Reports active parallelism level according to user's settings
static unsigned app_parallelism_limit();
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
//! Reports if any active global lifetime references are present
static unsigned is_lifetime_control_present();
#endif
//! Finds all contexts affected by the state change and propagates the new state to them.
/** The propagation is relayed to the market because tasks created by one
master thread can be passed to and executed by other masters. This means


@ -153,8 +153,7 @@ public:
}
//! Construct a random number generator.
FastRandom( void* unique_ptr ) { init(uintptr_t(unique_ptr)); }
FastRandom( uint32_t seed) { init(seed); }
FastRandom( uint64_t seed) { init(seed); }
template <typename T>
void init( T seed ) {
init(seed,int_to_type<sizeof(seed)>());


@ -282,7 +282,7 @@ private:
ITT_NOTIFY( sync_releasing, &my_pipeline.input_tokens );
if( (my_pipeline.input_tokens.fetch_sub(1, std::memory_order_relaxed)) > 1 ) {
d1::small_object_allocator alloc{};
spawn( *alloc.new_object<stage_task>(ed, my_pipeline, alloc ), my_pipeline.my_context );
r1::spawn( *alloc.new_object<stage_task>(ed, my_pipeline, alloc ), my_pipeline.my_context );
}
}
@ -342,7 +342,7 @@ public:
void spawn_stage_task(const task_info& info, d1::execution_data& ed) {
d1::small_object_allocator alloc{};
stage_task* clone = alloc.new_object<stage_task>(ed, my_pipeline, my_filter, info, alloc);
spawn(*clone, my_pipeline.my_context);
r1::spawn(*clone, my_pipeline.my_context);
}
};
@ -452,7 +452,7 @@ void __TBB_EXPORTED_FUNC parallel_pipeline(d1::task_group_context& cxt, std::siz
stage_task& st = *alloc.new_object<stage_task>(pipe, alloc);
// Start execution of tasks
execute_and_wait(st, cxt, pipe.wait_ctx, cxt);
r1::execute_and_wait(st, cxt, pipe.wait_ctx, cxt);
}
void __TBB_EXPORTED_FUNC set_end_of_input(d1::base_filter& bf) {


@ -80,7 +80,7 @@ private:
friend class private_server;
//! Actions executed by the associated thread
void run();
void run() noexcept;
//! Wake up associated thread (or launch a thread if there is none)
void wake_or_launch();
@ -253,7 +253,7 @@ void private_worker::start_shutdown() {
}
}
void private_worker::run() {
void private_worker::run() noexcept {
my_server.propagate_chain_reaction();
// Transiting to st_normal here would require setting my_handle,


@ -101,20 +101,21 @@ void ITT_DoOneTimeInitialization() {
}
}
void __TBB_EXPORTED_FUNC create_itt_sync(void* ptr, const tchar* objtype, const tchar* objname) {
void create_itt_sync(void* ptr, const tchar* objtype, const tchar* objname) {
ITT_SYNC_CREATE(ptr, objtype, objname);
}
void __TBB_EXPORTED_FUNC call_itt_notify(int t, void *ptr) {
void call_itt_notify(int t, void *ptr) {
switch (t) {
case 0: ITT_NOTIFY(sync_prepare, ptr); break;
case 1: ITT_NOTIFY(sync_cancel, ptr); break;
case 2: ITT_NOTIFY(sync_acquired, ptr); break;
case 3: ITT_NOTIFY(sync_releasing, ptr); break;
case 4: ITT_NOTIFY(sync_destroy, ptr); break;
}
}
void __TBB_EXPORTED_FUNC itt_set_sync_name(void* obj, const tchar* name) {
void itt_set_sync_name(void* obj, const tchar* name) {
__itt_sync_rename(obj, name);
}


@ -46,6 +46,7 @@ public:
/** The version number is incremented when an incompatible change is introduced.
The version number is invariant for the lifetime of the object. */
virtual version_type version() const RML_PURE(version_type)
};
//! Represents a client's job for an execution context.
@ -53,11 +54,6 @@ public:
Not derived from versioned_object because version is same as for client. */
class job {
friend class server;
//! Word for use by server
/** Typically the server uses it to speed up internal lookup.
Clients must not modify the word. */
void* scratch_ptr;
};
//! Information that client provides to server when asking for a server.
@ -85,11 +81,6 @@ public:
after cleanup(job) has been called for each job. */
virtual void acknowledge_close_connection() RML_PURE(void)
enum policy_type {turnaround,throughput};
//! Inform server of desired policy. [idempotent]
virtual policy_type policy() const RML_PURE(policy_type)
//! Inform client that server is done with *this.
/** Client should destroy the job.
Not necessarily called by execution context represented by *this.
@ -136,9 +127,6 @@ public:
/** Normally, the value is the hardware concurrency minus one.
The "minus one" accounts for the thread created by main(). */
virtual unsigned default_concurrency() const = 0;
protected:
static void*& scratch_ptr( job& j ) {return j.scratch_ptr;}
};
class factory {
@ -151,9 +139,6 @@ public:
st_incompatible
};
//! Scratch pointer for use by RML.
void* scratch_ptr;
protected:
//! Pointer to routine that waits for server to indicate when client can close itself.
status_type (*my_wait_to_close_routine)( factory& );


@ -62,6 +62,10 @@
namespace tbb {
namespace detail {
namespace r1 {
// Forward declaration: throws std::runtime_error with what() returning error_code description prefixed with aux_info
void handle_perror(int error_code, const char* aux_info);
namespace rml {
namespace internal {
@ -117,10 +121,6 @@ public:
static handle_type launch( thread_routine_type thread_routine, void* arg, std::size_t stack_size );
#endif /* __TBB_USE_POSIX */
//! Yield control to OS
/** Affects the calling thread. **/
static void yield();
//! Join thread
static void join(handle_type handle);
@ -156,8 +156,7 @@ inline thread_monitor::handle_type thread_monitor::launch( thread_routine_type t
unsigned create_flags = ( number_of_processor_groups > 1 ) ? CREATE_SUSPENDED : 0;
HANDLE h = (HANDLE)_beginthreadex( NULL, unsigned(stack_size), thread_routine, arg, STACK_SIZE_PARAM_IS_A_RESERVATION | create_flags, &thread_id );
if( !h ) {
fprintf(stderr,"thread_monitor::launch: _beginthreadex failed\n");
exit(1);
handle_perror(0, "thread_monitor::launch: _beginthreadex failed\n");
}
if ( number_of_processor_groups > 1 ) {
MoveThreadIntoProcessorGroup( h, FindProcessorGroupIndex( static_cast<int>(*worker_index) ) );
@ -188,17 +187,12 @@ void thread_monitor::detach_thread(handle_type handle) {
__TBB_ASSERT( val, NULL );
}
inline void thread_monitor::yield() {
d0::yield();
}
#endif /* __TBB_USE_WINAPI */
#if __TBB_USE_POSIX
// TODO: can we throw exceptions instead of termination?
inline void thread_monitor::check( int error_code, const char* routine ) {
if( error_code ) {
fprintf(stderr,"thread_monitor %s in %s\n", strerror(error_code), routine );
exit(1);
handle_perror(error_code, routine);
}
}
@ -207,25 +201,21 @@ inline thread_monitor::handle_type thread_monitor::launch( void* (*thread_routin
// Note that there are some tricky situations to deal with, such as when the thread has already
// been grabbed as part of an OpenMP team.
pthread_attr_t s;
check(pthread_attr_init( &s ), "pthread_attr_init");
check(pthread_attr_init( &s ), "pthread_attr_init has failed");
if( stack_size>0 )
check(pthread_attr_setstacksize( &s, stack_size ), "pthread_attr_setstack_size" );
check(pthread_attr_setstacksize( &s, stack_size ), "pthread_attr_setstack_size has failed" );
pthread_t handle;
check( pthread_create( &handle, &s, thread_routine, arg ), "pthread_create" );
check( pthread_attr_destroy( &s ), "pthread_attr_destroy" );
check( pthread_create( &handle, &s, thread_routine, arg ), "pthread_create has failed" );
check( pthread_attr_destroy( &s ), "pthread_attr_destroy has failed" );
return handle;
}
void thread_monitor::join(handle_type handle) {
check(pthread_join(handle, NULL), "pthread_join");
check(pthread_join(handle, NULL), "pthread_join has failed");
}
void thread_monitor::detach_thread(handle_type handle) {
check(pthread_detach(handle), "pthread_detach");
}
inline void thread_monitor::yield() {
sched_yield();
check(pthread_detach(handle), "pthread_detach has failed");
}
#endif /* __TBB_USE_POSIX */
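The POSIX branch now routes pthread error codes through handle_perror, which raises std::runtime_error with a strerror() description instead of printing to stderr and calling exit(). A standalone equivalent of that check, using std::system_error from the standard library in place of the internal helper:

```cpp
// Standalone analogue of thread_monitor::check(): turn a nonzero pthread
// error code into an exception carrying the system error description.
#include <pthread.h>
#include <system_error>

static void check(int error_code, const char* what) {
    if (error_code)
        throw std::system_error(error_code, std::generic_category(), what);
}

static void* routine(void*) { return nullptr; }

int main() {
    pthread_attr_t attr;
    check(pthread_attr_init(&attr), "pthread_attr_init has failed");
    check(pthread_attr_setstacksize(&attr, 1 << 20), "pthread_attr_setstacksize has failed");
    pthread_t handle;
    check(pthread_create(&handle, &attr, routine, nullptr), "pthread_create has failed");
    check(pthread_attr_destroy(&attr), "pthread_attr_destroy has failed");
    check(pthread_join(handle, nullptr), "pthread_join has failed");
}
```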


@ -90,10 +90,6 @@ struct task_accessor {
task_group_context** tgc = reinterpret_cast<task_group_context**>(&t.m_reserved[0]);
return *tgc;
}
static d1::task*& next(d1::task& t) {
d1::task** p = reinterpret_cast<d1::task**>(&t.m_reserved[1]);
return *p;
}
static isolation_type& isolation(d1::task& t) {
isolation_type* tag = reinterpret_cast<isolation_type*>(&t.m_reserved[2]);
return *tag;
@ -168,12 +164,14 @@ public:
curr_ctx = ctx;
}
}
#if _WIN64
void restore_default() {
if (curr_cpu_ctl_env != guard_cpu_ctl_env) {
guard_cpu_ctl_env.set_env();
curr_cpu_ctl_env = guard_cpu_ctl_env;
}
}
#endif // _WIN64
};
#if (_WIN32 || _WIN64 || __linux__) && (__TBB_x86_32 || __TBB_x86_64)
@ -293,14 +291,6 @@ private:
tbb_exception_ptr(const std::exception_ptr& src) : my_ptr(src) {}
}; // class tbb_exception_ptr
#define TbbCatchAll(context) \
catch ( ... ) { \
if ( context->cancel_group_execution() ) { \
/* We are the first to signal cancellation, so store the exception that caused it. */ \
context->my_exception = tbb_exception_ptr::allocate(); \
} \
}
//------------------------------------------------------------------------
// Debugging support
//------------------------------------------------------------------------
@ -315,17 +305,11 @@ inline void poison_value(std::atomic<std::uintptr_t>& val) { val.store(venom, st
/** Expected to be used in assertions only, thus no empty form is defined. **/
inline bool is_alive(std::uintptr_t v) { return v != venom; }
/** Expected to be used in assertions only, thus no empty form is defined. **/
inline bool is_alive(const std::atomic<std::uintptr_t>& v) { return v.load(std::memory_order_relaxed) != venom; }
/** Logically, this method should be a member of class task.
But we do not want to publish it, so it is here instead. */
inline void assert_task_valid(const d1::task* t) {
assert_pointer_valid(t);
}
inline void assert_task_valid(const d1::task& t) {
assert_task_valid(&t);
}
#else /* !TBB_USE_ASSERT */
/** In contrast to debug version poison_value() is a macro here because
@ -462,7 +446,7 @@ public:
isolation_type, bool /*critical_allowed*/);
#if __TBB_RESUMABLE_TASKS
void co_local_wait_for_all();
/* [[noreturn]] */ void co_local_wait_for_all() noexcept;
void suspend(suspend_callback_type suspend_callback, void* user_callback);
void resume(task_dispatcher& target);
suspend_point_type* get_suspend_point();
@ -489,7 +473,6 @@ struct task_group_context_impl {
static void reset(d1::task_group_context&);
static void capture_fp_settings(d1::task_group_context&);
static void copy_fp_settings(d1::task_group_context& ctx, const d1::task_group_context& src);
static void register_pending_exception(d1::task_group_context& ctx);
};


@ -114,11 +114,12 @@ static inline int futex_wakeup_one( void *futex ) {
return r;
}
static inline int futex_wakeup_all( void *futex ) {
int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,INT_MAX,NULL,NULL,0 );
__TBB_ASSERT( r>=0, "futex_wakeup_all: error in waking up threads" );
return r;
}
// Additional possible methods that are not required right now
// static inline int futex_wakeup_all( void *futex ) {
// int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,INT_MAX,NULL,NULL,0 );
// __TBB_ASSERT( r>=0, "futex_wakeup_all: error in waking up threads" );
// return r;
// }
#endif // __TBB_USE_FUTEX
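futex_wakeup_all is retired to a comment, but the remaining wrappers follow the usual raw futex pattern: sleep only while the futex word still holds an expected value, and wake a bounded number of waiters. A minimal Linux-only sketch of such a pair (the use of the PRIVATE futex variants here is an assumption; the header's own flags may differ):

```cpp
// Minimal Linux futex wait/wake pair, mirroring the wrappers in this header.
#include <atomic>
#include <climits>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static int futex_wait(std::atomic<int>* addr, int expected) {
    // Sleeps only if *addr still equals `expected`, so a lost-wakeup check is built in.
    return static_cast<int>(::syscall(SYS_futex, addr, FUTEX_WAIT_PRIVATE, expected,
                                      nullptr, nullptr, 0));
}

static int futex_wake(std::atomic<int>* addr, int count) {
    // count == 1 wakes one waiter; count == INT_MAX wakes all of them.
    return static_cast<int>(::syscall(SYS_futex, addr, FUTEX_WAKE_PRIVATE, count,
                                      nullptr, nullptr, 0));
}

int main() {
    std::atomic<int> word{1};
    futex_wait(&word, 0);        // returns immediately: the word is not 0
    futex_wake(&word, INT_MAX);  // no waiters, so nobody is woken; returns 0
}
```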


@ -242,9 +242,9 @@ public:
};
#if _WIN32
/* [[noreturn]] */ void __stdcall co_local_wait_for_all(void* arg)
/* [[noreturn]] */ void __stdcall co_local_wait_for_all(void* arg) noexcept
#else
/* [[noreturn]] */ void co_local_wait_for_all(void* arg)
/* [[noreturn]] */ void co_local_wait_for_all(void* arg) noexcept
#endif
{
// Do not create non-trivial objects on the stack of this function. They will never be destroyed.
@ -258,7 +258,7 @@ public:
// This code is unreachable
}
/* [[noreturn]] */ void task_dispatcher::co_local_wait_for_all() {
/* [[noreturn]] */ void task_dispatcher::co_local_wait_for_all() noexcept {
// Do not create non-trivial objects on the stack of this function. They will never be destroyed.
assert_pointer_valid(m_thread_data);


@ -19,6 +19,7 @@
#include "tbb/detail/_utils.h"
#include "tbb/detail/_task.h"
#include "tbb/global_control.h"
#include "scheduler_common.h"
#include "arena_slot.h"
@ -50,6 +51,8 @@ inline d1::task* get_self_recall_task(arena_slot& slot) {
return t;
}
// Defined in exception.cpp
/*[[noreturn]]*/void do_throw_noexcept(void (*throw_exception)()) noexcept;
//------------------------------------------------------------------------
// Suspend point
@ -280,7 +283,7 @@ d1::task* task_dispatcher::local_wait_for_all(d1::task* t, Waiter& waiter ) {
__TBB_ASSERT(ed.context->my_lifetime_state > d1::task_group_context::lifetime_state::locked &&
ed.context->my_lifetime_state < d1::task_group_context::lifetime_state::dying, nullptr);
__TBB_ASSERT(m_thread_data->my_inbox.is_idle_state(false), nullptr);
__TBB_ASSERT(isolation == no_isolation || isolation == ed.isolation, nullptr);
__TBB_ASSERT(task_accessor::is_resume_task(*t) || isolation == no_isolation || isolation == ed.isolation, nullptr);
// Check premature leave
if (Waiter::postpone_execution(*t)) {
__TBB_ASSERT(task_accessor::is_resume_task(*t) && dl_guard.old_properties.outermost,
@ -290,13 +293,17 @@ d1::task* task_dispatcher::local_wait_for_all(d1::task* t, Waiter& waiter ) {
// Copy itt_caller to a stack because the context might be destroyed after t->execute.
void* itt_caller = ed.context->my_itt_caller;
suppress_unused_warning(itt_caller);
ITT_STACK(ITTPossible, callee_enter, itt_caller);
ITT_CALLEE_ENTER(ITTPossible, t, itt_caller);
if (ed.context->is_group_execution_cancelled()) {
t = t->cancel(ed);
} else {
t = t->execute(ed);
}
ITT_STACK(ITTPossible, callee_leave, itt_caller);
ITT_CALLEE_LEAVE(ITTPossible, itt_caller);
// The task affinity in execution data is set for affinitized tasks.
// So drop it after the task execution.
ed.affinity_slot = d1::no_slot;
@ -327,6 +334,9 @@ d1::task* task_dispatcher::local_wait_for_all(d1::task* t, Waiter& waiter ) {
} while (t != nullptr); // main dispatch loop
break; // Exit exception loop;
} catch (...) {
if (global_control::active_value(global_control::terminate_on_exception) == 1) {
do_throw_noexcept([] { throw; });
}
if (ed.context->cancel_group_execution()) {
/* We are the first to signal cancellation, so store the exception that caused it. */
ed.context->my_exception = tbb_exception_ptr::allocate();
@ -390,7 +400,7 @@ inline d1::task* task_dispatcher::get_critical_task(d1::task* t, execution_data_
assert_task_valid(crit_t);
if (t != nullptr) {
assert_pointer_valid</*alignment = */alignof(void*)>(ed.context);
spawn(*t, *ed.context );
r1::spawn(*t, *ed.context);
}
ed.context = task_accessor::context(*crit_t);
ed.isolation = task_accessor::isolation(*crit_t);
@ -418,7 +428,6 @@ inline d1::task* task_dispatcher::get_critical_task(d1::task* t, execution_data_
inline d1::task* task_dispatcher::get_mailbox_task(mail_inbox& my_inbox, execution_data_ext& ed, isolation_type isolation) {
while (task_proxy* const tp = my_inbox.pop(isolation)) {
if (d1::task* result = tp->extract_task<task_proxy::mailbox_bit>()) {
ITT_NOTIFY(sync_acquired, my_inbox.outbox());
ed.original_slot = (unsigned short)(-2);
ed.affinity_slot = ed.task_disp->m_thread_data->my_arena_index;
return result;


@ -124,8 +124,9 @@ void task_group_context_impl::destroy(d1::task_group_context& ctx) {
if (ctx.my_exception)
ctx.my_exception->destroy();
ITT_STACK(ctx.my_itt_caller != nullptr, caller_destroy, static_cast<__itt_caller>(ctx.my_itt_caller));
ITT_STACK_DESTROY(ctx.my_itt_caller);
poison_pointer(ctx.my_parent);
poison_pointer(ctx.my_parent);
poison_pointer(ctx.my_owner);
poison_pointer(ctx.my_node.next);
@ -356,17 +357,6 @@ void task_group_context_impl::copy_fp_settings(d1::task_group_context& ctx, cons
ctx.my_traits.fp_settings = true;
}
void task_group_context_impl::register_pending_exception(d1::task_group_context& ctx) {
__TBB_ASSERT(!is_poisoned(ctx.my_owner), NULL);
if (ctx.my_cancellation_requested.load(std::memory_order_relaxed))
return;
#if TBB_USE_EXCEPTIONS
try {
throw;
} TbbCatchAll((&ctx));
#endif /* TBB_USE_EXCEPTIONS */
}
template <typename T>
void thread_data::propagate_task_group_state(std::atomic<T> d1::task_group_context::* mptr_state, d1::task_group_context& src, T new_state) {
spin_mutex::scoped_lock lock(my_context_list_state.mutex);
@ -482,9 +472,6 @@ bool __TBB_EXPORTED_FUNC cancel_group_execution(d1::task_group_context& ctx) {
bool __TBB_EXPORTED_FUNC is_group_execution_cancelled(d1::task_group_context& ctx) {
return task_group_context_impl::is_group_execution_cancelled(ctx);
}
void __TBB_EXPORTED_FUNC register_pending_exception(d1::task_group_context& ctx) {
task_group_context_impl::register_pending_exception(ctx);
}
void __TBB_EXPORTED_FUNC capture_fp_settings(d1::task_group_context& ctx) {
task_group_context_impl::capture_fp_settings(ctx);
}
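register_pending_exception and the TbbCatchAll macro are removed here, but the underlying technique, capturing the in-flight exception on first cancellation and rethrowing it at the wait point, is plain std::exception_ptr. A condensed illustration:

```cpp
// Capture the current exception in a catch(...) block and rethrow it later,
// as the removed TbbCatchAll/register_pending_exception machinery did.
#include <cstdio>
#include <exception>
#include <stdexcept>

int main() {
    std::exception_ptr pending;            // plays the role of context->my_exception
    try {
        throw std::runtime_error("task failed");
    } catch (...) {
        if (!pending)                      // only the first failure is recorded
            pending = std::current_exception();
    }
    // Later, at the wait point, surface the stored exception to the caller:
    try {
        if (pending) std::rethrow_exception(pending);
    } catch (const std::exception& e) {
        std::printf("rethrown: %s\n", e.what());
    }
}
```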


@ -115,17 +115,12 @@ struct preceding_lane_selector : lane_selector_base {
}
};
class task_stream_base : no_copy {
protected:
using lane_t = queue_and_mutex <d1::task*, spin_mutex>;
};
//! Specializes from which side of the underlying container elements are retrieved. Method must be
//! called under corresponding mutex locked.
template<task_stream_accessor_type accessor>
class task_stream_accessor : public task_stream_base {
class task_stream_accessor : no_copy {
protected:
using task_stream_base::lane_t;
using lane_t = queue_and_mutex <d1::task*, spin_mutex>;
d1::task* get_item( lane_t::queue_base_t& queue ) {
d1::task* result = queue.front();
queue.pop_front();
@ -134,14 +129,19 @@ protected:
};
template<>
class task_stream_accessor< back_nonnull_accessor > : public task_stream_base {
class task_stream_accessor< back_nonnull_accessor > : no_copy {
protected:
using lane_t = queue_and_mutex <d1::task*, spin_mutex>;
d1::task* get_item( lane_t::queue_base_t& queue ) {
d1::task* result = NULL;
d1::task* result = nullptr;
__TBB_ASSERT(!queue.empty(), nullptr);
// Isolated task can put zeros in queue see look_specific
do {
result = queue.back();
queue.pop_back();
} while( !result && !queue.empty() );
} while ( !result && !queue.empty() );
__TBB_ASSERT_RELEASE(result, nullptr);
return result;
}
};
@ -150,36 +150,32 @@ protected:
template<task_stream_accessor_type accessor>
class task_stream : public task_stream_accessor< accessor > {
using lane_t = typename task_stream_accessor<accessor>::lane_t;
std::atomic<population_t> population;
lane_t* lanes;
unsigned N;
std::atomic<population_t> population{};
lane_t* lanes{nullptr};
unsigned N{};
public:
task_stream() : N() {
population = 0;
lanes = nullptr;
}
task_stream() = default;
void initialize( unsigned n_lanes ) {
const unsigned max_lanes = sizeof(population_t) * CHAR_BIT;
N = n_lanes>=max_lanes ? max_lanes : n_lanes>2 ? 1<<(tbb::detail::log2(n_lanes-1)+1) : 2;
__TBB_ASSERT( N==max_lanes || N>=n_lanes && ((N-1)&N)==0, "number of lanes miscalculated");
N = n_lanes >= max_lanes ? max_lanes : n_lanes > 2 ? 1 << (tbb::detail::log2(n_lanes - 1) + 1) : 2;
__TBB_ASSERT( N == max_lanes || N >= n_lanes && ((N - 1) & N) == 0, "number of lanes miscalculated" );
__TBB_ASSERT( N <= sizeof(population_t) * CHAR_BIT, NULL );
lanes = static_cast<lane_t*>(cache_aligned_allocate(sizeof(lane_t) * N));
for (unsigned i = 0; i < N; ++i) {
new (lanes+i) lane_t;
new (lanes + i) lane_t;
}
__TBB_ASSERT( !population.load(std::memory_order_relaxed), NULL );
}
~task_stream() {
if (lanes) {
for (unsigned i = 0; i < N; ++i) {
lanes[i].~lane_t();
}
cache_aligned_deallocate(lanes);
__TBB_ASSERT(lanes, "Initialize wasn't called");
for (unsigned i = 0; i < N; ++i) {
lanes[i].~lane_t();
}
cache_aligned_deallocate(lanes);
}
//! Push a task into a lane. Lane selection is performed by passed functor.
@ -234,28 +230,6 @@ public:
return !population.load(std::memory_order_relaxed);
}
//! Destroys all remaining tasks in every lane. Returns the number of destroyed tasks.
/** Tasks are not executed, because it would potentially create more tasks at a late stage.
The scheduler is really expected to execute all tasks before task_stream destruction. */
intptr_t drain() {
intptr_t result = 0;
for (unsigned i = 0; i < N; ++i) {
lane_t& lane = lanes[i];
spin_mutex::scoped_lock lock(lane.my_mutex);
for (typename lane_t::queue_base_t::iterator it=lane.my_queue.begin();
it!=lane.my_queue.end(); ++it, ++result)
{
__TBB_ASSERT( is_bit_set( population.load(std::memory_order_relaxed), i ), NULL );
// TODO: TBB_REVAMP_TODO - what to do with drained tasks
// task* t = *it;
// task::destroy(*t);
}
lane.my_queue.clear();
clear_one_bit( population, i );
}
return result;
}
private:
//! Returns true on successful push, otherwise - false.
bool try_push(d1::task* source, unsigned lane_idx ) {
@ -284,7 +258,7 @@ private:
}
// TODO: unify '*_specific' logic with 'pop' methods above
d1::task* look_specific( task_stream_base::lane_t::queue_base_t& queue, isolation_type isolation ) {
d1::task* look_specific( typename lane_t::queue_base_t& queue, isolation_type isolation ) {
__TBB_ASSERT( !queue.empty(), NULL );
// TODO: add a worst-case performance test and consider an alternative container with better
// performance for isolation search.


@ -86,35 +86,6 @@ public:
operator T() { return base::get(); }
};
template <typename T>
class tls<T*> : basic_tls<T*> {
typedef basic_tls<T*> base;
static void internal_dtor(void* ptr) {
if (ptr) delete (T*)ptr;
}
T* internal_get() {
T* result = base::get();
if (!result) {
result = new T;
base::set(result);
}
return result;
}
public:
tls() {
#if __TBB_USE_POSIX
base::create( internal_dtor );
#else
base::create();
#endif
}
~tls() { base::destroy(); }
T* operator=(T* value) { base::set(value); return value; }
operator T*() { return internal_get(); }
T* operator->() { return internal_get(); }
T& operator*() { return *internal_get(); }
};
} // namespace r1
} // namespace detail
} // namespace tbb


@ -15,11 +15,12 @@
add_library(tbbbind SHARED tbb_bind.cpp)
add_library(TBB::tbbbind ALIAS tbbbind)
find_package(HWLOC REQUIRED)
target_compile_definitions(tbbbind PRIVATE __TBBBIND_BUILD)
find_package(HWLOC 1.11 EXACT REQUIRED)
target_include_directories(tbbbind
PUBLIC
${HWLOC_INCLUDE_DIRS}
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../../include>
$<INSTALL_INTERFACE:include>)
@ -38,7 +39,8 @@ target_compile_options(tbbbind
set_target_properties(tbbbind PROPERTIES
LINK_FLAGS ${TBB_LINK_DEF_FILE_FLAG}${CMAKE_CURRENT_SOURCE_DIR}/def/${TBB_DEF_FILE_PREFIX}-tbbbind.def
DEFINE_SYMBOL ""
VERSION 3)
VERSION ${TBBBIND_BINARY_VERSION}.${TBB_BINARY_MINOR_VERSION}
SOVERSION ${TBBBIND_BINARY_VERSION})
# Prefer using target_link_options instead of target_link_libraries to specify link options because
# target_link_libraries may incorrectly handle some options (on Windows, for example).
@ -58,7 +60,7 @@ endif()
target_link_libraries(tbbbind
PUBLIC
${HWLOC_LIBRARIES}
HWLOC::hwloc
PRIVATE
${TBB_LIB_LINK_LIBS}
${TBB_COMMON_LINK_LIBS}


@ -55,7 +55,8 @@ target_compile_options(tbbmalloc
set_target_properties(tbbmalloc PROPERTIES
LINK_FLAGS ${TBB_LINK_DEF_FILE_FLAG}${CMAKE_CURRENT_SOURCE_DIR}/def/${TBB_DEF_FILE_PREFIX}-tbbmalloc.def
DEFINE_SYMBOL ""
VERSION ${TBBMALLOC_BINARY_VERSION})
VERSION ${TBBMALLOC_BINARY_VERSION}.${TBB_BINARY_MINOR_VERSION}
SOVERSION ${TBBMALLOC_BINARY_VERSION})
# Prefer using target_link_options instead of target_link_libraries to specify link options because
# target_link_libraries may incorrectly handle some options (on Windows, for example).


@ -18,6 +18,8 @@ add_library(tbbmalloc_proxy SHARED
add_library(TBB::tbbmalloc_proxy ALIAS tbbmalloc_proxy)
target_compile_definitions(tbbmalloc_proxy PRIVATE __TBBMALLOCPROXY_BUILD)
target_include_directories(tbbmalloc_proxy
PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../../include>
@ -34,7 +36,9 @@ target_compile_options(tbbmalloc_proxy
${TBB_COMMON_COMPILE_FLAGS}
)
set_target_properties(tbbmalloc_proxy PROPERTIES VERSION ${TBBMALLOC_BINARY_VERSION})
set_target_properties(tbbmalloc_proxy PROPERTIES
VERSION ${TBBMALLOC_BINARY_VERSION}.${TBB_BINARY_MINOR_VERSION}
SOVERSION ${TBBMALLOC_BINARY_VERSION})
if (UNIX AND NOT APPLE)
# Avoid use of target_link_libraries here as it changes /DEF option to \DEF on Windows.


@ -23,7 +23,6 @@
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE 1
#endif
#define __TBB_NO_IMPLICIT_LINKAGE 1
// no standard-conforming implementation of snprintf prior to VS 2015
#if !defined(_MSC_VER) || _MSC_VER>=1900


@ -119,7 +119,7 @@ static inline void initPageSize()
*/
// Starting from GCC 9, the -Wmissing-attributes warning was extended for alias below
#if __GNUC__ == 9
#if __GNUC__ >= 9
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmissing-attributes"
#endif
@ -275,7 +275,7 @@ void *aligned_alloc(size_t alignment, size_t size) __attribute__ ((alias ("memal
// as absent entry points are ignored by the linker.
// Starting from GCC 9, the -Wmissing-attributes warning was extended for aliases below
#if __GNUC__ == 9
#if __GNUC__ >= 9
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmissing-attributes"
#endif


@ -42,6 +42,10 @@ function(tbb_add_test)
-P ${PROJECT_SOURCE_DIR}/cmake/android/test_launcher.cmake)
else()
add_test(NAME ${_tbb_test_NAME} COMMAND ${_tbb_test_NAME} --force-colors=1 WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
# Additional testing scenarios if Intel(R) Software Development Emulator is found
if (UNIX AND ";test_mutex;conformance_mutex;" MATCHES ";${_tbb_test_NAME};" AND SDE_EXE)
add_test(NAME ${_tbb_test_NAME}_SDE COMMAND ${SDE_EXE} -nhm -rtm_mode disabled -- ./${_tbb_test_NAME} --force-colors=1 WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
endif()
endif()
# Prefer using target_link_options instead of target_link_libraries to specify link options because
@ -160,6 +164,20 @@ if (TBB_FOUND)
file(COPY ${TBB_LIB_FILE_LOCATION} DESTINATION ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) # TODO: check and update for multi-config generators.
endif()
# Find Intel(R) Software Development Emulator to run test_mutex and conformance_mutex for coverage
set(_sde_find_name sde)
if (UNIX AND TBB_ARCH EQUAL 64)
set(_sde_find_name sde64)
endif()
find_program(SDE_EXE
NAMES ${_sde_find_name}
PATHS ENV PATH
PATH_SUFFIXES bin)
unset(_sde_find_name)
# Define the tests
tbb_add_test(SUBDIR tbb NAME test_tick_count DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR tbb NAME test_allocators DEPENDENCIES TBB::tbb)
@ -220,6 +238,13 @@ tbb_add_test(SUBDIR tbb NAME test_semaphore DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR tbb NAME test_environment_whitebox DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR tbb NAME test_handle_perror DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR tbb NAME test_hw_concurrency DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR tbb NAME test_eh_thread DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR tbb NAME test_global_control DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR tbb NAME test_task DEPENDENCIES TBB::tbb)
if (NOT "${CMAKE_SYSTEM_PROCESSOR}" MATCHES "mips")
# TODO: Fix for MIPS
tbb_add_test(SUBDIR tbb NAME test_tbb_fork DEPENDENCIES TBB::tbb)
endif()
tbb_add_test(SUBDIR tbb NAME test_tbb_header DEPENDENCIES TBB::tbb)
target_sources(test_tbb_header PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/tbb/test_tbb_header_secondary.cpp)
@ -256,17 +281,50 @@ tbb_add_test(SUBDIR conformance NAME conformance_combinable DEPENDENCIES TBB::tb
tbb_add_test(SUBDIR conformance NAME conformance_concurrent_queue DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_resumable_tasks DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_version DEPENDENCIES TBB::tbb)
# functional nodes conformance
tbb_add_test(SUBDIR conformance NAME conformance_function_node DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_multifunction_node DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_input_node DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_continue_node DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_async_node DEPENDENCIES TBB::tbb)
# buffering nodes conformance
tbb_add_test(SUBDIR conformance NAME conformance_overwrite_node DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_write_once_node DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_buffer_node DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_queue_node DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_priority_queue_node DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_sequencer_node DEPENDENCIES TBB::tbb)
# service nodes conformance
tbb_add_test(SUBDIR conformance NAME conformance_limiter_node DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_broadcast_node DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_composite_node DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_indexer_node DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_split_node DEPENDENCIES TBB::tbb)
tbb_add_test(SUBDIR conformance NAME conformance_join_node DEPENDENCIES TBB::tbb)
# flow graph auxiliary conformance
# TODO: add conformance tests for graph_node, continue_msg, tagged_msg, copy_body, input_port, output_port, make_edge, remove_edge
tbb_add_test(SUBDIR conformance NAME conformance_graph DEPENDENCIES TBB::tbb)
if (MSVC AND CMAKE_VERSION VERSION_GREATER 3.13) # LINK_OPTIONS property first appeared in CMake 3.13
tbb_add_test(SUBDIR tbb NAME test_implicit_linkage_on_windows)
# TODO: consider setting environment instead of passing additional
# compiler and linker options
target_include_directories(test_implicit_linkage_on_windows PRIVATE
$<TARGET_PROPERTY:TBB::tbb,INTERFACE_INCLUDE_DIRECTORIES>)
set_target_properties(test_implicit_linkage_on_windows PROPERTIES
LINK_OPTIONS /LIBPATH:$<TARGET_LINKER_FILE_DIR:TBB::tbb>)
add_dependencies(test_implicit_linkage_on_windows TBB::tbb)
endif()
if (TBB_NUMA_SUPPORT)
find_package(HWLOC QUIET)
if (HWLOC_FOUND)
target_link_libraries(test_arena_constraints PRIVATE ${HWLOC_LIBRARIES})
target_include_directories(test_arena_constraints PRIVATE ${HWLOC_INCLUDE_DIRS})
target_link_libraries(test_arena_constraints PRIVATE HWLOC::hwloc)
target_compile_definitions(test_arena_constraints PRIVATE __TBB_HWLOC_PRESENT)
add_dependencies(test_arena_constraints TBB::tbbbind)
target_link_libraries(conformance_arena_constraints PRIVATE ${HWLOC_LIBRARIES})
target_include_directories(conformance_arena_constraints PRIVATE ${HWLOC_INCLUDE_DIRS})
target_link_libraries(conformance_arena_constraints PRIVATE HWLOC::hwloc)
target_compile_definitions(conformance_arena_constraints PRIVATE __TBB_HWLOC_PRESENT)
add_dependencies(conformance_arena_constraints TBB::tbbbind)
endif()


@ -18,6 +18,7 @@
#define __TBB_test_common_concurrent_ordered_common_H
#include "common/concurrent_associative_common.h"
#include "test_comparisons.h"
template<typename MyTable>
inline void CheckContainerAllocator(MyTable &table, size_t expected_allocs, size_t expected_frees, bool exact) {
@ -302,4 +303,61 @@ void check_heterogeneous_bound_functions() {
}
}
template <typename Container>
void test_comparisons_basic() {
using comparisons_testing::testEqualityAndLessComparisons;
Container c1, c2;
testEqualityAndLessComparisons</*ExpectEqual = */true, /*ExpectLess = */false>(c1, c2);
c1.insert(Value<Container>::make(1));
testEqualityAndLessComparisons</*ExpectEqual = */false, /*ExpectLess = */false>(c1, c2);
c2.insert(Value<Container>::make(1));
testEqualityAndLessComparisons</*ExpectEqual = */true, /*ExpectLess = */false>(c1, c2);
c2.insert(Value<Container>::make(2));
testEqualityAndLessComparisons</*ExpectEqual = */false, /*ExpectLess = */true>(c1, c2);
c1.clear();
c2.clear();
testEqualityAndLessComparisons</*ExpectEqual = */true, /*ExpectLess = */false>(c1, c2);
}
template <typename TwoWayComparableContainerType>
void test_two_way_comparable_container() {
TwoWayComparableContainerType c1, c2;
c1.insert(Value<TwoWayComparableContainerType>::make(1));
c2.insert(Value<TwoWayComparableContainerType>::make(1));
comparisons_testing::TwoWayComparable::reset();
REQUIRE_MESSAGE(!(c1 < c2), "Incorrect operator < result");
comparisons_testing::check_two_way_comparison();
REQUIRE_MESSAGE(!(c1 > c2), "Incorrect operator > result");
comparisons_testing::check_two_way_comparison();
REQUIRE_MESSAGE(c1 <= c2, "Incorrect operator <= result");
comparisons_testing::check_two_way_comparison();
REQUIRE_MESSAGE(c1 >= c2, "Incorrect operator >= result");
comparisons_testing::check_two_way_comparison();
}
template <template <typename...> class ContainerType>
void test_map_comparisons() {
using integral_container = ContainerType<int, int>;
using two_way_comparable_container = ContainerType<comparisons_testing::TwoWayComparable,
comparisons_testing::TwoWayComparable>;
test_comparisons_basic<integral_container>();
test_comparisons_basic<two_way_comparable_container>();
test_two_way_comparable_container<two_way_comparable_container>();
}
template <template <typename...> class ContainerType>
void test_set_comparisons() {
using integral_container = ContainerType<int>;
using two_way_comparable_container = ContainerType<comparisons_testing::TwoWayComparable>;
test_comparisons_basic<integral_container>();
test_comparisons_basic<two_way_comparable_container>();
test_two_way_comparable_container<two_way_comparable_container>();
}
#endif // __TBB_test_common_concurrent_ordered_common_H


@ -14,6 +14,9 @@
limitations under the License.
*/
#ifndef __TBB_test_common_exception_handling_H
#define __TBB_test_common_exception_handling_H
#include <typeinfo>
#include <thread>
@ -112,14 +115,16 @@ using PropagatedException = test_exception;
#if UTILS_EXCEPTION_HANDLING_SIMPLE_MODE
static void ThrowTestException () {
inline void ThrowTestException () {
++g_ExceptionsThrown;
throw test_exception(EXCEPTION_DESCR);
}
#else /* !UTILS_EXCEPTION_HANDLING_SIMPLE_MODE */
static void ThrowTestException ( intptr_t threshold ) {
constexpr std::intptr_t Existed = INT_MAX;
inline void ThrowTestException ( intptr_t threshold ) {
bool inMaster = (std::this_thread::get_id() == g_Master);
if ( !g_ThrowException || // if we're not supposed to throw
(!g_Flog && // if we're not catching throw in bodies and
@ -127,7 +132,7 @@ static void ThrowTestException ( intptr_t threshold ) {
// or are the master and the master is not the one to throw (??)
return;
}
while ( Existed() < threshold )
while ( Existed < threshold )
std::this_thread::yield();
if ( !g_SolitaryException ) {
++g_ExceptionsThrown;
@ -306,3 +311,5 @@ void RunCancellationTest ( intptr_t threshold = 1 )
tg.wait();
CATCH_AND_FAIL();
}
#endif // __TBB_test_common_exception_handling_H


@ -0,0 +1,138 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef __TBB_test_common_test_comparisons_H
#define __TBB_test_common_test_comparisons_H
#include "test.h"
namespace comparisons_testing {
template <bool ExpectEqual, bool ExpectLess, typename T>
void testTwoWayComparisons( const T& lhs, const T& rhs ) {
REQUIRE_MESSAGE((lhs < rhs == ExpectLess),
"Incorrect 2-way comparison result for less operation");
REQUIRE_MESSAGE((lhs <= rhs == (ExpectLess || ExpectEqual)),
"Incorrect 2-way comparison result for less or equal operation");
bool ExpectGreater = ExpectEqual ? false : !ExpectLess;
REQUIRE_MESSAGE((lhs > rhs == ExpectGreater),
"Incorrect 2-way comparison result for greater operation");
REQUIRE_MESSAGE((lhs >= rhs == (ExpectGreater || ExpectEqual)),
"Incorrect 2-way comparison result for greater or equal operation");
}
template <bool ExpectEqual, typename T>
void testEqualityComparisons( const T& lhs, const T& rhs ) {
REQUIRE_MESSAGE((lhs == rhs) == ExpectEqual,
"Incorrect 2-way comparison result for equal operation");
REQUIRE_MESSAGE((lhs != rhs) == !ExpectEqual,
"Incorrect 2-way comparison result for unequal operation");
}
template <bool ExpectEqual, bool ExpectLess, typename T>
void testEqualityAndLessComparisons( const T& lhs, const T& rhs ) {
testEqualityComparisons<ExpectEqual>(lhs, rhs);
testTwoWayComparisons<ExpectEqual, ExpectLess>(lhs, rhs);
}
class TwoWayComparable {
public:
TwoWayComparable() : n(0) {
reset();
}
TwoWayComparable( std::size_t num ) : n(num) {
reset();
}
static void reset() {
equal_called = false;
unequal_called = false;
less_called = false;
greater_called = false;
less_or_equal_called = false;
greater_or_equal_called = false;
}
static bool equal_called;
static bool unequal_called;
static bool less_called;
static bool greater_called;
static bool less_or_equal_called;
static bool greater_or_equal_called;
friend bool operator==( const TwoWayComparable& lhs, const TwoWayComparable& rhs ) {
equal_called = true;
return lhs.n == rhs.n;
}
friend bool operator!=( const TwoWayComparable& lhs, const TwoWayComparable& rhs ) {
unequal_called = true;
return lhs.n != rhs.n;
}
friend bool operator<( const TwoWayComparable& lhs, const TwoWayComparable& rhs ) {
less_called = true;
return lhs.n < rhs.n;
}
friend bool operator>( const TwoWayComparable& lhs, const TwoWayComparable& rhs ) {
greater_called = true;
return lhs.n > rhs.n;
}
friend bool operator<=( const TwoWayComparable& lhs, const TwoWayComparable& rhs ) {
less_or_equal_called = true;
return lhs.n <= rhs.n;
}
friend bool operator>=( const TwoWayComparable& lhs, const TwoWayComparable& rhs ) {
greater_or_equal_called = true;
return lhs.n >= rhs.n;
}
protected:
std::size_t n;
}; // struct TwoWayComparable
bool TwoWayComparable::equal_called = false;
bool TwoWayComparable::unequal_called = false;
bool TwoWayComparable::less_called = false;
bool TwoWayComparable::greater_called = false;
bool TwoWayComparable::less_or_equal_called = false;
bool TwoWayComparable::greater_or_equal_called = false;
// This function should be executed after comparing two objects containing TwoWayComparables
// using one of the comparison operators (<=>, <, >, <=, >=)
void check_two_way_comparison() {
REQUIRE_MESSAGE(TwoWayComparable::less_called,
"operator < was not called during the comparison");
REQUIRE_MESSAGE(!TwoWayComparable::greater_called,
"operator > was called during the comparison");
REQUIRE_MESSAGE(!TwoWayComparable::less_or_equal_called,
"operator <= was called during the comparison");
REQUIRE_MESSAGE(!TwoWayComparable::greater_or_equal_called,
"operator >= was called during the comparison");
REQUIRE_MESSAGE(!(TwoWayComparable::equal_called),
"operator == was called during the comparison");
REQUIRE_MESSAGE(!(TwoWayComparable::unequal_called),
"operator == was called during the comparison");
TwoWayComparable::reset();
}
} // namespace comparisons_testing
#endif // __TBB_test_common_test_comparisons_H


@ -0,0 +1,172 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_async_node.cpp
//! \brief Test for [flow_graph.async_node] specification
/*
TODO: implement missing conformance tests for async_node:
- [ ] Write `test_forwarding()'.
- [ ] Improve test of the node's copy-constructor.
- [ ] Write `test_priority'.
- [ ] Rename `test_discarding' to `test_buffering'.
- [ ] Write inheritance test.
- [ ] Constructor with explicitly passed Policy parameter.
- [ ] Concurrency testing of the node: make a loop over possible concurrency levels. It is
important to test at least on five values: 1, tbb::flow::serial, `max_allowed_parallelism'
obtained from `tbb::global_control', `tbb::flow::unlimited', and, if `max allowed
parallelism' is > 2, use something in the middle of the [1, max_allowed_parallelism]
interval. Use `utils::ExactConcurrencyLevel' entity (extending it if necessary).
- [ ] Write `test_rejecting', where avoid dependency on OS scheduling of the threads; add check
that `try_put()' returns `false'
- [ ] The `copy_body' function copies altered body (e.g. after successful `try_put()' call).
- [ ] The copy constructor and copy assignment are called for the node's input and output types.
- [ ] Add CTAD test.
*/
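// A minimal sketch of the missing `test_forwarding' item from the TODO list above, modeled on
// the forwarding tests of the other conformance files in this commit. The `test_push_receiver'
// alias and `get_count' helper are assumed to come from conformance_flowgraph.h; the function
// name below is illustrative only and is not wired into a TEST_CASE.
void test_forwarding_sketch() {
    tbb::flow::graph g;
    // The async body immediately forwards its input through the gateway.
    tbb::flow::async_node<int, int> node1(g, tbb::flow::unlimited,
        [](int input, tbb::flow::async_node<int, int>::gateway_type& gw) { gw.try_put(input); });
    test_push_receiver<int> node2(g);
    test_push_receiver<int> node3(g);
    tbb::flow::make_edge(node1, node2);
    tbb::flow::make_edge(node1, node3);

    node1.try_put(1);
    g.wait_for_all();

    CHECK_MESSAGE((get_count(node2) == 1), "Each descendant of the node should receive one message");
    CHECK_MESSAGE((get_count(node3) == 1), "Each descendant of the node should receive one message");
}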
template<typename I, typename O>
void test_inheritance(){
using namespace tbb::flow;
CHECK_MESSAGE( (std::is_base_of<graph_node, async_node<I, O>>::value), "async_node should be derived from graph_node");
CHECK_MESSAGE( (std::is_base_of<receiver<I>, async_node<I, O>>::value), "async_node should be derived from receiver<Input>");
CHECK_MESSAGE( (std::is_base_of<sender<O>, async_node<I, O>>::value), "async_node should be derived from sender<Output>");
}
template< typename OutputType >
struct as_inc_functor {
std::thread my_thread;
std::atomic<size_t>& local_execute_count;
as_inc_functor(std::atomic<size_t>& execute_count ) :
local_execute_count (execute_count)
{ }
as_inc_functor( const as_inc_functor &f ) : local_execute_count(f.local_execute_count) { }
void operator=(const as_inc_functor &f) { local_execute_count = size_t(f.local_execute_count); }
void operator()( int num , tbb::flow::async_node<int, int>::gateway_type& g) {
++local_execute_count;
g.try_put(num);
// my_thread = std::thread([&](){
// g.try_put(num);
// });
}
};
void test_async_body(){
tbb::flow::graph g;
std::atomic<size_t> local_count(0);
as_inc_functor<int> fun(local_count);
tbb::flow::async_node<int, int> node1(g, tbb::flow::unlimited, fun);
const size_t n = 10;
for(size_t i = 0; i < n; ++i) {
CHECK_MESSAGE((node1.try_put(1) == true), "try_put needs to return true");
}
//fun.my_thread.join();
g.wait_for_all();
CHECK_MESSAGE( (fun.local_execute_count.load() == n), "Body of the node needs to be executed N times");
}
void test_copy(){
tbb::flow::graph g;
std::atomic<size_t> local_count(0);
as_inc_functor<int> fun(local_count);
tbb::flow::async_node<int, int> node1(g, tbb::flow::unlimited, fun);
tbb::flow::async_node<int, int> node2(node1);
}
void test_priority(){
tbb::flow::graph g;
std::atomic<size_t> local_count(0);
as_inc_functor<int> fun(local_count);
tbb::flow::async_node<int, int> node1(g, tbb::flow::unlimited, fun, tbb::flow::no_priority);
}
void test_discarding(){
tbb::flow::graph g;
std::atomic<size_t> local_count(0);
as_inc_functor<int> fun(local_count);
tbb::flow::async_node<int, int> node1(g, tbb::flow::unlimited, fun);
tbb::flow::limiter_node< int > rejecter1( g,0);
tbb::flow::limiter_node< int > rejecter2( g,0);
make_edge(node1, rejecter2);
make_edge(node1, rejecter1);
node1.try_put(1);
int tmp = -1;
CHECK_MESSAGE((node1.try_get(tmp) == false), "Value should be discarded after rejection");
g.wait_for_all();
}
//! Test discarding property
//! \brief \ref requirement
TEST_CASE("async_node discarding") {
test_discarding();
}
//! Test async_node priority interface
//! \brief \ref interface
TEST_CASE("async_node priority interface"){
test_priority();
}
//! Test async_node copy
//! \brief \ref interface
TEST_CASE("async_node copy"){
test_copy();
}
//! Test calling async body
//! \brief \ref interface \ref requirement
TEST_CASE("Test async_node body") {
test_async_body();
}
//! Test async_node inheritance relations
//! \brief \ref interface
TEST_CASE("async_node superclasses"){
test_inheritance<int, int>();
test_inheritance<void*, float>();
}


@ -0,0 +1,113 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_broadcast_node.cpp
//! \brief Test for [flow_graph.broadcast_node] specification
/*
TODO: implement missing conformance tests for broadcast_node:
- [ ] The copy constructor and copy assignment are called for the node's type template parameter.
- [ ] Improve test for constructors.
*/
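// A hedged sketch for the first TODO item above: a copy-counting message type is pushed through
// the node, and only a lower bound on the number of copies is checked, since the exact count is
// an implementation detail. The type and function names below are illustrative only.
struct copy_counting_object {
    static std::atomic<std::size_t> copy_count;
    int value{0};
    copy_counting_object() = default;
    copy_counting_object(int v) : value(v) {}
    copy_counting_object(const copy_counting_object& other) : value(other.value) { ++copy_count; }
    copy_counting_object& operator=(const copy_counting_object& other) {
        value = other.value;
        ++copy_count;
        return *this;
    }
};
std::atomic<std::size_t> copy_counting_object::copy_count{0};

void test_message_copying_sketch() {
    tbb::flow::graph g;
    tbb::flow::broadcast_node<copy_counting_object> node(g);
    tbb::flow::queue_node<copy_counting_object> sink(g);
    tbb::flow::make_edge(node, sink);

    copy_counting_object::copy_count = 0;
    node.try_put(copy_counting_object(1));
    g.wait_for_all();

    copy_counting_object received;
    CHECK_MESSAGE((sink.try_get(received) == true), "The successor should receive the message");
    CHECK_MESSAGE((received.value == 1), "The received value should match the value sent");
    CHECK_MESSAGE((copy_counting_object::copy_count.load() > 0),
                  "The message type should be copied while passing through the graph");
}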
template<typename T>
void test_inheritance(){
using namespace tbb::flow;
CHECK_MESSAGE( (std::is_base_of<graph_node, broadcast_node<T>>::value), "broadcast_node should be derived from graph_node");
CHECK_MESSAGE( (std::is_base_of<receiver<T>, broadcast_node<T>>::value), "broadcast_node should be derived from receiver<T>");
CHECK_MESSAGE( (std::is_base_of<sender<T>, broadcast_node<T>>::value), "broadcast_node should be derived from sender<T>");
}
void test_copies(){
using namespace tbb::flow;
graph g;
broadcast_node<int> n(g);
broadcast_node<int> n2(n);
}
void test_buffering(){
tbb::flow::graph g;
tbb::flow::broadcast_node<int> node(g);
tbb::flow::limiter_node<int> rejecter(g, 0);
tbb::flow::make_edge(node, rejecter);
node.try_put(1);
g.wait_for_all();
int tmp = -1;
CHECK_MESSAGE( (node.try_get(tmp) == false), "try_get after rejection should not succeed");
CHECK_MESSAGE( (tmp == -1), "try_get after rejection should not set value");
}
void test_forwarding(){
tbb::flow::graph g;
tbb::flow::broadcast_node<int> node1(g);
test_push_receiver<int> node2(g);
test_push_receiver<int> node3(g);
tbb::flow::make_edge(node1, node2);
tbb::flow::make_edge(node1, node3);
node1.try_put(1);
g.wait_for_all();
int c2 = get_count(node2), c3 = get_count(node3);
CHECK_MESSAGE( ( c2 == 1), "Descendant of the node must receive one message.");
CHECK_MESSAGE( ( c3 == 1), "Descendant of the node must receive one message.");
}
//! Test broadcast_node broadcast
//! \brief \ref requirement
TEST_CASE("broadcast_node broadcasts"){
test_forwarding();
}
//! Test broadcast_node buffering
//! \brief \ref requirement
TEST_CASE("broadcast_node buffering"){
test_buffering();
}
//! Test copy constructor
//! \brief \ref interface
TEST_CASE("broadcast_node copy constructor"){
test_copies();
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("broadcast_node superclasses"){
test_inheritance<int>();
test_inheritance<void*>();
}


@ -0,0 +1,121 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_buffer_node.cpp
//! \brief Test for [flow_graph.buffer_node] specification
/*
TODO: implement missing conformance tests for buffer_node:
- [ ] The copy constructor is called for the node's type template parameter.
- [ ] Improve `test_forwarding' by checking that the value passed is the actual one received.
- [ ] Improve `test_buffering' by checking that additional `try_get()' does not receive the same
value.
- [ ] Improve tests of the constructors.
- [ ] Based on the decision about the details for `try_put()' and `try_get()' write corresponding
tests.
- [ ] Fix description in `TEST_CASEs'.*/
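// A small sketch of the `test_forwarding' improvement suggested in the TODO list above: besides
// counting messages, the value received by a single successor is compared with the value sent.
// The function name below is illustrative only and is not wired into a TEST_CASE.
void test_forwarded_value_sketch() {
    tbb::flow::graph g;
    tbb::flow::buffer_node<int> node(g);
    tbb::flow::queue_node<int> sink(g);
    tbb::flow::make_edge(node, sink);

    node.try_put(42);
    g.wait_for_all();

    int received = -1;
    CHECK_MESSAGE((sink.try_get(received) == true), "The successor should receive the message");
    CHECK_MESSAGE((received == 42), "The value received should be the value that was put");
}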
template<typename T>
void test_inheritance(){
using namespace tbb::flow;
CHECK_MESSAGE( (std::is_base_of<graph_node, buffer_node<T>>::value), "buffer_node should be derived from graph_node");
CHECK_MESSAGE( (std::is_base_of<receiver<T>, buffer_node<T>>::value), "buffer_node should be derived from receiver<T>");
CHECK_MESSAGE( (std::is_base_of<sender<T>, buffer_node<T>>::value), "buffer_node should be derived from sender<T>");
}
void test_copies(){
using namespace tbb::flow;
graph g;
buffer_node<int> n(g);
buffer_node<int> n2(n);
}
void test_buffering(){
tbb::flow::graph g;
tbb::flow::buffer_node<int> node(g);
tbb::flow::limiter_node<int> rejecter(g, 0);
tbb::flow::make_edge(node, rejecter);
int tmp = -1;
CHECK_MESSAGE( (node.try_get(tmp) == false), "try_get before placemnt should not succeed");
node.try_put(1);
tmp = -1;
CHECK_MESSAGE( (node.try_get(tmp) == true), "try_get after rejection should succeed");
CHECK_MESSAGE( (tmp == 1), "try_get after rejection should set value");
g.wait_for_all();
}
void test_forwarding(){
tbb::flow::graph g;
tbb::flow::buffer_node<int> node1(g);
test_push_receiver<int> node2(g);
test_push_receiver<int> node3(g);
tbb::flow::make_edge(node1, node2);
tbb::flow::make_edge(node1, node3);
node1.try_put(1);
g.wait_for_all();
int c2 = get_count(node2), c3 = get_count(node3);
CHECK_MESSAGE( (c2 != c3 ), "Only one descendant of the node should receive the message");
CHECK_MESSAGE( (c2 + c3 == 1 ), "All messages need to be received");
}
//! Test buffer_node forwarding
//! \brief \ref requirement
TEST_CASE("buffer_node forwarding"){
test_forwarding();
}
//! Test buffer_node buffering
//! \brief \ref requirement
TEST_CASE("buffer_node buffering"){
test_buffering();
}
//! Test copy constructor
//! \brief \ref interface
TEST_CASE("buffer_node copy constructor"){
test_copies();
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("buffer_node superclasses"){
test_inheritance<int>();
test_inheritance<void*>();
}


@ -0,0 +1,108 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_composite_node.cpp
//! \brief Test for [flow_graph.composite_node] specification
/*
TODO: implement missing conformance tests for composite_node:
- [ ] Check that `input_ports_type' and `output_ports_type' are defined, accessible, and meet
their requirements, that is, each element is a reference to actual node, input or output
port respectively.
- [ ] Add tests for `composite_node' with only input and output ports.
- [ ] Make sure `input_ports()' and `output_ports()' are defined and accessible in respective
specializations.
- [ ] Check the size of input and output tuples is equal to the size of `input_ports_type' and
`output_ports_type'.
*/
using namespace tbb::flow;
using namespace std;
class adder : public composite_node< tuple<int, int>, tuple<int> > {
join_node< tuple<int,int>, queueing > j;
function_node< tuple<int,int>, int > f;
queue_node <int> qn;
typedef composite_node< tuple<int,int>, tuple<int> > base_type;
struct f_body {
int operator()(const tuple<int,int> &t) {
int sum = get<0>(t) + get<1>(t);
return sum;
}
};
public:
adder(graph &g) : base_type(g), j(g), f(g, unlimited, f_body()), qn(g) {
make_edge(j, f);
make_edge(f, qn);
base_type::input_ports_type input_tuple(input_port<0>(j), input_port<1>(j));
base_type::output_ports_type output_tuple(qn);
base_type::set_external_ports(input_tuple, output_tuple);
}
};
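// A hedged sketch for the first two TODO items above, reusing the `adder' class defined here.
// It checks that the port typedefs are accessible and that input_ports()/output_ports() return
// tuples of the expected size; the exact element types are not asserted. The function name is
// illustrative only and is not wired into a TEST_CASE.
void test_ports_accessors_sketch() {
    static_assert(std::tuple_size<adder::input_ports_type>::value == 2,
                  "input_ports_type should have one element per input port");
    static_assert(std::tuple_size<adder::output_ports_type>::value == 1,
                  "output_ports_type should have one element per output port");

    graph g;
    adder a(g);

    auto in_ports = a.input_ports();
    auto out_ports = a.output_ports();

    get<0>(in_ports).try_put(2);
    get<1>(in_ports).try_put(3);
    g.wait_for_all();

    int sum = 0;
    CHECK_MESSAGE((get<0>(out_ports).try_get(sum) == true), "The external output port should produce a value");
    CHECK_MESSAGE((sum == 2 + 3), "The composite node should produce the expected sum");
}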
void test_inheritance(){
using namespace tbb::flow;
CHECK_MESSAGE( (std::is_base_of<graph_node, adder>::value), "composite_node should be derived from graph_node");
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("composite_node superclasses"){
test_inheritance();
}
//! Test construction and message passing
//! \brief \ref interface \ref requirement
TEST_CASE("Construction and message test"){
graph g;
split_node< tuple<int, int, int, int> > s(g);
adder a0(g);
adder a1(g);
adder a2(g);
make_edge(output_port<0>(s), input_port<0>(a0));
make_edge(output_port<1>(s), input_port<1>(a0));
make_edge(output_port<0>(a0),input_port<0>(a1));
make_edge(output_port<2>(s), input_port<1>(a1));
make_edge(output_port<0>(a1), input_port<0>(a2));
make_edge(output_port<3>(s), input_port<1>(a2));
s.try_put(std::make_tuple(1,3,5,7));
g.wait_for_all();
int tmp = -1;
CHECK_MESSAGE((output_port<0>(a2).try_get(tmp) == true), "Composite node should produce a value");
CHECK_MESSAGE((tmp == 1+3+5+7), "Composite node should produce correct sum");
}


@ -224,6 +224,12 @@ TEST_CASE("CTAD support in concurrent_map") {
}
#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
//! Testing comparison operators in concurrent_map
//! \brief \ref interface \ref requirement
TEST_CASE("test concurrent_map comparisons") {
test_map_comparisons<tbb::concurrent_map>();
}
//! Testing concurrent_multimap member types
//! \brief \ref interface \ref requirement
TEST_CASE("concurrent_multimap member types") {
@ -286,6 +292,12 @@ TEST_CASE("CTAD support in concurrent_multimap") {
}
#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
//! Testing comparison operators in concurrent_multimap
//! \brief \ref interface \ref requirement
TEST_CASE("test concurrent_multimap comparisons") {
test_map_comparisons<tbb::concurrent_multimap>();
}
//! Testing of merge operations in concurrent_map and concurrent_multimap
//! \brief \ref interface \ref requirement
TEST_CASE("merge operations") {


@ -196,6 +196,12 @@ TEST_CASE("CTAD support in concurrent_set") {
}
#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
//! Testing comparison operators in concurrent_set
//! \brief \ref interface \ref requirement
TEST_CASE("test concurrent_set comparisons") {
test_set_comparisons<tbb::concurrent_set>();
}
//! Testing concurrent_multiset member types
//! \brief \ref interface \ref requirement
TEST_CASE("concurrent_multiset member types") {
@ -252,6 +258,12 @@ TEST_CASE("CTAD support in concurrent_multiset") {
}
#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
//! Testing comparison operators in concurrent_multiset
//! \brief \ref interface \ref requirement
TEST_CASE("test concurrent_set comparisons") {
test_set_comparisons<tbb::concurrent_multiset>();
}
//! Testing of merge operations in concurrent_set and concurrent_multiset
//! \brief \ref interface \ref requirement
TEST_CASE("merge operations") {


@ -0,0 +1,249 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_continue_node.cpp
//! \brief Test for [flow_graph.continue_node] specification
/*
TODO: implement missing conformance tests for continue_node:
- [ ] For `test_forwarding' check that the value passed is the actual one received.
- [ ] The `copy_body' function copies altered body (e.g. after its successful invocation).
- [ ] Improve CTAD test.
- [ ] Improve constructors test, including addition of calls to constructors with
`number_of_predecessors' parameter.
- [ ] Explicit test for copy constructor of the node.
- [ ] Rewrite test_priority.
- [ ] Check that the `Output' type is indeed copy-constructed and copy-assigned while working with the node.
- [ ] Explicit test for correct working of `number_of_predecessors' constructor parameter,
including taking it into account when making and removing edges.
- [ ] Add testing of `try_put' statement. In particular that it does not wait for the execution of
the body to complete.
*/
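// A hedged sketch for the `number_of_predecessors' TODO item above: with no incoming edges and
// the constructor parameter set to 2, the body is expected to run only after two continue_msg
// signals have been received. The function name below is illustrative only and is not wired
// into a TEST_CASE.
void test_number_of_predecessors_sketch() {
    tbb::flow::graph g;
    std::atomic<int> executions{0};
    tbb::flow::continue_node<tbb::flow::continue_msg> node(g, 2,
        [&](tbb::flow::continue_msg) { ++executions; return tbb::flow::continue_msg(); });

    node.try_put(tbb::flow::continue_msg());
    g.wait_for_all();
    CHECK_MESSAGE((executions == 0), "The body should not run before all expected signals arrive");

    node.try_put(tbb::flow::continue_msg());
    g.wait_for_all();
    CHECK_MESSAGE((executions == 1), "The body should run exactly once after the expected number of signals");
}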
void test_cont_body(){
tbb::flow::graph g;
inc_functor<int> cf;
cf.execute_count = 0;
tbb::flow::continue_node<int> node1(g, cf);
const size_t n = 10;
for(size_t i = 0; i < n; ++i) {
CHECK_MESSAGE((node1.try_put(tbb::flow::continue_msg()) == true),
"continue_node::try_put() should never reject a message.");
}
g.wait_for_all();
CHECK_MESSAGE( (cf.execute_count == n), "Body of the first node needs to be executed N times");
}
template<typename O>
void test_inheritance(){
using namespace tbb::flow;
CHECK_MESSAGE( (std::is_base_of<graph_node, continue_node<O>>::value), "continue_node should be derived from graph_node");
CHECK_MESSAGE( (std::is_base_of<receiver<continue_msg>, continue_node<O>>::value), "continue_node should be derived from receiver<Input>");
CHECK_MESSAGE( (std::is_base_of<sender<O>, continue_node<O>>::value), "continue_node should be derived from sender<Output>");
}
#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
void test_deduction_guides(){
tbb::flow::graph g;
inc_functor<int> fun;
tbb::flow::continue_node node1(g, fun);
}
#endif
void test_forwarding(){
tbb::flow::graph g;
inc_functor<int> fun;
fun.execute_count = 0;
tbb::flow::continue_node<int> node1(g, fun);
test_push_receiver<int> node2(g);
test_push_receiver<int> node3(g);
tbb::flow::make_edge(node1, node2);
tbb::flow::make_edge(node1, node3);
node1.try_put(tbb::flow::continue_msg());
g.wait_for_all();
CHECK_MESSAGE( (get_count(node2) == 1), "Descendant of the node must receive one message.");
CHECK_MESSAGE( (get_count(node3) == 1), "Descendant of the node must receive one message.");
}
void test_buffering(){
tbb::flow::graph g;
inc_functor<int> fun;
tbb::flow::continue_node<int> node(g, fun);
tbb::flow::limiter_node<int> rejecter(g, 0);
tbb::flow::make_edge(node, rejecter);
node.try_put(tbb::flow::continue_msg());
int tmp = -1;
CHECK_MESSAGE( (node.try_get(tmp) == false), "try_get after rejection should not succeed");
CHECK_MESSAGE( (tmp == -1), "try_get after rejection should not alter passed value");
g.wait_for_all();
}
void test_policy_ctors(){
using namespace tbb::flow;
graph g;
inc_functor<int> fun;
continue_node<int, lightweight> lw_node(g, fun);
}
void test_ctors(){
using namespace tbb::flow;
graph g;
inc_functor<int> fun;
continue_node<int> proto1(g, 2, fun, tbb::flow::node_priority_t(1));
}
template<typename O>
struct CopyCounterBody{
size_t copy_count;
CopyCounterBody():
copy_count(0) {}
CopyCounterBody(const CopyCounterBody<O>& other):
copy_count(other.copy_count + 1) {}
CopyCounterBody& operator=(const CopyCounterBody<O>& other){
copy_count = other.copy_count + 1;
return *this;
}
O operator()(tbb::flow::continue_msg){
return 1;
}
};
void test_copies(){
using namespace tbb::flow;
CopyCounterBody<int> b;
graph g;
continue_node<int> fn(g, b);
CopyCounterBody<int> b2 = copy_body<CopyCounterBody<int>,
continue_node<int>>(fn);
CHECK_MESSAGE( (b.copy_count + 2 <= b2.copy_count), "copy_body and constructor should copy bodies");
}
void test_priority(){
size_t concurrency_limit = 1;
tbb::global_control control(tbb::global_control::max_allowed_parallelism, concurrency_limit);
tbb::flow::graph g;
tbb::flow::continue_node<tbb::flow::continue_msg> source(g,
[](tbb::flow::continue_msg){ return tbb::flow::continue_msg();});
source.try_put(tbb::flow::continue_msg());
first_functor<int>::first_id = -1;
first_functor<int> low_functor(1);
first_functor<int> high_functor(2);
tbb::flow::continue_node<int, int> high(g, high_functor, tbb::flow::node_priority_t(1));
tbb::flow::continue_node<int, int> low(g, low_functor);
make_edge(source, low);
make_edge(source, high);
g.wait_for_all();
CHECK_MESSAGE( (first_functor<int>::first_id == 2), "High priority node should execute first");
}
//! Test node constructors
//! \brief \ref requirement
TEST_CASE("continue_node constructors"){
test_ctors();
}
//! Test priorities work in single-threaded configuration
//! \brief \ref requirement
TEST_CASE("continue_node priority support"){
test_priority();
}
//! Test body copying and copy_body logic
//! \brief \ref interface
TEST_CASE("continue_node and body copying"){
test_copies();
}
//! Test policy constructors
//! \brief \ref interface
TEST_CASE("continue_node policy constructors"){
test_policy_ctors();
}
//! Test continue_node buffering
//! \brief \ref requirement
TEST_CASE("continue_node buffering"){
test_buffering();
}
//! Test continue_node broadcasting
//! \brief \ref requirement
TEST_CASE("continue_node broadcast"){
test_forwarding();
}
//! Test deduction guides
//! \brief \ref interface \ref requirement
TEST_CASE("Deduction guides"){
#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
test_deduction_guides();
#endif
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("continue_node superclasses"){
test_inheritance<int>();
test_inheritance<void*>();
}
//! Test body execution
//! \brief \ref interface \ref requirement
TEST_CASE("continue body") {
test_cont_body();
}


@ -14,17 +14,24 @@
limitations under the License.
*/
#if _MSC_VER && !defined(__INTEL_COMPILER)
// Workaround for VS2015 warning C4503: decorated name length exceeded the compiler limit (4096).
#pragma warning (push)
#pragma warning (disable: 4503)
#endif
#include "common/test.h"
#include "common/utils.h"
#include "common/utils_report.h"
#include "common/utils_concurrency_limit.h"
#include "common/spin_barrier.h"
#include "common/checktype.h"
#include "tbb/detail/_utils.h"
#include "tbb/enumerable_thread_specific.h"
#include "tbb/parallel_for.h"
#include "tbb/parallel_reduce.h"
#include "tbb/parallel_invoke.h"
#include "tbb/blocked_range.h"
#include "tbb/tbb_allocator.h"
#include "tbb/global_control.h"
@ -657,13 +664,18 @@ void run_parallel_vector_tests(const char* /* test_name */, const char *allocato
size_t ccount = fvs.size();
REQUIRE( ccount == size_t(N) );
size_t elem_cnt = 0;
typename tbb::flattened2d<ets_type>::iterator it;
auto it2(it);
it = fvs.begin();
REQUIRE(it != it2);
for(typename tbb::flattened2d<ets_type>::const_iterator i = fvs.begin(); i != fvs.end(); ++i) {
++elem_cnt;
};
REQUIRE( ccount == elem_cnt );
elem_cnt = 0;
for(typename tbb::flattened2d<ets_type>::iterator i = fvs.begin(); i != fvs.end(); ++i) {
for(typename tbb::flattened2d<ets_type>::iterator i = fvs.begin(); i != fvs.end(); i++) {
++elem_cnt;
};
REQUIRE( ccount == elem_cnt );
@ -744,6 +756,11 @@ void run_cross_type_vector_tests(const char* /* test_name */) {
++elem_cnt;
};
REQUIRE(ccount == elem_cnt);
tbb::flattened2d<ets_nokey_type> fvs2 = flatten2d(vs3, vs3.begin(), std::next(vs3.begin()));
REQUIRE(std::distance(fvs2.begin(), fvs2.end()) == vs3.begin()->size());
const tbb::flattened2d<ets_nokey_type>& cfvs2(fvs2);
REQUIRE(std::distance(cfvs2.begin(), cfvs2.end()) == vs3.begin()->size());
}
double result_value = test_helper<T>::get(sum);
@ -1063,6 +1080,80 @@ size_t init_tbb_alloc_mask() {
static const size_t cache_allocator_mask = tbb::detail::r1::cache_line_size();
static const size_t tbb_allocator_mask = init_tbb_alloc_mask();
void TestETSIterator() {
using ets_type = tbb::enumerable_thread_specific<int>;
if (utils::get_platform_max_threads() == 1) {
ets_type ets;
ets.local() = 1;
REQUIRE_MESSAGE(std::next(ets.begin()) == ets.end(), "Incorrect begin or end of the ETS");
REQUIRE_MESSAGE(std::prev(ets.end()) == ets.begin(), "Incorrect begin or end of the ETS");
} else {
std::atomic<std::size_t> sync_counter(0);
const std::size_t expected_ets_size = 2;
ets_type ets;
const ets_type& cets(ets);
auto fill_ets_body = [&](){
ets.local() = 42;
++sync_counter;
while(sync_counter != expected_ets_size)
std::this_thread::yield();
};
tbb::parallel_invoke(fill_ets_body, fill_ets_body);
REQUIRE_MESSAGE(ets.size() == expected_ets_size, "Incorrect ETS size");
std::size_t counter = 0;
auto iter = ets.begin();
while(iter != ets.end()) {
++counter % 2 == 0 ? ++iter : iter++;
}
REQUIRE(counter == expected_ets_size);
while(iter != ets.begin()) {
--counter % 2 == 0 ? --iter : iter--;
}
REQUIRE(counter == 0);
auto citer = cets.begin();
while(citer != cets.end()) {
++counter % 2 == 0 ? ++citer : citer++;
}
REQUIRE(counter == expected_ets_size);
while(citer != cets.begin()) {
--counter % 2 == 0 ? --citer : citer--;
}
REQUIRE(counter == 0);
REQUIRE(ets.begin() + expected_ets_size == ets.end());
REQUIRE(expected_ets_size + ets.begin() == ets.end());
REQUIRE(ets.end() - expected_ets_size == ets.begin());
typename ets_type::iterator it;
it = ets.begin();
auto it_bkp = it;
auto it2 = it++;
REQUIRE(it2 == it_bkp);
it = ets.begin();
it += expected_ets_size;
REQUIRE(it == ets.end());
it -= expected_ets_size;
REQUIRE(it == ets.begin());
for (int i = 0; i < int(expected_ets_size - 1); ++i) {
REQUIRE(ets.begin()[i] == 42);
REQUIRE(std::prev(ets.end())[-i] == 42);
}
auto iter1 = ets.begin();
auto iter2 = ets.end();
REQUIRE(iter1 < iter2);
REQUIRE(iter1 <= iter2);
REQUIRE(!(iter1 > iter2));
REQUIRE(!(iter1 >= iter2));
}
}
//! Test container instantiation
//! \brief \ref interface \ref requirement
TEST_CASE("Instantiation") {
@ -1127,3 +1218,11 @@ TEST_CASE("Member types") {
TestMemberTypes();
}
//! Testing enumerable_thread_specific iterators
//! \brief \ref interface \ref requirement
TEST_CASE("enumerable_thread_specific iterator") {
TestETSIterator();
}
#if _MSC_VER && !defined(__INTEL_COMPILER)
#pragma warning (pop)
#endif // warning 4503 is back


@ -0,0 +1,90 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef __TBB_test_conformance_conformance_flowgraph_H
#define __TBB_test_conformance_conformance_flowgraph_H
struct passthru_body {
int operator()( int i ) {
return i;
}
void operator()( const int& argument, tbb::flow::multifunction_node<int, std::tuple<int>>::output_ports_type &op ) {
std::get<0>(op).try_put(argument);
}
};
template<typename V>
using test_push_receiver = tbb::flow::queue_node<V>;
template<typename V>
int get_count( test_push_receiver<V>& rr ){
int val = 0;
for(V tmp; rr.try_get(tmp); ++val);
return val;
}
template< typename OutputType >
struct first_functor {
int my_id;
static std::atomic<int> first_id;
first_functor(int id) : my_id(id) {}
OutputType operator()( OutputType argument ) {
int old_value = first_id;
while(first_id == -1 &&
!first_id.compare_exchange_weak(old_value, my_id))
;
return argument;
}
OutputType operator()( const tbb::flow::continue_msg& ) {
return operator()(OutputType());
}
void operator()( const OutputType& argument, tbb::flow::multifunction_node<int, std::tuple<int>>::output_ports_type &op ) {
operator()(OutputType());
std::get<0>(op).try_put(argument);
}
};
template<typename OutputType>
std::atomic<int> first_functor<OutputType>::first_id;
template< typename OutputType >
struct inc_functor {
static std::atomic<size_t> execute_count;
OutputType operator()( tbb::flow::continue_msg ) {
++execute_count;
return OutputType();
}
OutputType operator()( int argument ) {
++execute_count;
return argument;
}
};
template<typename OutputType>
std::atomic<size_t> inc_functor<OutputType>::execute_count;
#endif // __TBB_test_conformance_conformance_flowgraph_H


@ -0,0 +1,340 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_function_node.cpp
//! \brief Test for [flow_graph.function_node] specification
/*
TODO: implement missing conformance tests for function_node:
- [ ] Constructor with explicitly passed Policy parameter: `template<typename Body> function_node(
graph &g, size_t concurrency, Body body, Policy(), node_priority_t, priority = no_priority )'
- [ ] Explicit test for copy constructor of the node.
- [ ] Rename test_broadcast to test_forwarding and check that the value passed is the actual one
received.
- [ ] Concurrency testing of the node: make a loop over possible concurrency levels. It is
important to test at least on five values: 1, tbb::flow::serial, `max_allowed_parallelism'
obtained from `tbb::global_control', `tbb::flow::unlimited', and, if `max allowed
parallelism' is > 2, use something in the middle of the [1, max_allowed_parallelism]
interval. Use `utils::ExactConcurrencyLevel' entity (extending it if necessary).
- [ ] make `test_rejecting' deterministic, i.e. avoid dependency on OS scheduling of the threads;
add check that `try_put()' returns `false'
- [ ] The copy constructor and copy assignment are called for the node's input and output types.
- [ ] The `copy_body' function copies altered body (e.g. after successful `try_put()' call).
- [ ] Extend CTAD test to check all node's constructors.
*/
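// A hedged sketch of the concurrency-level loop from the TODO list above: for every tested limit
// the parallelism observed inside the body must not exceed that limit. Only bounded levels are
// covered here (tbb::flow::serial plus two arbitrary values standing in for the middle of the
// [1, max_allowed_parallelism] interval); the function name is illustrative only.
void test_concurrency_levels_sketch() {
    for (std::size_t limit : {std::size_t(tbb::flow::serial), std::size_t(2), std::size_t(4)}) {
        std::atomic<std::size_t> current{0};
        std::atomic<std::size_t> max_observed{0};
        tbb::flow::graph g;
        tbb::flow::function_node<int, int> node(g, limit, [&](int v) {
            std::size_t cur = ++current;
            std::size_t prev = max_observed.load();
            // Record the highest concurrency seen inside the body.
            while (prev < cur && !max_observed.compare_exchange_weak(prev, cur)) {}
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            --current;
            return v;
        });
        for (int i = 0; i < 20; ++i) {
            node.try_put(i);
        }
        g.wait_for_all();
        CHECK_MESSAGE((max_observed.load() <= limit),
                      "Observed concurrency should not exceed the node's concurrency limit");
    }
}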
std::atomic<size_t> my_concurrency;
std::atomic<size_t> my_max_concurrency;
template< typename OutputType >
struct concurrency_functor {
OutputType operator()( int argument ) {
++my_concurrency;
size_t old_value = my_max_concurrency;
while(my_max_concurrency < my_concurrency &&
!my_max_concurrency.compare_exchange_weak(old_value, my_concurrency))
;
size_t ms = 1000;
std::chrono::milliseconds sleep_time( ms );
std::this_thread::sleep_for( sleep_time );
--my_concurrency;
return argument;
}
};
void test_func_body(){
tbb::flow::graph g;
inc_functor<int> fun;
fun.execute_count = 0;
tbb::flow::function_node<int, int> node1(g, tbb::flow::unlimited, fun);
const size_t n = 10;
for(size_t i = 0; i < n; ++i) {
CHECK_MESSAGE((node1.try_put(1) == true), "try_put needs to return true");
}
g.wait_for_all();
CHECK_MESSAGE( (fun.execute_count == n), "Body of the node needs to be executed N times");
}
void test_priority(){
size_t concurrency_limit = 1;
tbb::global_control control(tbb::global_control::max_allowed_parallelism, concurrency_limit);
tbb::flow::graph g;
first_functor<int>::first_id.store(-1);
first_functor<int> low_functor(1);
first_functor<int> high_functor(2);
tbb::flow::continue_node<int> source(g, [&](tbb::flow::continue_msg){return 1;} );
tbb::flow::function_node<int, int> high(g, tbb::flow::unlimited, high_functor, tbb::flow::node_priority_t(1));
tbb::flow::function_node<int, int> low(g, tbb::flow::unlimited, low_functor);
make_edge(source, low);
make_edge(source, high);
source.try_put(tbb::flow::continue_msg());
g.wait_for_all();
CHECK_MESSAGE( (first_functor<int>::first_id == 2), "High priority node should execute first");
}
#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
void test_deduction_guides(){
using namespace tbb::flow;
graph g;
auto body = [](const int&)->int { return 1; };
function_node f1(g, unlimited, body);
CHECK_MESSAGE((std::is_same_v<decltype(f1), function_node<int, int>>), "Function node type must be deducible from its body");
}
#endif
void test_broadcast(){
tbb::flow::graph g;
passthru_body fun;
tbb::flow::function_node<int, int> node1(g, tbb::flow::unlimited, fun);
test_push_receiver<int> node2(g);
test_push_receiver<int> node3(g);
tbb::flow::make_edge(node1, node2);
tbb::flow::make_edge(node1, node3);
node1.try_put(1);
g.wait_for_all();
CHECK_MESSAGE( (get_count(node2) == 1), "Descendant of the node must receive one message.");
CHECK_MESSAGE( (get_count(node3) == 1), "Descendant of the node must receive one message.");
}
template<typename Policy>
void test_buffering(){
tbb::flow::graph g;
passthru_body fun;
tbb::flow::function_node<int, int, Policy> node(g, tbb::flow::unlimited, fun);
tbb::flow::limiter_node<int> rejecter(g, 0);
tbb::flow::make_edge(node, rejecter);
node.try_put(1);
int tmp = -1;
CHECK_MESSAGE( (node.try_get(tmp) == false), "try_get after rejection should not succeed");
CHECK_MESSAGE( (tmp == -1), "try_get after rejection should not alter passed value");
g.wait_for_all();
}
void test_node_concurrency(){
my_concurrency = 0;
my_max_concurrency = 0;
tbb::flow::graph g;
concurrency_functor<int> counter;
tbb::flow::function_node <int, int> fnode(g, tbb::flow::serial, counter);
test_push_receiver<int> sink(g);
make_edge(fnode, sink);
for(int i = 0; i < 10; ++i){
fnode.try_put(i);
}
g.wait_for_all();
CHECK_MESSAGE( ( my_max_concurrency.load() == 1), "Measured parallelism should not exceed the serial limit");
}
template<typename I, typename O>
void test_inheritance(){
using namespace tbb::flow;
CHECK_MESSAGE( (std::is_base_of<graph_node, function_node<I, O>>::value), "function_node should be derived from graph_node");
CHECK_MESSAGE( (std::is_base_of<receiver<I>, function_node<I, O>>::value), "function_node should be derived from receiver<Input>");
CHECK_MESSAGE( (std::is_base_of<sender<O>, function_node<I, O>>::value), "function_node should be derived from sender<Output>");
}
void test_policy_ctors(){
using namespace tbb::flow;
graph g;
function_node<int, int, lightweight> lw_node(g, tbb::flow::serial,
[](int v) { return v;});
function_node<int, int, queueing_lightweight> qlw_node(g, tbb::flow::serial,
[](int v) { return v;});
function_node<int, int, rejecting_lightweight> rlw_node(g, tbb::flow::serial,
[](int v) { return v;});
}
class stateful_functor{
public:
int stored;
stateful_functor(): stored(-1){}
int operator()(int value){ stored = 1; return value;}
};
void test_ctors(){
using namespace tbb::flow;
graph g;
function_node<int, int> fn(g, unlimited, stateful_functor());
fn.try_put(0);
g.wait_for_all();
stateful_functor b1 = copy_body<stateful_functor, function_node<int, int>>(fn);
CHECK_MESSAGE( (b1.stored == 1), "First node should update");
function_node<int, int> fn2(fn);
stateful_functor b2 = copy_body<stateful_functor, function_node<int, int>>(fn2);
CHECK_MESSAGE( (b2.stored == -1), "Copied node should not update");
}
template<typename I, typename O>
struct CopyCounterBody{
size_t copy_count;
CopyCounterBody():
copy_count(0) {}
CopyCounterBody(const CopyCounterBody<I, O>& other):
copy_count(other.copy_count + 1) {}
CopyCounterBody& operator=(const CopyCounterBody<I, O>& other)
{ copy_count = other.copy_count + 1; return *this;}
O operator()(I in){
return in;
}
};
void test_copies(){
using namespace tbb::flow;
CopyCounterBody<int, int> b;
graph g;
function_node<int, int> fn(g, unlimited, b);
CopyCounterBody<int, int> b2 = copy_body<CopyCounterBody<int, int>, function_node<int, int>>(fn);
CHECK_MESSAGE( (b.copy_count + 2 <= b2.copy_count), "copy_body and constructor should copy bodies");
}
void test_rejecting(){
tbb::flow::graph g;
tbb::flow::function_node <int, int, tbb::flow::rejecting> fnode(g, tbb::flow::serial,
[&](int v){
size_t ms = 50;
std::chrono::milliseconds sleep_time( ms );
std::this_thread::sleep_for( sleep_time );
return v;
});
test_push_receiver<int> sink(g);
make_edge(fnode, sink);
for(int i = 0; i < 10; ++i){
fnode.try_put(i);
}
g.wait_for_all();
CHECK_MESSAGE( (get_count(sink) == 1), "Messages should be rejected while the first is being processed");
}
//! Test function_node with rejecting policy
//! \brief \ref interface
TEST_CASE("function_node with rejecting policy"){
test_rejecting();
}
//! Test body copying and copy_body logic
//! \brief \ref interface
TEST_CASE("function_node and body copying"){
test_copies();
}
//! Test constructors
//! \brief \ref interface
TEST_CASE("function_node constructors"){
test_policy_ctors();
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("function_node superclasses"){
test_inheritance<int, int>();
test_inheritance<void*, float>();
}
//! Test function_node buffering
//! \brief \ref requirement
TEST_CASE("function_node buffering"){
test_buffering<tbb::flow::rejecting>();
test_buffering<tbb::flow::queueing>();
}
//! Test function_node broadcasting
//! \brief \ref requirement
TEST_CASE("function_node broadcast"){
test_broadcast();
}
//! Test deduction guides
//! \brief \ref interface \ref requirement
TEST_CASE("Deduction guides"){
#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
test_deduction_guides();
#endif
}
//! Test priorities work in single-threaded configuration
//! \brief \ref requirement
TEST_CASE("function_node priority support"){
test_priority();
}
//! Test that measured concurrency respects set limits
//! \brief \ref requirement
TEST_CASE("concurrency follows set limits"){
test_node_concurrency();
}
//! Test calling function body
//! \brief \ref interface \ref requirement
TEST_CASE("Test function_node body") {
test_func_body();
}


@ -203,7 +203,6 @@ TEST_CASE("setting stack size") {
}
#endif
//! Testing setting max number of threads
//! \brief \ref interface \ref requirement
TEST_CASE("setting max number of threads") {
@ -211,3 +210,104 @@ TEST_CASE("setting max number of threads") {
TestConcurrentSetUseConcurrency();
TestAutoInit();
}
//! Test terminate_on_exception default value
//! \brief \ref interface \ref requirement
TEST_CASE("terminate_on_exception: default") {
std::size_t default_toe = tbb::global_control::active_value(tbb::global_control::terminate_on_exception);
CHECK(default_toe == 0);
}
//! Test terminate_on_exception in a nested case
//! \brief \ref interface \ref requirement
TEST_CASE("terminate_on_exception: nested") {
tbb::global_control* c0;
{
tbb::global_control c1(tbb::global_control::terminate_on_exception, 1);
CHECK(tbb::global_control::active_value(tbb::global_control::terminate_on_exception) == 1);
{
tbb::global_control c2(tbb::global_control::terminate_on_exception, 0);
CHECK(tbb::global_control::active_value(tbb::global_control::terminate_on_exception) == 1);
}
CHECK(tbb::global_control::active_value(tbb::global_control::terminate_on_exception) == 1);
c0 = new tbb::global_control(tbb::global_control::terminate_on_exception, 0);
}
CHECK(tbb::global_control::active_value(tbb::global_control::terminate_on_exception) == 0);
delete c0;
}
// The test cannot work correctly with statically linked runtime.
#if !_MSC_VER || defined(_DLL)
#include <csetjmp>
// Overall, the test case is not safe because the dtors might not be called during long jump.
// Therefore, it makes sense to run the test case after all other test cases.
//! Test terminate_on_exception behavior
//! \brief \ref interface \ref requirement
TEST_CASE("terminate_on_exception: enabled") {
tbb::global_control c(tbb::global_control::terminate_on_exception, 1);
static bool terminate_handler_called;
terminate_handler_called = false;
#if TBB_USE_EXCEPTIONS
try {
#endif
static std::jmp_buf buffer;
std::terminate_handler prev = std::set_terminate([] {
CHECK(!terminate_handler_called);
terminate_handler_called = true;
std::longjmp(buffer, 1);
});
#if _MSC_VER
#pragma warning(push)
#pragma warning(disable:4611) // interaction between '_setjmp' and C++ object destruction is non - portable
#endif
SUBCASE("internal exception") {
if (setjmp(buffer) == 0) {
tbb::parallel_for(0, 1, -1, [](int) {});
FAIL("Unreachable code");
}
}
#if TBB_USE_EXCEPTIONS
SUBCASE("user exception") {
if (setjmp(buffer) == 0) {
tbb::parallel_for(0, 1, [](int) {
volatile bool suppress_unreachable_code_warning = true;
if (suppress_unreachable_code_warning) {
throw std::exception();
}
});
FAIL("Unreachable code");
}
}
#endif
#if _MSC_VER
#pragma warning(pop)
#endif
std::set_terminate(prev);
terminate_handler_called = true;
#if TBB_USE_EXCEPTIONS
} catch (...) {
FAIL("The exception is not expected");
}
#endif
CHECK(terminate_handler_called);
}
#endif
//! Testing setting the same value but different objects
//! \brief \ref interface \ref error_guessing
TEST_CASE("setting same value") {
const std::size_t value = 2;
tbb::global_control* ctl1 = new tbb::global_control(tbb::global_control::max_allowed_parallelism, value);
tbb::global_control* ctl2 = new tbb::global_control(tbb::global_control::max_allowed_parallelism, value);
std::size_t active = tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism);
REQUIRE(active == value);
delete ctl2;
active = tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism);
REQUIRE_MESSAGE(active == value, "Active value should not change, because of value duplication");
delete ctl1;
}


@ -0,0 +1,147 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_graph.cpp
//! \brief Test for [flow_graph.graph] specification
using namespace tbb::flow;
using namespace std;
//! const graph
//! \brief \ref error_guessing
TEST_CASE("const graph"){
const graph g;
CHECK_MESSAGE((g.cbegin() == g.cend()), "Starting graph is empty");
graph g2;
CHECK_MESSAGE((g2.begin() == g2.end()), "Starting graph is empty");
}
//! Graph reset
//! \brief \ref requirement
TEST_CASE("graph reset") {
graph g;
size_t concurrency_limit = 1;
tbb::global_control control(tbb::global_control::max_allowed_parallelism, concurrency_limit);
// Functional nodes
// TODO: Check input_node, multifunction_node, async_node similarly
// continue_node
bool flag = false;
continue_node<int> source(g, 2, [&](const continue_msg&){ flag = true; return 1;});
source.try_put(continue_msg());
g.wait_for_all();
CHECK_MESSAGE( (flag == false), "Should still be false");
g.reset(rf_reset_protocol);
source.try_put(continue_msg());
g.wait_for_all();
CHECK_MESSAGE( (flag == false), "Should still be false");
source.try_put(continue_msg());
g.wait_for_all();
CHECK_MESSAGE( (flag == true), "Should be true");
// functional_node
int flag_fun = 0;
function_node<int, int, queueing> f(g, serial, [&](const int& v){ flag_fun++; return v;});
f.try_put(0);
f.try_put(0);
CHECK_MESSAGE( (flag_fun == 0), "Should not be updated");
g.reset(rf_reset_protocol);
g.wait_for_all();
CHECK_MESSAGE( (flag_fun == 1), "Should be updated");
// Buffering nodes
// TODO: Check overwrite_node write_once_node priority_queue_node sequencer_node similarly
// buffer_node
buffer_node<int> buff(g);
int tmp = -1;
CHECK_MESSAGE( (buff.try_get(tmp) == false), "try_get should not succeed");
CHECK_MESSAGE( (tmp == -1), "Value should not be updated");
buff.try_put(1);
g.reset();
tmp = -1;
CHECK_MESSAGE( (buff.try_get(tmp) == false), "try_get should not succeed");
CHECK_MESSAGE( (tmp == -1), "Value should not be updated");
g.wait_for_all();
// queue_node
queue_node<int> q(g);
tmp = -1;
CHECK_MESSAGE( (q.try_get(tmp) == false), "try_get should not succeed");
CHECK_MESSAGE( (tmp == -1), "Value should not be updated");
q.try_put(1);
g.reset();
tmp = -1;
CHECK_MESSAGE( (q.try_get(tmp) == false), "try_get should not succeed");
CHECK_MESSAGE( (tmp == -1), "Value should not be updated");
g.wait_for_all();
// Check rf_clear_edges
continue_node<int> src(g, [&](const continue_msg&){ return 1;});
queue_node<int> dest(g);
make_edge(src, dest);
src.try_put(continue_msg());
g.wait_for_all();
tmp = -1;
CHECK_MESSAGE( (dest.try_get(tmp)== true), "Message should pass when edge exists");
CHECK_MESSAGE( (tmp == 1 ), "Message should pass when edge exists");
CHECK_MESSAGE( (dest.try_get(tmp)== false), "Message should not pass after item is consumed");
g.reset(rf_clear_edges);
tmp = -1;
src.try_put(continue_msg());
g.wait_for_all();
CHECK_MESSAGE( (dest.try_get(tmp)== false), "Message should not pass when edge doesn't exist");
CHECK_MESSAGE( (tmp == -1), "Value should not be altered");
// TODO: Add check that default invocation is the same as with rf_reset_protocol (see the sketch below)
// TODO: See if specification for broadcast_node and other service nodes is sufficient for reset checks
}
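// A minimal sketch for the TODO note inside the test case above: the default `reset()'
// call is expected to behave like `reset(rf_reset_protocol)', e.g. both should leave a
// buffer_node empty afterwards. Illustrative draft only (the function name is made up
// here); it is not registered as a TEST_CASE.
inline void graph_default_reset_sketch() {
    graph g;
    buffer_node<int> buf(g);
    buf.try_put(1);
    g.wait_for_all();
    g.reset();                    // default protocol
    int tmp = -1;
    CHECK_MESSAGE((buf.try_get(tmp) == false), "Default reset should drop buffered items");
    buf.try_put(2);
    g.wait_for_all();
    g.reset(rf_reset_protocol);   // explicit protocol for comparison
    tmp = -1;
    CHECK_MESSAGE((buf.try_get(tmp) == false), "rf_reset_protocol should drop buffered items as well");
}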


@ -0,0 +1,148 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
//! \file conformance_indexer_node.cpp
//! \brief Test for [flow_graph.indexer_node] specification
/*
TODO: implement missing conformance tests for indexer_node:
- [ ] The copy constructor is called for the node's type template parameter.
- [ ] Improve `test_forwarding' by checking that the value passed is the actual one received (see the sketch after this comment block).
- [ ] Improve `test_buffering' by checking that additional `try_get()' does not receive the same value.
- [ ] Improve tests of the constructors.
- [ ] Based on the decision about the details for `try_put()' and `try_get()' write corresponding tests.
- [ ] Fix description in `TEST_CASEs'.
*/
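// A minimal sketch for the `test_forwarding' item in the list above: besides counting
// messages, check that the value delivered to a successor is exactly the value that was
// put into the input port. Illustrative draft only (the function name is made up here);
// it is not registered as a TEST_CASE.
inline void test_forwarding_value_sketch() {
    tbb::flow::graph g;
    tbb::flow::indexer_node<int, float> node(g);
    tbb::flow::queue_node<tbb::flow::indexer_node<int, float>::output_type> sink(g);
    tbb::flow::make_edge(node, sink);
    tbb::flow::input_port<0>(node).try_put(42);
    g.wait_for_all();
    tbb::flow::indexer_node<int, float>::output_type msg;
    CHECK_MESSAGE((sink.try_get(msg) == true), "Successor should receive the forwarded message");
    CHECK_MESSAGE((msg.tag() == 0), "Tag should identify the input port the value came from");
    CHECK_MESSAGE((tbb::flow::cast_to<int>(msg) == 42), "Forwarded value should equal the value that was put");
}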
using namespace tbb::flow;
using namespace std;
template<typename I1, typename I2>
void test_inheritance(){
using namespace tbb::flow;
CHECK_MESSAGE( (std::is_base_of<graph_node, indexer_node<I1, I2>>::value), "indexer_node should be derived from graph_node");
}
void test_copies(){
using namespace tbb::flow;
graph g;
indexer_node<int, int> fn(g);
indexer_node<int, int> f2(fn);
}
//! Test body copying and copy_body logic
//! \brief \ref interface
TEST_CASE("indexer_node and body copying"){
test_copies();
}
void test_broadcasting(){
tbb::flow::graph g;
typedef indexer_node<int,float> my_indexer_type;
typedef my_indexer_type::output_type my_output_type;
my_indexer_type o(g);
my_indexer_type node1(g);
queue_node<my_output_type> node2(g);
queue_node<my_output_type> node3(g);
tbb::flow::make_edge(node1, node2);
tbb::flow::make_edge(node1, node3);
input_port<0>(node1).try_put(6);
input_port<1>(node1).try_put(1.5);
g.wait_for_all();
my_output_type tmp;
CHECK_MESSAGE( (node2.try_get(tmp)), "Each descendant of the node should receive the message once");
CHECK_MESSAGE( (node3.try_get(tmp)), "Each descendant of the node should receive the message once");
}
//! Test broadcasting property
//! \brief \ref requirement
TEST_CASE("indexer_node broadcasts"){
test_broadcasting();
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("indexer_node superclasses"){
test_inheritance<int, int>();
}
//! Test discarding property
//! \brief \ref requirement
TEST_CASE("indexer_node discarding") {
graph g;
typedef indexer_node<int,float> my_indexer_type;
my_indexer_type o(g);
limiter_node< my_indexer_type::output_type > rejecter( g,0);
make_edge( o, rejecter );
input_port<0>(o).try_put(6);
input_port<1>(o).try_put(1.5);
my_indexer_type::output_type tmp;
CHECK_MESSAGE((o.try_get(tmp) == false), "Value should be discarded after rejection");
g.wait_for_all();
}
//! Test indexer body
//! \brief \ref requirement
TEST_CASE("indexer_node body") {
graph g;
function_node<int,int> f1( g, unlimited,
[](const int &i) { return 2*i; } );
function_node<float,float> f2( g, unlimited,
[](const float &f) { return f/2; } );
typedef indexer_node<int,float> my_indexer_type;
my_indexer_type o(g);
function_node< my_indexer_type::output_type >
f3( g, unlimited,
[]( const my_indexer_type::output_type &v ) {
if (v.tag() == 0) {
CHECK_MESSAGE( (cast_to<int>(v) == 6), "Expected to receive 6" );
} else {
CHECK_MESSAGE( (cast_to<float>(v) == 1.5), "Expected to receive 1.5" );
}
}
);
make_edge( f1, input_port<0>(o) );
make_edge( f2, input_port<1>(o) );
make_edge( o, f3 );
f1.try_put( 3 );
f2.try_put( 3 );
g.wait_for_all();
}


@ -0,0 +1,204 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "conformance_flowgraph.h"
//! \file conformance_input_node.cpp
//! \brief Test for [flow_graph.input_node] specification
/*
TODO: implement missing conformance tests for input_node:
- [ ] The `copy_body' function copies the altered body (e.g. after its successful invocation); see the sketch after this comment block.
- [ ] Check that in `test_forwarding' the value passed is the actual one received.
- [ ] Improve CTAD test to assert result node type.
- [ ] Explicit test for copy constructor of the node.
- [ ] Check `Output' type indeed copy-constructed and copy-assigned while working with the node.
- [ ] Check node cannot have predecessors (Will ADL be of any help here?)
- [ ] Check the node is serial and its body never invoked concurrently.
- [ ] `try_get()' call testing: a call to body is made only when the internal buffer is empty.
*/
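// A minimal sketch for the `copy_body' item in the list above: after the node has run,
// `copy_body' is expected to return a copy of the altered body rather than of the body
// originally supplied. The helper type and function names below are made up for the
// illustration; the sketch is not registered as a TEST_CASE.
struct counting_input_body {
    std::size_t invocations = 0;
    int operator()(tbb::flow_control& fc) {
        ++invocations;
        if (invocations > 5) {
            fc.stop();
            return 0;
        }
        return static_cast<int>(invocations);
    }
};
inline void input_node_altered_body_sketch() {
    using namespace tbb::flow;
    graph g;
    input_node<int> node(g, counting_input_body{});
    queue_node<int> sink(g);
    make_edge(node, sink);
    node.activate();
    g.wait_for_all();
    counting_input_body b = copy_body<counting_input_body, input_node<int>>(node);
    CHECK_MESSAGE((b.invocations == 6), "copy_body should reflect the state of the body after its invocations");
}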
std::atomic<size_t> global_execute_count;
template<typename OutputType>
struct input_functor {
const size_t n;
input_functor( ) : n(10) { }
input_functor( const input_functor &f ) : n(f.n) { }
void operator=(const input_functor &f) { n = f.n; }
OutputType operator()( tbb::flow_control & fc ) {
++global_execute_count;
if(global_execute_count > n){
fc.stop();
return OutputType();
}
return OutputType(global_execute_count.load());
}
};
template<typename O>
struct CopyCounterBody{
size_t copy_count;
CopyCounterBody():
copy_count(0) {}
CopyCounterBody(const CopyCounterBody<O>& other):
copy_count(other.copy_count + 1) {}
CopyCounterBody& operator=(const CopyCounterBody<O>& other) {
copy_count = other.copy_count + 1; return *this;
}
O operator()(tbb::flow_control & fc){
fc.stop();
return O();
}
};
void test_input_body(){
tbb::flow::graph g;
input_functor<int> fun;
global_execute_count = 0;
tbb::flow::input_node<int> node1(g, fun);
test_push_receiver<int> node2(g);
tbb::flow::make_edge(node1, node2);
node1.activate();
g.wait_for_all();
CHECK_MESSAGE( (get_count(node2) == 10), "Descendant of the node needs to receive N messages");
CHECK_MESSAGE( (global_execute_count == 10 + 1), "Body of the node needs to be executed N + 1 times");
}
#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
void test_deduction_guides(){
tbb::flow::graph g;
input_functor<int> fun;
tbb::flow::input_node node1(g, fun);
}
#endif
void test_buffering(){
tbb::flow::graph g;
input_functor<int> fun;
global_execute_count = 0;
tbb::flow::input_node<int> source(g, fun);
tbb::flow::limiter_node<int> rejecter(g, 0);
tbb::flow::make_edge(source, rejecter);
source.activate();
g.wait_for_all();
int tmp = -1;
CHECK_MESSAGE( (source.try_get(tmp) == true), "try_get after rejection should succeed");
CHECK_MESSAGE( (tmp == 1), "try_get should return correct value");
}
void test_forwarding(){
tbb::flow::graph g;
input_functor<int> fun;
global_execute_count = 0;
tbb::flow::input_node<int> node1(g, fun);
test_push_receiver<int> node2(g);
test_push_receiver<int> node3(g);
tbb::flow::make_edge(node1, node2);
tbb::flow::make_edge(node1, node3);
node1.activate();
g.wait_for_all();
CHECK_MESSAGE( (get_count(node2) == 10), "Descendant of the node needs to receive N messages");
CHECK_MESSAGE( (get_count(node3) == 10), "Descendant of the node needs to receive N messages");
}
template<typename O>
void test_inheritance(){
using namespace tbb::flow;
CHECK_MESSAGE( (std::is_base_of<graph_node, input_node<O>>::value), "input_node should be derived from graph_node");
CHECK_MESSAGE( (std::is_base_of<sender<O>, input_node<O>>::value), "input_node should be derived from sender<Output>");
}
void test_copies(){
using namespace tbb::flow;
CopyCounterBody<int> b;
graph g;
input_node<int> fn(g, b);
CopyCounterBody<int> b2 = copy_body<CopyCounterBody<int>, input_node<int>>(fn);
CHECK_MESSAGE( (b.copy_count + 2 <= b2.copy_count), "copy_body and constructor should copy bodies");
}
//! Test body copying and copy_body logic
//! \brief \ref interface
TEST_CASE("input_node and body copying"){
test_copies();
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("input_node superclasses"){
test_inheritance<int>();
test_inheritance<void*>();
}
//! Test input_node forwarding
//! \brief \ref requirement
TEST_CASE("input_node forwarding"){
test_forwarding();
}
//! Test input_node buffering
//! \brief \ref requirement
TEST_CASE("input_node buffering"){
test_buffering();
}
//! Test calling input_node body
//! \brief \ref interface \ref requirement
TEST_CASE("input_node body") {
test_input_body();
}
//! Test deduction guides
//! \brief \ref interface \ref requirement
TEST_CASE("Deduction guides"){
#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
test_deduction_guides();
#endif
}


@ -0,0 +1,189 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_join_node.cpp
//! \brief Test for [flow_graph.join_node] specification
/*
TODO: implement missing conformance tests for join_node:
- [ ] Check that `OutputTuple' is an instantiation of a tuple.
- [ ] The copy constructor and copy assignment are called for each type within the `OutputTuple'.
- [ ] Check all possible policies of the node: `reserving', `key_matching', `queueing',
`tag_matching'. Check the semantics the node has with each policy separately (a sketch
for the `reserving' policy follows this comment block).
- [ ] Check that corresponding methods are invoked in specified `KHash' type.
- [ ] Improve test for constructors, including their availability based on used Policy for the
node.
- [ ] Unify code style in the test by extracting the implementation from the `TEST_CASE' scope
into separate functions.
- [ ] Check that corresponding methods mentioned in the requirements are called for `Bi' types.
- [ ] Explicitly check that `input_ports_type' is defined, accessible and is a tuple of
corresponding to `OutputTuple' receivers.
- [ ] Explicitly check the method `join_node::input_ports()' exists, is accessible and it returns
a reference to the `input_ports_type' type.
- [ ] Implement `test_buffering' (for node policy).
- [ ] Check `try_get()' copies the generated tuple into passed argument and returns `true'. If
node is empty returns `false'.
- [ ] Check `tag_value' is defined and has properties specified.
- [ ] Add test for CTAD.
*/
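// A minimal sketch for the `reserving' part of the policy item in the list above: a
// reserving join_node should consume from its predecessors only when every input port
// can reserve an item. Illustrative draft only (the function name is made up here); it
// is not registered as a TEST_CASE.
inline void join_node_reserving_sketch() {
    tbb::flow::graph g;
    tbb::flow::buffer_node<int> left(g);
    tbb::flow::buffer_node<int> right(g);
    tbb::flow::join_node<std::tuple<int, int>, tbb::flow::reserving> j(g);
    tbb::flow::queue_node<std::tuple<int, int>> sink(g);
    tbb::flow::make_edge(left, tbb::flow::input_port<0>(j));
    tbb::flow::make_edge(right, tbb::flow::input_port<1>(j));
    tbb::flow::make_edge(j, sink);
    left.try_put(1); // only one side has data yet
    g.wait_for_all();
    std::tuple<int, int> t;
    CHECK_MESSAGE((sink.try_get(t) == false), "No tuple should be produced until both ports can reserve");
    right.try_put(2); // now both predecessors can be reserved
    g.wait_for_all();
    CHECK_MESSAGE((sink.try_get(t) == true), "A tuple should be produced once both ports can reserve");
    CHECK_MESSAGE((t == std::make_tuple(1, 2)), "The joined tuple should contain the values from both predecessors");
}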
using namespace tbb::flow;
using namespace std;
template<typename T>
void test_inheritance(){
CHECK_MESSAGE( (std::is_base_of<graph_node, join_node<std::tuple<T, T>>>::value), "join_node should be derived from graph_node");
CHECK_MESSAGE( (std::is_base_of<sender<std::tuple<T, T>>, join_node<std::tuple<T, T>>>::value), "join_node should be derived from sender<OutputTuple>");
}
void test_copies(){
using namespace tbb::flow;
graph g;
join_node<std::tuple<int, int>> n(g);
join_node<std::tuple<int, int>> n2(n);
join_node<std::tuple<int, int>, tbb::flow::reserving> nr(g);
join_node<std::tuple<int, int>, tbb::flow::reserving> nr2(nr);
}
void test_forwarding(){
tbb::flow::graph g;
join_node<std::tuple<int, int>> node1(g);
using output_t = join_node<std::tuple<int, int>>::output_type;
test_push_receiver<output_t> node2(g);
test_push_receiver<output_t> node3(g);
tbb::flow::make_edge(node1, node2);
tbb::flow::make_edge(node1, node3);
input_port<0>(node1).try_put(1);
input_port<1>(node1).try_put(1);
g.wait_for_all();
CHECK_MESSAGE( (get_count(node2) == 1), "Descendant of the node must receive one message.");
CHECK_MESSAGE( (get_count(node3) == 1), "Descendant of the node must receive one message.");
}
//! Test broadcast
//! \brief \ref interface
TEST_CASE("join_node broadcast") {
test_forwarding();
}
//! Test copy constructor
//! \brief \ref interface
TEST_CASE("join_node copy constructor") {
test_copies();
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("join_node inheritance"){
test_inheritance<int>();
}
//! Test join_node behavior
//! \brief \ref requirement
TEST_CASE("join_node") {
graph g;
function_node<int,int>
f1( g, unlimited, [](const int &i) { return 2*i; } );
function_node<float,float>
f2( g, unlimited, [](const float &f) { return f/2; } );
join_node< std::tuple<int,float> > j(g);
function_node< std::tuple<int,float> >
f3( g, unlimited,
[]( const std::tuple<int,float> &t ) {
CHECK_MESSAGE( (std::get<0>(t) == 6), "Expected to receive 6" );
CHECK_MESSAGE( (std::get<1>(t) == 1.5), "Expected to receive 1.5" );
} );
make_edge( f1, input_port<0>( j ) );
make_edge( f2, input_port<1>( j ) );
make_edge( j, f3 );
f1.try_put( 3 );
f2.try_put( 3 );
g.wait_for_all( );
}
//! Test removal of an edge to join_node
//! \brief \ref requirement
TEST_CASE("remove edge to join_node"){
graph g;
continue_node<int> c(g, [](const continue_msg&){ return 1; });
join_node<tuple<int> > jn(g);
queue_node<tuple<int> > q(g);
make_edge(jn, q);
make_edge(c, jn);
c.try_put(continue_msg());
g.wait_for_all();
tuple<int> tmp = tuple<int>(0);
CHECK_MESSAGE( (q.try_get(tmp)== true), "Message should pass when edge exists");
CHECK_MESSAGE( (tmp == tuple<int>(1) ), "Message should pass when edge exists");
CHECK_MESSAGE( (q.try_get(tmp)== false), "Message should not pass after item is consumed");
remove_edge(c, jn);
c.try_put(continue_msg());
g.wait_for_all();
tmp = tuple<int>(0);
CHECK_MESSAGE( (q.try_get(tmp)== false), "Message should not pass when edge doesn't exist");
CHECK_MESSAGE( (tmp == tuple<int>(0)), "Value should not be altered");
}
//! Test join_node key matching behavior
//! \brief \ref requirement
TEST_CASE("join_node key_matching"){
graph g;
auto body1 = [](const continue_msg &) -> int { return 1; };
auto body2 = [](const double &val) -> int { return int(val); };
join_node<std::tuple<continue_msg, double>, key_matching<int>> jn(g, body1, body2);
input_port<0>(jn).try_put(continue_msg());
input_port<1>(jn).try_put(1.3);
g.wait_for_all( );
tuple<continue_msg, double> tmp;
CHECK_MESSAGE( (jn.try_get(tmp) == true), "Mapped keys should match");
}


@ -0,0 +1,135 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_limiter_node.cpp
//! \brief Test for [flow_graph.limiter_node] specification
/*
TODO: implement missing conformance tests for limiter_node:
- [ ] The copy constructor and copy assignment are called for the node's type template parameter.
- [ ] Add use of `decrement' member into the `test_limiting' and see how positive and negative
values sent to `decrement's' port affect node's internal threshold.
- [ ] Add test checking the node gets value from the predecessors when threshold decreases enough.
- [ ] Add test that `continue_msg' decreases the threshold by one (see the sketch after this comment block).
*/
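// A minimal sketch for the `continue_msg' item in the list above. It assumes the
// `decrementer()' accessor described in the oneTBB specification (older TBB spells this
// port as the public `decrement' member, as the TODO wording suggests), so the exact
// spelling may need adjusting. The function name is made up here and the sketch is not
// registered as a TEST_CASE.
inline void limiter_node_decrement_sketch() {
    tbb::flow::graph g;
    tbb::flow::limiter_node<int> node(g, 1); // threshold of one message
    tbb::flow::queue_node<int> sink(g);
    tbb::flow::make_edge(node, sink);
    node.try_put(1);
    node.try_put(2); // over the threshold, expected to be dropped
    g.wait_for_all();
    int tmp = -1;
    CHECK_MESSAGE((sink.try_get(tmp) == true), "The first message should pass the limiter");
    CHECK_MESSAGE((sink.try_get(tmp) == false), "The second message should be stopped by the threshold");
    node.decrementer().try_put(tbb::flow::continue_msg()); // frees one slot
    node.try_put(3);
    g.wait_for_all();
    CHECK_MESSAGE((sink.try_get(tmp) == true), "After a decrement the limiter should accept a new message");
    CHECK_MESSAGE((tmp == 3), "The newly accepted message should be forwarded unchanged");
}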
template<typename T>
void test_inheritance(){
using namespace tbb::flow;
CHECK_MESSAGE( (std::is_base_of<graph_node, limiter_node<T>>::value), "limiter_node should be derived from graph_node");
CHECK_MESSAGE( (std::is_base_of<receiver<T>, limiter_node<T>>::value), "limiter_node should be derived from receiver<T>");
CHECK_MESSAGE( (std::is_base_of<sender<T>, limiter_node<T>>::value), "limiter_node should be derived from sender<T>");
}
void test_copies(){
using namespace tbb::flow;
graph g;
limiter_node<int> n(g, 5);
limiter_node<int> n2(n);
}
void test_buffering(){
tbb::flow::graph g;
tbb::flow::limiter_node<int> node(g, 5);
tbb::flow::limiter_node<int> rejecter(g, 0);
tbb::flow::make_edge(node, rejecter);
node.try_put(1);
g.wait_for_all();
int tmp = -1;
CHECK_MESSAGE( (node.try_get(tmp) == false), "try_get after rejection should not succeed");
CHECK_MESSAGE( (tmp == -1), "try_get after rejection should not set value");
}
void test_forwarding(){
tbb::flow::graph g;
tbb::flow::limiter_node<int> node1(g, 5);
test_push_receiver<int> node2(g);
test_push_receiver<int> node3(g);
tbb::flow::make_edge(node1, node2);
tbb::flow::make_edge(node1, node3);
node1.try_put(1);
g.wait_for_all();
CHECK_MESSAGE( (get_count(node2) == 1), "Descendant of the node must receive one message.");
CHECK_MESSAGE( (get_count(node3) == 1), "Descendant of the node must receive one message.");
}
void test_limiting(){
tbb::flow::graph g;
tbb::flow::limiter_node<int> node1(g, 5);
test_push_receiver<int> node2(g);
tbb::flow::make_edge(node1, node2);
for(int i = 0; i < 10; ++i)
node1.try_put(1);
g.wait_for_all();
CHECK_MESSAGE( (get_count(node2) == 5), "Descendant of the node must receive only the limited number of messages");
}
//! Test limiter_node limiting
//! \brief \ref requirement
TEST_CASE("limiter_node limiting"){
test_limiting();
}
//! Test limiter_node broadcast
//! \brief \ref requirement
TEST_CASE("limiter_node broadcast"){
test_forwarding();
}
//! Test limiter_node buffering
//! \brief \ref requirement
TEST_CASE("limiter_node buffering"){
test_buffering();
}
//! Test copy constructor
//! \brief \ref interface
TEST_CASE("limiter_node copy constructor"){
test_copies();
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("limiter_node superclasses"){
test_inheritance<int>();
test_inheritance<void*>();
}


@ -0,0 +1,325 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_multifunction_node.cpp
//! \brief Test for [flow_graph.function_node] specification
/*
TODO: implement missing conformance tests for multifunction_node:
- [ ] Implement test_forwarding that checks messages are broadcast to all the successors connected
to the output port the message is being sent to. And check that the value passed is the
actual one received (see the sketch after this comment block).
- [ ] Explicit test for copy constructor of the node.
- [ ] Constructor with explicitly passed Policy parameter: `template<typename Body>
multifunction_node( graph &g, size_t concurrency, Body body, Policy(), node_priority_t priority = no_priority )'.
- [ ] Concurrency testing of the node: make a loop over possible concurrency levels. It is
important to test at least on five values: 1, tbb::flow::serial, `max_allowed_parallelism'
obtained from `tbb::global_control', `tbb::flow::unlimited', and, if `max allowed
parallelism' is > 2, use something in the middle of the [1, max_allowed_parallelism]
interval. Use `utils::ExactConcurrencyLevel' entity (extending it if necessary).
- [ ] make `test_rejecting' deterministic, i.e. avoid dependency on OS scheduling of the threads;
add check that `try_put()' returns `false'
- [ ] The `copy_body' function copies altered body (e.g. after successful `try_put()' call).
- [ ] `output_ports_type' is defined and accessible by the user.
- [ ] Explicit test on `mfn::output_ports()' method.
- [ ] The copy constructor and copy assignment are called for the node's input and output types.
- [ ] Add CTAD test.
*/
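// A minimal sketch for the `test_forwarding' item in the list above: every successor
// connected to the used output port should receive the message, and the received value
// should be the one that was sent. Illustrative draft only (the function name is made up
// here); it is not registered as a TEST_CASE.
inline void multifunction_forwarding_value_sketch() {
    using mf_node = tbb::flow::multifunction_node<int, std::tuple<int>>;
    tbb::flow::graph g;
    mf_node node(g, tbb::flow::unlimited,
                 [](const int& v, mf_node::output_ports_type& op) {
                     std::get<0>(op).try_put(v);
                 });
    tbb::flow::queue_node<int> sink1(g);
    tbb::flow::queue_node<int> sink2(g);
    tbb::flow::make_edge(tbb::flow::output_port<0>(node), sink1);
    tbb::flow::make_edge(tbb::flow::output_port<0>(node), sink2);
    node.try_put(42);
    g.wait_for_all();
    int tmp = -1;
    CHECK_MESSAGE((sink1.try_get(tmp) && tmp == 42), "The first successor should receive the sent value");
    tmp = -1;
    CHECK_MESSAGE((sink2.try_get(tmp) && tmp == 42), "The second successor should receive the sent value");
}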
template< typename OutputType >
struct mf_functor {
std::atomic<std::size_t>& local_execute_count;
mf_functor(std::atomic<std::size_t>& execute_count ) :
local_execute_count (execute_count)
{ }
mf_functor( const mf_functor &f ) : local_execute_count(f.local_execute_count) { }
void operator=(const mf_functor &f) { local_execute_count = std::size_t(f.local_execute_count); }
void operator()( const int& argument, tbb::flow::multifunction_node<int, std::tuple<int>>::output_ports_type &op ) {
++local_execute_count;
std::get<0>(op).try_put(argument);
}
};
template<typename I, typename O>
void test_inheritance(){
using namespace tbb::flow;
CHECK_MESSAGE( (std::is_base_of<graph_node, multifunction_node<I, O>>::value), "multifunction_node should be derived from graph_node");
CHECK_MESSAGE( (std::is_base_of<receiver<I>, multifunction_node<I, O>>::value), "multifunction_node should be derived from receiver<Input>");
}
void test_multifunc_body(){
tbb::flow::graph g;
std::atomic<size_t> local_count(0);
mf_functor<std::tuple<int>> fun(local_count);
tbb::flow::multifunction_node<int, std::tuple<int>, tbb::flow::rejecting> node1(g, tbb::flow::unlimited, fun);
const size_t n = 10;
for(size_t i = 0; i < n; ++i) {
CHECK_MESSAGE((node1.try_put(1) == true), "try_put needs to return true");
}
g.wait_for_all();
CHECK_MESSAGE( (local_count == n), "Body of the node needs to be executed N times");
}
template<typename I, typename O>
struct CopyCounterBody{
size_t copy_count;
CopyCounterBody():
copy_count(0) {}
CopyCounterBody(const CopyCounterBody<I, O>& other):
copy_count(other.copy_count + 1) {}
CopyCounterBody& operator=(const CopyCounterBody<I, O>& other)
{ copy_count = other.copy_count + 1; return *this;}
void operator()( const I& argument, tbb::flow::multifunction_node<int, std::tuple<int>>::output_ports_type &op ) {
std::get<0>(op).try_put(argument);
}
};
void test_copies(){
using namespace tbb::flow;
CopyCounterBody<int, std::tuple<int>> b;
graph g;
multifunction_node<int, std::tuple<int>> fn(g, unlimited, b);
CopyCounterBody<int, std::tuple<int>> b2 = copy_body<CopyCounterBody<int, std::tuple<int>>,
multifunction_node<int, std::tuple<int>>>(fn);
CHECK_MESSAGE( (b.copy_count + 2 <= b2.copy_count), "copy_body and constructor should copy bodies");
}
template< typename OutputType >
struct id_functor {
void operator()( const int& argument, tbb::flow::multifunction_node<int, std::tuple<int>>::output_ports_type &op ) {
std::get<0>(op).try_put(argument);
}
};
void test_forwarding(){
tbb::flow::graph g;
id_functor<int> fun;
tbb::flow::multifunction_node<int, std::tuple<int>> node1(g, tbb::flow::unlimited, fun);
test_push_receiver<int> node2(g);
test_push_receiver<int> node3(g);
tbb::flow::make_edge(node1, node2);
tbb::flow::make_edge(node1, node3);
node1.try_put(1);
g.wait_for_all();
CHECK_MESSAGE( (get_count(node3) == 1), "Descendant of the node must receive one message.");
CHECK_MESSAGE( (get_count(node2) == 1), "Descendant of the node must receive one message.");
}
void test_rejecting_buffering(){
tbb::flow::graph g;
id_functor<int> fun;
tbb::flow::multifunction_node<int, std::tuple<int>, tbb::flow::rejecting> node(g, tbb::flow::unlimited, fun);
tbb::flow::limiter_node<int> rejecter(g, 0);
tbb::flow::make_edge(node, rejecter);
node.try_put(1);
int tmp = -1;
CHECK_MESSAGE( (std::get<0>(node.output_ports()).try_get(tmp) == false), "try_get after rejection should not succeed");
CHECK_MESSAGE( (tmp == -1), "try_get after rejection should not alter the passed value");
g.wait_for_all();
}
void test_policy_ctors(){
using namespace tbb::flow;
graph g;
id_functor<int> fun;
multifunction_node<int, std::tuple<int>, lightweight> lw_node(g, tbb::flow::serial, fun);
multifunction_node<int, std::tuple<int>, queueing_lightweight> qlw_node(g, tbb::flow::serial, fun);
multifunction_node<int, std::tuple<int>, rejecting_lightweight> rlw_node(g, tbb::flow::serial, fun);
}
std::atomic<size_t> my_concurrency;
std::atomic<size_t> my_max_concurrency;
struct concurrency_functor {
void operator()( const int& argument, tbb::flow::multifunction_node<int, std::tuple<int>>::output_ports_type &op ) {
++my_concurrency;
size_t old_value = my_max_concurrency;
while(my_max_concurrency < my_concurrency &&
!my_max_concurrency.compare_exchange_weak(old_value, my_concurrency))
;
size_t ms = 1000;
std::chrono::milliseconds sleep_time( ms );
std::this_thread::sleep_for( sleep_time );
--my_concurrency;
std::get<0>(op).try_put(argument);
}
};
void test_node_concurrency(){
my_concurrency = 0;
my_max_concurrency = 0;
tbb::flow::graph g;
concurrency_functor counter;
tbb::flow::multifunction_node <int, std::tuple<int>> fnode(g, tbb::flow::serial, counter);
test_push_receiver<int> sink(g);
make_edge(std::get<0>(fnode.output_ports()), sink);
for(int i = 0; i < 10; ++i){
fnode.try_put(i);
}
g.wait_for_all();
CHECK_MESSAGE( ( my_max_concurrency.load() == 1), "Measured parallelism should not exceed the serial limit");
}
void test_priority(){
size_t concurrency_limit = 1;
tbb::global_control control(tbb::global_control::max_allowed_parallelism, concurrency_limit);
tbb::flow::graph g;
tbb::flow::continue_node<int> source(g,
[](tbb::flow::continue_msg){ return 1;});
source.try_put(tbb::flow::continue_msg());
first_functor<int>::first_id = -1;
first_functor<int> low_functor(1);
first_functor<int> high_functor(2);
tbb::flow::multifunction_node<int, std::tuple<int>> high(g, tbb::flow::unlimited, high_functor, tbb::flow::node_priority_t(1));
tbb::flow::multifunction_node<int, std::tuple<int>> low(g, tbb::flow::unlimited, low_functor);
make_edge(source, low);
make_edge(source, high);
g.wait_for_all();
CHECK_MESSAGE( (first_functor<int>::first_id == 2), "High priority node should execute first");
}
void test_rejecting(){
tbb::flow::graph g;
tbb::flow::multifunction_node <int, std::tuple<int>, tbb::flow::rejecting> fnode(g, tbb::flow::serial,
[&](const int& argument, tbb::flow::multifunction_node<int, std::tuple<int>>::output_ports_type &op ){
size_t ms = 50;
std::chrono::milliseconds sleep_time( ms );
std::this_thread::sleep_for( sleep_time );
std::get<0>(op).try_put(argument);
});
test_push_receiver<int> sink(g);
make_edge(std::get<0>(fnode.output_ports()), sink);
for(int i = 0; i < 10; ++i){
fnode.try_put(i);
}
g.wait_for_all();
CHECK_MESSAGE( (get_count(sink) == 1), "Messages should be rejected while the first is being processed");
}
//! Test multifunction_node with rejecting policy
//! \brief \ref interface
TEST_CASE("multifunction_node with rejecting policy"){
test_rejecting();
}
//! Test priorities
//! \brief \ref interface
TEST_CASE("multifunction_node priority"){
test_priority();
}
//! Test concurrency
//! \brief \ref interface
TEST_CASE("multifunction_node concurrency"){
test_node_concurrency();
}
//! Test constructors
//! \brief \ref interface
TEST_CASE("multifunction_node constructors"){
test_policy_ctors();
}
//! Test multifunction_node buffering
//! \brief \ref requirement
TEST_CASE("multifunction_node buffering"){
test_rejecting_buffering();
}
//! Test multifunction_node broadcasting
//! \brief \ref requirement
TEST_CASE("multifunction_node broadcast"){
test_forwarding();
}
//! Test body copying and copy_body logic
//! \brief \ref interface
TEST_CASE("multifunction_node constructors"){
test_copies();
}
//! Test calling function body
//! \brief \ref interface \ref requirement
TEST_CASE("multifunction_node body") {
test_multifunc_body();
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("multifunction_node superclasses"){
test_inheritance<int, std::tuple<int>>();
test_inheritance<void*, std::tuple<float>>();
}


@ -0,0 +1,143 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_overwrite_node.cpp
//! \brief Test for [flow_graph.overwrite_node] specification
/*
TODO: implement missing conformance tests for overwrite_node:
- [ ] The copy constructor and copy assignment are called for the node's type template parameter.
- [ ] Improve copy constructor test and general constructor tests.
- [ ] Write test checking the value is initially invalid.
- [ ] Write test checking that the gets from the node are non-destructive, but the first `try_get'
fails.
- [ ] Test that first `try_get' fails.
- [ ] Add test on `overwrite_node::is_valid()' method (see the sketch after this comment block).
- [ ] Add test on `overwrite_node::clear()' method (covered by the same sketch).
- [ ] Add test with reserving `join_node' as node's successor. Use example from the spec.
*/
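// A minimal sketch for the `is_valid()' and `clear()' items in the list above; it also
// touches the "first try_get fails" and "gets are non-destructive" items. Illustrative
// draft only (the function name is made up here); it is not registered as a TEST_CASE.
inline void overwrite_node_valid_clear_sketch() {
    tbb::flow::graph g;
    tbb::flow::overwrite_node<int> node(g);
    int tmp = -1;
    CHECK_MESSAGE((node.is_valid() == false), "A fresh overwrite_node should hold no valid value");
    CHECK_MESSAGE((node.try_get(tmp) == false), "The first try_get should fail while no value is stored");
    node.try_put(7);
    g.wait_for_all();
    CHECK_MESSAGE((node.is_valid() == true), "After a put the stored value should be valid");
    CHECK_MESSAGE((node.try_get(tmp) && tmp == 7), "Gets should return the stored value");
    CHECK_MESSAGE((node.try_get(tmp) && tmp == 7), "Gets should be non-destructive");
    node.clear();
    CHECK_MESSAGE((node.is_valid() == false), "clear() should invalidate the stored value");
    CHECK_MESSAGE((node.try_get(tmp) == false), "try_get should fail after clear()");
}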
template<typename T>
void test_inheritance(){
using namespace tbb::flow;
CHECK_MESSAGE( (std::is_base_of<graph_node, overwrite_node<T>>::value), "overwrite_node should be derived from graph_node");
CHECK_MESSAGE( (std::is_base_of<receiver<T>, overwrite_node<T>>::value), "overwrite_node should be derived from receiver<T>");
CHECK_MESSAGE( (std::is_base_of<sender<T>, overwrite_node<T>>::value), "overwrite_node should be derived from sender<T>");
}
void test_copies(){
using namespace tbb::flow;
graph g;
overwrite_node<int> fn(g);
overwrite_node<int> fn2(fn);
}
void test_buffering(){
tbb::flow::graph g;
tbb::flow::overwrite_node<int> node(g);
tbb::flow::limiter_node<int> rejecter(g, 0);
tbb::flow::make_edge(node, rejecter);
node.try_put(1);
g.wait_for_all();
int tmp = -1;
CHECK_MESSAGE( (node.try_get(tmp) == true), "try_get after rejection should succeed");
CHECK_MESSAGE( (tmp == 1), "try_get after rejection should set value");
}
void test_forwarding(){
tbb::flow::graph g;
tbb::flow::overwrite_node<int> node1(g);
test_push_receiver<int> node2(g);
test_push_receiver<int> node3(g);
tbb::flow::make_edge(node1, node2);
tbb::flow::make_edge(node1, node3);
node1.try_put(1);
g.wait_for_all();
CHECK_MESSAGE( (get_count(node2) == 1), "Descendant of the node must receive one message.");
CHECK_MESSAGE( (get_count(node3) == 1), "Descendant of the node must receive one message.");
}
void test_overwriting(){
tbb::flow::graph g;
tbb::flow::overwrite_node<int> node1(g);
int tmp = -1;
node1.try_put(1);
g.wait_for_all();
CHECK_MESSAGE( (node1.try_get(tmp) == true), "try_get should return the stored message");
CHECK_MESSAGE( (tmp == 1), "try_get should return the correct value");
node1.try_put(2);
g.wait_for_all();
CHECK_MESSAGE( (node1.try_get(tmp) == true), "try_get should return the newly stored message");
CHECK_MESSAGE( (tmp == 2), "try_get should return the overwritten value");
}
//! Test overwrite_node behavior
//! \brief \ref requirement
TEST_CASE("overwrite_node messages"){
test_overwriting();
}
//! Test overwrite_node broadcast
//! \brief \ref requirement
TEST_CASE("overwrite_node broadcast"){
test_forwarding();
}
//! Test overwrite_node buffering
//! \brief \ref requirement
TEST_CASE("overwrite_node buffering"){
test_buffering();
}
//! Test copy constructor
//! \brief \ref interface
TEST_CASE("overwrite_node copy constructor"){
test_copies();
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("overwrite_node superclasses"){
test_inheritance<int>();
test_inheritance<void*>();
}


@ -310,4 +310,3 @@ TEST_CASE("Testing parallel_for with partitioners") {
parallel_for(Range1(false, true), b, tbb::auto_partitioner());
parallel_for(Range6(false, true), b, tbb::auto_partitioner());
}


@ -47,6 +47,9 @@ struct correctness_test_case {
};
static void run_validate_and_reset(tbb::task_group_context* context_ptr) {
for (auto& elem : data_array)
elem.store(0, std::memory_order_relaxed);
parallel_invoke_call<TaskCount, functor>::perform(context_ptr);
for (std::size_t i = 0; i < TaskCount; i++) {
REQUIRE_MESSAGE(data_array[i] == 1, "Some task was executed more than once, or was not executed.");
@ -56,7 +59,7 @@ struct correctness_test_case {
};
template<std::size_t TaskCount>
std::atomic<std::size_t> correctness_test_case<TaskCount>::data_array[TaskCount]{};
std::atomic<std::size_t> correctness_test_case<TaskCount>::data_array[TaskCount];
void correctness_test(tbb::task_group_context* context_ptr = nullptr) {
for ( auto concurrency_level : utils::concurrency_range() ) {
@ -129,7 +132,7 @@ private:
};
template<std::size_t TaskCount>
std::uint64_t exception_handling_test_case<TaskCount>::exception_mask{};
std::uint64_t exception_handling_test_case<TaskCount>::exception_mask(0);
//! Testing exception hangling
//! \brief \ref requirement \ref error_guessing


@ -0,0 +1,142 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_priority_queue_node.cpp
//! \brief Test for [flow_graph.priority_queue_node] specification
/*
TODO: implement missing conformance tests for priority_queue_node:
- [ ] Explicit test that `size_type' is defined and accessible.
- [ ] The copy constructor and copy assignment are called for the node's type template parameter.
- [ ] Check `Compare' type requirements from [alg.sorting] ISO C++.
- [ ] Write tests for the constructors.
- [ ] Based on the reconsideration of the `try_put()' and `try_get()' methods, rethink the
testing of these methods.
- [ ] Improve `test_buffering' by checking that additional `try_get()' does not receive the same
value (see the sketch after this comment block).
*/
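// A minimal sketch for the `test_buffering' item in the list above: once the only
// buffered item has been taken, an additional try_get() should fail and should not hand
// out the same value again. Illustrative draft only (the function name is made up here);
// it is not registered as a TEST_CASE.
inline void priority_queue_single_consumption_sketch() {
    tbb::flow::graph g;
    tbb::flow::priority_queue_node<int> node(g);
    node.try_put(1);
    g.wait_for_all();
    int tmp = -1;
    CHECK_MESSAGE((node.try_get(tmp) == true), "The buffered value should be retrievable once");
    CHECK_MESSAGE((tmp == 1), "The retrieved value should be the buffered one");
    tmp = -1;
    CHECK_MESSAGE((node.try_get(tmp) == false), "An additional try_get should not return the consumed value");
    CHECK_MESSAGE((tmp == -1), "A failed try_get should leave its argument unchanged");
}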
template<typename T>
void test_inheritance(){
using namespace tbb::flow;
CHECK_MESSAGE( (std::is_base_of<graph_node, priority_queue_node<T>>::value), "priority_queue_node should be derived from graph_node");
CHECK_MESSAGE( (std::is_base_of<receiver<T>, priority_queue_node<T>>::value), "priority_queue_node should be derived from receiver<T>");
CHECK_MESSAGE( (std::is_base_of<sender<T>, priority_queue_node<T>>::value), "priority_queue_node should be derived from sender<T>");
}
void test_copies(){
using namespace tbb::flow;
graph g;
priority_queue_node<int> n(g);
priority_queue_node<int> n2(n);
}
void test_buffering(){
tbb::flow::graph g;
tbb::flow::priority_queue_node<int> node(g);
tbb::flow::limiter_node<int> rejecter(g, 0);
tbb::flow::make_edge(node, rejecter);
node.try_put(1);
g.wait_for_all();
int tmp = -1;
CHECK_MESSAGE( (node.try_get(tmp) == true), "try_get after rejection should succeed");
CHECK_MESSAGE( (tmp == 1), "try_get after rejection should set value");
}
void test_forwarding(){
tbb::flow::graph g;
tbb::flow::priority_queue_node<int> node1(g);
test_push_receiver<int> node2(g);
test_push_receiver<int> node3(g);
tbb::flow::make_edge(node1, node2);
tbb::flow::make_edge(node1, node3);
node1.try_put(1);
g.wait_for_all();
int c2 = get_count(node2), c3 = get_count(node3);
CHECK_MESSAGE( (c2 != c3 ), "Only one descendant of the node should receive the message");
CHECK_MESSAGE( (c2 + c3 == 1 ), "All messages need to be received");
}
void test_behavior(){
tbb::flow::graph g;
tbb::flow::priority_queue_node<int, std::greater<int>> node(g);
node.try_put(2);
node.try_put(3);
node.try_put(1);
g.wait_for_all();
int tmp = -1;
CHECK_MESSAGE( (node.try_get(tmp)), "Get should succeed");
CHECK_MESSAGE( (tmp == 1), "Values should get sorted");
CHECK_MESSAGE( (node.try_get(tmp)), "Get should succeed");
CHECK_MESSAGE( (tmp == 2), "Values should get sorted");
CHECK_MESSAGE( (node.try_get(tmp)), "Get should succeed");
CHECK_MESSAGE( (tmp == 3), "Values should get sorted");
}
//! Test priority_queue_node messages
//! \brief \ref requirement
TEST_CASE("priority_queue_node messages"){
test_behavior();
}
//! Test priority_queue_node single-push
//! \brief \ref requirement
TEST_CASE("priority_queue_node single-push"){
test_forwarding();
}
//! Test priority_queue_node buffering
//! \brief \ref requirement
TEST_CASE("priority_queue_node buffering"){
test_buffering();
}
//! Test copy constructor
//! \brief \ref interface
TEST_CASE("priority_queue_node copy constructor"){
test_copies();
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("priority_queue_node superclasses"){
test_inheritance<int>();
test_inheritance<void*>();
}


@ -0,0 +1,166 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_queue_node.cpp
//! \brief Test for [flow_graph.queue_node] specification
/*
TODO: implement missing conformance tests for queue_node:
- [ ] The copy constructor and copy assignment are called for the node's type template parameter.
- [ ] Improve `test_forwarding()'.
- [ ] Improve tests of the constructors.
- [ ] Improve `test_buffering' by checking that additional `try_get()' does not receive the same
value (see the sketch after this comment block).
- [ ] Based on the decision about the details for `try_put()' and `try_get()' write corresponding
tests.
*/
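// A minimal sketch for the `test_buffering' item in the list above: after the queue has
// been drained, an additional try_get() should fail and should not hand out an already
// consumed value again. Illustrative draft only (the function name is made up here); it
// is not registered as a TEST_CASE.
inline void queue_node_no_repeated_get_sketch() {
    tbb::flow::graph g;
    tbb::flow::queue_node<int> node(g);
    node.try_put(1);
    node.try_put(2);
    g.wait_for_all();
    int tmp = -1;
    CHECK_MESSAGE((node.try_get(tmp) && tmp == 1), "The first get should return the first queued value");
    tmp = -1;
    CHECK_MESSAGE((node.try_get(tmp) && tmp == 2), "The second get should return the second queued value");
    tmp = -1;
    CHECK_MESSAGE((node.try_get(tmp) == false), "A further try_get should fail once the queue is drained");
    CHECK_MESSAGE((tmp == -1), "A failed try_get should leave its argument unchanged");
}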
template<typename T>
void test_inheritance(){
using namespace tbb::flow;
CHECK_MESSAGE( (std::is_base_of<graph_node, queue_node<T>>::value), "queue_node should be derived from graph_node");
CHECK_MESSAGE( (std::is_base_of<receiver<T>, queue_node<T>>::value), "queue_node should be derived from receiver<T>");
CHECK_MESSAGE( (std::is_base_of<sender<T>, queue_node<T>>::value), "queue_node should be derived from sender<T>");
}
void test_copies(){
using namespace tbb::flow;
graph g;
queue_node<int> n(g);
queue_node<int> n2(n);
}
void test_buffering(){
tbb::flow::graph g;
tbb::flow::queue_node<int> node(g);
tbb::flow::limiter_node<int> rejecter(g, 0);
tbb::flow::make_edge(node, rejecter);
node.try_put(1);
g.wait_for_all();
int tmp = -1;
CHECK_MESSAGE( (node.try_get(tmp) == true), "try_get after rejection should succeed");
CHECK_MESSAGE( (tmp == 1), "try_get after rejection should set value");
}
void test_forwarding(){
tbb::flow::graph g;
tbb::flow::queue_node<int> node1(g);
test_push_receiver<int> node2(g);
test_push_receiver<int> node3(g);
tbb::flow::make_edge(node1, node2);
tbb::flow::make_edge(node1, node3);
node1.try_put(1);
g.wait_for_all();
int c2 = get_count(node2), c3 = get_count(node3);
CHECK_MESSAGE( (c2 != c3 ), "Only one descendant of the node should receive the message");
CHECK_MESSAGE( (c2 + c3 == 1 ), "All messages need to be received");
}
void test_queue_node(){
tbb::flow::graph g;
tbb::flow::queue_node<int> node(g);
node.try_put(1);
node.try_put(2);
g.wait_for_all();
int tmp = -1;
CHECK_MESSAGE( (node.try_get(tmp) == true), "try_get should succeed");
CHECK_MESSAGE( (tmp == 1), "try_get should set correct value");
tmp = -1;
CHECK_MESSAGE( (node.try_get(tmp) == true), "try_get should succeed");
CHECK_MESSAGE( (tmp == 2), "try_get should set correct value");
}
void test_double_reserve(){
tbb::flow::graph g;
tbb::flow::queue_node<int> node(g);
int tmp = -1;
node.try_reserve(tmp);
CHECK_MESSAGE((tmp == -1), "Should not be delivered");
node.try_reserve(tmp);
CHECK_MESSAGE((tmp == -1), "Should not be delivered");
g.reset();
node.try_reserve(tmp);
CHECK_MESSAGE((tmp == -1), "Should not be delivered");
node.try_reserve(tmp);
CHECK_MESSAGE((tmp == -1), "Should not be delivered");
}
//! Test multiple reserves
//! \brief \ref error_guessing
TEST_CASE("queue_node double reserve"){
test_double_reserve();
}
//! Test message logic
//! \brief \ref requirement
TEST_CASE("queue_node messages"){
test_queue_node();
}
//! Test single-push
//! \brief \ref requirement
TEST_CASE("queue_node buffering"){
test_forwarding();
}
//! Test buffering
//! \brief \ref requirement
TEST_CASE("queue_node buffering"){
test_buffering();
}
//! Test copy constructor
//! \brief \ref interface
TEST_CASE("queue_node copy constructor"){
test_copies();
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("queue_node superclasses"){
test_inheritance<int>();
test_inheritance<void*>();
}


@ -0,0 +1,179 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_sequencer_node.cpp
//! \brief Test for [flow_graph.sequencer_node] specification
/*
TODO: implement missing conformance tests for sequencer_node:
- [ ] The copy constructor and copy assignment are called for the node's type template parameter.
- [ ] Explicit test that `Sequencer' requirements are necessary.
- [ ] Write tests for the constructors.
- [ ] Add CTAD test.
- [ ] Improve `test_buffering' by checking that additional `try_get()' does not receive the same
value.
- [ ] Add explicit test on the example from the specification (see the sketch after this comment block).
*/
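// A minimal sketch for the specification-example item in the list above: a sequencer
// keyed on a message field should forward items strictly in sequence order even when
// they are put out of order. The helper type and function names are made up for the
// illustration; the sketch is not registered as a TEST_CASE.
struct sequenced_message {
    std::size_t seq_no;
    int payload;
};
inline void sequencer_node_ordering_sketch() {
    tbb::flow::graph g;
    tbb::flow::sequencer_node<sequenced_message> node(
        g, [](const sequenced_message& m) { return m.seq_no; });
    tbb::flow::queue_node<sequenced_message> sink(g);
    tbb::flow::make_edge(node, sink);
    node.try_put(sequenced_message{2, 30});
    node.try_put(sequenced_message{0, 10});
    node.try_put(sequenced_message{1, 20});
    g.wait_for_all();
    sequenced_message m{};
    for (std::size_t expected = 0; expected < 3; ++expected) {
        CHECK_MESSAGE((sink.try_get(m) == true), "Every message should eventually be forwarded");
        CHECK_MESSAGE((m.seq_no == expected), "Messages should be forwarded in sequence order");
    }
}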
template<typename T>
void test_inheritance(){
using namespace tbb::flow;
CHECK_MESSAGE( (std::is_base_of<graph_node, sequencer_node<T>>::value), "sequencer_node should be derived from graph_node");
CHECK_MESSAGE( (std::is_base_of<receiver<T>, sequencer_node<T>>::value), "sequencer_node should be derived from receiver<T>");
CHECK_MESSAGE( (std::is_base_of<sender<T>, sequencer_node<T>>::value), "sequencer_node should be derived from sender<T>");
}
template<typename T>
struct id_sequencer{
using input_type = T;
T operator()(T v){
return v;
}
};
void test_copies(){
using namespace tbb::flow;
graph g;
id_sequencer<int> sequencer;
sequencer_node<int> n(g, sequencer);
sequencer_node<int> n2(n);
}
void test_buffering(){
tbb::flow::graph g;
id_sequencer<int> sequencer;
tbb::flow::sequencer_node<int> node(g, sequencer);
tbb::flow::limiter_node<int> rejecter(g, 0);
tbb::flow::make_edge(node, rejecter);
node.try_put(1);
g.wait_for_all();
int tmp = -1;
CHECK_MESSAGE( (node.try_get(tmp) == false), "try_get after rejection should not succeed");
CHECK_MESSAGE( (tmp == -1), "try_get after rejection should not set value");
}
#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
void test_deduction_guides(){
// tbb::flow::graph g;
// id_sequencer<int> sequ;
// tbb::flow::sequencer_node node1(g, sequ);
}
#endif
void test_forwarding(){
tbb::flow::graph g;
id_sequencer<int> sequencer;
tbb::flow::sequencer_node<int> node1(g, sequencer);
test_push_receiver<int> node2(g);
test_push_receiver<int> node3(g);
tbb::flow::make_edge(node1, node2);
tbb::flow::make_edge(node1, node3);
node1.try_put(0);
g.wait_for_all();
int c2 = get_count(node2), c3 = get_count(node3);
CHECK_MESSAGE( (c2 != c3 ), "Only one descendant of the node should receive the message");
CHECK_MESSAGE( (c2 + c3 == 1 ), "Messages need to be received");
}
void test_sequencer(){
tbb::flow::graph g;
id_sequencer<int> sequencer;
tbb::flow::sequencer_node<int> node(g, sequencer);
node.try_put(1);
node.try_put(0);
node.try_put(1);
g.wait_for_all();
int tmp = -1;
CHECK_MESSAGE((node.try_get(tmp) == true), "Getting from sequencer should succeed");
CHECK_MESSAGE((tmp == 0), "Received value should be correct");
tmp = -1;
CHECK_MESSAGE((node.try_get(tmp) == true), "Getting from sequencer should succeed");
CHECK_MESSAGE((tmp == 1), "Received value should be correct");
tmp = -1;
CHECK_MESSAGE((node.try_get(tmp) == false), "Getting from sequencer should not succeed");
}
//! Test sequencer_node messages
//! \brief \ref requirement
TEST_CASE("sequencer_node messages"){
test_sequencer();
}
//! Test sequencer_node broadcast
//! \brief \ref requirement
TEST_CASE("sequencer_node broadcast"){
test_forwarding();
}
//! Test deduction guides
//! \brief \ref interface \ref requirement
TEST_CASE("Deduction guides"){
#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
test_deduction_guides();
#endif
}
//! Test sequencer_node buffering
//! \brief \ref requirement
TEST_CASE("sequencer_node buffering"){
test_buffering();
}
//! Test copy constructor
//! \brief \ref interface
TEST_CASE("sequencer_node copy constructor"){
test_copies();
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("sequencer_node superclasses"){
test_inheritance<int>();
test_inheritance<void*>();
}


@@ -0,0 +1,152 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_split_node.cpp
//! \brief Test for [flow_graph.split_node] specification
/*
TODO: implement missing conformance tests for split_node:
- [ ] Check that copy constructor and copy assignment is called for each type the tuple stores.
- [ ] Rewrite `test_forwarding' to check broadcast semantics of the node.
- [ ] Improve test for constructors.
- [ ] Unify code style in the test by extracting the implementation from the `TEST_CASE' scope
into separate functions.
- [ ] Rename discarding test to `test_buffering' and add checking that the value does not change
in the `try_get()' method of the output ports of the node.
- [ ] Add checking of the unlimited concurrency.
- [ ] Check that `try_put()' always returns `true'.
- [ ] Explicitly check that `output_ports_type' is defined and accessible (see the sketch below).
- [ ] Explicitly check that the method `split_node::output_ports()' exists, is accessible and
returns a reference to the `output_ports_type' type.
*/
using namespace tbb::flow;
using namespace std;
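// The sketch below (an illustrative assumption, not registered as a TEST_CASE) outlines the
// missing checks on `output_ports_type' and `output_ports()' from the TODO list above. The
// name `test_output_ports_sketch' is invented for this sketch.
void test_output_ports_sketch() {
    graph g;
    using split_type = split_node<std::tuple<int, int>>;
    split_type node(g);

    // `output_ports_type' must be a defined, accessible type and `output_ports()' must be an
    // accessible method returning a reference to it.
    split_type::output_ports_type& ports = node.output_ports();

    // `output_port<N>(node)' is specified as equivalent to `std::get<N>(node.output_ports())'.
    CHECK_MESSAGE((&output_port<0>(node) == &std::get<0>(ports)),
                  "output_port<0>() should refer to the first element of output_ports()");
}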
template<typename T>
void test_inheritance(){
CHECK_MESSAGE( (std::is_base_of<graph_node, split_node<std::tuple<T,T>>>::value), "split_node should be derived from graph_node");
CHECK_MESSAGE( (std::is_base_of<receiver<std::tuple<T,T>>, split_node<std::tuple<T,T>>>::value), "split_node should be derived from receiver<std::tuple<T,T>>");
}
void test_split(){
graph g;
queue_node<int> first_queue(g);
queue_node<int> second_queue(g);
split_node< std::tuple<int,int> > my_split_node(g);
make_edge(output_port<0>(my_split_node), first_queue);
make_edge(output_port<1>(my_split_node), second_queue);
tuple<int, int> my_tuple(0, 1);
my_split_node.try_put(my_tuple);
g.wait_for_all();
int tmp = -1;
CHECK_MESSAGE((first_queue.try_get(tmp) == true), "Getting from target queue should succeed");
CHECK_MESSAGE((tmp == 0), "Received value should be correct");
tmp = -1;
CHECK_MESSAGE((second_queue.try_get(tmp) == true), "Getting from target queue should succeed");
CHECK_MESSAGE((tmp == 1), "Received value should be correct");
}
void test_copies(){
using namespace tbb::flow;
graph g;
split_node<std::tuple<int, int>> n(g);
split_node<std::tuple<int, int>> n2(n);
}
void test_forwarding(){
tbb::flow::graph g;
tbb::flow::split_node<std::tuple<int, int>> node1(g);
test_push_receiver<int> node2(g);
test_push_receiver<int> node3(g);
tbb::flow::make_edge(output_port<0>(node1), node2);
tbb::flow::make_edge(output_port<1>(node1), node3);
tuple<int, int> my_tuple(0, 1);
node1.try_put(my_tuple);
g.wait_for_all();
CHECK_MESSAGE( (get_count(node2) == 1), "Descendant of the node must receive one message.");
CHECK_MESSAGE( (get_count(node3) == 1), "Descendant of the node must receive one message.");
}
//! Test broadcast
//! \brief \ref interface
TEST_CASE("split_node broadcast") {
test_forwarding();
}
//! Test discarding property
//! \brief \ref requirement
TEST_CASE("split_node discarding") {
graph g;
split_node< std::tuple<int,int> > my_split_node(g);
limiter_node< int > rejecter1( g,0);
limiter_node< int > rejecter2( g,0);
make_edge(output_port<0>(my_split_node), rejecter2);
make_edge(output_port<1>(my_split_node), rejecter1);
tuple<int, int> my_tuple(0, 1);
my_split_node.try_put(my_tuple);
g.wait_for_all();
int tmp = -1;
CHECK_MESSAGE((output_port<0>(my_split_node).try_get(tmp) == false), "Value should be discarded after rejection");
CHECK_MESSAGE((output_port<1>(my_split_node).try_get(tmp) == false), "Value should be discarded after rejection");
}
//! Test copy constructor
//! \brief \ref interface
TEST_CASE("split_node copy constructor") {
test_copies();
}
//! Test split_node messages
//! \brief \ref interface \ref requirement
TEST_CASE("split_node messages") {
test_split();
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("split_node superclasses") {
test_inheritance<int>();
}


@@ -0,0 +1,143 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "tbb/flow_graph.h"
#include "tbb/task_arena.h"
#include "tbb/global_control.h"
#include "conformance_flowgraph.h"
//! \file conformance_write_once_node.cpp
//! \brief Test for [flow_graph.write_once_node] specification
/*
TODO: implement missing conformance tests for write_once_node:
- [ ] The copy constructor and copy assignment are called for the node's type template parameter.
- [ ] Improve copy constructor and general constructor tests.
- [ ] Write test checking the value is initially invalid.
- [ ] Write test checking that the gets from the node are non-destructive, but the first `try_get'
fails.
- [ ] Add test on `write_once_node::is_valid()' method.
- [ ] Add test on `write_once_node::clear()' method (a sketch covering both methods follows this comment block).
- [ ] Add test with reserving `join_node' as node's successor. Use example from the spec.
*/
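// The sketch below (an illustrative assumption, not registered as a TEST_CASE) outlines the
// missing checks on `is_valid()' and `clear()' from the TODO list above: the node starts
// without a valid value, becomes valid after the first successful put, and `clear()'
// invalidates the stored value again. The name `test_is_valid_and_clear_sketch' is invented.
void test_is_valid_and_clear_sketch() {
    tbb::flow::graph g;
    tbb::flow::write_once_node<int> node(g);

    CHECK_MESSAGE((node.is_valid() == false), "Freshly constructed node should not hold a valid value");

    node.try_put(42);
    g.wait_for_all();
    CHECK_MESSAGE((node.is_valid() == true), "Node should hold a valid value after the first put");

    node.clear();
    CHECK_MESSAGE((node.is_valid() == false), "clear() should invalidate the stored value");
    CHECK_MESSAGE((node.try_put(7) == true), "A new value should be accepted after clear()");
    g.wait_for_all();
}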
template<typename T>
void test_inheritance(){
using namespace tbb::flow;
CHECK_MESSAGE( (std::is_base_of<graph_node, write_once_node<T>>::value), "write_once_node should be derived from graph_node");
CHECK_MESSAGE( (std::is_base_of<receiver<T>, write_once_node<T>>::value), "write_once_node should be derived from receiver<T>");
CHECK_MESSAGE( (std::is_base_of<sender<T>, write_once_node<T>>::value), "write_once_node should be derived from sender<T>");
}
void test_copies(){
using namespace tbb::flow;
graph g;
write_once_node<int> fn(g);
write_once_node<int> fn2(fn);
}
void test_buffering(){
tbb::flow::graph g;
tbb::flow::write_once_node<int> node(g);
tbb::flow::limiter_node<int> rejecter(g, 0);
tbb::flow::make_edge(node, rejecter);
node.try_put(1);
int tmp = -1;
CHECK_MESSAGE( (node.try_get(tmp) == true), "try_get after rejection should succeed");
CHECK_MESSAGE( (tmp == 1), "try_get after rejection should set value");
g.wait_for_all();
}
void test_forwarding(){
tbb::flow::graph g;
tbb::flow::write_once_node<int> node1(g);
test_push_receiver<int> node2(g);
test_push_receiver<int> node3(g);
tbb::flow::make_edge(node1, node2);
tbb::flow::make_edge(node1, node3);
node1.try_put(1);
g.wait_for_all();
CHECK_MESSAGE( (get_count(node2) == 1), "Descendant of the node must receive one message.");
CHECK_MESSAGE( (get_count(node3) == 1), "Descendant of the node must receive one message.");
}
void test_writing_once(){
tbb::flow::graph g;
tbb::flow::write_once_node<int> node1(g);
int tmp = -1;
node1.try_put(1);
CHECK_MESSAGE( (node1.try_get(tmp) == true), "try_get should succeed after the first put");
CHECK_MESSAGE( (tmp == 1), "Received value should be correct");
CHECK_MESSAGE( (node1.try_put(2) == false), "Putting again should not succeed");
CHECK_MESSAGE( (node1.try_get(tmp) == true), "try_get should still succeed");
CHECK_MESSAGE( (tmp == 1), "try_get should receive initial value");
g.wait_for_all();
}
//! Test write_once_node behavior
//! \brief \ref requirement
TEST_CASE("write_once_node messages"){
test_writing_once();
}
//! Test write_once_node broadcast
//! \brief \ref requirement
TEST_CASE("write_once_node broadcast"){
test_forwarding();
}
//! Test write_once_node buffering
//! \brief \ref requirement
TEST_CASE("write_once_node buffering"){
test_buffering();
}
//! Test copy constructor
//! \brief \ref interface
TEST_CASE("write_once_node copy constructor"){
test_copies();
}
//! Test inheritance relations
//! \brief \ref interface
TEST_CASE("write_once_node superclasses"){
test_inheritance<int>();
test_inheritance<void*>();
}


@@ -14,6 +14,12 @@
limitations under the License.
*/
#if _MSC_VER && !defined(__INTEL_COMPILER)
// Workaround for vs2015 and warning name was longer than the compiler limit (4096).
#pragma warning (push)
#pragma warning (disable: 4503)
#endif
#include <common/test.h>
#include <common/utils.h>
#include <common/range_based_for_support.h>
@@ -32,7 +38,6 @@
//! \file test_concurrent_hash_map.cpp
//! \brief Test for [containers.concurrent_hash_map containers.tbb_hash_compare] specification
void TestRangeBasedFor(){
using namespace range_based_for_support_tests;
@@ -594,3 +599,7 @@ TEST_CASE("swap with NotAlwaysEqualAllocator allocators"){
CHECK(map2.empty());
CHECK(map1 == map3);
}
#if _MSC_VER && !defined(__INTEL_COMPILER)
#pragma warning (pop)
#endif // warning 4503 is back


@@ -25,6 +25,7 @@
#include <tbb/concurrent_vector.h>
#include <tbb/tick_count.h>
#include <tbb/parallel_reduce.h>
#include <tbb/parallel_for.h>
#include <algorithm>
#include <cmath>
@@ -678,3 +679,21 @@ TEST_CASE("swap with NotAlwaysEqualAllocator allocators"){
CHECK(vec2.empty());
}
// The problem was that after allocating first_block,
// no write was made to the embedded table.
// Also, two threads could be in the table extension section at once.
// NOTE: If the implementation of the vector has an issue, this test will either hang
// or fail with the assertion in debug mode.
//! \brief \ref regression
TEST_CASE("Testing vector in a highly concurrent environment") {
for (std::size_t i = 0; i < 10000; ++i) {
tbb::concurrent_vector<int> test_vec;
tbb::parallel_for(tbb::blocked_range<std::size_t>(0, 10000), [&] (const tbb::blocked_range<std::size_t>&) {
test_vec.grow_by(1);
}, tbb::static_partitioner{});
REQUIRE(test_vec.size() == utils::get_platform_max_threads());
}
}


@@ -66,8 +66,6 @@ std::atomic<intptr_t> g_FedTasksCount{}; // number of tasks added by parallel_fo
std::atomic<intptr_t> g_OuterParCalls{}; // number of actual invocations of the outer construct executed.
context_specific_counter g_TGCCancelled{}; // Number of times a task sees its group cancelled at start
inline intptr_t Existed () { return INT_MAX; }
#include "common/exception_handling.h"
/********************************


@@ -51,8 +51,6 @@
#include <sstream>
#include <vector>
inline intptr_t Existed() { return INT_MAX; } // resolve Existed referenced by common/exception_handling.h
#include "common/exception_handling.h"
#include <stdexcept>

100
test/tbb/test_eh_thread.cpp Normal file

@@ -0,0 +1,100 @@
/*
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "tbb/parallel_for.h"
#include "tbb/global_control.h"
#include "common/test.h"
#include "common/utils.h"
#include <atomic>
#include <condition_variable>
#include <thread>
#include <vector>
//! \file test_eh_thread.cpp
//! \brief Test for [internal] functionality
// On Windows there is no real thread number limit beside available memory.
// Therefore, the test for thread limit is unreasonable.
#if TBB_USE_EXCEPTIONS && !_WIN32
static bool g_exception_caught = false;
static std::mutex m;
static std::condition_variable cv;
static std::atomic<bool> stop{ false };
static void* thread_routine(void*)
{
std::unique_lock<std::mutex> lock(m);
cv.wait(lock, [] { return stop == true; });
return nullptr;
}
class Thread {
pthread_t mHandle{};
bool mValid{};
public:
Thread() {
mValid = false;
pthread_attr_t attr;
// Limit the stack size not to consume all virtual memory on 32 bit platforms.
if (pthread_attr_init(&attr) == 0 && pthread_attr_setstacksize(&attr, 100*1024) == 0) {
mValid = pthread_create(&mHandle, &attr, thread_routine, /* arg = */ nullptr) == 0;
}
}
bool isValid() const { return mValid; }
void join() {
pthread_join(mHandle, nullptr);
}
};
//! Test for exception when too many threads
//! \brief \ref resource_usage
TEST_CASE("Too many threads") {
std::thread /* isolate test */ ([] {
std::vector<Thread> threads;
stop = false;
for (;;) {
Thread thread;
if (!thread.isValid()) {
break;
}
threads.push_back(thread);
}
g_exception_caught = false;
try {
// Initialize the library to create worker threads
tbb::parallel_for(0, 2, [](int) {});
} catch (const std::exception & e) {
g_exception_caught = true;
// Do not CHECK to avoid memory allocation (we can be out of memory)
if (e.what() == nullptr) {
FAIL("Exception does not have description");
}
}
// Do not CHECK to avoid memory allocation (we can be out of memory)
if (!g_exception_caught) {
FAIL("No exception was caught");
}
stop = true;
cv.notify_all();
for (auto& t : threads) {
t.join();
}
}).join();
}
#endif

Some files were not shown because too many files have changed in this diff.